From: "Alice Ferrazzi" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 19 Aug 2020 09:28:36 +0000 (UTC)
Message-ID: <1597829283.9059eacca2ff0577649ea045d1f50f8c63c4b7cb.alicef@gentoo>
commit: 9059eacca2ff0577649ea045d1f50f8c63c4b7cb
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 19 09:18:57 2020 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Aug 19 09:28:03 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9059eacc
Linux patch 5.4.59
Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
0000_README | 4 +
1058_linux-5.4.59.patch | 9156 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 9160 insertions(+)
diff --git a/0000_README b/0000_README
index 6dd92dc..78d0290 100644
--- a/0000_README
+++ b/0000_README
@@ -275,6 +275,10 @@ Patch: 1057_linux-5.4.58.patch
From: http://www.kernel.org
Desc: Linux 5.4.58
+Patch: 1058_linux-5.4.59.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.59
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1058_linux-5.4.59.patch b/1058_linux-5.4.59.patch
new file mode 100644
index 0000000..8a3a375
--- /dev/null
+++ b/1058_linux-5.4.59.patch
@@ -0,0 +1,9156 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
+index 680451695422..c3767d4d01a6 100644
+--- a/Documentation/ABI/testing/sysfs-bus-iio
++++ b/Documentation/ABI/testing/sysfs-bus-iio
+@@ -1566,7 +1566,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw
+ KernelVersion: 4.3
+ Contact: linux-iio@vger.kernel.org
+ Description:
+- Raw (unscaled no offset etc.) percentage reading of a substance.
++ Raw (unscaled no offset etc.) reading of a substance. Units
++ after application of scale and offset are percents.
+
+ What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw
+ What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw
+diff --git a/Makefile b/Makefile
+index 29948bc4a0d2..cc72b8472f24 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 58
++SUBLEVEL = 59
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
+index 42f3313e6988..9f507393c375 100644
+--- a/arch/arm/boot/dts/r8a7793-gose.dts
++++ b/arch/arm/boot/dts/r8a7793-gose.dts
+@@ -339,7 +339,7 @@
+ reg = <0x20>;
+ remote = <&vin1>;
+
+- port {
++ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+@@ -399,7 +399,7 @@
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ default-input = <0>;
+
+- port {
++ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi
+index 22466afd38a3..235994a4a2eb 100644
+--- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi
++++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus-v1.2.dtsi
+@@ -16,15 +16,27 @@
+ regulator-type = "voltage";
+ regulator-boot-on;
+ regulator-always-on;
+- regulator-min-microvolt = <1100000>;
+- regulator-max-microvolt = <1300000>;
++ regulator-min-microvolt = <1108475>;
++ regulator-max-microvolt = <1308475>;
+ regulator-ramp-delay = <50>; /* 4ms */
+ gpios = <&r_pio 0 1 GPIO_ACTIVE_HIGH>; /* PL1 */
+ gpios-states = <0x1>;
+- states = <1100000 0>, <1300000 1>;
++ states = <1108475 0>, <1308475 1>;
+ };
+ };
+
+ &cpu0 {
+ cpu-supply = <&reg_vdd_cpux>;
+ };
++
++&cpu1 {
++ cpu-supply = <&reg_vdd_cpux>;
++};
++
++&cpu2 {
++ cpu-supply = <&reg_vdd_cpux>;
++};
++
++&cpu3 {
++ cpu-supply = <&reg_vdd_cpux>;
++};
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index 71778bb0475b..a082f6e4f0f4 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -22,6 +22,19 @@
+ * A simple function epilogue looks like this:
+ * ldm sp, {fp, sp, pc}
+ *
++ * When compiled with clang, pc and sp are not pushed. A simple function
++ * prologue looks like this when built with clang:
++ *
++ * stmdb {..., fp, lr}
++ * add fp, sp, #x
++ * sub sp, sp, #y
++ *
++ * A simple function epilogue looks like this when built with clang:
++ *
++ * sub sp, fp, #x
++ * ldm {..., fp, pc}
++ *
++ *
+ * Note that with framepointer enabled, even the leaf functions have the same
+ * prologue and epilogue, therefore we can ignore the LR value in this case.
+ */
+@@ -34,6 +47,16 @@ int notrace unwind_frame(struct stackframe *frame)
+ low = frame->sp;
+ high = ALIGN(low, THREAD_SIZE);
+
++#ifdef CONFIG_CC_IS_CLANG
++ /* check current frame pointer is within bounds */
++ if (fp < low + 4 || fp > high - 4)
++ return -EINVAL;
++
++ frame->sp = frame->fp;
++ frame->fp = *(unsigned long *)(fp);
++ frame->pc = frame->lr;
++ frame->lr = *(unsigned long *)(fp + 4);
++#else
+ /* check current frame pointer is within bounds */
+ if (fp < low + 12 || fp > high - 4)
+ return -EINVAL;
+@@ -42,6 +65,7 @@ int notrace unwind_frame(struct stackframe *frame)
+ frame->fp = *(unsigned long *)(fp - 12);
+ frame->sp = *(unsigned long *)(fp - 8);
+ frame->pc = *(unsigned long *)(fp - 4);
++#endif
+
+ return 0;
+ }
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index 52665f30d236..6bc3000deb86 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -592,13 +592,13 @@ static void __init at91_pm_sram_init(void)
+ sram_pool = gen_pool_get(&pdev->dev, NULL);
+ if (!sram_pool) {
+ pr_warn("%s: sram pool unavailable!\n", __func__);
+- return;
++ goto out_put_device;
+ }
+
+ sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
+ if (!sram_base) {
+ pr_warn("%s: unable to alloc sram!\n", __func__);
+- return;
++ goto out_put_device;
+ }
+
+ sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
+@@ -606,12 +606,17 @@ static void __init at91_pm_sram_init(void)
+ at91_pm_suspend_in_sram_sz, false);
+ if (!at91_suspend_sram_fn) {
+ pr_warn("SRAM: Could not map\n");
+- return;
++ goto out_put_device;
+ }
+
+ /* Copy the pm suspend handler to SRAM */
+ at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
+ &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
++ return;
++
++out_put_device:
++ put_device(&pdev->dev);
++ return;
+ }
+
+ static bool __init at91_is_pm_mode_active(int pm_mode)
+diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
+index 9a681b421ae1..cd861c57d5ad 100644
+--- a/arch/arm/mach-exynos/mcpm-exynos.c
++++ b/arch/arm/mach-exynos/mcpm-exynos.c
+@@ -26,6 +26,7 @@
+ #define EXYNOS5420_USE_L2_COMMON_UP_STATE BIT(30)
+
+ static void __iomem *ns_sram_base_addr __ro_after_init;
++static bool secure_firmware __ro_after_init;
+
+ /*
+ * The common v7_exit_coherency_flush API could not be used because of the
+@@ -58,15 +59,16 @@ static void __iomem *ns_sram_base_addr __ro_after_init;
+ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
+ {
+ unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
++ bool state;
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
+ cluster >= EXYNOS5420_NR_CLUSTERS)
+ return -EINVAL;
+
+- if (!exynos_cpu_power_state(cpunr)) {
+- exynos_cpu_power_up(cpunr);
+-
++ state = exynos_cpu_power_state(cpunr);
++ exynos_cpu_power_up(cpunr);
++ if (!state && secure_firmware) {
+ /*
+ * This assumes the cluster number of the big cores(Cortex A15)
+ * is 0 and the Little cores(Cortex A7) is 1.
+@@ -258,6 +260,8 @@ static int __init exynos_mcpm_init(void)
+ return -ENOMEM;
+ }
+
++ secure_firmware = exynos_secure_firmware_available();
++
+ /*
+ * To increase the stability of KFC reset we need to program
+ * the PMU SPARE3 register
+diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c
+index 6ed887cf8dc9..365c0428b21b 100644
+--- a/arch/arm/mach-socfpga/pm.c
++++ b/arch/arm/mach-socfpga/pm.c
+@@ -49,14 +49,14 @@ static int socfpga_setup_ocram_self_refresh(void)
+ if (!ocram_pool) {
+ pr_warn("%s: ocram pool unavailable!\n", __func__);
+ ret = -ENODEV;
+- goto put_node;
++ goto put_device;
+ }
+
+ ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz);
+ if (!ocram_base) {
+ pr_warn("%s: unable to alloc ocram!\n", __func__);
+ ret = -ENOMEM;
+- goto put_node;
++ goto put_device;
+ }
+
+ ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
+@@ -67,7 +67,7 @@ static int socfpga_setup_ocram_self_refresh(void)
+ if (!suspend_ocram_base) {
+ pr_warn("%s: __arm_ioremap_exec failed!\n", __func__);
+ ret = -ENOMEM;
+- goto put_node;
++ goto put_device;
+ }
+
+ /* Copy the code that puts DDR in self refresh to ocram */
+@@ -81,6 +81,8 @@ static int socfpga_setup_ocram_self_refresh(void)
+ if (!socfpga_sdram_self_refresh_in_ocram)
+ ret = -EFAULT;
+
++put_device:
++ put_device(&pdev->dev);
+ put_node:
+ of_node_put(np);
+
+diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+index 080e0f56e108..61ee7b6a3159 100644
+--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
++++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+@@ -157,6 +157,7 @@
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-enable-ramp-delay = <125>;
++ regulator-always-on;
+ };
+
+ ldo8_reg: LDO8 {
+diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
+index e035cf195b19..8c4bfbaf3a80 100644
+--- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
++++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
+@@ -530,6 +530,17 @@
+ status = "ok";
+ compatible = "adi,adv7533";
+ reg = <0x39>;
++ adi,dsi-lanes = <4>;
++ ports {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ port@0 {
++ reg = <0>;
++ };
++ port@1 {
++ reg = <1>;
++ };
++ };
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+index c14205cd6bf5..3e47150c05ec 100644
+--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
++++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+@@ -516,7 +516,7 @@
+ reg = <0x39>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <1 2>;
+- pd-gpio = <&gpio0 4 0>;
++ pd-gpios = <&gpio0 4 0>;
+ adi,dsi-lanes = <4>;
+ #sound-dai-cells = <0>;
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+index 242aaea68804..1235830ffd0b 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+@@ -508,7 +508,7 @@
+ pins = "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ drive-strength = <8>;
+- bias-pull-none;
++ bias-disable;
+ };
+ };
+ cdc_pdm_lines_sus: pdm_lines_off {
+@@ -537,7 +537,7 @@
+ pins = "gpio113", "gpio114", "gpio115",
+ "gpio116";
+ drive-strength = <8>;
+- bias-pull-none;
++ bias-disable;
+ };
+ };
+
+@@ -565,7 +565,7 @@
+ pinconf {
+ pins = "gpio110";
+ drive-strength = <8>;
+- bias-pull-none;
++ bias-disable;
+ };
+ };
+
+@@ -591,7 +591,7 @@
+ pinconf {
+ pins = "gpio116";
+ drive-strength = <8>;
+- bias-pull-none;
++ bias-disable;
+ };
+ };
+ ext_mclk_tlmm_lines_sus: mclk_lines_off {
+@@ -619,7 +619,7 @@
+ pins = "gpio112", "gpio117", "gpio118",
+ "gpio119";
+ drive-strength = <8>;
+- bias-pull-none;
++ bias-disable;
+ };
+ };
+ ext_sec_tlmm_lines_sus: tlmm_lines_off {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
+index e17311e09082..216aafd90e7f 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
+@@ -156,7 +156,7 @@
+ pinctrl-0 = <&rgmii_pins>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 50000>;
+- snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>;
++ snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_LOW>;
+ tx_delay = <0x10>;
+ rx_delay = <0x10>;
+ status = "okay";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index 62ea288a1a70..45b86933c6ea 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -101,7 +101,7 @@
+
+ vcc5v0_host: vcc5v0-host-regulator {
+ compatible = "regulator-fixed";
+- gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>;
++ gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>;
+ enable-active-low;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+@@ -157,7 +157,7 @@
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+- snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
++ snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 50000>;
+ tx_delay = <0x10>;
+diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
+index 9bfa17015768..c432bfafe63e 100644
+--- a/arch/m68k/mac/iop.c
++++ b/arch/m68k/mac/iop.c
+@@ -183,7 +183,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8
+
+ static __inline__ void iop_stop(volatile struct mac_iop *iop)
+ {
+- iop->status_ctrl &= ~IOP_RUN;
++ iop->status_ctrl = IOP_AUTOINC;
+ }
+
+ static __inline__ void iop_start(volatile struct mac_iop *iop)
+@@ -191,14 +191,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop)
+ iop->status_ctrl = IOP_RUN | IOP_AUTOINC;
+ }
+
+-static __inline__ void iop_bypass(volatile struct mac_iop *iop)
+-{
+- iop->status_ctrl |= IOP_BYPASS;
+-}
+-
+ static __inline__ void iop_interrupt(volatile struct mac_iop *iop)
+ {
+- iop->status_ctrl |= IOP_IRQ;
++ iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC;
+ }
+
+ static int iop_alive(volatile struct mac_iop *iop)
+@@ -244,7 +239,6 @@ void __init iop_preinit(void)
+ } else {
+ iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA;
+ }
+- iop_base[IOP_NUM_SCC]->status_ctrl = 0x87;
+ iop_scc_present = 1;
+ } else {
+ iop_base[IOP_NUM_SCC] = NULL;
+@@ -256,7 +250,7 @@ void __init iop_preinit(void)
+ } else {
+ iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA;
+ }
+- iop_base[IOP_NUM_ISM]->status_ctrl = 0;
++ iop_stop(iop_base[IOP_NUM_ISM]);
+ iop_ism_present = 1;
+ } else {
+ iop_base[IOP_NUM_ISM] = NULL;
+@@ -416,7 +410,8 @@ static void iop_handle_send(uint iop_num, uint chan)
+ msg->status = IOP_MSGSTATUS_UNUSED;
+ msg = msg->next;
+ iop_send_queue[iop_num][chan] = msg;
+- if (msg) iop_do_send(msg);
++ if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE)
++ iop_do_send(msg);
+ }
+
+ /*
+@@ -490,16 +485,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata,
+
+ if (!(q = iop_send_queue[iop_num][chan])) {
+ iop_send_queue[iop_num][chan] = msg;
++ iop_do_send(msg);
+ } else {
+ while (q->next) q = q->next;
+ q->next = msg;
+ }
+
+- if (iop_readb(iop_base[iop_num],
+- IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) {
+- iop_do_send(msg);
+- }
+-
+ return 0;
+ }
+
+diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
+index cc88a08bc1f7..4017398519cf 100644
+--- a/arch/mips/cavium-octeon/octeon-usb.c
++++ b/arch/mips/cavium-octeon/octeon-usb.c
+@@ -518,6 +518,7 @@ static int __init dwc3_octeon_device_init(void)
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
++ put_device(&pdev->dev);
+ dev_err(&pdev->dev, "No memory resources\n");
+ return -ENXIO;
+ }
+@@ -529,8 +530,10 @@ static int __init dwc3_octeon_device_init(void)
+ * know the difference.
+ */
+ base = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(base))
++ if (IS_ERR(base)) {
++ put_device(&pdev->dev);
+ return PTR_ERR(base);
++ }
+
+ mutex_lock(&dwc3_octeon_clocks_mutex);
+ dwc3_octeon_clocks_start(&pdev->dev, (u64)base);
+diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
+index 6ce76b18186e..c4b1c6cf2660 100644
+--- a/arch/mips/pci/pci-xtalk-bridge.c
++++ b/arch/mips/pci/pci-xtalk-bridge.c
+@@ -539,6 +539,7 @@ err_free_resource:
+ pci_free_resource_list(&host->windows);
+ err_remove_domain:
+ irq_domain_remove(domain);
++ irq_domain_free_fwnode(fn);
+ return err;
+ }
+
+@@ -546,8 +547,10 @@ static int bridge_remove(struct platform_device *pdev)
+ {
+ struct pci_bus *bus = platform_get_drvdata(pdev);
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
++ struct fwnode_handle *fn = bc->domain->fwnode;
+
+ irq_domain_remove(bc->domain);
++ irq_domain_free_fwnode(fn);
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bus);
+ pci_remove_root_bus(bus);
+diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
+index dbaaca84f27f..640d46edf32e 100644
+--- a/arch/parisc/include/asm/barrier.h
++++ b/arch/parisc/include/asm/barrier.h
+@@ -26,6 +26,67 @@
+ #define __smp_rmb() mb()
+ #define __smp_wmb() mb()
+
++#define __smp_store_release(p, v) \
++do { \
++ typeof(p) __p = (p); \
++ union { typeof(*p) __val; char __c[1]; } __u = \
++ { .__val = (__force typeof(*p)) (v) }; \
++ compiletime_assert_atomic_type(*p); \
++ switch (sizeof(*p)) { \
++ case 1: \
++ asm volatile("stb,ma %0,0(%1)" \
++ : : "r"(*(__u8 *)__u.__c), "r"(__p) \
++ : "memory"); \
++ break; \
++ case 2: \
++ asm volatile("sth,ma %0,0(%1)" \
++ : : "r"(*(__u16 *)__u.__c), "r"(__p) \
++ : "memory"); \
++ break; \
++ case 4: \
++ asm volatile("stw,ma %0,0(%1)" \
++ : : "r"(*(__u32 *)__u.__c), "r"(__p) \
++ : "memory"); \
++ break; \
++ case 8: \
++ if (IS_ENABLED(CONFIG_64BIT)) \
++ asm volatile("std,ma %0,0(%1)" \
++ : : "r"(*(__u64 *)__u.__c), "r"(__p) \
++ : "memory"); \
++ break; \
++ } \
++} while (0)
++
++#define __smp_load_acquire(p) \
++({ \
++ union { typeof(*p) __val; char __c[1]; } __u; \
++ typeof(p) __p = (p); \
++ compiletime_assert_atomic_type(*p); \
++ switch (sizeof(*p)) { \
++ case 1: \
++ asm volatile("ldb,ma 0(%1),%0" \
++ : "=r"(*(__u8 *)__u.__c) : "r"(__p) \
++ : "memory"); \
++ break; \
++ case 2: \
++ asm volatile("ldh,ma 0(%1),%0" \
++ : "=r"(*(__u16 *)__u.__c) : "r"(__p) \
++ : "memory"); \
++ break; \
++ case 4: \
++ asm volatile("ldw,ma 0(%1),%0" \
++ : "=r"(*(__u32 *)__u.__c) : "r"(__p) \
++ : "memory"); \
++ break; \
++ case 8: \
++ if (IS_ENABLED(CONFIG_64BIT)) \
++ asm volatile("ldd,ma 0(%1),%0" \
++ : "=r"(*(__u64 *)__u.__c) : "r"(__p) \
++ : "memory"); \
++ break; \
++ } \
++ __u.__val; \
++})
+ #include <asm-generic/barrier.h>
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
+index 197d2247e4db..16aec9ba2580 100644
+--- a/arch/parisc/include/asm/spinlock.h
++++ b/arch/parisc/include/asm/spinlock.h
+@@ -37,12 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
+ volatile unsigned int *a;
+
+ a = __ldcw_align(x);
+-#ifdef CONFIG_SMP
+- (void) __ldcw(a);
+-#else
+- mb();
+-#endif
+- *a = 1;
++ /* Release with ordered store. */
++ __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+ }
+
+ static inline int arch_spin_trylock(arch_spinlock_t *x)
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index b96d74496977..873bf3434da9 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -454,7 +454,6 @@
+ nop
+ LDREG 0(\ptp),\pte
+ bb,<,n \pte,_PAGE_PRESENT_BIT,3f
+- LDCW 0(\tmp),\tmp1
+ b \fault
+ stw \spc,0(\tmp)
+ 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+@@ -464,23 +463,26 @@
+ 3:
+ .endm
+
+- /* Release pa_tlb_lock lock without reloading lock address. */
+- .macro tlb_unlock0 spc,tmp,tmp1
++ /* Release pa_tlb_lock lock without reloading lock address.
++ Note that the values in the register spc are limited to
++ NR_SPACE_IDS (262144). Thus, the stw instruction always
++ stores a nonzero value even when register spc is 64 bits.
++ We use an ordered store to ensure all prior accesses are
++ performed prior to releasing the lock. */
++ .macro tlb_unlock0 spc,tmp
+ #ifdef CONFIG_SMP
+ 98: or,COND(=) %r0,\spc,%r0
+- LDCW 0(\tmp),\tmp1
+- or,COND(=) %r0,\spc,%r0
+- stw \spc,0(\tmp)
++ stw,ma \spc,0(\tmp)
+ 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+ #endif
+ .endm
+
+ /* Release pa_tlb_lock lock. */
+- .macro tlb_unlock1 spc,tmp,tmp1
++ .macro tlb_unlock1 spc,tmp
+ #ifdef CONFIG_SMP
+ 98: load_pa_tlb_lock \tmp
+ 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+- tlb_unlock0 \spc,\tmp,\tmp1
++ tlb_unlock0 \spc,\tmp
+ #endif
+ .endm
+
+@@ -1163,7 +1165,7 @@ dtlb_miss_20w:
+
+ idtlbt pte,prot
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1189,7 +1191,7 @@ nadtlb_miss_20w:
+
+ idtlbt pte,prot
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1223,7 +1225,7 @@ dtlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1256,7 +1258,7 @@ nadtlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1285,7 +1287,7 @@ dtlb_miss_20:
+
+ idtlbt pte,prot
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1313,7 +1315,7 @@ nadtlb_miss_20:
+
+ idtlbt pte,prot
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1420,7 +1422,7 @@ itlb_miss_20w:
+
+ iitlbt pte,prot
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1444,7 +1446,7 @@ naitlb_miss_20w:
+
+ iitlbt pte,prot
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1478,7 +1480,7 @@ itlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1502,7 +1504,7 @@ naitlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1532,7 +1534,7 @@ itlb_miss_20:
+
+ iitlbt pte,prot
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1552,7 +1554,7 @@ naitlb_miss_20:
+
+ iitlbt pte,prot
+
+- tlb_unlock1 spc,t0,t1
++ tlb_unlock1 spc,t0
+ rfir
+ nop
+
+@@ -1582,7 +1584,7 @@ dbit_trap_20w:
+
+ idtlbt pte,prot
+
+- tlb_unlock0 spc,t0,t1
++ tlb_unlock0 spc,t0
+ rfir
+ nop
+ #else
+@@ -1608,7 +1610,7 @@ dbit_trap_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- tlb_unlock0 spc,t0,t1
++ tlb_unlock0 spc,t0
+ rfir
+ nop
+
+@@ -1628,7 +1630,7 @@ dbit_trap_20:
+
+ idtlbt pte,prot
+
+- tlb_unlock0 spc,t0,t1
++ tlb_unlock0 spc,t0
+ rfir
+ nop
+ #endif
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 97ac707c6bff..a37814cb66c7 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -640,11 +640,7 @@ cas_action:
+ sub,<> %r28, %r25, %r0
+ 2: stw %r24, 0(%r26)
+ /* Free lock */
+-#ifdef CONFIG_SMP
+-98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
+-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+-#endif
+- stw %r20, 0(%sr2,%r20)
++ stw,ma %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+ /* Clear thread register indicator */
+ stw %r0, 4(%sr2,%r20)
+@@ -658,11 +654,7 @@ cas_action:
+ 3:
+ /* Error occurred on load or store */
+ /* Free lock */
+-#ifdef CONFIG_SMP
+-98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
+-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+-#endif
+- stw %r20, 0(%sr2,%r20)
++ stw,ma %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+ stw %r0, 4(%sr2,%r20)
+ #endif
+@@ -863,11 +855,7 @@ cas2_action:
+
+ cas2_end:
+ /* Free lock */
+-#ifdef CONFIG_SMP
+-98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
+-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+-#endif
+- stw %r20, 0(%sr2,%r20)
++ stw,ma %r20, 0(%sr2,%r20)
+ /* Enable interrupts */
+ ssm PSW_SM_I, %r0
+ /* Return to userspace, set no error */
+@@ -877,11 +865,7 @@ cas2_end:
+ 22:
+ /* Error occurred on load or store */
+ /* Free lock */
+-#ifdef CONFIG_SMP
+-98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
+-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+-#endif
+- stw %r20, 0(%sr2,%r20)
++ stw,ma %r20, 0(%sr2,%r20)
+ ssm PSW_SM_I, %r0
+ ldo 1(%r0),%r28
+ b lws_exit
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index dfbd7f22eef5..8c69bd07ada6 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -119,7 +119,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
+ elf_util.c $(zlib-y) devtree.c stdlib.c \
+ oflib.c ofconsole.c cuboot.c
+
+-src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c
++src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c
+ src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c
+ ifndef CONFIG_PPC64_BOOT_WRAPPER
+ src-wlib-y += crtsavres.S
+diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
+index 9457863147f9..00179cd6bdd0 100644
+--- a/arch/powerpc/boot/serial.c
++++ b/arch/powerpc/boot/serial.c
+@@ -128,7 +128,7 @@ int serial_console_init(void)
+ dt_is_compatible(devp, "fsl,cpm2-smc-uart"))
+ rc = cpm_console_init(devp, &serial_cd);
+ #endif
+-#ifdef CONFIG_PPC_MPC52XX
++#ifdef CONFIG_PPC_MPC52xx
+ else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
+ rc = mpc5200_psc_console_init(devp, &serial_cd);
+ #endif
+diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
+index 7426d7a90e1e..7aba3c7ea25c 100644
+--- a/arch/powerpc/include/asm/perf_event.h
++++ b/arch/powerpc/include/asm/perf_event.h
+@@ -12,6 +12,8 @@
+
+ #ifdef CONFIG_PPC_PERF_CTRS
+ #include <asm/perf_event_server.h>
++#else
++static inline bool is_sier_available(void) { return false; }
+ #endif
+
+ #ifdef CONFIG_FSL_EMB_PERF_EVENT
+diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
+index ee3ada66deb5..c41220f4aad9 100644
+--- a/arch/powerpc/include/asm/ptrace.h
++++ b/arch/powerpc/include/asm/ptrace.h
+@@ -203,7 +203,7 @@ do { \
+ #endif /* __powerpc64__ */
+
+ #define arch_has_single_step() (1)
+-#ifndef CONFIG_BOOK3S_601
++#ifndef CONFIG_PPC_BOOK3S_601
+ #define arch_has_block_step() (true)
+ #else
+ #define arch_has_block_step() (false)
+diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
+index 3c1887351c71..bd227e0eab07 100644
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -368,8 +368,6 @@ extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
+ extern void rtas_progress(char *s, unsigned short hex);
+ extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
+ extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
+-extern int rtas_online_cpus_mask(cpumask_var_t cpus);
+-extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
+ extern int rtas_ibm_suspend_me(u64 handle);
+
+ struct rtc_time;
+diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
+index d2d2c4bd8435..6047402b0a4d 100644
+--- a/arch/powerpc/include/asm/timex.h
++++ b/arch/powerpc/include/asm/timex.h
+@@ -17,7 +17,7 @@ typedef unsigned long cycles_t;
+
+ static inline cycles_t get_cycles(void)
+ {
+- if (IS_ENABLED(CONFIG_BOOK3S_601))
++ if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
+ return 0;
+
+ return mftb();
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index c5fa251b8950..01210593d60c 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -842,96 +842,6 @@ static void rtas_percpu_suspend_me(void *info)
+ __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
+ }
+
+-enum rtas_cpu_state {
+- DOWN,
+- UP,
+-};
+-
+-#ifndef CONFIG_SMP
+-static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+- cpumask_var_t cpus)
+-{
+- if (!cpumask_empty(cpus)) {
+- cpumask_clear(cpus);
+- return -EINVAL;
+- } else
+- return 0;
+-}
+-#else
+-/* On return cpumask will be altered to indicate CPUs changed.
+- * CPUs with states changed will be set in the mask,
+- * CPUs with status unchanged will be unset in the mask. */
+-static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+- cpumask_var_t cpus)
+-{
+- int cpu;
+- int cpuret = 0;
+- int ret = 0;
+-
+- if (cpumask_empty(cpus))
+- return 0;
+-
+- for_each_cpu(cpu, cpus) {
+- struct device *dev = get_cpu_device(cpu);
+-
+- switch (state) {
+- case DOWN:
+- cpuret = device_offline(dev);
+- break;
+- case UP:
+- cpuret = device_online(dev);
+- break;
+- }
+- if (cpuret < 0) {
+- pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+- __func__,
+- ((state == UP) ? "up" : "down"),
+- cpu, cpuret);
+- if (!ret)
+- ret = cpuret;
+- if (state == UP) {
+- /* clear bits for unchanged cpus, return */
+- cpumask_shift_right(cpus, cpus, cpu);
+- cpumask_shift_left(cpus, cpus, cpu);
+- break;
+- } else {
+- /* clear bit for unchanged cpu, continue */
+- cpumask_clear_cpu(cpu, cpus);
+- }
+- }
+- cond_resched();
+- }
+-
+- return ret;
+-}
+-#endif
+-
+-int rtas_online_cpus_mask(cpumask_var_t cpus)
+-{
+- int ret;
+-
+- ret = rtas_cpu_state_change_mask(UP, cpus);
+-
+- if (ret) {
+- cpumask_var_t tmp_mask;
+-
+- if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
+- return ret;
+-
+- /* Use tmp_mask to preserve cpus mask from first failure */
+- cpumask_copy(tmp_mask, cpus);
+- rtas_offline_cpus_mask(tmp_mask);
+- free_cpumask_var(tmp_mask);
+- }
+-
+- return ret;
+-}
+-
+-int rtas_offline_cpus_mask(cpumask_var_t cpus)
+-{
+- return rtas_cpu_state_change_mask(DOWN, cpus);
+-}
+-
+ int rtas_ibm_suspend_me(u64 handle)
+ {
+ long state;
+@@ -939,8 +849,6 @@ int rtas_ibm_suspend_me(u64 handle)
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ struct rtas_suspend_me_data data;
+ DECLARE_COMPLETION_ONSTACK(done);
+- cpumask_var_t offline_mask;
+- int cpuret;
+
+ if (!rtas_service_present("ibm,suspend-me"))
+ return -ENOSYS;
+@@ -961,9 +869,6 @@ int rtas_ibm_suspend_me(u64 handle)
+ return -EIO;
+ }
+
+- if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
+- return -ENOMEM;
+-
+ atomic_set(&data.working, 0);
+ atomic_set(&data.done, 0);
+ atomic_set(&data.error, 0);
+@@ -972,24 +877,8 @@ int rtas_ibm_suspend_me(u64 handle)
+
+ lock_device_hotplug();
+
+- /* All present CPUs must be online */
+- cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+- cpuret = rtas_online_cpus_mask(offline_mask);
+- if (cpuret) {
+- pr_err("%s: Could not bring present CPUs online.\n", __func__);
+- atomic_set(&data.error, cpuret);
+- goto out;
+- }
+-
+ cpu_hotplug_disable();
+
+- /* Check if we raced with a CPU-Offline Operation */
+- if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) {
+- pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__);
+- atomic_set(&data.error, -EAGAIN);
+- goto out_hotplug_enable;
+- }
+-
+ /* Call function on all CPUs. One of us will make the
+ * rtas call
+ */
+@@ -1000,18 +889,11 @@ int rtas_ibm_suspend_me(u64 handle)
+ if (atomic_read(&data.error) != 0)
+ printk(KERN_ERR "Error doing global join\n");
+
+-out_hotplug_enable:
+- cpu_hotplug_enable();
+
+- /* Take down CPUs not online prior to suspend */
+- cpuret = rtas_offline_cpus_mask(offline_mask);
+- if (cpuret)
+- pr_warn("%s: Could not restore CPUs to offline state.\n",
+- __func__);
++ cpu_hotplug_enable();
+
+-out:
+ unlock_device_hotplug();
+- free_cpumask_var(offline_mask);
++
+ return atomic_read(&data.error);
+ }
+ #else /* CONFIG_PPC_PSERIES */
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index eae9ddaecbcf..efb1ba40274a 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -682,7 +682,7 @@ int vdso_getcpu_init(void)
+ node = cpu_to_node(cpu);
+ WARN_ON_ONCE(node > 0xffff);
+
+- val = (cpu & 0xfff) | ((node & 0xffff) << 16);
++ val = (cpu & 0xffff) | ((node & 0xffff) << 16);
+ mtspr(SPRN_SPRG_VDSO_WRITE, val);
+ get_paca()->sprg_vdso = val;
+
+diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
+index 66f307e873dc..432fd9fa8c3f 100644
+--- a/arch/powerpc/mm/book3s64/pkeys.c
++++ b/arch/powerpc/mm/book3s64/pkeys.c
+@@ -83,13 +83,17 @@ static int pkey_initialize(void)
+ scan_pkey_feature();
+
+ /*
+- * Let's assume 32 pkeys on P8 bare metal, if its not defined by device
+- * tree. We make this exception since skiboot forgot to expose this
+- * property on power8.
++ * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device
++ * tree. We make this exception since some version of skiboot forgot to
++ * expose this property on power8/9.
+ */
+- if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) &&
+- cpu_has_feature(CPU_FTRS_POWER8))
+- pkeys_total = 32;
++ if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) {
++ unsigned long pvr = mfspr(SPRN_PVR);
++
++ if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
++ PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
++ pkeys_total = 32;
++ }
+
+ /*
+ * Adjust the upper limit, based on the number of bits supported by
+diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
+index 0a24a5a185f0..f789693f61f4 100644
+--- a/arch/powerpc/platforms/pseries/suspend.c
++++ b/arch/powerpc/platforms/pseries/suspend.c
+@@ -132,15 +132,11 @@ static ssize_t store_hibernate(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
+- cpumask_var_t offline_mask;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+- if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
+- return -ENOMEM;
+-
+ stream_id = simple_strtoul(buf, NULL, 16);
+
+ do {
+@@ -150,32 +146,16 @@ static ssize_t store_hibernate(struct device *dev,
+ } while (rc == -EAGAIN);
+
+ if (!rc) {
+- /* All present CPUs must be online */
+- cpumask_andnot(offline_mask, cpu_present_mask,
+- cpu_online_mask);
+- rc = rtas_online_cpus_mask(offline_mask);
+- if (rc) {
+- pr_err("%s: Could not bring present CPUs online.\n",
+- __func__);
+- goto out;
+- }
+-
+ stop_topology_update();
+ rc = pm_suspend(PM_SUSPEND_MEM);
+ start_topology_update();
+-
+- /* Take down CPUs not online prior to suspend */
+- if (!rtas_offline_cpus_mask(offline_mask))
+- pr_warn("%s: Could not restore CPUs to offline "
+- "state.\n", __func__);
+ }
+
+ stream_id = 0;
+
+ if (!rc)
+ rc = count;
+-out:
+- free_cpumask_var(offline_mask);
++
+ return rc;
+ }
+
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 364e3a89c096..4fa7a562c6fc 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -2485,23 +2485,36 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
+ }
+ EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
+
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
++ unsigned long end, struct mm_walk *walk)
++{
++ struct vm_area_struct *vma = walk->vma;
++
++ split_huge_pmd(vma, pmd, addr);
++ return 0;
++}
++
++static const struct mm_walk_ops thp_split_walk_ops = {
++ .pmd_entry = thp_split_walk_pmd_entry,
++};
++
+ static inline void thp_split_mm(struct mm_struct *mm)
+ {
+-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct vm_area_struct *vma;
+- unsigned long addr;
+
+ for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+- for (addr = vma->vm_start;
+- addr < vma->vm_end;
+- addr += PAGE_SIZE)
+- follow_page(vma, addr, FOLL_SPLIT);
+ vma->vm_flags &= ~VM_HUGEPAGE;
+ vma->vm_flags |= VM_NOHUGEPAGE;
++ walk_page_vma(vma, &thp_split_walk_ops, NULL);
+ }
+ mm->def_flags |= VM_NOHUGEPAGE;
+-#endif
+ }
++#else
++static inline void thp_split_mm(struct mm_struct *mm)
++{
++}
++#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+ /*
+ * Remove all empty zero pages from the mapping for lazy refaulting
+diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+index 5f6a5af9c489..77043a82da51 100644
+--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
++++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+@@ -127,10 +127,6 @@ ddq_add_8:
+
+ /* generate a unique variable for ddq_add_x */
+
+-.macro setddq n
+- var_ddq_add = ddq_add_\n
+-.endm
+-
+ /* generate a unique variable for xmm register */
+ .macro setxdata n
+ var_xdata = %xmm\n
+@@ -140,9 +136,7 @@ ddq_add_8:
+
+ .macro club name, id
+ .altmacro
+- .if \name == DDQ_DATA
+- setddq %\id
+- .elseif \name == XDATA
++ .if \name == XDATA
+ setxdata %\id
+ .endif
+ .noaltmacro
+@@ -165,9 +159,8 @@ ddq_add_8:
+
+ .set i, 1
+ .rept (by - 1)
+- club DDQ_DATA, i
+ club XDATA, i
+- vpaddq var_ddq_add(%rip), xcounter, var_xdata
++ vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata
+ vptest ddq_low_msk(%rip), var_xdata
+ jnz 1f
+ vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata
+@@ -180,8 +173,7 @@ ddq_add_8:
+ vmovdqa 1*16(p_keys), xkeyA
+
+ vpxor xkey0, xdata0, xdata0
+- club DDQ_DATA, by
+- vpaddq var_ddq_add(%rip), xcounter, xcounter
++ vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter
+ vptest ddq_low_msk(%rip), xcounter
+ jnz 1f
+ vpaddq ddq_high_add_1(%rip), xcounter, xcounter
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index e40bdf024ba7..9afeb58c910e 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -266,7 +266,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
+ PSHUFB_XMM %xmm2, %xmm0
+ movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
+
+- PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
++ PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7
+ movdqu HashKey(%arg2), %xmm13
+
+ CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
+@@ -978,7 +978,7 @@ _initial_blocks_done\@:
+ * arg1, %arg3, %arg4 are used as pointers only, not modified
+ * %r11 is the data offset value
+ */
+-.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
++.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \
+ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+
+ movdqa \XMM1, \XMM5
+@@ -1186,7 +1186,7 @@ aes_loop_par_enc_done\@:
+ * arg1, %arg3, %arg4 are used as pointers only, not modified
+ * %r11 is the data offset value
+ */
+-.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
++.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \
+ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+
+ movdqa \XMM1, \XMM5
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 16699101fd2f..ea6d9da9b094 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2348,8 +2348,13 @@ static int mp_irqdomain_create(int ioapic)
+
+ static void ioapic_destroy_irqdomain(int idx)
+ {
++ struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg;
++ struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode;
++
+ if (ioapics[idx].irqdomain) {
+ irq_domain_remove(ioapics[idx].irqdomain);
++ if (!cfg->dev)
++ irq_domain_free_fwnode(fn);
+ ioapics[idx].irqdomain = NULL;
+ }
+ }
+diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
+index 1f30117b24ba..eb2d41c1816d 100644
+--- a/arch/x86/kernel/cpu/mce/inject.c
++++ b/arch/x86/kernel/cpu/mce/inject.c
+@@ -511,7 +511,7 @@ static void do_inject(void)
+ */
+ if (inj_type == DFR_INT_INJ) {
+ i_mce.status |= MCI_STATUS_DEFERRED;
+- i_mce.status |= (i_mce.status & ~MCI_STATUS_UC);
++ i_mce.status &= ~MCI_STATUS_UC;
+ }
+
+ /*
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index af64519b2695..da3cc3a10d63 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -316,7 +316,7 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
+ */
+ mutex_lock(&task->mm->context.lock);
+ ldt = task->mm->context.ldt;
+- if (unlikely(idx >= ldt->nr_entries))
++ if (unlikely(!ldt || idx >= ldt->nr_entries))
+ base = 0;
+ else
+ base = get_desc_base(ldt->entries + idx);
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 4d2bda812d9b..dcc6685d5bec 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -1377,7 +1377,7 @@ static void ioc_timer_fn(struct timer_list *timer)
+ * should have woken up in the last period and expire idle iocgs.
+ */
+ list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
+- if (!waitqueue_active(&iocg->waitq) && iocg->abs_vdebt &&
++ if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
+ !iocg_is_idle(iocg))
+ continue;
+
+diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
+index 728d752f7adc..85f799c9c25c 100644
+--- a/drivers/acpi/acpica/exprep.c
++++ b/drivers/acpi/acpica/exprep.c
+@@ -473,10 +473,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
+ (u8)access_byte_width;
+ }
+ }
+- /* An additional reference for the container */
+-
+- acpi_ut_add_reference(obj_desc->field.region_obj);
+-
+ ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+ "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
+ obj_desc->field.start_field_bit_offset,
+diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
+index c365faf4e6cd..4c0d4e434196 100644
+--- a/drivers/acpi/acpica/utdelete.c
++++ b/drivers/acpi/acpica/utdelete.c
+@@ -568,11 +568,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
+ next_object = object->buffer_field.buffer_obj;
+ break;
+
+- case ACPI_TYPE_LOCAL_REGION_FIELD:
+-
+- next_object = object->field.region_obj;
+- break;
+-
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+ next_object = object->bank_field.bank_obj;
+@@ -613,6 +608,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
+ }
+ break;
+
++ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ case ACPI_TYPE_REGION:
+ default:
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 565e35e69f24..bddbbf5b3dda 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -2325,6 +2325,8 @@ static void __exit loop_exit(void)
+
+ range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
+
++ mutex_lock(&loop_ctl_mutex);
++
+ idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
+ idr_destroy(&loop_index_idr);
+
+@@ -2332,6 +2334,8 @@ static void __exit loop_exit(void)
+ unregister_blkdev(LOOP_MAJOR, "loop");
+
+ misc_deregister(&loop_misc);
++
++ mutex_unlock(&loop_ctl_mutex);
+ }
+
+ module_init(loop_init);
+diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
+index 0f3a020703ab..4c7978cb1786 100644
+--- a/drivers/bluetooth/btmrvl_sdio.c
++++ b/drivers/bluetooth/btmrvl_sdio.c
+@@ -328,7 +328,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
+
+ static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
+ .helper = NULL,
+- .firmware = "mrvl/sd8977_uapsta.bin",
++ .firmware = "mrvl/sdsd8977_combo_v2.bin",
+ .reg = &btmrvl_reg_8977,
+ .support_pscan_win_report = true,
+ .sd_blksz_fw_dl = 256,
+@@ -346,7 +346,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
+
+ static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
+ .helper = NULL,
+- .firmware = "mrvl/sd8997_uapsta.bin",
++ .firmware = "mrvl/sdsd8997_combo_v4.bin",
+ .reg = &btmrvl_reg_8997,
+ .support_pscan_win_report = true,
+ .sd_blksz_fw_dl = 256,
+@@ -1831,6 +1831,6 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+ MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
+ MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
+ MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
+-MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
++MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin");
+ MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin");
+-MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
++MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin");
+diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
+index 813338288453..b7de7cb8cca9 100644
+--- a/drivers/bluetooth/btmtksdio.c
++++ b/drivers/bluetooth/btmtksdio.c
+@@ -684,7 +684,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err, dlen;
+- u8 flag;
++ u8 flag, param;
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+@@ -692,6 +692,20 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
+ return err;
+ }
+
++ /* Power on data RAM the firmware relies on. */
++ param = 1;
++ wmt_params.op = MTK_WMT_FUNC_CTRL;
++ wmt_params.flag = 3;
++ wmt_params.dlen = sizeof(param);
++ wmt_params.data = &param;
++ wmt_params.status = NULL;
++
++ err = mtk_hci_wmt_sync(hdev, &wmt_params);
++ if (err < 0) {
++ bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
++ return err;
++ }
++
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 9c3b063e1a1f..f3f0529564da 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2792,7 +2792,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err, dlen;
+- u8 flag;
++ u8 flag, param;
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+@@ -2800,6 +2800,20 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
+ return err;
+ }
+
++ /* Power on data RAM the firmware relies on. */
++ param = 1;
++ wmt_params.op = BTMTK_WMT_FUNC_CTRL;
++ wmt_params.flag = 3;
++ wmt_params.dlen = sizeof(param);
++ wmt_params.data = &param;
++ wmt_params.status = NULL;
++
++ err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
++ if (err < 0) {
++ bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
++ return err;
++ }
++
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
+index dacf297baf59..5df0651b6cd5 100644
+--- a/drivers/bluetooth/hci_h5.c
++++ b/drivers/bluetooth/hci_h5.c
+@@ -790,7 +790,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
+ if (!h5)
+ return -ENOMEM;
+
+- set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);
++ set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);
+
+ h5->hu = &h5->serdev_hu;
+ h5->serdev_hu.serdev = serdev;
+diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
+index 4652896d4990..ad2f26cb2622 100644
+--- a/drivers/bluetooth/hci_serdev.c
++++ b/drivers/bluetooth/hci_serdev.c
+@@ -357,7 +357,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
+ struct hci_dev *hdev = hu->hdev;
+
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+- hci_unregister_dev(hdev);
++ if (test_bit(HCI_UART_REGISTERED, &hu->flags))
++ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+
+ cancel_work_sync(&hu->write_work);
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index f8bc052cd853..770a780dfa54 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1371,6 +1371,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
++ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
++ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
++ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
+@@ -1440,8 +1444,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
+ SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
+ SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
+- SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
+- SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0),
+ SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
+ SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
+ #endif
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index b161bdf60000..0941d38b2d32 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -304,8 +304,10 @@ static int intel_gtt_setup_scratch_page(void)
+ if (intel_private.needs_dmar) {
+ dma_addr = pci_map_page(intel_private.pcidev, page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+- if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
++ if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
++ __free_page(page);
+ return -EINVAL;
++ }
+
+ intel_private.scratch_page_dma = dma_addr;
+ } else
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 58073836b555..1838039b0333 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -386,13 +386,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
+ chip->cdev.owner = THIS_MODULE;
+ chip->cdevs.owner = THIS_MODULE;
+
+- chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+- if (!chip->work_space.context_buf) {
+- rc = -ENOMEM;
+- goto out;
+- }
+- chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+- if (!chip->work_space.session_buf) {
++ rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
++ if (rc) {
+ rc = -ENOMEM;
+ goto out;
+ }
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index 218cb496222a..37f010421a36 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -177,6 +177,9 @@ struct tpm_header {
+
+ #define TPM_TAG_RQU_COMMAND 193
+
++/* TPM2 specific constants. */
++#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */
++
+ struct stclear_flags_t {
+ __be16 tag;
+ u8 deactivated;
+@@ -456,7 +459,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
+ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+ int tpm2_probe(struct tpm_chip *chip);
+ int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
+-int tpm2_init_space(struct tpm_space *space);
++int tpm2_init_space(struct tpm_space *space, unsigned int buf_size);
+ void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
+ void tpm2_flush_space(struct tpm_chip *chip);
+ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
+diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
+index 982d341d8837..784b8b3cb903 100644
+--- a/drivers/char/tpm/tpm2-space.c
++++ b/drivers/char/tpm/tpm2-space.c
+@@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
+ }
+ }
+
+-int tpm2_init_space(struct tpm_space *space)
++int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
+ {
+- space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
++ space->context_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!space->context_buf)
+ return -ENOMEM;
+
+- space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
++ space->session_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (space->session_buf == NULL) {
+ kfree(space->context_buf);
++ /* Prevent caller getting a dangling pointer. */
++ space->context_buf = NULL;
+ return -ENOMEM;
+ }
+
++ space->buf_size = buf_size;
+ return 0;
+ }
+
+@@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
+ sizeof(space->context_tbl));
+ memcpy(&chip->work_space.session_tbl, &space->session_tbl,
+ sizeof(space->session_tbl));
+- memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE);
+- memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE);
++ memcpy(chip->work_space.context_buf, space->context_buf,
++ space->buf_size);
++ memcpy(chip->work_space.session_buf, space->session_buf,
++ space->buf_size);
+
+ rc = tpm2_load_space(chip);
+ if (rc) {
+@@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
+ continue;
+
+ rc = tpm2_save_context(chip, space->context_tbl[i],
+- space->context_buf, PAGE_SIZE,
++ space->context_buf, space->buf_size,
+ &offset);
+ if (rc == -ENOENT) {
+ space->context_tbl[i] = 0;
+@@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
+ continue;
+
+ rc = tpm2_save_context(chip, space->session_tbl[i],
+- space->session_buf, PAGE_SIZE,
++ space->session_buf, space->buf_size,
+ &offset);
+-
+ if (rc == -ENOENT) {
+ /* handle error saving session, just forget it */
+ space->session_tbl[i] = 0;
+@@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
+ sizeof(space->context_tbl));
+ memcpy(&space->session_tbl, &chip->work_space.session_tbl,
+ sizeof(space->session_tbl));
+- memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE);
+- memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE);
++ memcpy(space->context_buf, chip->work_space.context_buf,
++ space->buf_size);
++ memcpy(space->session_buf, chip->work_space.session_buf,
++ space->buf_size);
+
+ return 0;
+ out:
+diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
+index 7a0a7051a06f..eef0fb06ea83 100644
+--- a/drivers/char/tpm/tpmrm-dev.c
++++ b/drivers/char/tpm/tpmrm-dev.c
+@@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inode, struct file *file)
+ if (priv == NULL)
+ return -ENOMEM;
+
+- rc = tpm2_init_space(&priv->space);
++ rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE);
+ if (rc) {
+ kfree(priv);
+ return -ENOMEM;
+diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c
+index 98e884957db8..911a29bd744e 100644
+--- a/drivers/clk/bcm/clk-bcm63xx-gate.c
++++ b/drivers/clk/bcm/clk-bcm63xx-gate.c
+@@ -155,6 +155,7 @@ static int clk_bcm63xx_probe(struct platform_device *pdev)
+
+ for (entry = table; entry->name; entry++)
+ maxbit = max_t(u8, maxbit, entry->bit);
++ maxbit++;
+
+ hw = devm_kzalloc(&pdev->dev, struct_size(hw, data.hws, maxbit),
+ GFP_KERNEL);
+diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
+index 886f7c5df51a..e3cdb4a282fe 100644
+--- a/drivers/clk/clk-scmi.c
++++ b/drivers/clk/clk-scmi.c
+@@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = {
+ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
+ {
+ int ret;
++ unsigned long min_rate, max_rate;
++
+ struct clk_init_data init = {
+ .flags = CLK_GET_RATE_NOCACHE,
+ .num_parents = 0,
+@@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
+
+ sclk->hw.init = &init;
+ ret = devm_clk_hw_register(dev, &sclk->hw);
+- if (!ret)
+- clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate,
+- sclk->info->range.max_rate);
++ if (ret)
++ return ret;
++
++ if (sclk->info->rate_discrete) {
++ int num_rates = sclk->info->list.num_rates;
++
++ if (num_rates <= 0)
++ return -EINVAL;
++
++ min_rate = sclk->info->list.rates[0];
++ max_rate = sclk->info->list.rates[num_rates - 1];
++ } else {
++ min_rate = sclk->info->range.min_rate;
++ max_rate = sclk->info->range.max_rate;
++ }
++
++ clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
+ return ret;
+ }
+
+diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
+index 96a36f6ff667..d7586e26acd8 100644
+--- a/drivers/clk/qcom/clk-rpmh.c
++++ b/drivers/clk/qcom/clk-rpmh.c
+@@ -143,12 +143,22 @@ static inline bool has_state_changed(struct clk_rpmh *c, u32 state)
+ != (c->aggr_state & BIT(state));
+ }
+
++static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state,
++ struct tcs_cmd *cmd, bool wait)
++{
++ if (wait)
++ return rpmh_write(c->dev, state, cmd, 1);
++
++ return rpmh_write_async(c->dev, state, cmd, 1);
++}
++
+ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
+ {
+ struct tcs_cmd cmd = { 0 };
+ u32 cmd_state, on_val;
+ enum rpmh_state state = RPMH_SLEEP_STATE;
+ int ret;
++ bool wait;
+
+ cmd.addr = c->res_addr;
+ cmd_state = c->aggr_state;
+@@ -159,7 +169,8 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
+ if (cmd_state & BIT(state))
+ cmd.data = on_val;
+
+- ret = rpmh_write_async(c->dev, state, &cmd, 1);
++ wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE;
++ ret = clk_rpmh_send(c, state, &cmd, wait);
+ if (ret) {
+ dev_err(c->dev, "set %s state of %s failed: (%d)\n",
+ !state ? "sleep" :
+@@ -267,7 +278,7 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable)
+ cmd.addr = c->res_addr;
+ cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state);
+
+- ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
++ ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable);
+ if (ret) {
+ dev_err(c->dev, "set active state of %s failed: (%d)\n",
+ c->res_name, ret);
+diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
+index a905796f7f85..25f11e9ec358 100644
+--- a/drivers/cpufreq/Kconfig.arm
++++ b/drivers/cpufreq/Kconfig.arm
+@@ -41,6 +41,7 @@ config ARM_ARMADA_37XX_CPUFREQ
+ config ARM_ARMADA_8K_CPUFREQ
+ tristate "Armada 8K CPUFreq driver"
+ depends on ARCH_MVEBU && CPUFREQ_DT
++ select ARMADA_AP_CPU_CLK
+ help
+ This enables the CPUFreq driver support for Marvell
+ Armada8k SOCs.
+diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
+index aa0f06dec959..df1c941260d1 100644
+--- a/drivers/cpufreq/armada-37xx-cpufreq.c
++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
+@@ -456,6 +456,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ /* Now that everything is setup, enable the DVFS at hardware level */
+ armada37xx_cpufreq_enable_dvfs(nb_pm_base);
+
++ memset(&pdata, 0, sizeof(pdata));
+ pdata.suspend = armada37xx_cpufreq_suspend;
+ pdata.resume = armada37xx_cpufreq_resume;
+
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index fa988bd1e606..194a6587a1de 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -616,6 +616,24 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
+ return NULL;
+ }
+
++static struct cpufreq_governor *get_governor(const char *str_governor)
++{
++ struct cpufreq_governor *t;
++
++ mutex_lock(&cpufreq_governor_mutex);
++ t = find_governor(str_governor);
++ if (!t)
++ goto unlock;
++
++ if (!try_module_get(t->owner))
++ t = NULL;
++
++unlock:
++ mutex_unlock(&cpufreq_governor_mutex);
++
++ return t;
++}
++
+ static unsigned int cpufreq_parse_policy(char *str_governor)
+ {
+ if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+@@ -635,28 +653,14 @@ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
+ {
+ struct cpufreq_governor *t;
+
+- mutex_lock(&cpufreq_governor_mutex);
++ t = get_governor(str_governor);
++ if (t)
++ return t;
+
+- t = find_governor(str_governor);
+- if (!t) {
+- int ret;
+-
+- mutex_unlock(&cpufreq_governor_mutex);
+-
+- ret = request_module("cpufreq_%s", str_governor);
+- if (ret)
+- return NULL;
+-
+- mutex_lock(&cpufreq_governor_mutex);
+-
+- t = find_governor(str_governor);
+- }
+- if (t && !try_module_get(t->owner))
+- t = NULL;
+-
+- mutex_unlock(&cpufreq_governor_mutex);
++ if (request_module("cpufreq_%s", str_governor))
++ return NULL;
+
+- return t;
++ return get_governor(str_governor);
+ }
+
+ /**
+@@ -810,12 +814,14 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
+ goto out;
+ }
+
++ mutex_lock(&cpufreq_governor_mutex);
+ for_each_governor(t) {
+ if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
+ - (CPUFREQ_NAME_LEN + 2)))
+- goto out;
++ break;
+ i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
+ }
++ mutex_unlock(&cpufreq_governor_mutex);
+ out:
+ i += sprintf(&buf[i], "\n");
+ return i;
+@@ -1053,15 +1059,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
+ struct cpufreq_governor *def_gov = cpufreq_default_governor();
+ struct cpufreq_governor *gov = NULL;
+ unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
++ int ret;
+
+ if (has_target()) {
+ /* Update policy governor to the one used before hotplug. */
+- gov = find_governor(policy->last_governor);
++ gov = get_governor(policy->last_governor);
+ if (gov) {
+ pr_debug("Restoring governor %s for cpu %d\n",
+ policy->governor->name, policy->cpu);
+ } else if (def_gov) {
+ gov = def_gov;
++ __module_get(gov->owner);
+ } else {
+ return -ENODATA;
+ }
+@@ -1084,7 +1092,11 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
+ return -ENODATA;
+ }
+
+- return cpufreq_set_policy(policy, gov, pol);
++ ret = cpufreq_set_policy(policy, gov, pol);
++ if (gov)
++ module_put(gov->owner);
++
++ return ret;
+ }
+
+ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
+index 596ce28b957d..2410b23aa609 100644
+--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
++++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
+@@ -200,6 +200,7 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
+ int status;
+
+ memset(req_info, 0, sizeof(struct cpt_request_info));
++ req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
+ memset(fctx, 0, sizeof(struct fc_context));
+ create_input_list(req, enc, enc_iv_len);
+ create_output_list(req, enc_iv_len);
+diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+index 7a24019356b5..e343249c8d05 100644
+--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
++++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+@@ -133,7 +133,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
+
+ /* Setup gather (input) components */
+ g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
+- info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
++ info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ if (!info->gather_components) {
+ ret = -ENOMEM;
+ goto scatter_gather_clean;
+@@ -150,7 +150,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
+
+ /* Setup scatter (output) components */
+ s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
+- info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
++ info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ if (!info->scatter_components) {
+ ret = -ENOMEM;
+ goto scatter_gather_clean;
+@@ -167,7 +167,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
+
+ /* Create and initialize DPTR */
+ info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+- info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
++ info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ if (!info->in_buffer) {
+ ret = -ENOMEM;
+ goto scatter_gather_clean;
+@@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf,
+ }
+
+ /* Create and initialize RPTR */
+- info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
++ info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ if (!info->out_buffer) {
+ ret = -ENOMEM;
+ goto scatter_gather_clean;
+@@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
+ struct cpt_vq_command vq_cmd;
+ union cpt_inst_s cptinst;
+
+- info = kzalloc(sizeof(*info), GFP_KERNEL);
++ info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ if (unlikely(!info)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
+ return -ENOMEM;
+@@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
+ * Get buffer for union cpt_res_s response
+ * structure and its physical address
+ */
+- info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
++ info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ if (unlikely(!info->completion_addr)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
+ ret = -ENOMEM;
+diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h
+index 3514b082eca7..1e8dd9ebcc17 100644
+--- a/drivers/crypto/cavium/cpt/request_manager.h
++++ b/drivers/crypto/cavium/cpt/request_manager.h
+@@ -62,6 +62,8 @@ struct cpt_request_info {
+ union ctrl_info ctrl; /* User control information */
+ struct cptvf_request req; /* Request Information (Core specific) */
+
++ bool may_sleep;
++
+ struct buf_ptr in[MAX_BUF_CNT];
+ struct buf_ptr out[MAX_BUF_CNT];
+
+diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
+index 3f68262d9ab4..87a34d91fdf7 100644
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -469,6 +469,7 @@ struct ccp_sg_workarea {
+ unsigned int sg_used;
+
+ struct scatterlist *dma_sg;
++ struct scatterlist *dma_sg_head;
+ struct device *dma_dev;
+ unsigned int dma_count;
+ enum dma_data_direction dma_dir;
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index 422193690fd4..64112c736810 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp)
+ static void ccp_sg_free(struct ccp_sg_workarea *wa)
+ {
+ if (wa->dma_count)
+- dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
++ dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);
+
+ wa->dma_count = 0;
+ }
+@@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
+ return 0;
+
+ wa->dma_sg = sg;
++ wa->dma_sg_head = sg;
+ wa->dma_dev = dev;
+ wa->dma_dir = dma_dir;
+ wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
+@@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
+ static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
+ {
+ unsigned int nbytes = min_t(u64, len, wa->bytes_left);
++ unsigned int sg_combined_len = 0;
+
+ if (!wa->sg)
+ return;
+
+ wa->sg_used += nbytes;
+ wa->bytes_left -= nbytes;
+- if (wa->sg_used == wa->sg->length) {
+- wa->sg = sg_next(wa->sg);
++ if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
++ /* Advance to the next DMA scatterlist entry */
++ wa->dma_sg = sg_next(wa->dma_sg);
++
++ /* In the case that the DMA mapped scatterlist has entries
++ * that have been merged, the non-DMA mapped scatterlist
++ * must be advanced multiple times for each merged entry.
++ * This ensures that the current non-DMA mapped entry
++ * corresponds to the current DMA mapped entry.
++ */
++ do {
++ sg_combined_len += wa->sg->length;
++ wa->sg = sg_next(wa->sg);
++ } while (wa->sg_used > sg_combined_len);
++
+ wa->sg_used = 0;
+ }
+ }
+@@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
+ /* Update the structures and generate the count */
+ buf_count = 0;
+ while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
+- nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
++ nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
+ dm_wa->length - buf_count);
+ nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
+
+@@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
+ * and destination. The resulting len values will always be <= UINT_MAX
+ * because the dma length is an unsigned int.
+ */
+- sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
++ sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
+ sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
+
+ if (dst) {
+- sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
++ sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
+ sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
+ op_len = min(sg_src_len, sg_dst_len);
+ } else {
+@@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
+ /* Enough data in the sg element, but we need to
+ * adjust for any previously copied data
+ */
+- op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
++ op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
+ op->src.u.dma.offset = src->sg_wa.sg_used;
+ op->src.u.dma.length = op_len & ~(block_size - 1);
+
+@@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
+ /* Enough room in the sg element, but we need to
+ * adjust for any previously used area
+ */
+- op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
++ op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
+ op->dst.u.dma.offset = dst->sg_wa.sg_used;
+ op->dst.u.dma.length = op->src.u.dma.length;
+ }
+@@ -2028,7 +2043,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ dst.sg_wa.sg_used = 0;
+ for (i = 1; i <= src.sg_wa.dma_count; i++) {
+ if (!dst.sg_wa.sg ||
+- (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
++ (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
+ ret = -EINVAL;
+ goto e_dst;
+ }
+@@ -2054,8 +2069,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ goto e_dst;
+ }
+
+- dst.sg_wa.sg_used += src.sg_wa.sg->length;
+- if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
++ dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
++ if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
+ dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
+ dst.sg_wa.sg_used = 0;
+ }
+diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
+index cd9c60268bf8..9bf0cce578f0 100644
+--- a/drivers/crypto/ccree/cc_cipher.c
++++ b/drivers/crypto/ccree/cc_cipher.c
+@@ -163,7 +163,6 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
+ skcipher_alg.base);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+ unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
+- int rc = 0;
+
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
+ crypto_tfm_alg_name(tfm));
+@@ -175,10 +174,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
+ ctx_p->flow_mode = cc_alg->flow_mode;
+ ctx_p->drvdata = cc_alg->drvdata;
+
++ if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
++ /* Alloc hash tfm for essiv */
++ ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
++ if (IS_ERR(ctx_p->shash_tfm)) {
++ dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
++ return PTR_ERR(ctx_p->shash_tfm);
++ }
++ }
++
+ /* Allocate key buffer, cache line aligned */
+ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
+ if (!ctx_p->user.key)
+- return -ENOMEM;
++ goto free_shash;
+
+ dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
+ ctx_p->user.key);
+@@ -190,21 +198,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
+ if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
+ dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+ max_key_buf_size, ctx_p->user.key);
+- return -ENOMEM;
++ goto free_key;
+ }
+ dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+ max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
+
+- if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+- /* Alloc hash tfm for essiv */
+- ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
+- if (IS_ERR(ctx_p->shash_tfm)) {
+- dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+- return PTR_ERR(ctx_p->shash_tfm);
+- }
+- }
++ return 0;
+
+- return rc;
++free_key:
++ kfree(ctx_p->user.key);
++free_shash:
++ crypto_free_shash(ctx_p->shash_tfm);
++
++ return -ENOMEM;
+ }
+
+ static void cc_cipher_exit(struct crypto_tfm *tfm)
+diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
+index c27e7160d2df..4ad4ffd90cee 100644
+--- a/drivers/crypto/hisilicon/sec/sec_algs.c
++++ b/drivers/crypto/hisilicon/sec/sec_algs.c
+@@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
+ dma_addr_t *psec_sgl,
+ struct scatterlist *sgl,
+ int count,
+- struct sec_dev_info *info)
++ struct sec_dev_info *info,
++ gfp_t gfp)
+ {
+ struct sec_hw_sgl *sgl_current = NULL;
+ struct sec_hw_sgl *sgl_next;
+@@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
+ sge_index = i % SEC_MAX_SGE_NUM;
+ if (sge_index == 0) {
+ sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
+- GFP_KERNEL, &sgl_next_dma);
++ gfp, &sgl_next_dma);
+ if (!sgl_next) {
+ ret = -ENOMEM;
+ goto err_free_hw_sgls;
+@@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
+ }
+
+ static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
+- int *steps)
++ int *steps, gfp_t gfp)
+ {
+ size_t *sizes;
+ int i;
+
+ /* Split into suitable sized blocks */
+ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
+- sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
++ sizes = kcalloc(*steps, sizeof(*sizes), gfp);
+ if (!sizes)
+ return -ENOMEM;
+
+@@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
+ int steps, struct scatterlist ***splits,
+ int **splits_nents,
+ int sgl_len_in,
+- struct device *dev)
++ struct device *dev, gfp_t gfp)
+ {
+ int ret, count;
+
+@@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
+ if (!count)
+ return -EINVAL;
+
+- *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
++ *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
+ if (!*splits) {
+ ret = -ENOMEM;
+ goto err_unmap_sg;
+ }
+- *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
++ *splits_nents = kcalloc(steps, sizeof(int), gfp);
+ if (!*splits_nents) {
+ ret = -ENOMEM;
+ goto err_free_splits;
+@@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
+
+ /* output the scatter list before and after this */
+ ret = sg_split(sgl, count, 0, steps, split_sizes,
+- *splits, *splits_nents, GFP_KERNEL);
++ *splits, *splits_nents, gfp);
+ if (ret) {
+ ret = -ENOMEM;
+ goto err_free_splits_nents;
+@@ -630,13 +631,13 @@ static struct sec_request_el
+ int el_size, bool different_dest,
+ struct scatterlist *sgl_in, int n_ents_in,
+ struct scatterlist *sgl_out, int n_ents_out,
+- struct sec_dev_info *info)
++ struct sec_dev_info *info, gfp_t gfp)
+ {
+ struct sec_request_el *el;
+ struct sec_bd_info *req;
+ int ret;
+
+- el = kzalloc(sizeof(*el), GFP_KERNEL);
++ el = kzalloc(sizeof(*el), gfp);
+ if (!el)
+ return ERR_PTR(-ENOMEM);
+ el->el_length = el_size;
+@@ -668,7 +669,7 @@ static struct sec_request_el
+ el->sgl_in = sgl_in;
+
+ ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
+- n_ents_in, info);
++ n_ents_in, info, gfp);
+ if (ret)
+ goto err_free_el;
+
+@@ -679,7 +680,7 @@ static struct sec_request_el
+ el->sgl_out = sgl_out;
+ ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
+ el->sgl_out,
+- n_ents_out, info);
++ n_ents_out, info, gfp);
+ if (ret)
+ goto err_free_hw_sgl_in;
+
+@@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
+ int *splits_out_nents = NULL;
+ struct sec_request_el *el, *temp;
+ bool split = skreq->src != skreq->dst;
++ gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+
+ mutex_init(&sec_req->lock);
+ sec_req->req_base = &skreq->base;
+@@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
+ sec_req->len_in = sg_nents(skreq->src);
+
+ ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
+- &steps);
++ &steps, gfp);
+ if (ret)
+ return ret;
+ sec_req->num_elements = steps;
+ ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
+ &splits_in_nents, sec_req->len_in,
+- info->dev);
++ info->dev, gfp);
+ if (ret)
+ goto err_free_split_sizes;
+
+@@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
+ sec_req->len_out = sg_nents(skreq->dst);
+ ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
+ &splits_out, &splits_out_nents,
+- sec_req->len_out, info->dev);
++ sec_req->len_out, info->dev, gfp);
+ if (ret)
+ goto err_unmap_in_sg;
+ }
+@@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
+ splits_in[i], splits_in_nents[i],
+ split ? splits_out[i] : NULL,
+ split ? splits_out_nents[i] : 0,
+- info);
++ info, gfp);
+ if (IS_ERR(el)) {
+ ret = PTR_ERR(el);
+ goto err_free_elements;
+diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
+index 6bd8f6a2a24f..aeb03081415c 100644
+--- a/drivers/crypto/qat/qat_common/qat_uclo.c
++++ b/drivers/crypto/qat/qat_common/qat_uclo.c
+@@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
+ }
+ return 0;
+ out_err:
++ /* Do not free the list head unless we allocated it. */
++ tail_old = tail_old->next;
++ if (flag) {
++ kfree(*init_tab_base);
++ *init_tab_base = NULL;
++ }
++
+ while (tail_old) {
+ mem_init = tail_old->next;
+ kfree(tail_old);
+ tail_old = mem_init;
+ }
+- if (flag)
+- kfree(*init_tab_base);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
+index 0e7ea3591b78..5e7593753799 100644
+--- a/drivers/edac/edac_device_sysfs.c
++++ b/drivers/edac/edac_device_sysfs.c
+@@ -275,6 +275,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
+
+ /* Error exit stack */
+ err_kobj_reg:
++ kobject_put(&edac_dev->kobj);
+ module_put(edac_dev->owner);
+
+ err_out:
+diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
+index 72c9eb9fdffb..53042af7262e 100644
+--- a/drivers/edac/edac_pci_sysfs.c
++++ b/drivers/edac/edac_pci_sysfs.c
+@@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void)
+
+ /* Error unwind statck */
+ kobject_init_and_add_fail:
+- kfree(edac_pci_top_main_kobj);
++ kobject_put(edac_pci_top_main_kobj);
+
+ kzalloc_fail:
+ module_put(THIS_MODULE);
+diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
+index 87f737e01473..041f8152272b 100644
+--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
++++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
+@@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
+ for (i = 0; i < num_domains; i++, scmi_pd++) {
+ u32 state;
+
+- domains[i] = &scmi_pd->genpd;
++ if (handle->power_ops->state_get(handle, i, &state)) {
++ dev_warn(dev, "failed to get state for domain %d\n", i);
++ continue;
++ }
+
+ scmi_pd->domain = i;
+ scmi_pd->handle = handle;
+@@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
+ scmi_pd->genpd.power_off = scmi_pd_power_off;
+ scmi_pd->genpd.power_on = scmi_pd_power_on;
+
+- if (handle->power_ops->state_get(handle, i, &state)) {
+- dev_warn(dev, "failed to get state for domain %d\n", i);
+- continue;
+- }
+-
+ pm_genpd_init(&scmi_pd->genpd, NULL,
+ state == SCMI_POWER_STATE_GENERIC_OFF);
++
++ domains[i] = &scmi_pd->genpd;
+ }
+
+ scmi_pd_data->domains = domains;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index 23085b352cf2..c212d5fc665c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -404,7 +404,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
+ ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
+ }
+ amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
+- amdgpu_irq_get(adev, irq_src, irq_type);
++
++ if (irq_src)
++ amdgpu_irq_get(adev, irq_src, irq_type);
+
+ ring->fence_drv.irq_src = irq_src;
+ ring->fence_drv.irq_type = irq_type;
+@@ -539,8 +541,9 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+ /* no need to trigger GPU reset as we are unloading */
+ amdgpu_fence_driver_force_completion(ring);
+ }
+- amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+- ring->fence_drv.irq_type);
++ if (ring->fence_drv.irq_src)
++ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
++ ring->fence_drv.irq_type);
+ drm_sched_fini(&ring->sched);
+ del_timer_sync(&ring->fence_drv.fallback_timer);
+ for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+@@ -576,8 +579,9 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+ }
+
+ /* disable the interrupt */
+- amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+- ring->fence_drv.irq_type);
++ if (ring->fence_drv.irq_src)
++ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
++ ring->fence_drv.irq_type);
+ }
+ }
+
+@@ -603,8 +607,9 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+ continue;
+
+ /* enable the interrupt */
+- amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+- ring->fence_drv.irq_type);
++ if (ring->fence_drv.irq_src)
++ amdgpu_irq_get(adev, ring->fence_drv.irq_src,
++ ring->fence_drv.irq_type);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index 785322cd4c6c..7241d4c20778 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -530,6 +530,8 @@ bool dm_pp_get_static_clocks(
+ &pp_clk_info);
+ else if (adev->smu.funcs)
+ ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
++ else
++ return false;
+ if (ret)
+ return false;
+
+diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+index 0922d9cd858a..c4d8c52c6b9c 100644
+--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+@@ -171,7 +171,8 @@ static int smu_v11_0_init_microcode(struct smu_context *smu)
+ chip_name = "navi12";
+ break;
+ default:
+- BUG();
++ dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
++ return -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
+index 3c70a53813bf..0b2bb485d9be 100644
+--- a/drivers/gpu/drm/arm/malidp_planes.c
++++ b/drivers/gpu/drm/arm/malidp_planes.c
+@@ -928,7 +928,7 @@ int malidp_de_planes_init(struct drm_device *drm)
+ const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
+ struct malidp_plane *plane = NULL;
+ enum drm_plane_type plane_type;
+- unsigned long crtcs = 1 << drm->mode_config.num_crtc;
++ unsigned long crtcs = BIT(drm->mode_config.num_crtc);
+ unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+ unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
+index bd3165ee5354..04431dbac4a4 100644
+--- a/drivers/gpu/drm/bridge/sil-sii8620.c
++++ b/drivers/gpu/drm/bridge/sil-sii8620.c
+@@ -177,7 +177,7 @@ static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
+
+ static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
+ {
+- u8 ret;
++ u8 ret = 0;
+
+ sii8620_read_buf(ctx, addr, &ret, 1);
+ return ret;
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index 0a580957c8cf..f1de4bb6558c 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -647,6 +647,12 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
+ buf[i]);
+ }
+
++ /* Clear old status bits before start so we don't get confused */
++ regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG,
++ AUX_IRQ_STATUS_NAT_I2C_FAIL |
++ AUX_IRQ_STATUS_AUX_RPLY_TOUT |
++ AUX_IRQ_STATUS_AUX_SHORT);
++
+ regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND);
+
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val,
+diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
+index eab0f2687cd6..00debd02c322 100644
+--- a/drivers/gpu/drm/drm_debugfs.c
++++ b/drivers/gpu/drm/drm_debugfs.c
+@@ -337,13 +337,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf,
+
+ buf[len] = '\0';
+
+- if (!strcmp(buf, "on"))
++ if (sysfs_streq(buf, "on"))
+ connector->force = DRM_FORCE_ON;
+- else if (!strcmp(buf, "digital"))
++ else if (sysfs_streq(buf, "digital"))
+ connector->force = DRM_FORCE_ON_DIGITAL;
+- else if (!strcmp(buf, "off"))
++ else if (sysfs_streq(buf, "off"))
+ connector->force = DRM_FORCE_OFF;
+- else if (!strcmp(buf, "unspecified"))
++ else if (sysfs_streq(buf, "unspecified"))
+ connector->force = DRM_FORCE_UNSPECIFIED;
+ else
+ return -EINVAL;
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 46ad14470d06..1fdc85a71cec 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -710,6 +710,8 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+ if (!objs)
+ return -ENOMEM;
+
++ *objs_out = objs;
++
+ handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
+ if (!handles) {
+ ret = -ENOMEM;
+@@ -723,8 +725,6 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+ }
+
+ ret = objects_lookup(filp, handles, count, objs);
+- *objs_out = objs;
+-
+ out:
+ kvfree(handles);
+ return ret;
+diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
+index bd2498bbd74a..b99f96dcc6f1 100644
+--- a/drivers/gpu/drm/drm_mipi_dsi.c
++++ b/drivers/gpu/drm/drm_mipi_dsi.c
+@@ -1029,11 +1029,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
+ */
+ int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
+ {
+- u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
+- scanline & 0xff };
++ u8 payload[2] = { scanline >> 8, scanline & 0xff };
+ ssize_t err;
+
+- err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
++ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload,
++ sizeof(payload));
+ if (err < 0)
+ return err;
+
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index d47d1a8e0219..85de8551ce86 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -713,7 +713,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
+ ret = pm_runtime_get_sync(gpu->dev);
+ if (ret < 0) {
+ dev_err(gpu->dev, "Failed to enable GPU power domain\n");
+- return ret;
++ goto pm_put;
+ }
+
+ etnaviv_hw_identify(gpu);
+@@ -802,6 +802,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
+
+ fail:
+ pm_runtime_mark_last_busy(gpu->dev);
++pm_put:
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ return ret;
+@@ -842,7 +843,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
+
+ ret = pm_runtime_get_sync(gpu->dev);
+ if (ret < 0)
+- return ret;
++ goto pm_put;
+
+ dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
+ dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
+@@ -965,6 +966,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
+ ret = 0;
+
+ pm_runtime_mark_last_busy(gpu->dev);
++pm_put:
+ pm_runtime_put_autosuspend(gpu->dev);
+
+ return ret;
+@@ -978,7 +980,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
+ dev_err(gpu->dev, "recover hung GPU!\n");
+
+ if (pm_runtime_get_sync(gpu->dev) < 0)
+- return;
++ goto pm_put;
+
+ mutex_lock(&gpu->lock);
+
+@@ -997,6 +999,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
+
+ mutex_unlock(&gpu->lock);
+ pm_runtime_mark_last_busy(gpu->dev);
++pm_put:
+ pm_runtime_put_autosuspend(gpu->dev);
+ }
+
+@@ -1269,8 +1272,10 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
+
+ if (!submit->runtime_resumed) {
+ ret = pm_runtime_get_sync(gpu->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(gpu->dev);
+ return NULL;
++ }
+ submit->runtime_resumed = true;
+ }
+
+@@ -1287,6 +1292,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
+ ret = event_alloc(gpu, nr_events, event);
+ if (ret) {
+ DRM_ERROR("no free events\n");
++ pm_runtime_put_noidle(gpu->dev);
+ return NULL;
+ }
+
+@@ -1457,7 +1463,7 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
+ if (gpu->clk_bus) {
+ ret = clk_prepare_enable(gpu->clk_bus);
+ if (ret)
+- return ret;
++ goto disable_clk_reg;
+ }
+
+ if (gpu->clk_core) {
+@@ -1480,6 +1486,9 @@ disable_clk_core:
+ disable_clk_bus:
+ if (gpu->clk_bus)
+ clk_disable_unprepare(gpu->clk_bus);
++disable_clk_reg:
++ if (gpu->clk_reg)
++ clk_disable_unprepare(gpu->clk_reg);
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
+index f22cfbf9353e..2e12a4a3bfa1 100644
+--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
++++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
+@@ -212,9 +212,8 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+- hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+- if (!hdmi)
+- return -ENOMEM;
++ hdmi = dev_get_drvdata(dev);
++ memset(hdmi, 0, sizeof(*hdmi));
+
+ match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node);
+ plat_data = match->data;
+@@ -239,8 +238,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
+ drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+- platform_set_drvdata(pdev, hdmi);
+-
+ hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
+
+ /*
+@@ -270,6 +267,14 @@ static const struct component_ops dw_hdmi_imx_ops = {
+
+ static int dw_hdmi_imx_probe(struct platform_device *pdev)
+ {
++ struct imx_hdmi *hdmi;
++
++ hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
++ if (!hdmi)
++ return -ENOMEM;
++
++ platform_set_drvdata(pdev, hdmi);
++
+ return component_add(&pdev->dev, &dw_hdmi_imx_ops);
+ }
+
+diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
+index da87c70e413b..881c36d0f16b 100644
+--- a/drivers/gpu/drm/imx/imx-drm-core.c
++++ b/drivers/gpu/drm/imx/imx-drm-core.c
+@@ -281,9 +281,10 @@ static void imx_drm_unbind(struct device *dev)
+
+ drm_kms_helper_poll_fini(drm);
+
++ component_unbind_all(drm->dev, drm);
++
+ drm_mode_config_cleanup(drm);
+
+- component_unbind_all(drm->dev, drm);
+ dev_set_drvdata(dev, NULL);
+
+ drm_dev_put(drm);
+diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
+index 695f307f36b2..9af5a08d5490 100644
+--- a/drivers/gpu/drm/imx/imx-ldb.c
++++ b/drivers/gpu/drm/imx/imx-ldb.c
+@@ -593,9 +593,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
+ int ret;
+ int i;
+
+- imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL);
+- if (!imx_ldb)
+- return -ENOMEM;
++ imx_ldb = dev_get_drvdata(dev);
++ memset(imx_ldb, 0, sizeof(*imx_ldb));
+
+ imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(imx_ldb->regmap)) {
+@@ -703,8 +702,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
+ }
+ }
+
+- dev_set_drvdata(dev, imx_ldb);
+-
+ return 0;
+
+ free_child:
+@@ -736,6 +733,14 @@ static const struct component_ops imx_ldb_ops = {
+
+ static int imx_ldb_probe(struct platform_device *pdev)
+ {
++ struct imx_ldb *imx_ldb;
++
++ imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
++ if (!imx_ldb)
++ return -ENOMEM;
++
++ platform_set_drvdata(pdev, imx_ldb);
++
+ return component_add(&pdev->dev, &imx_ldb_ops);
+ }
+
+diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
+index 5bbfaa2cd0f4..f91c3eb7697b 100644
+--- a/drivers/gpu/drm/imx/imx-tve.c
++++ b/drivers/gpu/drm/imx/imx-tve.c
+@@ -494,6 +494,13 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
+ return 0;
+ }
+
++static void imx_tve_disable_regulator(void *data)
++{
++ struct imx_tve *tve = data;
++
++ regulator_disable(tve->dac_reg);
++}
++
+ static bool imx_tve_readable_reg(struct device *dev, unsigned int reg)
+ {
+ return (reg % 4 == 0) && (reg <= 0xdc);
+@@ -546,9 +553,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
+ int irq;
+ int ret;
+
+- tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
+- if (!tve)
+- return -ENOMEM;
++ tve = dev_get_drvdata(dev);
++ memset(tve, 0, sizeof(*tve));
+
+ tve->dev = dev;
+ spin_lock_init(&tve->lock);
+@@ -618,6 +624,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
+ ret = regulator_enable(tve->dac_reg);
+ if (ret)
+ return ret;
++ ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, tve);
++ if (ret)
++ return ret;
+ }
+
+ tve->clk = devm_clk_get(dev, "tve");
+@@ -659,27 +668,23 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
+ if (ret)
+ return ret;
+
+- dev_set_drvdata(dev, tve);
+-
+ return 0;
+ }
+
+-static void imx_tve_unbind(struct device *dev, struct device *master,
+- void *data)
+-{
+- struct imx_tve *tve = dev_get_drvdata(dev);
+-
+- if (!IS_ERR(tve->dac_reg))
+- regulator_disable(tve->dac_reg);
+-}
+-
+ static const struct component_ops imx_tve_ops = {
+ .bind = imx_tve_bind,
+- .unbind = imx_tve_unbind,
+ };
+
+ static int imx_tve_probe(struct platform_device *pdev)
+ {
++ struct imx_tve *tve;
++
++ tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL);
++ if (!tve)
++ return -ENOMEM;
++
++ platform_set_drvdata(pdev, tve);
++
+ return component_add(&pdev->dev, &imx_tve_ops);
+ }
+
+diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
+index 63c0284f8b3c..2256c9789fc2 100644
+--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
++++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
+@@ -438,21 +438,13 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+ struct drm_device *drm = data;
+ struct ipu_crtc *ipu_crtc;
+- int ret;
+
+- ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
+- if (!ipu_crtc)
+- return -ENOMEM;
++ ipu_crtc = dev_get_drvdata(dev);
++ memset(ipu_crtc, 0, sizeof(*ipu_crtc));
+
+ ipu_crtc->dev = dev;
+
+- ret = ipu_crtc_init(ipu_crtc, pdata, drm);
+- if (ret)
+- return ret;
+-
+- dev_set_drvdata(dev, ipu_crtc);
+-
+- return 0;
++ return ipu_crtc_init(ipu_crtc, pdata, drm);
+ }
+
+ static void ipu_drm_unbind(struct device *dev, struct device *master,
+@@ -474,6 +466,7 @@ static const struct component_ops ipu_crtc_ops = {
+ static int ipu_drm_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
++ struct ipu_crtc *ipu_crtc;
+ int ret;
+
+ if (!dev->platform_data)
+@@ -483,6 +476,12 @@ static int ipu_drm_probe(struct platform_device *pdev)
+ if (ret)
+ return ret;
+
++ ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
++ if (!ipu_crtc)
++ return -ENOMEM;
++
++ dev_set_drvdata(dev, ipu_crtc);
++
+ return component_add(dev, &ipu_crtc_ops);
+ }
+
+diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
+index e7ce17503ae1..be55548f352a 100644
+--- a/drivers/gpu/drm/imx/parallel-display.c
++++ b/drivers/gpu/drm/imx/parallel-display.c
+@@ -204,9 +204,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
+ u32 bus_format = 0;
+ const char *fmt;
+
+- imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
+- if (!imxpd)
+- return -ENOMEM;
++ imxpd = dev_get_drvdata(dev);
++ memset(imxpd, 0, sizeof(*imxpd));
+
+ edidp = of_get_property(np, "edid", &imxpd->edid_len);
+ if (edidp)
+@@ -236,8 +235,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
+ if (ret)
+ return ret;
+
+- dev_set_drvdata(dev, imxpd);
+-
+ return 0;
+ }
+
+@@ -259,6 +256,14 @@ static const struct component_ops imx_pd_ops = {
+
+ static int imx_pd_probe(struct platform_device *pdev)
+ {
++ struct imx_parallel_display *imxpd;
++
++ imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
++ if (!imxpd)
++ return -ENOMEM;
++
++ platform_set_drvdata(pdev, imxpd);
++
+ return component_add(&pdev->dev, &imx_pd_ops);
+ }
+
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index e62b286947a7..9ea748667fab 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -713,10 +713,19 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
+ /* Turn on the resources */
+ pm_runtime_get_sync(gmu->dev);
+
++ /*
++ * "enable" the GX power domain which won't actually do anything but it
++ * will make sure that the refcounting is correct in case we need to
++ * bring down the GX after a GMU failure
++ */
++ if (!IS_ERR_OR_NULL(gmu->gxpd))
++ pm_runtime_get_sync(gmu->gxpd);
++
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret) {
++ pm_runtime_put(gmu->gxpd);
+ pm_runtime_put(gmu->dev);
+ return ret;
+ }
+@@ -752,19 +761,12 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
+ /* Set the GPU to the highest power frequency */
+ __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+
+- /*
+- * "enable" the GX power domain which won't actually do anything but it
+- * will make sure that the refcounting is correct in case we need to
+- * bring down the GX after a GMU failure
+- */
+- if (!IS_ERR_OR_NULL(gmu->gxpd))
+- pm_runtime_get(gmu->gxpd);
+-
+ out:
+ /* On failure, shut down the GMU to leave it in a good state */
+ if (ret) {
+ disable_irq(gmu->gmu_irq);
+ a6xx_rpmh_stop(gmu);
++ pm_runtime_put(gmu->gxpd);
+ pm_runtime_put(gmu->dev);
+ }
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index ce59adff06aa..36c85c05b7cf 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -381,7 +381,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+ if (!fevent) {
+- DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
++ DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
+ return;
+ }
+
+diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
+index 5a6a79fbc9d6..d92a0ffe2a76 100644
+--- a/drivers/gpu/drm/msm/msm_gem.c
++++ b/drivers/gpu/drm/msm/msm_gem.c
+@@ -977,10 +977,8 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+
+ static int msm_gem_new_impl(struct drm_device *dev,
+ uint32_t size, uint32_t flags,
+- struct drm_gem_object **obj,
+- bool struct_mutex_locked)
++ struct drm_gem_object **obj)
+ {
+- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj;
+
+ switch (flags & MSM_BO_CACHE_MASK) {
+@@ -1006,15 +1004,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
+ INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->vmas);
+
+- if (struct_mutex_locked) {
+- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+- } else {
+- mutex_lock(&dev->struct_mutex);
+- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+- mutex_unlock(&dev->struct_mutex);
+- }
+-
+ *obj = &msm_obj->base;
+
+ return 0;
+@@ -1024,6 +1013,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags, bool struct_mutex_locked)
+ {
+ struct msm_drm_private *priv = dev->dev_private;
++ struct msm_gem_object *msm_obj;
+ struct drm_gem_object *obj = NULL;
+ bool use_vram = false;
+ int ret;
+@@ -1044,14 +1034,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
+- ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
++ ret = msm_gem_new_impl(dev, size, flags, &obj);
+ if (ret)
+ goto fail;
+
++ msm_obj = to_msm_bo(obj);
++
+ if (use_vram) {
+ struct msm_gem_vma *vma;
+ struct page **pages;
+- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ mutex_lock(&msm_obj->lock);
+
+@@ -1086,6 +1077,15 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
+ }
+
++ if (struct_mutex_locked) {
++ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
++ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
++ } else {
++ mutex_lock(&dev->struct_mutex);
++ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
++ mutex_unlock(&dev->struct_mutex);
++ }
++
+ return obj;
+
+ fail:
+@@ -1108,6 +1108,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+ struct dma_buf *dmabuf, struct sg_table *sgt)
+ {
++ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj;
+ struct drm_gem_object *obj;
+ uint32_t size;
+@@ -1121,7 +1122,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+
+ size = PAGE_ALIGN(dmabuf->size);
+
+- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
++ ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+ if (ret)
+ goto fail;
+
+@@ -1146,6 +1147,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
+ }
+
+ mutex_unlock(&msm_obj->lock);
++
++ mutex_lock(&dev->struct_mutex);
++ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
++ mutex_unlock(&dev->struct_mutex);
++
+ return obj;
+
+ fail:
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
+index c9692df2b76c..46578108a430 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
+@@ -83,18 +83,20 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
+ {
+ u32 mode = 0x00;
+
+- if (asyc->dither.mode == DITHERING_MODE_AUTO) {
+- if (asyh->base.depth > asyh->or.bpc * 3)
+- mode = DITHERING_MODE_DYNAMIC2X2;
+- } else {
+- mode = asyc->dither.mode;
+- }
++ if (asyc->dither.mode) {
++ if (asyc->dither.mode == DITHERING_MODE_AUTO) {
++ if (asyh->base.depth > asyh->or.bpc * 3)
++ mode = DITHERING_MODE_DYNAMIC2X2;
++ } else {
++ mode = asyc->dither.mode;
++ }
+
+- if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
+- if (asyh->or.bpc >= 8)
+- mode |= DITHERING_DEPTH_8BPC;
+- } else {
+- mode |= asyc->dither.depth;
++ if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
++ if (asyh->or.bpc >= 8)
++ mode |= DITHERING_DEPTH_8BPC;
++ } else {
++ mode |= asyc->dither.depth;
++ }
+ }
+
+ asyh->dither.enable = mode;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+index 7dfbbbc1beea..5c314f135dd1 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+@@ -54,8 +54,10 @@ nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
+ int ret;
+
+ ret = pm_runtime_get_sync(drm->dev->dev);
+- if (ret < 0 && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES) {
++ pm_runtime_put_autosuspend(drm->dev->dev);
+ return ret;
++ }
+
+ seq_printf(m, "0x%08x\n",
+ nvif_rd32(&drm->client.device.object, 0x101000));
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index b1beed40e746..5347e5bdee8c 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -1052,8 +1052,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
+
+ /* need to bring up power immediately if opening device */
+ ret = pm_runtime_get_sync(dev->dev);
+- if (ret < 0 && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES) {
++ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
++ }
+
+ get_task_comm(tmpname, current);
+ snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
+@@ -1135,8 +1137,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ long ret;
+
+ ret = pm_runtime_get_sync(dev->dev);
+- if (ret < 0 && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES) {
++ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
++ }
+
+ switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
+ case DRM_NOUVEAU_NVIF:
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 1324c19f4e5c..fbfe25422774 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -45,8 +45,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+- if (WARN_ON(ret < 0 && ret != -EACCES))
++ if (WARN_ON(ret < 0 && ret != -EACCES)) {
++ pm_runtime_put_autosuspend(dev);
+ return;
++ }
+
+ if (gem->import_attach)
+ drm_prime_gem_destroy(gem, nvbo->bo.sg);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+index feaac908efed..34403b810dba 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
++++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+@@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
+ else
+ nvbe->ttm.ttm.func = &nv50_sgdma_backend;
+
+- if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
+- /*
+- * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
+- * and thus our nouveau_sgdma_destroy() hook, so we don't need
+- * to free nvbe here.
+- */
++ if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
++ kfree(nvbe);
+ return NULL;
++ }
+ return &nvbe->ttm.ttm;
+ }
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 8abb31f83ffc..6d9656323a3f 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -1935,7 +1935,7 @@ static const struct drm_display_mode lg_lb070wv8_mode = {
+ static const struct panel_desc lg_lb070wv8 = {
+ .modes = &lg_lb070wv8_mode,
+ .num_modes = 1,
+- .bpc = 16,
++ .bpc = 8,
+ .size = {
+ .width = 151,
+ .height = 91,
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index f9685cce1652..1e62e7bbf1b1 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -4366,7 +4366,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev,
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+- if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
++ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ if (!pi->mem_gddr5) {
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 0826efd9b5f5..f9f74150d0d7 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -631,8 +631,10 @@ radeon_crtc_set_config(struct drm_mode_set *set,
+ dev = set->crtc->dev;
+
+ ret = pm_runtime_get_sync(dev->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
++ }
+
+ ret = drm_crtc_helper_set_config(set, ctx);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index 6128792ab883..c2573096d43c 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -174,12 +174,7 @@ int radeon_no_wb;
+ int radeon_modeset = -1;
+ int radeon_dynclks = -1;
+ int radeon_r4xx_atom = 0;
+-#ifdef __powerpc__
+-/* Default to PCI on PowerPC (fdo #95017) */
+ int radeon_agpmode = -1;
+-#else
+-int radeon_agpmode = 0;
+-#endif
+ int radeon_vram_limit = 0;
+ int radeon_gart_size = -1; /* auto */
+ int radeon_benchmarking = 0;
+@@ -555,8 +550,10 @@ long radeon_drm_ioctl(struct file *filp,
+ long ret;
+ dev = file_priv->minor->dev;
+ ret = pm_runtime_get_sync(dev->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
++ }
+
+ ret = drm_ioctl(filp, cmd, arg);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index 2bb0187c5bc7..709c4ef5e7d5 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -638,8 +638,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+ file_priv->driver_priv = NULL;
+
+ r = pm_runtime_get_sync(dev->dev);
+- if (r < 0)
++ if (r < 0) {
++ pm_runtime_put_autosuspend(dev->dev);
+ return r;
++ }
+
+ /* new gpu have virtual address space support */
+ if (rdev->family >= CHIP_CAYMAN) {
+diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
+index 3ab4fbf8eb0d..51571f7246ab 100644
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -424,9 +424,12 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+ {
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
++ struct drm_device *ddev = crtc->dev;
+
+ DRM_DEBUG_DRIVER("\n");
+
++ pm_runtime_get_sync(ddev->dev);
++
+ /* Sets the background color value */
+ reg_write(ldev->regs, LTDC_BCCR, BCCR_BCBLACK);
+
+diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+index 5584e656b857..8c4fd1aa4c2d 100644
+--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+@@ -143,12 +143,16 @@ static int panel_connector_get_modes(struct drm_connector *connector)
+ int i;
+
+ for (i = 0; i < timings->num_timings; i++) {
+- struct drm_display_mode *mode = drm_mode_create(dev);
++ struct drm_display_mode *mode;
+ struct videomode vm;
+
+ if (videomode_from_timings(timings, &vm, i))
+ break;
+
++ mode = drm_mode_create(dev);
++ if (!mode)
++ break;
++
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
+index e0e9b4f69db6..c770ec7e9e8b 100644
+--- a/drivers/gpu/drm/ttm/ttm_tt.c
++++ b/drivers/gpu/drm/ttm/ttm_tt.c
+@@ -241,7 +241,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ ttm_tt_init_fields(ttm, bo, page_flags);
+
+ if (ttm_tt_alloc_page_directory(ttm)) {
+- ttm_tt_destroy(ttm);
+ pr_err("Failed allocating page table\n");
+ return -ENOMEM;
+ }
+@@ -265,7 +264,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
+- ttm_tt_destroy(ttm);
+ pr_err("Failed allocating page table\n");
+ return -ENOMEM;
+ }
+@@ -287,7 +285,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+ else
+ ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+ if (ret) {
+- ttm_tt_destroy(ttm);
+ pr_err("Failed allocating page table\n");
+ return -ENOMEM;
+ }
+diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
+index c0392672a842..1b4997bda1c7 100644
+--- a/drivers/gpu/host1x/debug.c
++++ b/drivers/gpu/host1x/debug.c
+@@ -16,6 +16,8 @@
+ #include "debug.h"
+ #include "channel.h"
+
++static DEFINE_MUTEX(debug_lock);
++
+ unsigned int host1x_debug_trace_cmdbuf;
+
+ static pid_t host1x_debug_force_timeout_pid;
+@@ -52,12 +54,14 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
+ struct output *o = data;
+
+ mutex_lock(&ch->cdma.lock);
++ mutex_lock(&debug_lock);
+
+ if (show_fifo)
+ host1x_hw_show_channel_fifo(m, ch, o);
+
+ host1x_hw_show_channel_cdma(m, ch, o);
+
++ mutex_unlock(&debug_lock);
+ mutex_unlock(&ch->cdma.lock);
+
+ return 0;
+diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
+index ee2a025e54cf..b3dae9ec1a38 100644
+--- a/drivers/gpu/ipu-v3/ipu-common.c
++++ b/drivers/gpu/ipu-v3/ipu-common.c
+@@ -124,6 +124,8 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
+ case V4L2_PIX_FMT_RGBX32:
+ case V4L2_PIX_FMT_ARGB32:
+ case V4L2_PIX_FMT_XRGB32:
++ case V4L2_PIX_FMT_RGB32:
++ case V4L2_PIX_FMT_BGR32:
+ return IPUV3_COLORSPACE_RGB;
+ default:
+ return IPUV3_COLORSPACE_UNKNOWN;
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index dea9cc65bf80..e8641ce677e4 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -350,13 +350,13 @@ static int hidinput_query_battery_capacity(struct hid_device *dev)
+ u8 *buf;
+ int ret;
+
+- buf = kmalloc(2, GFP_KERNEL);
++ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+- ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2,
++ ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4,
+ dev->battery_report_type, HID_REQ_GET_REPORT);
+- if (ret != 2) {
++ if (ret < 2) {
+ kfree(buf);
+ return -ENODATA;
+ }
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+index 36cce2bfb744..6375504ba8b0 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+@@ -639,15 +639,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+- /* There is no point in reading a TMC in HW FIFO mode */
+- mode = readl_relaxed(drvdata->base + TMC_MODE);
+- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+- spin_unlock_irqrestore(&drvdata->spinlock, flags);
+- return -EINVAL;
+- }
+-
+ /* Re-enable the TMC if need be */
+ if (drvdata->mode == CS_MODE_SYSFS) {
++ /* There is no point in reading a TMC in HW FIFO mode */
++ mode = readl_relaxed(drvdata->base + TMC_MODE);
++ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
++ spin_unlock_irqrestore(&drvdata->spinlock, flags);
++ return -EINVAL;
++ }
+ /*
+ * The trace run will continue with the same allocated trace
+ * buffer. As such zero-out the buffer so that we don't end
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 10ae6c6eab0a..59dc9f3cfb37 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -1330,6 +1330,10 @@ out:
+ return ret;
+ }
+
++static void prevent_dealloc_device(struct ib_device *ib_dev)
++{
++}
++
+ /**
+ * ib_register_device - Register an IB device with IB core
+ * @device:Device to register
+@@ -1397,11 +1401,11 @@ int ib_register_device(struct ib_device *device, const char *name)
+ * possibility for a parallel unregistration along with this
+ * error flow. Since we have a refcount here we know any
+ * parallel flow is stopped in disable_device and will see the
+- * NULL pointers, causing the responsibility to
++ * special dealloc_driver pointer, causing the responsibility to
+ * ib_dealloc_device() to revert back to this thread.
+ */
+ dealloc_fn = device->ops.dealloc_driver;
+- device->ops.dealloc_driver = NULL;
++ device->ops.dealloc_driver = prevent_dealloc_device;
+ ib_device_put(device);
+ __ib_unregister_device(device);
+ device->ops.dealloc_driver = dealloc_fn;
+@@ -1449,7 +1453,8 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
+ * Drivers using the new flow may not call ib_dealloc_device except
+ * in error unwind prior to registration success.
+ */
+- if (ib_dev->ops.dealloc_driver) {
++ if (ib_dev->ops.dealloc_driver &&
++ ib_dev->ops.dealloc_driver != prevent_dealloc_device) {
+ WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
+ ib_dealloc_device(ib_dev);
+ }
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index 244ebf285fc3..e4905d9fecb0 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -702,9 +702,6 @@ static int fill_stat_counter_qps(struct sk_buff *msg,
+ continue;
+
+ qp = container_of(res, struct ib_qp, res);
+- if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
+- continue;
+-
+ if (!qp->counter || (qp->counter->id != counter->id))
+ continue;
+
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 6c4093d0a91d..d4815f29cfd2 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -1648,7 +1648,7 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ if (!(rdma_protocol_ib(qp->device,
+ attr->alt_ah_attr.port_num) &&
+ rdma_protocol_ib(qp->device, port))) {
+- ret = EINVAL;
++ ret = -EINVAL;
+ goto out;
+ }
+ }
+diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
+index 8e927f6c1520..ed56df319d2d 100644
+--- a/drivers/infiniband/hw/qedr/qedr.h
++++ b/drivers/infiniband/hw/qedr/qedr.h
+@@ -349,10 +349,10 @@ struct qedr_srq_hwq_info {
+ u32 wqe_prod;
+ u32 sge_prod;
+ u32 wr_prod_cnt;
+- u32 wr_cons_cnt;
++ atomic_t wr_cons_cnt;
+ u32 num_elems;
+
+- u32 *virt_prod_pair_addr;
++ struct rdma_srq_producers *virt_prod_pair_addr;
+ dma_addr_t phy_prod_pair_addr;
+ };
+
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 8b4240c1cc76..16a994fd7d0a 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -3460,7 +3460,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
+ * count and consumer count and subtract it from max
+ * work request supported so that we get elements left.
+ */
+- used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
++ used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
+
+ return hw_srq->max_wr - used;
+ }
+@@ -3475,7 +3475,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ unsigned long flags;
+ int status = 0;
+ u32 num_sge;
+- u32 offset;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+@@ -3488,7 +3487,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ if (!qedr_srq_elem_left(hw_srq) ||
+ wr->num_sge > srq->hw_srq.max_sges) {
+ DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n",
+- hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
++ hw_srq->wr_prod_cnt,
++ atomic_read(&hw_srq->wr_cons_cnt),
+ wr->num_sge, srq->hw_srq.max_sges);
+ status = -ENOMEM;
+ *bad_wr = wr;
+@@ -3522,22 +3522,20 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ hw_srq->sge_prod++;
+ }
+
+- /* Flush WQE and SGE information before
++ /* Update WQE and SGE information before
+ * updating producer.
+ */
+- wmb();
++ dma_wmb();
+
+ /* SRQ producer is 8 bytes. Need to update SGE producer index
+ * in first 4 bytes and need to update WQE producer in
+ * next 4 bytes.
+ */
+- *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
+- offset = offsetof(struct rdma_srq_producers, wqe_prod);
+- *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
+- hw_srq->wqe_prod;
++ srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
++ /* Make sure sge producer is updated first */
++ dma_wmb();
++ srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;
+
+- /* Flush producer after updating it. */
+- wmb();
+ wr = wr->next;
+ }
+
+@@ -3956,7 +3954,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
+ } else {
+ __process_resp_one(dev, qp, cq, wc, resp, wr_id);
+ }
+- srq->hw_srq.wr_cons_cnt++;
++ atomic_inc(&srq->hw_srq.wr_cons_cnt);
+
+ return 1;
+ }
+diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
+index 831ad578a7b2..46e111c218fd 100644
+--- a/drivers/infiniband/sw/rxe/rxe_recv.c
++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
+@@ -330,10 +330,14 @@ err1:
+
+ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
+ {
++ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ const struct ib_gid_attr *gid_attr;
+ union ib_gid dgid;
+ union ib_gid *pdgid;
+
++ if (pkt->mask & RXE_LOOPBACK_MASK)
++ return 0;
++
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
+ (struct in6_addr *)&dgid);
+@@ -366,7 +370,7 @@ void rxe_rcv(struct sk_buff *skb)
+ if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
+ goto drop;
+
+- if (unlikely(rxe_match_dgid(rxe, skb) < 0)) {
++ if (rxe_match_dgid(rxe, skb) < 0) {
+ pr_warn_ratelimited("failed matching dgid\n");
+ goto drop;
+ }
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 623129f27f5a..71358b0b8910 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -679,6 +679,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
+ unsigned int mask;
+ unsigned int length = 0;
+ int i;
++ struct ib_send_wr *next;
+
+ while (wr) {
+ mask = wr_opcode_mask(wr->opcode, qp);
+@@ -695,6 +696,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
+ break;
+ }
+
++ next = wr->next;
++
+ length = 0;
+ for (i = 0; i < wr->num_sge; i++)
+ length += wr->sg_list[i].length;
+@@ -705,7 +708,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
+ *bad_wr = wr;
+ break;
+ }
+- wr = wr->next;
++ wr = next;
+ }
+
+ rxe_run_task(&qp->req.task, 1);
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index 982d796b686b..6bfb283e6f28 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -628,13 +628,21 @@ out_free_table:
+
+ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
+ {
++ struct fwnode_handle *fn;
++
+ if (iommu && iommu->ir_table) {
+ if (iommu->ir_msi_domain) {
++ fn = iommu->ir_msi_domain->fwnode;
++
+ irq_domain_remove(iommu->ir_msi_domain);
++ irq_domain_free_fwnode(fn);
+ iommu->ir_msi_domain = NULL;
+ }
+ if (iommu->ir_domain) {
++ fn = iommu->ir_domain->fwnode;
++
+ irq_domain_remove(iommu->ir_domain);
++ irq_domain_free_fwnode(fn);
+ iommu->ir_domain = NULL;
+ }
+ free_pages((unsigned long)iommu->ir_table->base,
+diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
+index 73eae5966a40..6ff98b87e5c0 100644
+--- a/drivers/irqchip/irq-mtk-sysirq.c
++++ b/drivers/irqchip/irq-mtk-sysirq.c
+@@ -15,7 +15,7 @@
+ #include <linux/spinlock.h>
+
+ struct mtk_sysirq_chip_data {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ u32 nr_intpol_bases;
+ void __iomem **intpol_bases;
+ u32 *intpol_words;
+@@ -37,7 +37,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
+ reg_index = chip_data->which_word[hwirq];
+ offset = hwirq & 0x1f;
+
+- spin_lock_irqsave(&chip_data->lock, flags);
++ raw_spin_lock_irqsave(&chip_data->lock, flags);
+ value = readl_relaxed(base + reg_index * 4);
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
+ if (type == IRQ_TYPE_LEVEL_LOW)
+@@ -53,7 +53,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
+
+ data = data->parent_data;
+ ret = data->chip->irq_set_type(data, type);
+- spin_unlock_irqrestore(&chip_data->lock, flags);
++ raw_spin_unlock_irqrestore(&chip_data->lock, flags);
+ return ret;
+ }
+
+@@ -212,7 +212,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
+ ret = -ENOMEM;
+ goto out_free_which_word;
+ }
+- spin_lock_init(&chip_data->lock);
++ raw_spin_lock_init(&chip_data->lock);
+
+ return 0;
+
+diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
+index fa7488863bd0..0a35499c4672 100644
+--- a/drivers/irqchip/irq-ti-sci-inta.c
++++ b/drivers/irqchip/irq-ti-sci-inta.c
+@@ -571,7 +571,7 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ inta->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inta->base))
+- return -ENODEV;
++ return PTR_ERR(inta->base);
+
+ domain = irq_domain_add_linear(dev_of_node(dev),
+ ti_sci_get_num_resources(inta->vint),
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index d3e83c33783e..0a4823d9797a 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -172,6 +172,7 @@ void led_classdev_suspend(struct led_classdev *led_cdev)
+ {
+ led_cdev->flags |= LED_SUSPENDED;
+ led_set_brightness_nopm(led_cdev, 0);
++ flush_work(&led_cdev->set_brightness_work);
+ }
+ EXPORT_SYMBOL_GPL(led_classdev_suspend);
+
+diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
+index a5abb499574b..129f475aebf2 100644
+--- a/drivers/leds/leds-lm355x.c
++++ b/drivers/leds/leds-lm355x.c
+@@ -165,18 +165,19 @@ static int lm355x_chip_init(struct lm355x_chip_data *chip)
+ /* input and output pins configuration */
+ switch (chip->type) {
+ case CHIP_LM3554:
+- reg_val = pdata->pin_tx2 | pdata->ntc_pin;
++ reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin;
+ ret = regmap_update_bits(chip->regmap, 0xE0, 0x28, reg_val);
+ if (ret < 0)
+ goto out;
+- reg_val = pdata->pass_mode;
++ reg_val = (u32)pdata->pass_mode;
+ ret = regmap_update_bits(chip->regmap, 0xA0, 0x04, reg_val);
+ if (ret < 0)
+ goto out;
+ break;
+
+ case CHIP_LM3556:
+- reg_val = pdata->pin_tx2 | pdata->ntc_pin | pdata->pass_mode;
++ reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin |
++ (u32)pdata->pass_mode;
+ ret = regmap_update_bits(chip->regmap, 0x0A, 0xC4, reg_val);
+ if (ret < 0)
+ goto out;
+diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
+index ac824d7b2dcf..6aa903529570 100644
+--- a/drivers/macintosh/via-macii.c
++++ b/drivers/macintosh/via-macii.c
+@@ -270,15 +270,12 @@ static int macii_autopoll(int devs)
+ unsigned long flags;
+ int err = 0;
+
++ local_irq_save(flags);
++
+ /* bit 1 == device 1, and so on. */
+ autopoll_devs = devs & 0xFFFE;
+
+- if (!autopoll_devs)
+- return 0;
+-
+- local_irq_save(flags);
+-
+- if (current_req == NULL) {
++ if (autopoll_devs && !current_req) {
+ /* Send a Talk Reg 0. The controller will repeatedly transmit
+ * this as long as it is idle.
+ */
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 68901745eb20..168d64707859 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -2091,7 +2091,14 @@ found:
+ sysfs_create_link(&c->kobj, &ca->kobj, buf))
+ goto err;
+
+- if (ca->sb.seq > c->sb.seq) {
++ /*
++ * A special case is both ca->sb.seq and c->sb.seq are 0,
++ * such condition happens on a new created cache device whose
++ * super block is never flushed yet. In this case c->sb.version
++ * and other members should be updated too, otherwise we will
++ * have a mistaken super block version in cache set.
++ */
++ if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) {
+ c->sb.version = ca->sb.version;
+ memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
+ c->sb.flags = ca->sb.flags;
+diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
+index 813a99ffa86f..73fd50e77975 100644
+--- a/drivers/md/md-cluster.c
++++ b/drivers/md/md-cluster.c
+@@ -1518,6 +1518,7 @@ static void unlock_all_bitmaps(struct mddev *mddev)
+ }
+ }
+ kfree(cinfo->other_bitmap_lockres);
++ cinfo->other_bitmap_lockres = NULL;
+ }
+ }
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 5a378a453a2d..acef01e519d0 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -376,17 +376,18 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
+ struct mddev *mddev = q->queuedata;
+ unsigned int sectors;
+
+- if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
++ if (mddev == NULL || mddev->pers == NULL) {
+ bio_io_error(bio);
+ return BLK_QC_T_NONE;
+ }
+
+- blk_queue_split(q, &bio);
+-
+- if (mddev == NULL || mddev->pers == NULL) {
++ if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
+ bio_io_error(bio);
+ return BLK_QC_T_NONE;
+ }
++
++ blk_queue_split(q, &bio);
++
+ if (mddev->ro == 1 && unlikely(rw == WRITE)) {
+ if (bio_sectors(bio) != 0)
+ bio->bi_status = BLK_STS_IOERR;
+diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c
+index 97144734eb05..3f1ca40b9b98 100644
+--- a/drivers/media/firewire/firedtv-fw.c
++++ b/drivers/media/firewire/firedtv-fw.c
+@@ -272,6 +272,8 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
+
+ name_len = fw_csr_string(unit->directory, CSR_MODEL,
+ name, sizeof(name));
++ if (name_len < 0)
++ return name_len;
+ for (i = ARRAY_SIZE(model_names); --i; )
+ if (strlen(model_names[i]) <= name_len &&
+ strncmp(name, model_names[i], name_len) == 0)
+diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
+index e3fca436c75b..c0782fd96c59 100644
+--- a/drivers/media/mc/mc-request.c
++++ b/drivers/media/mc/mc-request.c
+@@ -296,9 +296,18 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
+ if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
+ return -ENOMEM;
+
++ if (mdev->ops->req_alloc)
++ req = mdev->ops->req_alloc(mdev);
++ else
++ req = kzalloc(sizeof(*req), GFP_KERNEL);
++ if (!req)
++ return -ENOMEM;
++
+ fd = get_unused_fd_flags(O_CLOEXEC);
+- if (fd < 0)
+- return fd;
++ if (fd < 0) {
++ ret = fd;
++ goto err_free_req;
++ }
+
+ filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
+ if (IS_ERR(filp)) {
+@@ -306,15 +315,6 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
+ goto err_put_fd;
+ }
+
+- if (mdev->ops->req_alloc)
+- req = mdev->ops->req_alloc(mdev);
+- else
+- req = kzalloc(sizeof(*req), GFP_KERNEL);
+- if (!req) {
+- ret = -ENOMEM;
+- goto err_fput;
+- }
+-
+ filp->private_data = req;
+ req->mdev = mdev;
+ req->state = MEDIA_REQUEST_STATE_IDLE;
+@@ -336,12 +336,15 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
+
+ return 0;
+
+-err_fput:
+- fput(filp);
+-
+ err_put_fd:
+ put_unused_fd(fd);
+
++err_free_req:
++ if (mdev->ops->req_free)
++ mdev->ops->req_free(req);
++ else
++ kfree(req);
++
+ return ret;
+ }
+
+diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+index 4a3b3810fd89..31390ce2dbf2 100644
+--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
++++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+@@ -278,11 +278,7 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, cros_ec_cec);
+ cros_ec_cec->cros_ec = cros_ec;
+
+- ret = device_init_wakeup(&pdev->dev, 1);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to initialize wakeup\n");
+- return ret;
+- }
++ device_init_wakeup(&pdev->dev, 1);
+
+ cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec,
+ DRV_NAME,
+diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
+index 9aaf3b8060d5..9c31d950cddf 100644
+--- a/drivers/media/platform/exynos4-is/media-dev.c
++++ b/drivers/media/platform/exynos4-is/media-dev.c
+@@ -1270,6 +1270,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
+
+ pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
+ PINCTRL_STATE_IDLE);
++ if (IS_ERR(pctl->state_idle))
++ return PTR_ERR(pctl->state_idle);
++
+ return 0;
+ }
+
+diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
+index 803baf97f06e..6de8b3d99fb9 100644
+--- a/drivers/media/platform/marvell-ccic/mcam-core.c
++++ b/drivers/media/platform/marvell-ccic/mcam-core.c
+@@ -1940,6 +1940,7 @@ int mccic_register(struct mcam_camera *cam)
+ out:
+ v4l2_async_notifier_unregister(&cam->notifier);
+ v4l2_device_unregister(&cam->v4l2_dev);
++ v4l2_async_notifier_cleanup(&cam->notifier);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(mccic_register);
+@@ -1961,6 +1962,7 @@ void mccic_shutdown(struct mcam_camera *cam)
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
+ v4l2_async_notifier_unregister(&cam->notifier);
+ v4l2_device_unregister(&cam->v4l2_dev);
++ v4l2_async_notifier_cleanup(&cam->notifier);
+ }
+ EXPORT_SYMBOL_GPL(mccic_shutdown);
+
+diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
+index 97d660606d98..b8c2b8bba826 100644
+--- a/drivers/media/platform/omap3isp/isppreview.c
++++ b/drivers/media/platform/omap3isp/isppreview.c
+@@ -2287,7 +2287,7 @@ static int preview_init_entities(struct isp_prev_device *prev)
+ me->ops = &preview_media_ops;
+ ret = media_entity_pads_init(me, PREV_PADS_NUM, pads);
+ if (ret < 0)
+- return ret;
++ goto error_handler_free;
+
+ preview_init_formats(sd, NULL);
+
+@@ -2320,6 +2320,8 @@ error_video_out:
+ omap3isp_video_cleanup(&prev->video_in);
+ error_video_in:
+ media_entity_cleanup(&prev->subdev.entity);
++error_handler_free:
++ v4l2_ctrl_handler_free(&prev->ctrls);
+ return ret;
+ }
+
+diff --git a/drivers/media/usb/dvb-usb/Kconfig b/drivers/media/usb/dvb-usb/Kconfig
+index 1a3e5f965ae4..2d7a5c1c84af 100644
+--- a/drivers/media/usb/dvb-usb/Kconfig
++++ b/drivers/media/usb/dvb-usb/Kconfig
+@@ -150,6 +150,7 @@ config DVB_USB_CXUSB
+ config DVB_USB_CXUSB_ANALOG
+ bool "Analog support for the Conexant USB2.0 hybrid reference design"
+ depends on DVB_USB_CXUSB && VIDEO_V4L2
++ depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_USB_CXUSB
+ select VIDEO_CX25840
+ select VIDEOBUF2_VMALLOC
+ help
+diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
+index f0263d1a1fdf..d97a243ad30c 100644
+--- a/drivers/misc/cxl/sysfs.c
++++ b/drivers/misc/cxl/sysfs.c
+@@ -624,7 +624,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
+ rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
+ &afu->dev.kobj, "cr%i", cr->cr);
+ if (rc)
+- goto err;
++ goto err1;
+
+ rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
+ if (rc)
+diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
+index 5f2e9696ee4d..0c2489446bd7 100644
+--- a/drivers/mmc/host/sdhci-cadence.c
++++ b/drivers/mmc/host/sdhci-cadence.c
+@@ -194,57 +194,6 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
+ return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
+ }
+
+-static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
+- unsigned int timing)
+-{
+- struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+- u32 mode;
+-
+- switch (timing) {
+- case MMC_TIMING_MMC_HS:
+- mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
+- break;
+- case MMC_TIMING_MMC_DDR52:
+- mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
+- break;
+- case MMC_TIMING_MMC_HS200:
+- mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
+- break;
+- case MMC_TIMING_MMC_HS400:
+- if (priv->enhanced_strobe)
+- mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
+- else
+- mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
+- break;
+- default:
+- mode = SDHCI_CDNS_HRS06_MODE_SD;
+- break;
+- }
+-
+- sdhci_cdns_set_emmc_mode(priv, mode);
+-
+- /* For SD, fall back to the default handler */
+- if (mode == SDHCI_CDNS_HRS06_MODE_SD)
+- sdhci_set_uhs_signaling(host, timing);
+-}
+-
+-static const struct sdhci_ops sdhci_cdns_ops = {
+- .set_clock = sdhci_set_clock,
+- .get_timeout_clock = sdhci_cdns_get_timeout_clock,
+- .set_bus_width = sdhci_set_bus_width,
+- .reset = sdhci_reset,
+- .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
+-};
+-
+-static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = {
+- .ops = &sdhci_cdns_ops,
+- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+-};
+-
+-static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
+- .ops = &sdhci_cdns_ops,
+-};
+-
+ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
+ {
+ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
+@@ -278,23 +227,24 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
+ return 0;
+ }
+
+-static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode)
++/*
++ * In SD mode, software must not use the hardware tuning and instead perform
++ * an almost identical procedure to eMMC.
++ */
++static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
+ {
+- struct sdhci_host *host = mmc_priv(mmc);
+ int cur_streak = 0;
+ int max_streak = 0;
+ int end_of_streak = 0;
+ int i;
+
+ /*
+- * This handler only implements the eMMC tuning that is specific to
+- * this controller. Fall back to the standard method for SD timing.
++ * Do not execute tuning for UHS_SDR50 or UHS_DDR50.
++ * The delay is set by probe, based on the DT properties.
+ */
+- if (host->timing != MMC_TIMING_MMC_HS200)
+- return sdhci_execute_tuning(mmc, opcode);
+-
+- if (WARN_ON(opcode != MMC_SEND_TUNING_BLOCK_HS200))
+- return -EINVAL;
++ if (host->timing != MMC_TIMING_MMC_HS200 &&
++ host->timing != MMC_TIMING_UHS_SDR104)
++ return 0;
+
+ for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) {
+ if (sdhci_cdns_set_tune_val(host, i) ||
+@@ -317,6 +267,58 @@ static int sdhci_cdns_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
+ }
+
++static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
++ unsigned int timing)
++{
++ struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
++ u32 mode;
++
++ switch (timing) {
++ case MMC_TIMING_MMC_HS:
++ mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
++ break;
++ case MMC_TIMING_MMC_DDR52:
++ mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
++ break;
++ case MMC_TIMING_MMC_HS200:
++ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
++ break;
++ case MMC_TIMING_MMC_HS400:
++ if (priv->enhanced_strobe)
++ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
++ else
++ mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
++ break;
++ default:
++ mode = SDHCI_CDNS_HRS06_MODE_SD;
++ break;
++ }
++
++ sdhci_cdns_set_emmc_mode(priv, mode);
++
++ /* For SD, fall back to the default handler */
++ if (mode == SDHCI_CDNS_HRS06_MODE_SD)
++ sdhci_set_uhs_signaling(host, timing);
++}
++
++static const struct sdhci_ops sdhci_cdns_ops = {
++ .set_clock = sdhci_set_clock,
++ .get_timeout_clock = sdhci_cdns_get_timeout_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .platform_execute_tuning = sdhci_cdns_execute_tuning,
++ .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
++};
++
++static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = {
++ .ops = &sdhci_cdns_ops,
++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++};
++
++static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
++ .ops = &sdhci_cdns_ops,
++};
++
+ static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+ {
+@@ -377,7 +379,6 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
+ priv->hrs_addr = host->ioaddr;
+ priv->enhanced_strobe = false;
+ host->ioaddr += SDHCI_CDNS_SRS_BASE;
+- host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
+ host->mmc_host_ops.hs400_enhanced_strobe =
+ sdhci_cdns_hs400_enhanced_strobe;
+ sdhci_enable_v4_mode(host);
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index fa8105087d68..41a2394313dd 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -561,6 +561,12 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+ slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
+ }
+
++ if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD1) {
++ slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
++ host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
++ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
++ }
++
+ host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
+
+ if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2)
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index 7bb9a7e8e1e7..c1c53b02b35f 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -459,11 +459,13 @@ struct qcom_nand_host {
+ * among different NAND controllers.
+ * @ecc_modes - ecc mode for NAND
+ * @is_bam - whether NAND controller is using BAM
++ * @is_qpic - whether NAND CTRL is part of qpic IP
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ */
+ struct qcom_nandc_props {
+ u32 ecc_modes;
+ bool is_bam;
++ bool is_qpic;
+ u32 dev_cmd_reg_start;
+ };
+
+@@ -2751,7 +2753,8 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ u32 nand_ctrl;
+
+ /* kill onenand */
+- nandc_write(nandc, SFLASHC_BURST_CFG, 0);
++ if (!nandc->props->is_qpic)
++ nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+ nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
+ NAND_DEV_CMD_VLD_VAL);
+
+@@ -3007,12 +3010,14 @@ static const struct qcom_nandc_props ipq806x_nandc_props = {
+ static const struct qcom_nandc_props ipq4019_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+ .is_bam = true,
++ .is_qpic = true,
+ .dev_cmd_reg_start = 0x0,
+ };
+
+ static const struct qcom_nandc_props ipq8074_nandc_props = {
+ .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+ .is_bam = true,
++ .is_qpic = true,
+ .dev_cmd_reg_start = 0x7000,
+ };
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 6787d560e9e3..92e4d140df6f 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3063,7 +3063,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+- .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
+index ac88caca5ad4..1368816abaed 100644
+--- a/drivers/net/dsa/rtl8366.c
++++ b/drivers/net/dsa/rtl8366.c
+@@ -43,18 +43,26 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+ int ret;
+ int i;
+
++ dev_dbg(smi->dev,
++ "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
++ vid, member, untag);
++
+ /* Update the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+- vlan4k.member = member;
+- vlan4k.untag = untag;
++ vlan4k.member |= member;
++ vlan4k.untag |= untag;
+ vlan4k.fid = fid;
+ ret = smi->ops->set_vlan_4k(smi, &vlan4k);
+ if (ret)
+ return ret;
+
++ dev_dbg(smi->dev,
++ "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
++ vid, vlan4k.member, vlan4k.untag);
++
+ /* Try to find an existing MC entry for this VID */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ struct rtl8366_vlan_mc vlanmc;
+@@ -65,11 +73,16 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+
+ if (vid == vlanmc.vid) {
+ /* update the MC entry */
+- vlanmc.member = member;
+- vlanmc.untag = untag;
++ vlanmc.member |= member;
++ vlanmc.untag |= untag;
+ vlanmc.fid = fid;
+
+ ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
++
++ dev_dbg(smi->dev,
++ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
++ vid, vlanmc.member, vlanmc.untag);
++
+ break;
+ }
+ }
+@@ -384,7 +397,7 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ dev_err(smi->dev, "port is DSA or CPU port\n");
+
+- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
++ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int pvid_val = 0;
+
+ dev_info(smi->dev, "add VLAN %04x\n", vid);
+@@ -407,13 +420,13 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ if (ret < 0)
+ return;
+ }
+- }
+
+- ret = rtl8366_set_vlan(smi, port, member, untag, 0);
+- if (ret)
+- dev_err(smi->dev,
+- "failed to set up VLAN %04x",
+- vid);
++ ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
++ if (ret)
++ dev_err(smi->dev,
++ "failed to set up VLAN %04x",
++ vid);
++ }
+ }
+ EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+index 359a4d387185..9a0db70c1143 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+@@ -776,7 +776,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
+ int err = 0;
+
+ if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
+- err = EBADRQC;
++ err = -EBADRQC;
+ goto err_exit;
+ }
+ for (self->aq_nic_cfg->mc_list_count = 0U;
+diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+index 43d11c38b38a..4cddd628d41b 100644
+--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
++++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+@@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
+ oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
+ CN23XX_PCIE_SRIOV_FDL_MASK);
+ } else {
+- ret = EINVAL;
++ ret = -EINVAL;
+
+ /* Under some virtual environments, extended PCI regs are
+ * inaccessible, in which case the above read will have failed.
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index f414f5651dbd..5c45c0c6dd23 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -2185,6 +2185,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ nic->max_queues *= 2;
+ nic->ptp_clock = ptp_clock;
+
++ /* Initialize mutex that serializes usage of VF's mailbox */
++ mutex_init(&nic->rx_mode_mtx);
++
+ /* MAP VF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+@@ -2261,7 +2264,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+ spin_lock_init(&nic->rx_mode_wq_lock);
+- mutex_init(&nic->rx_mode_mtx);
+
+ err = register_netdev(netdev);
+ if (err) {
+diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
+index 4c2fa13a7dd7..c8e434c8ab98 100644
+--- a/drivers/net/ethernet/freescale/fman/fman.c
++++ b/drivers/net/ethernet/freescale/fman/fman.c
+@@ -1396,8 +1396,7 @@ static void enable_time_stamp(struct fman *fman)
+ {
+ struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
+ u16 fm_clk_freq = fman->state->fm_clk_freq;
+- u32 tmp, intgr, ts_freq;
+- u64 frac;
++ u32 tmp, intgr, ts_freq, frac;
+
+ ts_freq = (u32)(1 << fman->state->count1_micro_bit);
+ /* configure timestamp so that bit 8 will count 1 microsecond
+diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+index 1ca543ac8f2c..d2de9ea80c43 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
++++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+@@ -1205,7 +1205,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+ list_for_each(pos,
+ &dtsec->multicast_addr_hash->lsts[bucket]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+- if (hash_entry->addr == addr) {
++ if (hash_entry && hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+@@ -1218,7 +1218,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+ list_for_each(pos,
+ &dtsec->unicast_addr_hash->lsts[bucket]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+- if (hash_entry->addr == addr) {
++ if (hash_entry && hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
+index dd6d0526f6c1..19f327efdaff 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
++++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
+@@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size)
+ struct eth_hash_t *hash;
+
+ /* Allocate address hash table */
+- hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL);
++ hash = kmalloc(sizeof(*hash), GFP_KERNEL);
+ if (!hash)
+ return NULL;
+
+diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
+index e1901874c19f..9088b4f4b4b8 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
++++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
+@@ -856,7 +856,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+
+ 	tmp = ioread32be(&regs->command_config);
+ tmp &= ~CMD_CFG_PFC_MODE;
+- priority = 0;
+
+ 	iowrite32be(tmp, &regs->command_config);
+
+@@ -986,7 +985,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+
+ list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+- if (hash_entry->addr == addr) {
++ if (hash_entry && hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
+index ee82ee1384eb..47f6fee1f396 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_port.c
++++ b/drivers/net/ethernet/freescale/fman/fman_port.c
+@@ -1756,6 +1756,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+ struct fman_port *port;
+ struct fman *fman;
+ struct device_node *fm_node, *port_node;
++ struct platform_device *fm_pdev;
+ struct resource res;
+ struct resource *dev_res;
+ u32 val;
+@@ -1780,8 +1781,14 @@ static int fman_port_probe(struct platform_device *of_dev)
+ goto return_err;
+ }
+
+- fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
++ fm_pdev = of_find_device_by_node(fm_node);
+ of_node_put(fm_node);
++ if (!fm_pdev) {
++ err = -EINVAL;
++ goto return_err;
++ }
++
++ fman = dev_get_drvdata(&fm_pdev->dev);
+ if (!fman) {
+ err = -EINVAL;
+ goto return_err;
+diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
+index f75b9c11b2d2..ac5a281e0ec3 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
++++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
+@@ -630,7 +630,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+
+ list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
+ hash_entry = ETH_HASH_ENTRY_OBJ(pos);
+- if (hash_entry->addr == addr) {
++ if (hash_entry && hash_entry->addr == addr) {
+ list_del_init(&hash_entry->node);
+ kfree(hash_entry);
+ break;
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index bacc5fb7eba2..34124c213d27 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -1863,8 +1863,10 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
+
+ adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
+ adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
+- if (!adapter->rss_key || !adapter->rss_lut)
++ if (!adapter->rss_key || !adapter->rss_lut) {
++ err = -ENOMEM;
+ goto err_mem;
++ }
+ if (RSS_AQ(adapter))
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
+ else
+@@ -1946,7 +1948,10 @@ static void iavf_watchdog_task(struct work_struct *work)
+ iavf_send_api_ver(adapter);
+ }
+ } else {
+- if (!iavf_process_aq_command(adapter) &&
++ /* An error will be returned if no commands were
++ * processed; use this opportunity to update stats
++ */
++ if (iavf_process_aq_command(adapter) &&
+ adapter->state == __IAVF_RUNNING)
+ iavf_request_stats(adapter);
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+index cbd53b586c36..6cfe8eb7f47d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
++++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+@@ -1535,10 +1535,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+ es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
+ sizeof(*es->ref_count),
+ GFP_KERNEL);
++ if (!es->ref_count)
++ goto err;
+
+ es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
+ sizeof(*es->written), GFP_KERNEL);
+- if (!es->ref_count)
++ if (!es->written)
+ goto err;
+ }
+ return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 86e6bbb57482..b66e5b6eecd9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -809,18 +809,15 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
+ {
+ struct mlx5_flow_root_namespace *root = find_root(&prio->node);
+ struct mlx5_flow_table *iter;
+- int i = 0;
+ int err;
+
+ fs_for_each_ft(iter, prio) {
+- i++;
+ err = root->cmds->modify_flow_table(root, iter, ft);
+ if (err) {
+- mlx5_core_warn(dev, "Failed to modify flow table %d\n",
+- iter->id);
++ mlx5_core_err(dev,
++ "Failed to modify flow table id %d, type %d, err %d\n",
++ iter->id, iter->type, err);
+ /* The driver is out of sync with the FW */
+- if (i > 1)
+- WARN_ON(true);
+ return err;
+ }
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+index 1e32e2443f73..348f02e336f6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+@@ -247,29 +247,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
+
+ /* The order of the actions are must to be keep, only the following
+ * order is supported by SW steering:
+- * TX: push vlan -> modify header -> encap
++ * TX: modify header -> push vlan -> encap
+ * RX: decap -> pop vlan -> modify header
+ */
+- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
+- if (!tmp_action) {
+- err = -ENOMEM;
+- goto free_actions;
+- }
+- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+- actions[num_actions++] = tmp_action;
+- }
+-
+- if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+- tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
+- if (!tmp_action) {
+- err = -ENOMEM;
+- goto free_actions;
+- }
+- fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+- actions[num_actions++] = tmp_action;
+- }
+-
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ enum mlx5dr_action_reformat_type decap_type =
+ DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
+@@ -322,6 +302,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
+ actions[num_actions++] =
+ fte->action.modify_hdr->action.dr_action;
+
++ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
++ tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
++ if (!tmp_action) {
++ err = -ENOMEM;
++ goto free_actions;
++ }
++ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
++ actions[num_actions++] = tmp_action;
++ }
++
++ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
++ tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
++ if (!tmp_action) {
++ err = -ENOMEM;
++ goto free_actions;
++ }
++ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
++ actions[num_actions++] = tmp_action;
++ }
++
+ if (delay_encap_set)
+ actions[num_actions++] =
+ fte->action.pkt_reformat->action.dr_action;
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index c00ec9a02097..e66002251596 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -666,7 +666,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
+ eid = le64_to_cpu(comp->event.eid);
+
+ /* Have we run out of new completions to process? */
+- if (eid <= lif->last_eid)
++ if ((s64)(eid - lif->last_eid) <= 0)
+ return false;
+
+ lif->last_eid = eid;
+diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
+index 538e70810d3d..a99c7c95de5c 100644
+--- a/drivers/net/ethernet/toshiba/spider_net.c
++++ b/drivers/net/ethernet/toshiba/spider_net.c
+@@ -283,8 +283,8 @@ spider_net_free_chain(struct spider_net_card *card,
+ descr = descr->next;
+ } while (descr != chain->ring);
+
+- dma_free_coherent(&card->pdev->dev, chain->num_desc,
+- chain->hwring, chain->dma_addr);
++ dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
++ chain->hwring, chain->dma_addr);
+ }
+
+ /**
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index dba52a5c378a..110924d62744 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -615,7 +615,9 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
+ if (c45_ids)
+ dev->c45_ids = *c45_ids;
+ dev->irq = bus->irq[addr];
++
+ dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr);
++ device_initialize(&mdiodev->dev);
+
+ dev->state = PHY_DOWN;
+
+@@ -649,10 +651,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
+ ret = phy_request_driver_module(dev, phy_id);
+ }
+
+- if (!ret) {
+- device_initialize(&mdiodev->dev);
+- } else {
+- kfree(dev);
++ if (ret) {
++ put_device(&mdiodev->dev);
+ dev = ERR_PTR(ret);
+ }
+
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 216acf37ca7c..a06e6ab453f5 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -861,7 +861,8 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+- ctx->l4_hdr_size = tcp_hdrlen(skb);
++ ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
++ tcp_hdrlen(skb);
+ break;
+ case IPPROTO_UDP:
+ ctx->l4_hdr_size = sizeof(struct udphdr);
+diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
+index 134e4dd916c1..996eb9c55b39 100644
+--- a/drivers/net/wan/lapbether.c
++++ b/drivers/net/wan/lapbether.c
+@@ -157,6 +157,12 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
+ if (!netif_running(dev))
+ goto drop;
+
++ /* There should be a pseudo header of 1 byte added by upper layers.
++ * Check to make sure it is there before reading it.
++ */
++ if (skb->len < 1)
++ goto drop;
++
+ switch (skb->data[0]) {
+ case X25_IFACE_DATA:
+ break;
+@@ -305,6 +311,7 @@ static void lapbeth_setup(struct net_device *dev)
+ dev->netdev_ops = &lapbeth_netdev_ops;
+ dev->needs_free_netdev = true;
+ dev->type = ARPHRD_X25;
++ dev->hard_header_len = 0;
+ dev->mtu = 1000;
+ dev->addr_len = 0;
+ }
+@@ -331,7 +338,8 @@ static int lapbeth_new_device(struct net_device *dev)
+ * then this driver prepends a length field of 2 bytes,
+ * then the underlying Ethernet device prepends its own header.
+ */
+- ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
++ ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
++ + dev->needed_headroom;
+
+ lapbeth = netdev_priv(ndev);
+ lapbeth->axdev = ndev;
+diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
+index 735482877a1f..c38e1963ebc0 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
+@@ -1540,7 +1540,9 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
+ err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ err_free_msdu_id:
++ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
++ spin_unlock_bh(&htt->tx_lock);
+ err:
+ return res;
+ }
+@@ -1747,7 +1749,9 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
+ err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ err_free_msdu_id:
++ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
++ spin_unlock_bh(&htt->tx_lock);
+ err:
+ return res;
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+index 37c512036e0e..ce18433aaefb 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+@@ -19,7 +19,7 @@
+ #define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008
+
+ #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
+-#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002
++#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0004
+
+ #define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */
+ #define BRCMF_STA_WME 0x00000002 /* WMM association */
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+index 2bd892df83cc..eadc64454839 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+@@ -643,6 +643,7 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
+ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
+ int ifidx)
+ {
++ struct brcmf_fws_hanger_item *hi;
+ bool (*matchfn)(struct sk_buff *, void *) = NULL;
+ struct sk_buff *skb;
+ int prec;
+@@ -654,6 +655,9 @@ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
+ skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+ while (skb) {
+ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
++ hi = &fws->hanger.items[hslot];
++ WARN_ON(skb != hi->pkt);
++ hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
+ brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
+ true);
+ brcmu_pkt_buf_free_skb(skb);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index d43247a95ce5..38e6809f16c7 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -3685,7 +3685,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
+ if (bus->idlecount > bus->idletime) {
+ brcmf_dbg(SDIO, "idle\n");
+ sdio_claim_host(bus->sdiodev->func1);
+- brcmf_sdio_wd_timer(bus, false);
++#ifdef DEBUG
++ if (!BRCMF_FWCON_ON() ||
++ bus->console_interval == 0)
++#endif
++ brcmf_sdio_wd_timer(bus, false);
+ bus->idlecount = 0;
+ brcmf_sdio_bus_sleep(bus, true, false);
+ sdio_release_host(bus->sdiodev->func1);
+diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
+index 746749f37996..1107b96a8a88 100644
+--- a/drivers/net/wireless/intel/iwlegacy/common.c
++++ b/drivers/net/wireless/intel/iwlegacy/common.c
+@@ -4286,8 +4286,8 @@ il_apm_init(struct il_priv *il)
+ * power savings, even without L1.
+ */
+ if (il->cfg->set_l0s) {
+- pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
+- if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
++ ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
++ if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ il_set_bit(il, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
+index f672bdf52cc1..2d9ec225aead 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
+@@ -36,9 +36,9 @@
+ #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
+ #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
+ #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
+-#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
++#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin"
+ #define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
+-#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
++#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin"
+
+ #define BLOCK_MODE 1
+ #define BYTE_MODE 0
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+index 20c206da0631..7ae2c34f65db 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+@@ -580,6 +580,11 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
+ {
+ struct host_cmd_ds_802_11_key_material *key =
+ &resp->params.key_material;
++ int len;
++
++ len = le16_to_cpu(key->key_param_set.key_len);
++ if (len > sizeof(key->key_param_set.key))
++ return -EINVAL;
+
+ if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
+ if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) {
+@@ -593,9 +598,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
+
+ memset(priv->aes_key.key_param_set.key, 0,
+ sizeof(key->key_param_set.key));
+- priv->aes_key.key_param_set.key_len = key->key_param_set.key_len;
+- memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key,
+- le16_to_cpu(priv->aes_key.key_param_set.key_len));
++ priv->aes_key.key_param_set.key_len = cpu_to_le16(len);
++ memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, len);
+
+ return 0;
+ }
+@@ -610,9 +614,14 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+ {
+ struct host_cmd_ds_802_11_key_material_v2 *key_v2;
+- __le16 len;
++ int len;
+
+ key_v2 = &resp->params.key_material_v2;
++
++ len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len);
++ if (len > WLAN_KEY_LEN_CCMP)
++ return -EINVAL;
++
+ if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
+ if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
+ mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
+@@ -628,10 +637,9 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+ memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
+ WLAN_KEY_LEN_CCMP);
+ priv->aes_key_v2.key_param_set.key_params.aes.key_len =
+- key_v2->key_param_set.key_params.aes.key_len;
+- len = priv->aes_key_v2.key_param_set.key_params.aes.key_len;
++ cpu_to_le16(len);
+ memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
+- key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len));
++ key_v2->key_param_set.key_params.aes.key, len);
+
+ return 0;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index 842cd81704db..b6867d93c0e3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -119,8 +119,10 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
+ struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+ int ret = 0;
+
+- if (seq != rxd->seq)
+- return -EAGAIN;
++ if (seq != rxd->seq) {
++ ret = -EAGAIN;
++ goto out;
++ }
+
+ switch (cmd) {
+ case -MCU_CMD_PATCH_SEM_CONTROL:
+@@ -134,6 +136,7 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
+ default:
+ break;
+ }
++out:
+ dev_kfree_skb(skb);
+
+ return ret;
+diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
+index 3e95ad198912..853ac1c2ed73 100644
+--- a/drivers/net/wireless/realtek/rtw88/coex.c
++++ b/drivers/net/wireless/realtek/rtw88/coex.c
+@@ -1923,7 +1923,8 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
+ if (coex_stat->wl_under_ips)
+ return;
+
+- if (coex->freeze && !coex_stat->bt_setup_link)
++ if (coex->freeze && coex_dm->reason == COEX_RSN_BTINFO &&
++ !coex_stat->bt_setup_link)
+ return;
+
+ coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++;
+diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
+index 35dbdb3c4f1e..8efaee7571f3 100644
+--- a/drivers/net/wireless/realtek/rtw88/fw.c
++++ b/drivers/net/wireless/realtek/rtw88/fw.c
+@@ -340,7 +340,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+ SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
+ SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
+ SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
+- SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en);
++ SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
+ SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
+ SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
+ SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index 88e2252bf8a2..15c7a6fc37b9 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -553,8 +553,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+ stbc_en = VHT_STBC_EN;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ ldpc_en = VHT_LDPC_EN;
+- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+- is_support_sgi = true;
+ } else if (sta->ht_cap.ht_supported) {
+ ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) |
+ (sta->ht_cap.mcs.rx_mask[0] << 12);
+@@ -562,9 +560,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+ stbc_en = HT_STBC_EN;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ ldpc_en = HT_LDPC_EN;
+- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20 ||
+- sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+- is_support_sgi = true;
+ }
+
+ if (efuse->hw_cap.nss == 1)
+@@ -606,12 +601,18 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_80:
+ bw_mode = RTW_CHANNEL_WIDTH_80;
++ is_support_sgi = sta->vht_cap.vht_supported &&
++ (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw_mode = RTW_CHANNEL_WIDTH_40;
++ is_support_sgi = sta->ht_cap.ht_supported &&
++ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ break;
+ default:
+ bw_mode = RTW_CHANNEL_WIDTH_20;
++ is_support_sgi = sta->ht_cap.ht_supported &&
++ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ break;
+ }
+
+diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
+index 850864dbafa1..e6d426edab56 100644
+--- a/drivers/net/wireless/ti/wl1251/event.c
++++ b/drivers/net/wireless/ti/wl1251/event.c
+@@ -70,7 +70,7 @@ static int wl1251_event_ps_report(struct wl1251 *wl,
+ break;
+ }
+
+- return 0;
++ return ret;
+ }
+
+ static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 071b63146d4b..ff5681da8780 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1074,6 +1074,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
+ int pos;
+ int len;
+
++ if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
++ return 0;
++
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
+@@ -1087,18 +1090,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
+ if (status) {
+ dev_warn(ctrl->device,
+ "Identify Descriptors failed (%d)\n", status);
+- /*
+- * Don't treat non-retryable errors as fatal, as we potentially
+- * already have a NGUID or EUI-64. If we failed with DNR set,
+- * we want to silently ignore the error as we can still
+- * identify the device, but if the status has DNR set, we want
+- * to propagate the error back specifically for the disk
+- * revalidation flow to make sure we don't abandon the
+- * device just because of a temporal retry-able error (such
+- * as path of transport errors).
+- */
+- if (status > 0 && (status & NVME_SC_DNR))
+- status = 0;
+ goto free_data;
+ }
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 5433aa2f7601..484aad0d0c9c 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -249,6 +249,12 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
+ fallback = ns;
+ }
+
++ /* No optimized path found, re-check the current path */
++ if (!nvme_path_is_disabled(old) &&
++ old->ana_state == NVME_ANA_OPTIMIZED) {
++ found = old;
++ goto out;
++ }
+ if (!fallback)
+ return NULL;
+ found = fallback;
+@@ -269,10 +275,13 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+ struct nvme_ns *ns;
+
+ ns = srcu_dereference(head->current_path[node], &head->srcu);
+- if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns)
+- ns = nvme_round_robin_path(head, node, ns);
+- if (unlikely(!ns || !nvme_path_is_optimized(ns)))
+- ns = __nvme_find_path(head, node);
++ if (unlikely(!ns))
++ return __nvme_find_path(head, node);
++
++ if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
++ return nvme_round_robin_path(head, node, ns);
++ if (unlikely(!nvme_path_is_optimized(ns)))
++ return __nvme_find_path(head, node);
+ return ns;
+ }
+
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index ed02260862cb..056953bd8bd8 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -115,6 +115,13 @@ enum nvme_quirks {
+ * Prevent tag overlap between queues
+ */
+ NVME_QUIRK_SHARED_TAGS = (1 << 13),
++
++ /*
++ * The controller doesn't handle the Identify Namespace
++ * Identification Descriptor list subcommand despite claiming
++ * NVMe 1.3 compliance.
++ */
++ NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
+ };
+
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index ee7669f23cff..100da11ce98c 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3117,6 +3117,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_IDENTIFY_CNS |
+ NVME_QUIRK_DISABLE_WRITE_ZEROES, },
++ { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
++ .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
+ { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index cd0d49978190..d0336545e1fe 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -890,15 +890,20 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
+ ret = PTR_ERR(ctrl->ctrl.connect_q);
+ goto out_free_tag_set;
+ }
+- } else {
+- blk_mq_update_nr_hw_queues(&ctrl->tag_set,
+- ctrl->ctrl.queue_count - 1);
+ }
+
+ ret = nvme_rdma_start_io_queues(ctrl);
+ if (ret)
+ goto out_cleanup_connect_q;
+
++ if (!new) {
++ nvme_start_queues(&ctrl->ctrl);
++ nvme_wait_freeze(&ctrl->ctrl);
++ blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
++ ctrl->ctrl.queue_count - 1);
++ nvme_unfreeze(&ctrl->ctrl);
++ }
++
+ return 0;
+
+ out_cleanup_connect_q:
+@@ -931,6 +936,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
+ bool remove)
+ {
+ if (ctrl->ctrl.queue_count > 1) {
++ nvme_start_freeze(&ctrl->ctrl);
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+ if (ctrl->ctrl.tagset) {
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 53e113a18a54..0166ff0e4738 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1684,15 +1684,20 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+ ret = PTR_ERR(ctrl->connect_q);
+ goto out_free_tag_set;
+ }
+- } else {
+- blk_mq_update_nr_hw_queues(ctrl->tagset,
+- ctrl->queue_count - 1);
+ }
+
+ ret = nvme_tcp_start_io_queues(ctrl);
+ if (ret)
+ goto out_cleanup_connect_q;
+
++ if (!new) {
++ nvme_start_queues(ctrl);
++ nvme_wait_freeze(ctrl);
++ blk_mq_update_nr_hw_queues(ctrl->tagset,
++ ctrl->queue_count - 1);
++ nvme_unfreeze(ctrl);
++ }
++
+ return 0;
+
+ out_cleanup_connect_q:
+@@ -1797,6 +1802,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
+ {
+ if (ctrl->queue_count <= 1)
+ return;
++ nvme_start_freeze(ctrl);
+ nvme_stop_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
+ if (ctrl->tagset) {
+diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
+index de8e4e347249..e410033b6df0 100644
+--- a/drivers/parisc/sba_iommu.c
++++ b/drivers/parisc/sba_iommu.c
+@@ -1270,7 +1270,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
+ ** (one that doesn't overlap memory or LMMIO space) in the
+ ** IBASE and IMASK registers.
+ */
+- ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
++ ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
+ iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
+
+ if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index 2fccb5762c76..0914ddeae17f 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -204,17 +204,13 @@ EXPORT_SYMBOL(pci_bus_set_ops);
+ static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
+
+ static noinline void pci_wait_cfg(struct pci_dev *dev)
++ __must_hold(&pci_lock)
+ {
+- DECLARE_WAITQUEUE(wait, current);
+-
+- __add_wait_queue(&pci_cfg_wait, &wait);
+ do {
+- set_current_state(TASK_UNINTERRUPTIBLE);
+ raw_spin_unlock_irq(&pci_lock);
+- schedule();
++ wait_event(pci_cfg_wait, !dev->block_cfg_access);
+ raw_spin_lock_irq(&pci_lock);
+ } while (dev->block_cfg_access);
+- __remove_wait_queue(&pci_cfg_wait, &wait);
+ }
+
+ /* Returns 0 on success, negative values indicate error. */
+diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c
+index 97e251090b4f..0dfc778f40a7 100644
+--- a/drivers/pci/controller/pcie-cadence-host.c
++++ b/drivers/pci/controller/pcie-cadence-host.c
+@@ -102,6 +102,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
+ {
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 value, ctrl;
++ u32 id;
+
+ /*
+ * Set the root complex BAR configuration register:
+@@ -121,8 +122,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+
+ /* Set root port configuration space */
+- if (rc->vendor_id != 0xffff)
+- cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
++ if (rc->vendor_id != 0xffff) {
++ id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
++ CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
++ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
++ }
++
+ if (rc->device_id != 0xffff)
+ cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
+
+diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
+index 7c24c0aedad4..9966dcf1d112 100644
+--- a/drivers/pci/controller/vmd.c
++++ b/drivers/pci/controller/vmd.c
+@@ -694,6 +694,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
+ if (!vmd->bus) {
+ pci_free_resource_list(&resources);
+ irq_domain_remove(vmd->irq_domain);
++ irq_domain_free_fwnode(fn);
+ return -ENODEV;
+ }
+
+@@ -808,6 +809,7 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd)
+ static void vmd_remove(struct pci_dev *dev)
+ {
+ struct vmd_dev *vmd = pci_get_drvdata(dev);
++ struct fwnode_handle *fn = vmd->irq_domain->fwnode;
+
+ sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
+ pci_stop_root_bus(vmd->bus);
+@@ -816,6 +818,7 @@ static void vmd_remove(struct pci_dev *dev)
+ vmd_teardown_dma_ops(vmd);
+ vmd_detach_resources(vmd);
+ irq_domain_remove(vmd->irq_domain);
++ irq_domain_free_fwnode(fn);
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 4a0ec34062d6..7624c71011c6 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -1157,6 +1157,7 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
+ cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
+ else
+ cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
++ cnt += sprintf(buffer + cnt, "\n");
+ return cnt;
+ }
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 4ac4b28e0ebb..9bc0f321aaf0 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4446,6 +4446,8 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
++ acpi_put_table(header);
++
+ /* Filter out flags not applicable to multifunction */
+ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
+
+diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
+index 6960dfd8ad8c..0fe408964334 100644
+--- a/drivers/phy/marvell/phy-armada38x-comphy.c
++++ b/drivers/phy/marvell/phy-armada38x-comphy.c
+@@ -41,6 +41,7 @@ struct a38x_comphy_lane {
+
+ struct a38x_comphy {
+ void __iomem *base;
++ void __iomem *conf;
+ struct device *dev;
+ struct a38x_comphy_lane lane[MAX_A38X_COMPHY];
+ };
+@@ -54,6 +55,21 @@ static const u8 gbe_mux[MAX_A38X_COMPHY][MAX_A38X_PORTS] = {
+ { 0, 0, 3 },
+ };
+
++static void a38x_set_conf(struct a38x_comphy_lane *lane, bool enable)
++{
++ struct a38x_comphy *priv = lane->priv;
++ u32 conf;
++
++ if (priv->conf) {
++ conf = readl_relaxed(priv->conf);
++ if (enable)
++ conf |= BIT(lane->port);
++ else
++ conf &= ~BIT(lane->port);
++ writel(conf, priv->conf);
++ }
++}
++
+ static void a38x_comphy_set_reg(struct a38x_comphy_lane *lane,
+ unsigned int offset, u32 mask, u32 value)
+ {
+@@ -97,6 +113,7 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub)
+ {
+ struct a38x_comphy_lane *lane = phy_get_drvdata(phy);
+ unsigned int gen;
++ int ret;
+
+ if (mode != PHY_MODE_ETHERNET)
+ return -EINVAL;
+@@ -115,13 +132,20 @@ static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub)
+ return -EINVAL;
+ }
+
++ a38x_set_conf(lane, false);
++
+ a38x_comphy_set_speed(lane, gen, gen);
+
+- return a38x_comphy_poll(lane, COMPHY_STAT1,
+- COMPHY_STAT1_PLL_RDY_TX |
+- COMPHY_STAT1_PLL_RDY_RX,
+- COMPHY_STAT1_PLL_RDY_TX |
+- COMPHY_STAT1_PLL_RDY_RX);
++ ret = a38x_comphy_poll(lane, COMPHY_STAT1,
++ COMPHY_STAT1_PLL_RDY_TX |
++ COMPHY_STAT1_PLL_RDY_RX,
++ COMPHY_STAT1_PLL_RDY_TX |
++ COMPHY_STAT1_PLL_RDY_RX);
++
++ if (ret == 0)
++ a38x_set_conf(lane, true);
++
++ return ret;
+ }
+
+ static const struct phy_ops a38x_comphy_ops = {
+@@ -174,14 +198,21 @@ static int a38x_comphy_probe(struct platform_device *pdev)
+ if (!priv)
+ return -ENOMEM;
+
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- base = devm_ioremap_resource(&pdev->dev, res);
++ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->dev = &pdev->dev;
+ priv->base = base;
+
++ /* Optional */
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "conf");
++ if (res) {
++ priv->conf = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(priv->conf))
++ return PTR_ERR(priv->conf);
++ }
++
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ struct phy *phy;
+ int ret;
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index bfb22f868857..5087b7c44d55 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -111,6 +111,7 @@ struct rcar_gen3_chan {
+ struct work_struct work;
+ struct mutex lock; /* protects rphys[...].powered */
+ enum usb_dr_mode dr_mode;
++ int irq;
+ bool extcon_host;
+ bool is_otg_channel;
+ bool uses_otg_pins;
+@@ -389,12 +390,38 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
+ rcar_gen3_device_recognition(ch);
+ }
+
++static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
++{
++ struct rcar_gen3_chan *ch = _ch;
++ void __iomem *usb2_base = ch->base;
++ u32 status = readl(usb2_base + USB2_OBINTSTA);
++ irqreturn_t ret = IRQ_NONE;
++
++ if (status & USB2_OBINT_BITS) {
++ dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
++ writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
++ rcar_gen3_device_recognition(ch);
++ ret = IRQ_HANDLED;
++ }
++
++ return ret;
++}
++
+ static int rcar_gen3_phy_usb2_init(struct phy *p)
+ {
+ struct rcar_gen3_phy *rphy = phy_get_drvdata(p);
+ struct rcar_gen3_chan *channel = rphy->ch;
+ void __iomem *usb2_base = channel->base;
+ u32 val;
++ int ret;
++
++ if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) {
++ INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
++ ret = request_irq(channel->irq, rcar_gen3_phy_usb2_irq,
++ IRQF_SHARED, dev_name(channel->dev), channel);
++ if (ret < 0)
++ dev_err(channel->dev, "No irq handler (%d)\n", channel->irq);
++ }
+
+ /* Initialize USB2 part */
+ val = readl(usb2_base + USB2_INT_ENABLE);
+@@ -433,6 +460,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
+ val &= ~USB2_INT_ENABLE_UCOM_INTEN;
+ writel(val, usb2_base + USB2_INT_ENABLE);
+
++ if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel))
++ free_irq(channel->irq, channel);
++
+ return 0;
+ }
+
+@@ -503,23 +533,6 @@ static const struct phy_ops rz_g1c_phy_usb2_ops = {
+ .owner = THIS_MODULE,
+ };
+
+-static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
+-{
+- struct rcar_gen3_chan *ch = _ch;
+- void __iomem *usb2_base = ch->base;
+- u32 status = readl(usb2_base + USB2_OBINTSTA);
+- irqreturn_t ret = IRQ_NONE;
+-
+- if (status & USB2_OBINT_BITS) {
+- dev_vdbg(ch->dev, "%s: %08x\n", __func__, status);
+- writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
+- rcar_gen3_device_recognition(ch);
+- ret = IRQ_HANDLED;
+- }
+-
+- return ret;
+-}
+-
+ static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
+ {
+ .compatible = "renesas,usb2-phy-r8a77470",
+@@ -598,7 +611,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ struct phy_provider *provider;
+ struct resource *res;
+ const struct phy_ops *phy_usb2_ops;
+- int irq, ret = 0, i;
++ int ret = 0, i;
+
+ if (!dev->of_node) {
+ dev_err(dev, "This driver needs device tree\n");
+@@ -614,16 +627,8 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ if (IS_ERR(channel->base))
+ return PTR_ERR(channel->base);
+
+- /* call request_irq for OTG */
+- irq = platform_get_irq_optional(pdev, 0);
+- if (irq >= 0) {
+- INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
+- irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
+- IRQF_SHARED, dev_name(dev), channel);
+- if (irq < 0)
+- dev_err(dev, "No irq handler (%d)\n", irq);
+- }
+-
++ /* get irq number here and request_irq for OTG in phy_init */
++ channel->irq = platform_get_irq_optional(pdev, 0);
+ channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
+ if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
+ int ret;
+diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
+index e510732afb8b..7f6279fb4f8f 100644
+--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
++++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
+@@ -714,7 +714,9 @@ static int exynos5_usbdrd_phy_calibrate(struct phy *phy)
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+
+- return exynos5420_usbdrd_phy_calibrate(phy_drd);
++ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI)
++ return exynos5420_usbdrd_phy_calibrate(phy_drd);
++ return 0;
+ }
+
+ static const struct phy_ops exynos5_usbdrd_phy_ops = {
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index 1e0614daee9b..a9d511982780 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -916,7 +916,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
+
+ /* If pinconf isn't supported, don't parse properties in below. */
+ if (!PCS_HAS_PINCONF)
+- return 0;
++ return -ENOTSUPP;
+
+ /* cacluate how much properties are supported in current node */
+ for (i = 0; i < ARRAY_SIZE(prop2); i++) {
+@@ -928,7 +928,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
+ nconfs++;
+ }
+ if (!nconfs)
+- return 0;
++ return -ENOTSUPP;
+
+ func->conf = devm_kcalloc(pcs->dev,
+ nconfs, sizeof(struct pcs_conf_vals),
+@@ -1056,9 +1056,12 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
+
+ if (PCS_HAS_PINCONF && function) {
+ res = pcs_parse_pinconf(pcs, np, function, map);
+- if (res)
++ if (res == 0)
++ *num_maps = 2;
++ else if (res == -ENOTSUPP)
++ *num_maps = 1;
++ else
+ goto free_pingroups;
+- *num_maps = 2;
+ } else {
+ *num_maps = 1;
+ }
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index 0d42477946f3..59b78a181723 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -110,6 +110,16 @@ static struct quirk_entry quirk_asus_forceals = {
+ .wmi_force_als_set = true,
+ };
+
++static struct quirk_entry quirk_asus_ga401i = {
++ .wmi_backlight_power = true,
++ .wmi_backlight_set_devstate = true,
++};
++
++static struct quirk_entry quirk_asus_ga502i = {
++ .wmi_backlight_power = true,
++ .wmi_backlight_set_devstate = true,
++};
++
+ static int dmi_matched(const struct dmi_system_id *dmi)
+ {
+ pr_info("Identified laptop model '%s'\n", dmi->ident);
+@@ -411,6 +421,78 @@ static const struct dmi_system_id asus_quirks[] = {
+ },
+ .driver_data = &quirk_asus_forceals,
+ },
++ {
++ .callback = dmi_matched,
++ .ident = "ASUSTeK COMPUTER INC. GA401IH",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GA401IH"),
++ },
++ .driver_data = &quirk_asus_ga401i,
++ },
++ {
++ .callback = dmi_matched,
++ .ident = "ASUSTeK COMPUTER INC. GA401II",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GA401II"),
++ },
++ .driver_data = &quirk_asus_ga401i,
++ },
++ {
++ .callback = dmi_matched,
++ .ident = "ASUSTeK COMPUTER INC. GA401IU",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GA401IU"),
++ },
++ .driver_data = &quirk_asus_ga401i,
++ },
++ {
++ .callback = dmi_matched,
++ .ident = "ASUSTeK COMPUTER INC. GA401IV",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GA401IV"),
++ },
++ .driver_data = &quirk_asus_ga401i,
++ },
++ {
++ .callback = dmi_matched,
++ .ident = "ASUSTeK COMPUTER INC. GA401IVC",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GA401IVC"),
++ },
++ .driver_data = &quirk_asus_ga401i,
++ },
++ {
++ .callback = dmi_matched,
++ .ident = "ASUSTeK COMPUTER INC. GA502II",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GA502II"),
++ },
++ .driver_data = &quirk_asus_ga502i,
++ },
++ {
++ .callback = dmi_matched,
++ .ident = "ASUSTeK COMPUTER INC. GA502IU",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GA502IU"),
++ },
++ .driver_data = &quirk_asus_ga502i,
++ },
++ {
++ .callback = dmi_matched,
++ .ident = "ASUSTeK COMPUTER INC. GA502IV",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GA502IV"),
++ },
++ .driver_data = &quirk_asus_ga502i,
++ },
+ {},
+ };
+
+diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
+index 7a506c1d0113..ad1399dcb21f 100644
+--- a/drivers/platform/x86/intel-hid.c
++++ b/drivers/platform/x86/intel-hid.c
+@@ -570,7 +570,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
+ return AE_OK;
+
+ if (acpi_match_device_ids(dev, ids) == 0)
+- if (acpi_create_platform_device(dev, NULL))
++ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
+ dev_info(&dev->dev,
+ "intel-hid: created platform device\n");
+
+diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
+index cb2a80fdd8f4..3393ee95077f 100644
+--- a/drivers/platform/x86/intel-vbtn.c
++++ b/drivers/platform/x86/intel-vbtn.c
+@@ -286,7 +286,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
+ return AE_OK;
+
+ if (acpi_match_device_ids(dev, ids) == 0)
+- if (acpi_create_platform_device(dev, NULL))
++ if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL)))
+ dev_info(&dev->dev,
+ "intel-vbtn: created platform device\n");
+
+diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c
+index 5ca047b3f58f..23e7d6447ae9 100644
+--- a/drivers/power/supply/88pm860x_battery.c
++++ b/drivers/power/supply/88pm860x_battery.c
+@@ -433,7 +433,7 @@ static void pm860x_init_battery(struct pm860x_battery_info *info)
+ int ret;
+ int data;
+ int bat_remove;
+- int soc;
++ int soc = 0;
+
+ /* measure enable on GPADC1 */
+ data = MEAS1_GP1;
+@@ -496,7 +496,9 @@ static void pm860x_init_battery(struct pm860x_battery_info *info)
+ }
+ mutex_unlock(&info->lock);
+
+- calc_soc(info, OCV_MODE_ACTIVE, &soc);
++ ret = calc_soc(info, OCV_MODE_ACTIVE, &soc);
++ if (ret < 0)
++ goto out;
+
+ data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG);
+ bat_remove = data & BAT_WU_LOG;
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 0011bdc15afb..a17aebe0aa7a 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -4994,7 +4994,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ struct regulator_dev *rdev;
+ bool dangling_cfg_gpiod = false;
+ bool dangling_of_gpiod = false;
+- bool reg_device_fail = false;
+ struct device *dev;
+ int ret, i;
+
+@@ -5123,10 +5122,12 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ }
+
+ /* register with sysfs */
++ device_initialize(&rdev->dev);
+ rdev->dev.class = &regulator_class;
+ rdev->dev.parent = dev;
+ dev_set_name(&rdev->dev, "regulator.%lu",
+ (unsigned long) atomic_inc_return(&regulator_no));
++ dev_set_drvdata(&rdev->dev, rdev);
+
+ /* set regulator constraints */
+ if (init_data)
+@@ -5177,12 +5178,9 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ !rdev->desc->fixed_uV)
+ rdev->is_switch = true;
+
+- dev_set_drvdata(&rdev->dev, rdev);
+- ret = device_register(&rdev->dev);
+- if (ret != 0) {
+- reg_device_fail = true;
++ ret = device_add(&rdev->dev);
++ if (ret != 0)
+ goto unset_supplies;
+- }
+
+ rdev_init_debugfs(rdev);
+
+@@ -5204,17 +5202,15 @@ unset_supplies:
+ mutex_unlock(&regulator_list_mutex);
+ wash:
+ kfree(rdev->coupling_desc.coupled_rdevs);
+- kfree(rdev->constraints);
+ mutex_lock(&regulator_list_mutex);
+ regulator_ena_gpio_free(rdev);
+ mutex_unlock(&regulator_list_mutex);
++ put_device(&rdev->dev);
++ rdev = NULL;
+ clean:
+ if (dangling_of_gpiod)
+ gpiod_put(config->ena_gpiod);
+- if (reg_device_fail)
+- put_device(&rdev->dev);
+- else
+- kfree(rdev);
++ kfree(rdev);
+ kfree(config);
+ rinse:
+ if (dangling_cfg_gpiod)
+diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
+index 8d4971645cf1..f7ae03fd36cb 100644
+--- a/drivers/s390/block/dasd_diag.c
++++ b/drivers/s390/block/dasd_diag.c
+@@ -319,7 +319,7 @@ dasd_diag_check_device(struct dasd_device *device)
+ struct dasd_diag_characteristics *rdc_data;
+ struct vtoc_cms_label *label;
+ struct dasd_block *block;
+- struct dasd_diag_bio bio;
++ struct dasd_diag_bio *bio;
+ unsigned int sb, bsize;
+ blocknum_t end_block;
+ int rc;
+@@ -395,29 +395,36 @@ dasd_diag_check_device(struct dasd_device *device)
+ rc = -ENOMEM;
+ goto out;
+ }
++ bio = kzalloc(sizeof(*bio), GFP_KERNEL);
++ if (bio == NULL) {
++ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
++ "No memory to allocate initialization bio");
++ rc = -ENOMEM;
++ goto out_label;
++ }
+ rc = 0;
+ end_block = 0;
+ /* try all sizes - needed for ECKD devices */
+ for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
+ mdsk_init_io(device, bsize, 0, &end_block);
+- memset(&bio, 0, sizeof (struct dasd_diag_bio));
+- bio.type = MDSK_READ_REQ;
+- bio.block_number = private->pt_block + 1;
+- bio.buffer = label;
++ memset(bio, 0, sizeof(*bio));
++ bio->type = MDSK_READ_REQ;
++ bio->block_number = private->pt_block + 1;
++ bio->buffer = label;
+ memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
+ private->iob.dev_nr = rdc_data->dev_nr;
+ private->iob.key = 0;
+ private->iob.flags = 0; /* do synchronous io */
+ private->iob.block_count = 1;
+ private->iob.interrupt_params = 0;
+- private->iob.bio_list = &bio;
++ private->iob.bio_list = bio;
+ private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
+ rc = dia250(&private->iob, RW_BIO);
+ if (rc == 3) {
+ pr_warn("%s: A 64-bit DIAG call failed\n",
+ dev_name(&device->cdev->dev));
+ rc = -EOPNOTSUPP;
+- goto out_label;
++ goto out_bio;
+ }
+ mdsk_term_io(device);
+ if (rc == 0)
+@@ -427,7 +434,7 @@ dasd_diag_check_device(struct dasd_device *device)
+ pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
+ dev_name(&device->cdev->dev), rc);
+ rc = -EIO;
+- goto out_label;
++ goto out_bio;
+ }
+ /* check for label block */
+ if (memcmp(label->label_id, DASD_DIAG_CMS1,
+@@ -457,6 +464,8 @@ dasd_diag_check_device(struct dasd_device *device)
+ (rc == 4) ? ", read-only device" : "");
+ rc = 0;
+ }
++out_bio:
++ kfree(bio);
+ out_label:
+ free_page((long) label);
+ out:
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 92bace3b28fd..4ce28aa490cd 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -1199,6 +1199,10 @@ static void qeth_bridge_state_change(struct qeth_card *card,
+ int extrasize;
+
+ QETH_CARD_TEXT(card, 2, "brstchng");
++ if (qports->num_entries == 0) {
++ QETH_CARD_TEXT(card, 2, "BPempty");
++ return;
++ }
+ if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
+ QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
+ return;
+diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
+index a1f3e9ee4e63..14e1d001253c 100644
+--- a/drivers/scsi/arm/cumana_2.c
++++ b/drivers/scsi/arm/cumana_2.c
+@@ -450,7 +450,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
+
+ if (info->info.scsi.dma != NO_DMA)
+ free_dma(info->info.scsi.dma);
+- free_irq(ec->irq, host);
++ free_irq(ec->irq, info);
+
+ out_release:
+ fas216_release(host);
+diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
+index 134f040d58e2..f441ec8eb93d 100644
+--- a/drivers/scsi/arm/eesox.c
++++ b/drivers/scsi/arm/eesox.c
+@@ -571,7 +571,7 @@ static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+
+ if (info->info.scsi.dma != NO_DMA)
+ free_dma(info->info.scsi.dma);
+- free_irq(ec->irq, host);
++ free_irq(ec->irq, info);
+
+ out_remove:
+ fas216_remove(host);
+diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
+index c795537a671c..2dc0df005cb3 100644
+--- a/drivers/scsi/arm/powertec.c
++++ b/drivers/scsi/arm/powertec.c
+@@ -378,7 +378,7 @@ static int powertecscsi_probe(struct expansion_card *ec,
+
+ if (info->info.scsi.dma != NO_DMA)
+ free_dma(info->info.scsi.dma);
+- free_irq(ec->irq, host);
++ free_irq(ec->irq, info);
+
+ out_release:
+ fas216_release(host);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 0cbe6740e0c9..2c2966a297c7 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -5586,9 +5586,13 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
+ &instance->irq_context[i])) {
+ dev_err(&instance->pdev->dev,
+ "Failed to register IRQ for vector %d.\n", i);
+- for (j = 0; j < i; j++)
++ for (j = 0; j < i; j++) {
++ if (j < instance->low_latency_index_start)
++ irq_set_affinity_hint(
++ pci_irq_vector(pdev, j), NULL);
+ free_irq(pci_irq_vector(pdev, j),
+ &instance->irq_context[j]);
++ }
+ /* Retry irq register for IO_APIC*/
+ instance->msix_vectors = 0;
+ instance->msix_load_balance = false;
+@@ -5626,6 +5630,9 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
+
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
++ if (i < instance->low_latency_index_start)
++ irq_set_affinity_hint(
++ pci_irq_vector(instance->pdev, i), NULL);
+ free_irq(pci_irq_vector(instance->pdev, i),
+ &instance->irq_context[i]);
+ }
+diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
+index 74fb50644678..4dd50db90677 100644
+--- a/drivers/scsi/mesh.c
++++ b/drivers/scsi/mesh.c
+@@ -1045,6 +1045,8 @@ static void handle_error(struct mesh_state *ms)
+ while ((in_8(&mr->bus_status1) & BS1_RST) != 0)
+ udelay(1);
+ printk("done\n");
++ if (ms->dma_started)
++ halt_dma(ms);
+ handle_reset(ms);
+ /* request_q is empty, no point in mesh_start() */
+ return;
+@@ -1357,7 +1359,8 @@ static void halt_dma(struct mesh_state *ms)
+ ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
+ ms->tgts[ms->conn_tgt].data_goes_out);
+ }
+- scsi_dma_unmap(cmd);
++ if (cmd)
++ scsi_dma_unmap(cmd);
+ ms->dma_started = 0;
+ }
+
+@@ -1712,6 +1715,9 @@ static int mesh_host_reset(struct scsi_cmnd *cmd)
+
+ spin_lock_irqsave(ms->host->host_lock, flags);
+
++ if (ms->dma_started)
++ halt_dma(ms);
++
+ /* Reset the controller & dbdma channel */
+ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
+ out_8(&mr->exception, 0xff); /* clear all exception bits */
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 32965ec76965..44181a2cbf18 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -5296,6 +5296,12 @@ static int __init scsi_debug_init(void)
+ pr_err("submit_queues must be 1 or more\n");
+ return -EINVAL;
+ }
++
++ if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
++ pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
++ return -EINVAL;
++ }
++
+ sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
+ GFP_KERNEL);
+ if (sdebug_q_arr == NULL)
+diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
+index 0ba1f465db12..8924fcd9f5f5 100644
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -715,6 +715,7 @@ static struct platform_driver rpmh_driver = {
+ .driver = {
+ .name = "rpmh",
+ .of_match_table = rpmh_drv_match,
++ .suppress_bind_attrs = true,
+ },
+ };
+
+diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
+index 9dfe8b04e688..f9bc1705c0d4 100644
+--- a/drivers/spi/spi-lantiq-ssc.c
++++ b/drivers/spi/spi-lantiq-ssc.c
+@@ -184,6 +184,7 @@ struct lantiq_ssc_spi {
+ unsigned int tx_fifo_size;
+ unsigned int rx_fifo_size;
+ unsigned int base_cs;
++ unsigned int fdx_tx_level;
+ };
+
+ static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
+@@ -481,6 +482,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi)
+ u32 data;
+ unsigned int tx_free = tx_fifo_free(spi);
+
++ spi->fdx_tx_level = 0;
+ while (spi->tx_todo && tx_free) {
+ switch (spi->bits_per_word) {
+ case 2 ... 8:
+@@ -509,6 +511,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi)
+
+ lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
+ tx_free--;
++ spi->fdx_tx_level++;
+ }
+ }
+
+@@ -520,6 +523,13 @@ static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
+ u32 data;
+ unsigned int rx_fill = rx_fifo_level(spi);
+
++ /*
++ * Wait until all expected data to be shifted in.
++ * Otherwise, rx overrun may occur.
++ */
++ while (rx_fill != spi->fdx_tx_level)
++ rx_fill = rx_fifo_level(spi);
++
+ while (rx_fill) {
+ data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
+
+@@ -907,7 +917,7 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
+
+- spi->wq = alloc_ordered_workqueue(dev_name(dev), 0);
++ spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM);
+ if (!spi->wq) {
+ err = -ENOMEM;
+ goto err_clk_put;
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 2cc6d9951b52..008b64f4e031 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -286,7 +286,7 @@ static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
+ static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
+ {
+ u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+- u32 rx_left = rs->rx_left - words;
++ u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0;
+
+ /* the hardware doesn't allow us to change fifo threshold
+ * level while spi is enabled, so instead make sure to leave
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index ac6bf1fbbfe6..be503a0e6ef7 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -223,6 +223,11 @@ static int spidev_message(struct spidev_data *spidev,
+ for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
+ n;
+ n--, k_tmp++, u_tmp++) {
++ /* Ensure that also following allocations from rx_buf/tx_buf will meet
++ * DMA alignment requirements.
++ */
++ unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);
++
+ k_tmp->len = u_tmp->len;
+
+ total += k_tmp->len;
+@@ -238,17 +243,17 @@ static int spidev_message(struct spidev_data *spidev,
+
+ if (u_tmp->rx_buf) {
+ /* this transfer needs space in RX bounce buffer */
+- rx_total += k_tmp->len;
++ rx_total += len_aligned;
+ if (rx_total > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+ k_tmp->rx_buf = rx_buf;
+- rx_buf += k_tmp->len;
++ rx_buf += len_aligned;
+ }
+ if (u_tmp->tx_buf) {
+ /* this transfer needs space in TX bounce buffer */
+- tx_total += k_tmp->len;
++ tx_total += len_aligned;
+ if (tx_total > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+@@ -258,7 +263,7 @@ static int spidev_message(struct spidev_data *spidev,
+ (uintptr_t) u_tmp->tx_buf,
+ u_tmp->len))
+ goto done;
+- tx_buf += k_tmp->len;
++ tx_buf += len_aligned;
+ }
+
+ k_tmp->cs_change = !!u_tmp->cs_change;
+@@ -290,16 +295,16 @@ static int spidev_message(struct spidev_data *spidev,
+ goto done;
+
+ /* copy any rx data out of bounce buffer */
+- rx_buf = spidev->rx_buffer;
+- for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
++ for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
++ n;
++ n--, k_tmp++, u_tmp++) {
+ if (u_tmp->rx_buf) {
+ if (copy_to_user((u8 __user *)
+- (uintptr_t) u_tmp->rx_buf, rx_buf,
++ (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
+ u_tmp->len)) {
+ status = -EFAULT;
+ goto done;
+ }
+- rx_buf += u_tmp->len;
+ }
+ }
+ status = total;
+diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
+index 511136dce3a4..ddc09616248a 100644
+--- a/drivers/staging/rtl8192u/r8192U_core.c
++++ b/drivers/staging/rtl8192u/r8192U_core.c
+@@ -2401,7 +2401,7 @@ static int rtl8192_read_eeprom_info(struct net_device *dev)
+ ret = eprom_read(dev, (EEPROM_TX_PW_INDEX_CCK >> 1));
+ if (ret < 0)
+ return ret;
+- priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff) >> 8;
++ priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff00) >> 8;
+ } else
+ priv->EEPROMTxPowerLevelCCK = 0x10;
+ RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK);
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index af6bf0736b52..eb76cc2cbfd8 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -3257,6 +3257,7 @@ failed_platform_init:
+
+ static int vchiq_remove(struct platform_device *pdev)
+ {
++ platform_device_unregister(bcm2835_audio);
+ platform_device_unregister(bcm2835_camera);
+ vchiq_debugfs_deinit();
+ device_destroy(vchiq_class, vchiq_devid);
+diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+index 85776db4bf34..2ce4b19f312a 100644
+--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
++++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+@@ -169,7 +169,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
+
+ data = ti_bandgap_get_sensor_data(bgp, id);
+
+- if (!IS_ERR_OR_NULL(data))
++ if (IS_ERR_OR_NULL(data))
+ data = ti_thermal_build_data(bgp, id);
+
+ if (!data)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index e0b77674869c..c96c50faccf7 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -25,17 +25,23 @@ static unsigned int quirk_count;
+
+ static char quirks_param[128];
+
+-static int quirks_param_set(const char *val, const struct kernel_param *kp)
++static int quirks_param_set(const char *value, const struct kernel_param *kp)
+ {
+- char *p, *field;
++ char *val, *p, *field;
+ u16 vid, pid;
+ u32 flags;
+ size_t i;
+ int err;
+
++ val = kstrdup(value, GFP_KERNEL);
++ if (!val)
++ return -ENOMEM;
++
+ err = param_set_copystring(val, kp);
+- if (err)
++ if (err) {
++ kfree(val);
+ return err;
++ }
+
+ mutex_lock(&quirk_mutex);
+
+@@ -60,10 +66,11 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
+ if (!quirk_list) {
+ quirk_count = 0;
+ mutex_unlock(&quirk_mutex);
++ kfree(val);
+ return -ENOMEM;
+ }
+
+- for (i = 0, p = (char *)val; p && *p;) {
++ for (i = 0, p = val; p && *p;) {
+ /* Each entry consists of VID:PID:flags */
+ field = strsep(&p, ":");
+ if (!field)
+@@ -144,6 +151,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
+
+ unlock:
+ mutex_unlock(&quirk_mutex);
++ kfree(val);
+
+ return 0;
+ }
+diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
+index 4e14c4f7fed7..34bb6124f1e2 100644
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -514,6 +514,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
+ if (hsotg->gadget_enabled) {
+ retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
+ if (retval) {
++ hsotg->gadget.udc = NULL;
+ dwc2_hsotg_remove(hsotg);
+ goto error;
+ }
+@@ -522,7 +523,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
+ return 0;
+
+ error:
+- dwc2_lowlevel_hw_disable(hsotg);
++ if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
++ dwc2_lowlevel_hw_disable(hsotg);
+ return retval;
+ }
+
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index db2d4980cb35..3633df6d7610 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -215,10 +215,7 @@ static struct uac2_ac_header_descriptor ac_hdr_desc = {
+ .bDescriptorSubtype = UAC_MS_HEADER,
+ .bcdADC = cpu_to_le16(0x200),
+ .bCategory = UAC2_FUNCTION_IO_BOX,
+- .wTotalLength = cpu_to_le16(sizeof in_clk_src_desc
+- + sizeof out_clk_src_desc + sizeof usb_out_it_desc
+- + sizeof io_in_it_desc + sizeof usb_in_ot_desc
+- + sizeof io_out_ot_desc),
++ /* .wTotalLength = DYNAMIC */
+ .bmControls = 0,
+ };
+
+@@ -501,7 +498,7 @@ static void setup_descriptor(struct f_uac2_opts *opts)
+ as_in_hdr_desc.bTerminalLink = usb_in_ot_desc.bTerminalID;
+
+ iad_desc.bInterfaceCount = 1;
+- ac_hdr_desc.wTotalLength = 0;
++ ac_hdr_desc.wTotalLength = cpu_to_le16(sizeof(ac_hdr_desc));
+
+ if (EPIN_EN(opts)) {
+ u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength);
+diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
+index cc4a16e253ac..3d33499db50b 100644
+--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
++++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
+@@ -282,6 +282,7 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit)
+ * in that case reinit is passed as 1
+ */
+ if (reinit) {
++ int i;
+ /* Enable interrupts */
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp |= BDC_GIE;
+@@ -291,6 +292,9 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit)
+ /* Initialize SRR to 0 */
+ memset(bdc->srr.sr_bds, 0,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd));
++ /* clear ep flags to avoid post disconnect stops/deconfigs */
++ for (i = 1; i < bdc->num_eps; ++i)
++ bdc->bdc_ep_array[i]->flags = 0;
+ } else {
+ /* One time initiaization only */
+ /* Enable status report function pointers */
+@@ -601,9 +605,14 @@ static int bdc_remove(struct platform_device *pdev)
+ static int bdc_suspend(struct device *dev)
+ {
+ struct bdc *bdc = dev_get_drvdata(dev);
++ int ret;
+
+- clk_disable_unprepare(bdc->clk);
+- return 0;
++ /* Halt the controller */
++ ret = bdc_stop(bdc);
++ if (!ret)
++ clk_disable_unprepare(bdc->clk);
++
++ return ret;
+ }
+
+ static int bdc_resume(struct device *dev)
+diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
+index d49c6dc1082d..9ddc0b4e92c9 100644
+--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
++++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
+@@ -615,7 +615,6 @@ int bdc_ep_enable(struct bdc_ep *ep)
+ }
+ bdc_dbg_bd_list(bdc, ep);
+ /* only for ep0: config ep is called for ep0 from connect event */
+- ep->flags |= BDC_EP_ENABLED;
+ if (ep->ep_num == 1)
+ return ret;
+
+@@ -759,10 +758,13 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
+ __func__, ep->name, start_bdi, end_bdi);
+ dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
+ ep, (void *)ep->usb_ep.desc);
+- /* Stop the ep to see where the HW is ? */
+- ret = bdc_stop_ep(bdc, ep->ep_num);
+- /* if there is an issue with stopping ep, then no need to go further */
+- if (ret)
++ /* if still connected, stop the ep to see where the HW is ? */
++ if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) {
++ ret = bdc_stop_ep(bdc, ep->ep_num);
++ /* if there is an issue, then no need to go further */
++ if (ret)
++ return 0;
++ } else
+ return 0;
+
+ /*
+@@ -1911,7 +1913,9 @@ static int bdc_gadget_ep_disable(struct usb_ep *_ep)
+ __func__, ep->name, ep->flags);
+
+ if (!(ep->flags & BDC_EP_ENABLED)) {
+- dev_warn(bdc->dev, "%s is already disabled\n", ep->name);
++ if (bdc->gadget.speed != USB_SPEED_UNKNOWN)
++ dev_warn(bdc->dev, "%s is already disabled\n",
++ ep->name);
+ return 0;
+ }
+ spin_lock_irqsave(&bdc->lock, flags);
+diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
+index 51efee21915f..7c616d7641c6 100644
+--- a/drivers/usb/gadget/udc/net2280.c
++++ b/drivers/usb/gadget/udc/net2280.c
+@@ -3782,8 +3782,10 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ return 0;
+
+ done:
+- if (dev)
++ if (dev) {
+ net2280_remove(pdev);
++ kfree(dev);
++ }
+ return retval;
+ }
+
+diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
+index 9dd02160cca9..e3780d4d6514 100644
+--- a/drivers/usb/mtu3/mtu3_core.c
++++ b/drivers/usb/mtu3/mtu3_core.c
+@@ -131,8 +131,12 @@ static void mtu3_device_disable(struct mtu3 *mtu)
+ mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
+ SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
+
+- if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
++ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
+ mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
++ if (mtu->is_u3_ip)
++ mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
++ SSUSB_U3_PORT_DUAL_MODE);
++ }
+
+ mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+ }
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f5143eedbc48..a90801ef0055 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -272,6 +272,8 @@ static struct usb_serial_driver cp210x_device = {
+ .break_ctl = cp210x_break_ctl,
+ .set_termios = cp210x_set_termios,
+ .tx_empty = cp210x_tx_empty,
++ .throttle = usb_serial_generic_throttle,
++ .unthrottle = usb_serial_generic_unthrottle,
+ .tiocmget = cp210x_tiocmget,
+ .tiocmset = cp210x_tiocmset,
+ .attach = cp210x_attach,
+@@ -915,6 +917,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
+ u32 baud;
+ u16 bits;
+ u32 ctl_hs;
++ u32 flow_repl;
+
+ cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud);
+
+@@ -1015,6 +1018,22 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
+ ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
+ if (ctl_hs & CP210X_SERIAL_CTS_HANDSHAKE) {
+ dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__);
++ /*
++ * When the port is closed, the CP210x hardware disables
++ * auto-RTS and RTS is deasserted but it leaves auto-CTS when
++ * in hardware flow control mode. When re-opening the port, if
++ * auto-CTS is enabled on the cp210x, then auto-RTS must be
++ * re-enabled in the driver.
++ */
++ flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace);
++ flow_repl &= ~CP210X_SERIAL_RTS_MASK;
++ flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL);
++ flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
++ cp210x_write_reg_block(port,
++ CP210X_SET_FLOW,
++ &flow_ctl,
++ sizeof(flow_ctl));
++
+ cflag |= CRTSCTS;
+ } else {
+ dev_dbg(dev, "%s - flow control = NONE\n", __func__);
+diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
+index b8dfeb4fb2ed..ffbb2a8901b2 100644
+--- a/drivers/usb/serial/iuu_phoenix.c
++++ b/drivers/usb/serial/iuu_phoenix.c
+@@ -353,10 +353,11 @@ static void iuu_led_activity_on(struct urb *urb)
+ struct usb_serial_port *port = urb->context;
+ int result;
+ char *buf_ptr = port->write_urb->transfer_buffer;
+- *buf_ptr++ = IUU_SET_LED;
++
+ if (xmas) {
+- get_random_bytes(buf_ptr, 6);
+- *(buf_ptr+7) = 1;
++ buf_ptr[0] = IUU_SET_LED;
++ get_random_bytes(buf_ptr + 1, 6);
++ buf_ptr[7] = 1;
+ } else {
+ iuu_rgbf_fill_buffer(buf_ptr, 255, 255, 0, 0, 0, 0, 255);
+ }
+@@ -374,13 +375,14 @@ static void iuu_led_activity_off(struct urb *urb)
+ struct usb_serial_port *port = urb->context;
+ int result;
+ char *buf_ptr = port->write_urb->transfer_buffer;
++
+ if (xmas) {
+ iuu_rxcmd(urb);
+ return;
+- } else {
+- *buf_ptr++ = IUU_SET_LED;
+- iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255);
+ }
++
++ iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255);
++
+ usb_fill_bulk_urb(port->write_urb, port->serial->dev,
+ usb_sndbulkpipe(port->serial->dev,
+ port->bulk_out_endpointAddress),
+diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
+index 00dddf6e08b0..2d2ee17052e8 100644
+--- a/drivers/video/console/newport_con.c
++++ b/drivers/video/console/newport_con.c
+@@ -32,6 +32,8 @@
+ #include <linux/linux_logo.h>
+ #include <linux/font.h>
+
++#define NEWPORT_LEN 0x10000
++
+ #define FONT_DATA ((unsigned char *)font_vga_8x16.data)
+
+ /* borrowed from fbcon.c */
+@@ -43,6 +45,7 @@
+ static unsigned char *font_data[MAX_NR_CONSOLES];
+
+ static struct newport_regs *npregs;
++static unsigned long newport_addr;
+
+ static int logo_active;
+ static int topscan;
+@@ -702,7 +705,6 @@ const struct consw newport_con = {
+ static int newport_probe(struct gio_device *dev,
+ const struct gio_device_id *id)
+ {
+- unsigned long newport_addr;
+ int err;
+
+ if (!dev->resource.start)
+@@ -712,7 +714,7 @@ static int newport_probe(struct gio_device *dev,
+ return -EBUSY; /* we only support one Newport as console */
+
+ newport_addr = dev->resource.start + 0xF0000;
+- if (!request_mem_region(newport_addr, 0x10000, "Newport"))
++ if (!request_mem_region(newport_addr, NEWPORT_LEN, "Newport"))
+ return -ENODEV;
+
+ npregs = (struct newport_regs *)/* ioremap cannot fail */
+@@ -720,6 +722,11 @@ static int newport_probe(struct gio_device *dev,
+ console_lock();
+ err = do_take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1);
+ console_unlock();
++
++ if (err) {
++ iounmap((void *)npregs);
++ release_mem_region(newport_addr, NEWPORT_LEN);
++ }
+ return err;
+ }
+
+@@ -727,6 +734,7 @@ static void newport_remove(struct gio_device *dev)
+ {
+ give_up_console(&newport_con);
+ iounmap((void *)npregs);
++ release_mem_region(newport_addr, NEWPORT_LEN);
+ }
+
+ static struct gio_device_id newport_ids[] = {
+diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
+index b770946a0920..76464000933d 100644
+--- a/drivers/video/fbdev/neofb.c
++++ b/drivers/video/fbdev/neofb.c
+@@ -1820,6 +1820,7 @@ static int neo_scan_monitor(struct fb_info *info)
+ #else
+ printk(KERN_ERR
+ "neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n");
++ kfree(info->monspecs.modedb);
+ return -1;
+ #endif
+ default:
+diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
+index f70c9f79622e..27635926cea3 100644
+--- a/drivers/video/fbdev/pxafb.c
++++ b/drivers/video/fbdev/pxafb.c
+@@ -2425,8 +2425,8 @@ static int pxafb_remove(struct platform_device *dev)
+
+ free_pages_exact(fbi->video_mem, fbi->video_mem_size);
+
+- dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff,
+- fbi->dma_buff_phys);
++ dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff,
++ fbi->dma_buff_phys);
+
+ return 0;
+ }
+diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
+index 512789f5f884..d5d22d9c0f56 100644
+--- a/drivers/video/fbdev/savage/savagefb_driver.c
++++ b/drivers/video/fbdev/savage/savagefb_driver.c
+@@ -2158,6 +2158,8 @@ static int savage_init_fb_info(struct fb_info *info, struct pci_dev *dev,
+ info->flags |= FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_IMAGEBLIT;
++ else
++ kfree(info->pixmap.addr);
+ }
+ #endif
+ return err;
+diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
+index 207d0add684b..246681414577 100644
+--- a/drivers/video/fbdev/sm712fb.c
++++ b/drivers/video/fbdev/sm712fb.c
+@@ -1429,6 +1429,8 @@ static int smtc_map_smem(struct smtcfb_info *sfb,
+ static void smtc_unmap_smem(struct smtcfb_info *sfb)
+ {
+ if (sfb && sfb->fb->screen_base) {
++ if (sfb->chip_id == 0x720)
++ sfb->fb->screen_base -= 0x00200000;
+ iounmap(sfb->fb->screen_base);
+ sfb->fb->screen_base = NULL;
+ }
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index bed90d612e48..ebb05517b6aa 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -570,11 +570,13 @@ static int add_ballooned_pages(int nr_pages)
+ if (xen_hotplug_unpopulated) {
+ st = reserve_additional_memory();
+ if (st != BP_ECANCELED) {
++ int rc;
++
+ mutex_unlock(&balloon_mutex);
+- wait_event(balloon_wq,
++ rc = wait_event_interruptible(balloon_wq,
+ !list_empty(&ballooned_pages));
+ mutex_lock(&balloon_mutex);
+- return 0;
++ return rc ? -ENOMEM : 0;
+ }
+ }
+
+@@ -632,6 +634,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
+ out_undo:
+ mutex_unlock(&balloon_mutex);
+ free_xenballooned_pages(pgno, pages);
++ /*
++ * NB: free_xenballooned_pages will only subtract pgno pages, but since
++ * target_unpopulated is incremented with nr_pages at the start we need
++ * to remove the remaining ones also, or accounting will be screwed.
++ */
++ balloon_stats.target_unpopulated -= nr_pages - pgno;
+ return ret;
+ }
+ EXPORT_SYMBOL(alloc_xenballooned_pages);
+diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
+index 2c4f324f8626..da799929087d 100644
+--- a/drivers/xen/gntdev-dmabuf.c
++++ b/drivers/xen/gntdev-dmabuf.c
+@@ -641,6 +641,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
+ goto fail_detach;
+ }
+
++ /* Check that we have zero offset. */
++ if (sgt->sgl->offset) {
++ ret = ERR_PTR(-EINVAL);
++ pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
++ sgt->sgl->offset);
++ goto fail_unmap;
++ }
++
+ /* Check number of pages that imported buffer has. */
+ if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
+ ret = ERR_PTR(-EINVAL);
+diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
+index 15a99f9c7253..39def020a074 100644
+--- a/fs/9p/v9fs.c
++++ b/fs/9p/v9fs.c
+@@ -500,10 +500,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
+ }
+
+ #ifdef CONFIG_9P_FSCACHE
+- if (v9ses->fscache) {
++ if (v9ses->fscache)
+ v9fs_cache_session_put_cookie(v9ses);
+- kfree(v9ses->cachetag);
+- }
++ kfree(v9ses->cachetag);
+ #endif
+ kfree(v9ses->uname);
+ kfree(v9ses->aname);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 1a089a642422..99dcb3897659 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4481,6 +4481,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
+
+ /* once for us */
+ free_extent_map(em);
++
++ cond_resched(); /* Allow large-extent preemption. */
+ }
+ }
+ return try_release_extent_state(tree, page, mask);
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index 7889a59a57fa..6f484f0d347e 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -304,8 +304,8 @@ again:
+ cache->key.objectid, cache->key.offset,
+ btrfs_block_group_used(&cache->item), cache->pinned,
+ cache->reserved, cache->ro ? "[readonly]" : "");
+- btrfs_dump_free_space(cache, bytes);
+ spin_unlock(&cache->lock);
++ btrfs_dump_free_space(cache, bytes);
+ }
+ if (++index < BTRFS_NR_RAID_TYPES)
+ goto again;
+diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
+index afb8340918b8..c689359ca532 100644
+--- a/fs/dlm/lockspace.c
++++ b/fs/dlm/lockspace.c
+@@ -632,6 +632,9 @@ static int new_lockspace(const char *name, const char *cluster,
+ wait_event(ls->ls_recover_lock_wait,
+ test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
+
++ /* let kobject handle freeing of ls if there's an error */
++ do_unreg = 1;
++
+ ls->ls_kobj.kset = dlm_kset;
+ error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
+ "%s", ls->ls_name);
+@@ -639,9 +642,6 @@ static int new_lockspace(const char *name, const char *cluster,
+ goto out_recoverd;
+ kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
+
+- /* let kobject handle freeing of ls if there's an error */
+- do_unreg = 1;
+-
+ /* This uevent triggers dlm_controld in userspace to add us to the
+ group of nodes that are members of this lockspace (managed by the
+ cluster infrastructure.) Once it's done that, it tells us who the
+diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
+index 3350ab65d892..b36b414cd7a7 100644
+--- a/fs/erofs/inode.c
++++ b/fs/erofs/inode.c
+@@ -8,31 +8,80 @@
+
+ #include <trace/events/erofs.h>
+
+-/* no locking */
+-static int erofs_read_inode(struct inode *inode, void *data)
++/*
++ * if inode is successfully read, return its inode page (or sometimes
++ * the inode payload page if it's an extended inode) in order to fill
++ * inline data if possible.
++ */
++static struct page *erofs_read_inode(struct inode *inode,
++ unsigned int *ofs)
+ {
++ struct super_block *sb = inode->i_sb;
++ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_inode *vi = EROFS_I(inode);
+- struct erofs_inode_compact *dic = data;
+- struct erofs_inode_extended *die;
++ const erofs_off_t inode_loc = iloc(sbi, vi->nid);
++
++ erofs_blk_t blkaddr, nblks = 0;
++ struct page *page;
++ struct erofs_inode_compact *dic;
++ struct erofs_inode_extended *die, *copied = NULL;
++ unsigned int ifmt;
++ int err;
+
+- const unsigned int ifmt = le16_to_cpu(dic->i_format);
+- struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+- erofs_blk_t nblks = 0;
++ blkaddr = erofs_blknr(inode_loc);
++ *ofs = erofs_blkoff(inode_loc);
+
+- vi->datalayout = erofs_inode_datalayout(ifmt);
++ erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
++ __func__, vi->nid, *ofs, blkaddr);
++
++ page = erofs_get_meta_page(sb, blkaddr);
++ if (IS_ERR(page)) {
++ erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
++ vi->nid, PTR_ERR(page));
++ return page;
++ }
+
++ dic = page_address(page) + *ofs;
++ ifmt = le16_to_cpu(dic->i_format);
++
++ vi->datalayout = erofs_inode_datalayout(ifmt);
+ if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
+ erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
+ vi->datalayout, vi->nid);
+- DBG_BUGON(1);
+- return -EOPNOTSUPP;
++ err = -EOPNOTSUPP;
++ goto err_out;
+ }
+
+ switch (erofs_inode_version(ifmt)) {
+ case EROFS_INODE_LAYOUT_EXTENDED:
+- die = data;
+-
+ vi->inode_isize = sizeof(struct erofs_inode_extended);
++ /* check if the inode acrosses page boundary */
++ if (*ofs + vi->inode_isize <= PAGE_SIZE) {
++ *ofs += vi->inode_isize;
++ die = (struct erofs_inode_extended *)dic;
++ } else {
++ const unsigned int gotten = PAGE_SIZE - *ofs;
++
++ copied = kmalloc(vi->inode_isize, GFP_NOFS);
++ if (!copied) {
++ err = -ENOMEM;
++ goto err_out;
++ }
++ memcpy(copied, dic, gotten);
++ unlock_page(page);
++ put_page(page);
++
++ page = erofs_get_meta_page(sb, blkaddr + 1);
++ if (IS_ERR(page)) {
++ erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld",
++ vi->nid, PTR_ERR(page));
++ kfree(copied);
++ return page;
++ }
++ *ofs = vi->inode_isize - gotten;
++ memcpy((u8 *)copied + gotten, page_address(page), *ofs);
++ die = copied;
++ }
+ vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
+
+ inode->i_mode = le16_to_cpu(die->i_mode);
+@@ -69,9 +118,12 @@ static int erofs_read_inode(struct inode *inode, void *data)
+ /* total blocks for compressed files */
+ if (erofs_inode_is_data_compressed(vi->datalayout))
+ nblks = le32_to_cpu(die->i_u.compressed_blocks);
++
++ kfree(copied);
+ break;
+ case EROFS_INODE_LAYOUT_COMPACT:
+ vi->inode_isize = sizeof(struct erofs_inode_compact);
++ *ofs += vi->inode_isize;
+ vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
+
+ inode->i_mode = le16_to_cpu(dic->i_mode);
+@@ -111,8 +163,8 @@ static int erofs_read_inode(struct inode *inode, void *data)
+ erofs_err(inode->i_sb,
+ "unsupported on-disk inode version %u of nid %llu",
+ erofs_inode_version(ifmt), vi->nid);
+- DBG_BUGON(1);
+- return -EOPNOTSUPP;
++ err = -EOPNOTSUPP;
++ goto err_out;
+ }
+
+ if (!nblks)
+@@ -120,13 +172,18 @@ static int erofs_read_inode(struct inode *inode, void *data)
+ inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
+ else
+ inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
+- return 0;
++ return page;
+
+ bogusimode:
+ erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
+ inode->i_mode, vi->nid);
++ err = -EFSCORRUPTED;
++err_out:
+ DBG_BUGON(1);
+- return -EFSCORRUPTED;
++ kfree(copied);
++ unlock_page(page);
++ put_page(page);
++ return ERR_PTR(err);
+ }
+
+ static int erofs_fill_symlink(struct inode *inode, void *data,
+@@ -146,7 +203,7 @@ static int erofs_fill_symlink(struct inode *inode, void *data,
+ if (!lnk)
+ return -ENOMEM;
+
+- m_pofs += vi->inode_isize + vi->xattr_isize;
++ m_pofs += vi->xattr_isize;
+ /* inline symlink data shouldn't cross page boundary as well */
+ if (m_pofs + inode->i_size > PAGE_SIZE) {
+ kfree(lnk);
+@@ -167,37 +224,17 @@ static int erofs_fill_symlink(struct inode *inode, void *data,
+
+ static int erofs_fill_inode(struct inode *inode, int isdir)
+ {
+- struct super_block *sb = inode->i_sb;
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct page *page;
+- void *data;
+- int err;
+- erofs_blk_t blkaddr;
+ unsigned int ofs;
+- erofs_off_t inode_loc;
++ int err = 0;
+
+ trace_erofs_fill_inode(inode, isdir);
+- inode_loc = iloc(EROFS_SB(sb), vi->nid);
+- blkaddr = erofs_blknr(inode_loc);
+- ofs = erofs_blkoff(inode_loc);
+-
+- erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
+- __func__, vi->nid, ofs, blkaddr);
+
+- page = erofs_get_meta_page(sb, blkaddr);
+-
+- if (IS_ERR(page)) {
+- erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
+- vi->nid, PTR_ERR(page));
++ /* read inode base data from disk */
++ page = erofs_read_inode(inode, &ofs);
++ if (IS_ERR(page))
+ return PTR_ERR(page);
+- }
+-
+- DBG_BUGON(!PageUptodate(page));
+- data = page_address(page);
+-
+- err = erofs_read_inode(inode, data + ofs);
+- if (err)
+- goto out_unlock;
+
+ /* setup the new inode */
+ switch (inode->i_mode & S_IFMT) {
+@@ -210,7 +247,7 @@ static int erofs_fill_inode(struct inode *inode, int isdir)
+ inode->i_fop = &erofs_dir_fops;
+ break;
+ case S_IFLNK:
+- err = erofs_fill_symlink(inode, data, ofs);
++ err = erofs_fill_symlink(inode, page_address(page), ofs);
+ if (err)
+ goto out_unlock;
+ inode_nohighmem(inode);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index be3d595a607f..fada14ee1cdc 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1433,8 +1433,10 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
+
+ if (file->f_op->read_iter)
+ ret2 = call_read_iter(file, kiocb, &iter);
+- else
++ else if (req->file->f_op->read)
+ ret2 = loop_rw_iter(READ, file, kiocb, &iter);
++ else
++ ret2 = -EINVAL;
+
+ /*
+ * In case of a short read, punt to async. This can happen
+@@ -1524,8 +1526,10 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
+
+ if (file->f_op->write_iter)
+ ret2 = call_write_iter(file, kiocb, &iter);
+- else
++ else if (req->file->f_op->write)
+ ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
++ else
++ ret2 = -EINVAL;
+
+ if (!force_nonblock)
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+@@ -2559,6 +2563,7 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
+ goto err;
+ }
+
++ memcpy(&req->submit, s, sizeof(*s));
+ ret = io_req_set_file(ctx, s, state, req);
+ if (unlikely(ret)) {
+ err_req:
+@@ -3390,6 +3395,9 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
+ return SIZE_MAX;
+ #endif
+
++ if (sq_offset)
++ *sq_offset = off;
++
+ sq_array_size = array_size(sizeof(u32), sq_entries);
+ if (sq_array_size == SIZE_MAX)
+ return SIZE_MAX;
+@@ -3397,9 +3405,6 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
+ if (check_add_overflow(off, sq_array_size, &off))
+ return SIZE_MAX;
+
+- if (sq_offset)
+- *sq_offset = off;
+-
+ return off;
+ }
+
+@@ -3856,6 +3861,10 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
+ struct io_rings *rings;
+ size_t size, sq_array_offset;
+
++ /* make sure these are sane, as we already accounted them */
++ ctx->sq_entries = p->sq_entries;
++ ctx->cq_entries = p->cq_entries;
++
+ size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
+ if (size == SIZE_MAX)
+ return -EOVERFLOW;
+@@ -3872,8 +3881,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
+ rings->cq_ring_entries = p->cq_entries;
+ ctx->sq_mask = rings->sq_ring_mask;
+ ctx->cq_mask = rings->cq_ring_mask;
+- ctx->sq_entries = rings->sq_ring_entries;
+- ctx->cq_entries = rings->cq_ring_entries;
+
+ size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
+ if (size == SIZE_MAX) {
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index e8c792b49616..c35bbaa19486 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -912,7 +912,7 @@ repeat:
+ }
+
+ fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
+- &name, 0);
++ NULL, 0);
+ iput(inode);
+ }
+
+diff --git a/fs/minix/inode.c b/fs/minix/inode.c
+index 7cb5fd38eb14..0dd929346f3f 100644
+--- a/fs/minix/inode.c
++++ b/fs/minix/inode.c
+@@ -150,6 +150,23 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
+ return 0;
+ }
+
++static bool minix_check_superblock(struct minix_sb_info *sbi)
++{
++ if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
++ return false;
++
++ /*
++ * s_max_size must not exceed the block mapping limitation. This check
++ * is only needed for V1 filesystems, since V2/V3 support an extra level
++ * of indirect blocks which places the limit well above U32_MAX.
++ */
++ if (sbi->s_version == MINIX_V1 &&
++ sbi->s_max_size > (7 + 512 + 512*512) * BLOCK_SIZE)
++ return false;
++
++ return true;
++}
++
+ static int minix_fill_super(struct super_block *s, void *data, int silent)
+ {
+ struct buffer_head *bh;
+@@ -228,11 +245,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
+ } else
+ goto out_no_fs;
+
++ if (!minix_check_superblock(sbi))
++ goto out_illegal_sb;
++
+ /*
+ * Allocate the buffer map to keep the superblock small.
+ */
+- if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
+- goto out_illegal_sb;
+ i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
+ map = kzalloc(i, GFP_KERNEL);
+ if (!map)
+@@ -468,6 +486,13 @@ static struct inode *V1_minix_iget(struct inode *inode)
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
+ }
++ if (raw_inode->i_nlinks == 0) {
++ printk("MINIX-fs: deleted inode referenced: %lu\n",
++ inode->i_ino);
++ brelse(bh);
++ iget_failed(inode);
++ return ERR_PTR(-ESTALE);
++ }
+ inode->i_mode = raw_inode->i_mode;
+ i_uid_write(inode, raw_inode->i_uid);
+ i_gid_write(inode, raw_inode->i_gid);
+@@ -501,6 +526,13 @@ static struct inode *V2_minix_iget(struct inode *inode)
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
+ }
++ if (raw_inode->i_nlinks == 0) {
++ printk("MINIX-fs: deleted inode referenced: %lu\n",
++ inode->i_ino);
++ brelse(bh);
++ iget_failed(inode);
++ return ERR_PTR(-ESTALE);
++ }
+ inode->i_mode = raw_inode->i_mode;
+ i_uid_write(inode, raw_inode->i_uid);
+ i_gid_write(inode, raw_inode->i_gid);
+diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
+index 043c3fdbc8e7..446148792f41 100644
+--- a/fs/minix/itree_common.c
++++ b/fs/minix/itree_common.c
+@@ -75,6 +75,7 @@ static int alloc_branch(struct inode *inode,
+ int n = 0;
+ int i;
+ int parent = minix_new_block(inode);
++ int err = -ENOSPC;
+
+ branch[0].key = cpu_to_block(parent);
+ if (parent) for (n = 1; n < num; n++) {
+@@ -85,6 +86,11 @@ static int alloc_branch(struct inode *inode,
+ break;
+ branch[n].key = cpu_to_block(nr);
+ bh = sb_getblk(inode->i_sb, parent);
++ if (!bh) {
++ minix_free_block(inode, nr);
++ err = -ENOMEM;
++ break;
++ }
+ lock_buffer(bh);
+ memset(bh->b_data, 0, bh->b_size);
+ branch[n].bh = bh;
+@@ -103,7 +109,7 @@ static int alloc_branch(struct inode *inode,
+ bforget(branch[i].bh);
+ for (i = 0; i < n; i++)
+ minix_free_block(inode, block_to_cpu(branch[i].key));
+- return -ENOSPC;
++ return err;
+ }
+
+ static inline int splice_branch(struct inode *inode,
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 443639cbb0cf..9c2b07ce57b2 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1198,31 +1198,27 @@ out:
+ return status;
+ }
+
++static bool
++pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo,
++ enum pnfs_iomode iomode,
++ u32 seq)
++{
++ struct pnfs_layout_range recall_range = {
++ .length = NFS4_MAX_UINT64,
++ .iomode = iomode,
++ };
++ return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
++ &recall_range, seq) != -EBUSY;
++}
++
+ /* Return true if layoutreturn is needed */
+ static bool
+ pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
+ {
+- struct pnfs_layout_segment *s;
+- enum pnfs_iomode iomode;
+- u32 seq;
+-
+ if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+ return false;
+-
+- seq = lo->plh_return_seq;
+- iomode = lo->plh_return_iomode;
+-
+- /* Defer layoutreturn until all recalled lsegs are done */
+- list_for_each_entry(s, &lo->plh_segs, pls_list) {
+- if (seq && pnfs_seqid_is_newer(s->pls_seq, seq))
+- continue;
+- if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode)
+- continue;
+- if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
+- return false;
+- }
+-
+- return true;
++ return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
++ lo->plh_return_seq);
+ }
+
+ static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
+@@ -2362,16 +2358,6 @@ out_forget:
+ return ERR_PTR(-EAGAIN);
+ }
+
+-static int
+-mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg,
+- struct list_head *tmp_list)
+-{
+- if (!mark_lseg_invalid(lseg, tmp_list))
+- return 0;
+- pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg);
+- return 1;
+-}
+-
+ /**
+ * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
+ * @lo: pointer to layout header
+@@ -2408,7 +2394,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
+ lseg, lseg->pls_range.iomode,
+ lseg->pls_range.offset,
+ lseg->pls_range.length);
+- if (mark_lseg_invalid_or_return(lseg, tmp_list))
++ if (mark_lseg_invalid(lseg, tmp_list))
+ continue;
+ remaining++;
+ set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index e2c34c704185..50a863fc1779 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -2871,9 +2871,15 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
+
+ status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
+ 0, 0);
+- if (status < 0)
++ if (status < 0) {
+ mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
+
++ if (ex)
++ up_write(&osb->nfs_sync_rwlock);
++ else
++ up_read(&osb->nfs_sync_rwlock);
++ }
++
+ return status;
+ }
+
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index cdf5b8ae2583..74a60bae2b23 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -275,6 +275,9 @@ static int pstore_compress(const void *in, void *out,
+ {
+ int ret;
+
++ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION))
++ return -EINVAL;
++
+ ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
+ if (ret) {
+ pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
+@@ -661,7 +664,7 @@ static void decompress_record(struct pstore_record *record)
+ int unzipped_len;
+ char *unzipped, *workspace;
+
+- if (!record->compressed)
++ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed)
+ return;
+
+ /* Only PSTORE_TYPE_DMESG support compression. */
+diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
+index 88221c7a04cc..c6df01a2a158 100644
+--- a/fs/xfs/libxfs/xfs_trans_space.h
++++ b/fs/xfs/libxfs/xfs_trans_space.h
+@@ -57,7 +57,7 @@
+ XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK)
+ #define XFS_IALLOC_SPACE_RES(mp) \
+ (M_IGEO(mp)->ialloc_blks + \
+- (xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1 * \
++ ((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \
+ (M_IGEO(mp)->inobt_maxlevels - 1)))
+
+ /*
+diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
+index fa6ea6407992..392fb4df5c12 100644
+--- a/fs/xfs/scrub/bmap.c
++++ b/fs/xfs/scrub/bmap.c
+@@ -45,9 +45,27 @@ xchk_setup_inode_bmap(
+ */
+ if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
+ sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
++ struct address_space *mapping = VFS_I(sc->ip)->i_mapping;
++
+ inode_dio_wait(VFS_I(sc->ip));
+- error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
+- if (error)
++
++ /*
++ * Try to flush all incore state to disk before we examine the
++ * space mappings for the data fork. Leave accumulated errors
++ * in the mapping for the writer threads to consume.
++ *
++ * On ENOSPC or EIO writeback errors, we continue into the
++ * extent mapping checks because write failures do not
++ * necessarily imply anything about the correctness of the file
++ * metadata. The metadata and the file data could be on
++ * completely separate devices; a media failure might only
++ * affect a subset of the disk, etc. We can handle delalloc
++ * extents in the scrubber, so leaving them in memory is fine.
++ */
++ error = filemap_fdatawrite(mapping);
++ if (!error)
++ error = filemap_fdatawait_keep_errors(mapping);
++ if (error && (error != -ENOSPC && error != -EIO))
+ goto out;
+ }
+
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index 6a4fd1738b08..904d8285c226 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -1005,6 +1005,7 @@ xfs_reflink_remap_extent(
+ xfs_filblks_t rlen;
+ xfs_filblks_t unmap_len;
+ xfs_off_t newlen;
++ int64_t qres;
+ int error;
+
+ unmap_len = irec->br_startoff + irec->br_blockcount - destoff;
+@@ -1027,13 +1028,19 @@ xfs_reflink_remap_extent(
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+
+- /* If we're not just clearing space, then do we have enough quota? */
+- if (real_extent) {
+- error = xfs_trans_reserve_quota_nblks(tp, ip,
+- irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS);
+- if (error)
+- goto out_cancel;
+- }
++ /*
++ * Reserve quota for this operation. We don't know if the first unmap
++ * in the dest file will cause a bmap btree split, so we always reserve
++ * at least enough blocks for that split. If the extent being mapped
++ * in is written, we need to reserve quota for that too.
++ */
++ qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
++ if (real_extent)
++ qres += irec->br_blockcount;
++ error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0,
++ XFS_QMOPT_RES_REGBLKS);
++ if (error)
++ goto out_cancel;
+
+ trace_xfs_reflink_remap(ip, irec->br_startoff,
+ irec->br_blockcount, irec->br_startblock);
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index d7616d08e863..f050039ca2c0 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -340,6 +340,7 @@
+ */
+ #ifndef RO_AFTER_INIT_DATA
+ #define RO_AFTER_INIT_DATA \
++ . = ALIGN(8); \
+ __start_ro_after_init = .; \
+ *(.data..ro_after_init) \
+ JUMP_TABLE_DATA \
+diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
+index 4bbb5f1c8b5b..4c0224ff0a14 100644
+--- a/include/linux/bitfield.h
++++ b/include/linux/bitfield.h
+@@ -64,7 +64,7 @@
+ */
+ #define FIELD_FIT(_mask, _val) \
+ ({ \
+- __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \
++ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \
+ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
+ })
+
+diff --git a/include/linux/tpm.h b/include/linux/tpm.h
+index 53c0ea9ec9df..77fdc988c610 100644
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -93,6 +93,7 @@ struct tpm_space {
+ u8 *context_buf;
+ u32 session_tbl[3];
+ u8 *session_buf;
++ u32 buf_size;
+ };
+
+ struct tpm_bios_log {
+diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
+index eccfd3a4e4c8..f3caeeb7a0d0 100644
+--- a/include/linux/tpm_eventlog.h
++++ b/include/linux/tpm_eventlog.h
+@@ -211,9 +211,16 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+
+ efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
+
+- /* Check if event is malformed. */
++ /*
++ * Perform validation of the event in order to identify malformed
++ * events. This function may be asked to parse arbitrary byte sequences
++ * immediately following a valid event log. The caller expects this
++ * function to recognize that the byte sequence is not a valid event
++ * and to return an event size of 0.
++ */
+ if (memcmp(efispecid->signature, TCG_SPECID_SIG,
+- sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
++ sizeof(TCG_SPECID_SIG)) ||
++ !efispecid->num_algs || count != efispecid->num_algs) {
+ size = 0;
+ goto out;
+ }
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index 1fb11daa5c53..57ce5af258a3 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -362,7 +362,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+ static const char *___tp_str __tracepoint_string = str; \
+ ___tp_str; \
+ })
+-#define __tracepoint_string __attribute__((section("__tracepoint_str")))
++#define __tracepoint_string __attribute__((section("__tracepoint_str"), used))
+ #else
+ /*
+ * tracepoint_string() is used to save the string address for userspace
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 895546058a20..c71eb294da95 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -309,6 +309,10 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
+ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, unsigned int optlen);
+
++/* update the fast reuse flag when adding a socket */
++void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
++ struct sock *sk);
++
+ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
+
+ #define TCP_PINGPONG_THRESH 3
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index 078887c8c586..7c37e3c3b1c7 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -1624,18 +1624,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
+ }
+ #endif /* CONFIG_IP_VS_NFCT */
+
+-/* Really using conntrack? */
+-static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
+- struct sk_buff *skb)
++/* Using old conntrack that can not be redirected to another real server? */
++static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp,
++ struct sk_buff *skb)
+ {
+ #ifdef CONFIG_IP_VS_NFCT
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+- if (!(cp->flags & IP_VS_CONN_F_NFCT))
+- return false;
+ ct = nf_ct_get(skb, &ctinfo);
+- if (ct)
++ if (ct && nf_ct_is_confirmed(ct))
+ return true;
+ #endif
+ return false;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 7cf1b4972c66..377179283c46 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1650,6 +1650,8 @@ void tcp_fastopen_destroy_cipher(struct sock *sk);
+ void tcp_fastopen_ctx_destroy(struct net *net);
+ int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
+ void *primary_key, void *backup_key);
++int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
++ u64 *key);
+ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
+ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
+index 90734aa5aa36..b5f901af79f0 100644
+--- a/include/uapi/linux/seccomp.h
++++ b/include/uapi/linux/seccomp.h
+@@ -93,5 +93,6 @@ struct seccomp_notif_resp {
+ #define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
+ #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
+ struct seccomp_notif_resp)
+-#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64)
++#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
++
+ #endif /* _UAPI_LINUX_SECCOMP_H */
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 38ae3cf9d173..b34b5c6e2524 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1238,6 +1238,20 @@ static void uclamp_fork(struct task_struct *p)
+ }
+ }
+
++static void __init init_uclamp_rq(struct rq *rq)
++{
++ enum uclamp_id clamp_id;
++ struct uclamp_rq *uc_rq = rq->uclamp;
++
++ for_each_clamp_id(clamp_id) {
++ uc_rq[clamp_id] = (struct uclamp_rq) {
++ .value = uclamp_none(clamp_id)
++ };
++ }
++
++ rq->uclamp_flags = 0;
++}
++
+ static void __init init_uclamp(void)
+ {
+ struct uclamp_se uc_max = {};
+@@ -1246,11 +1260,8 @@ static void __init init_uclamp(void)
+
+ mutex_init(&uclamp_mutex);
+
+- for_each_possible_cpu(cpu) {
+- memset(&cpu_rq(cpu)->uclamp, 0,
+- sizeof(struct uclamp_rq)*UCLAMP_CNT);
+- cpu_rq(cpu)->uclamp_flags = 0;
+- }
++ for_each_possible_cpu(cpu)
++ init_uclamp_rq(cpu_rq(cpu));
+
+ for_each_clamp_id(clamp_id) {
+ uclamp_se_set(&init_task.uclamp_req[clamp_id],
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 9b16080093be..20bf1f66733a 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -9385,7 +9385,12 @@ static void kick_ilb(unsigned int flags)
+ {
+ int ilb_cpu;
+
+- nohz.next_balance++;
++ /*
++	 * Increase nohz.next_balance only if a full ilb is triggered, but
++ * not if we only update stats.
++ */
++ if (flags & NOHZ_BALANCE_KICK)
++ nohz.next_balance = jiffies+1;
+
+ ilb_cpu = find_new_ilb();
+
+@@ -9703,6 +9708,14 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+ }
+ }
+
++ /*
++ * next_balance will be updated only when there is a need.
++ * When the CPU is attached to null domain for ex, it will not be
++ * updated.
++ */
++ if (likely(update_next_balance))
++ nohz.next_balance = next_balance;
++
+ /* Newly idle CPU doesn't need an update */
+ if (idle != CPU_NEWLY_IDLE) {
+ update_blocked_averages(this_cpu);
+@@ -9723,14 +9736,6 @@ abort:
+ if (has_blocked_load)
+ WRITE_ONCE(nohz.has_blocked, 1);
+
+- /*
+- * next_balance will be updated only when there is a need.
+- * When the CPU is attached to null domain for ex, it will not be
+- * updated.
+- */
+- if (likely(update_next_balance))
+- nohz.next_balance = next_balance;
+-
+ return ret;
+ }
+
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 1fa1e13a5944..ffaa97a8d405 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -1333,7 +1333,7 @@ sd_init(struct sched_domain_topology_level *tl,
+ sd_flags = (*tl->sd_flags)();
+ if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
+ "wrong sd_flags in topology description\n"))
+- sd_flags &= ~TOPOLOGY_SD_FLAGS;
++ sd_flags &= TOPOLOGY_SD_FLAGS;
+
+ /* Apply detected topology flags */
+ sd_flags |= dflags;
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 2c697ce7be21..e0fd97235653 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -42,6 +42,14 @@
+ #include <linux/uaccess.h>
+ #include <linux/anon_inodes.h>
+
++/*
++ * When SECCOMP_IOCTL_NOTIF_ID_VALID was first introduced, it had the
++ * wrong direction flag in the ioctl number. This is the broken one,
++ * which the kernel needs to keep supporting until all userspaces stop
++ * using the wrong command number.
++ */
++#define SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR SECCOMP_IOR(2, __u64)
++
+ enum notify_state {
+ SECCOMP_NOTIFY_INIT,
+ SECCOMP_NOTIFY_SENT,
+@@ -1168,6 +1176,7 @@ static long seccomp_notify_ioctl(struct file *file, unsigned int cmd,
+ return seccomp_notify_recv(filter, buf);
+ case SECCOMP_IOCTL_NOTIF_SEND:
+ return seccomp_notify_send(filter, buf);
++ case SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR:
+ case SECCOMP_IOCTL_NOTIF_ID_VALID:
+ return seccomp_notify_id_valid(filter, buf);
+ default:
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index eaee960153e1..a4c8f9d9522e 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -521,10 +521,18 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ if (!bt->msg_data)
+ goto err;
+
+- ret = -ENOENT;
+-
+- dir = debugfs_lookup(buts->name, blk_debugfs_root);
+- if (!dir)
++#ifdef CONFIG_BLK_DEBUG_FS
++ /*
++ * When tracing whole make_request drivers (multiqueue) block devices,
++ * reuse the existing debugfs directory created by the block layer on
++ * init. For request-based block devices, all partitions block devices,
++ * and scsi-generic block devices we create a temporary new debugfs
++ * directory that will be removed once the trace ends.
++ */
++ if (queue_is_mq(q) && bdev && bdev == bdev->bd_contains)
++ dir = q->debugfs_dir;
++ else
++#endif
+ bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
+
+ bt->dev = dev;
+@@ -565,8 +573,6 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+
+ ret = 0;
+ err:
+- if (dir && !bt->dir)
+- dput(dir);
+ if (ret)
+ blk_trace_free(bt);
+ return ret;
+diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
+index 8cc01a603416..c9acf1c12cfc 100644
+--- a/lib/crc-t10dif.c
++++ b/lib/crc-t10dif.c
+@@ -19,39 +19,46 @@
+ static struct crypto_shash __rcu *crct10dif_tfm;
+ static struct static_key crct10dif_fallback __read_mostly;
+ static DEFINE_MUTEX(crc_t10dif_mutex);
++static struct work_struct crct10dif_rehash_work;
+
+-static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
++static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
+ {
+ struct crypto_alg *alg = data;
+- struct crypto_shash *new, *old;
+
+ if (val != CRYPTO_MSG_ALG_LOADED ||
+ static_key_false(&crct10dif_fallback) ||
+ strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
+ return 0;
+
++ schedule_work(&crct10dif_rehash_work);
++ return 0;
++}
++
++static void crc_t10dif_rehash(struct work_struct *work)
++{
++ struct crypto_shash *new, *old;
++
+ mutex_lock(&crc_t10dif_mutex);
+ old = rcu_dereference_protected(crct10dif_tfm,
+ lockdep_is_held(&crc_t10dif_mutex));
+ if (!old) {
+ mutex_unlock(&crc_t10dif_mutex);
+- return 0;
++ return;
+ }
+ new = crypto_alloc_shash("crct10dif", 0, 0);
+ if (IS_ERR(new)) {
+ mutex_unlock(&crc_t10dif_mutex);
+- return 0;
++ return;
+ }
+ rcu_assign_pointer(crct10dif_tfm, new);
+ mutex_unlock(&crc_t10dif_mutex);
+
+ synchronize_rcu();
+ crypto_free_shash(old);
+- return 0;
+ }
+
+ static struct notifier_block crc_t10dif_nb = {
+- .notifier_call = crc_t10dif_rehash,
++ .notifier_call = crc_t10dif_notify,
+ };
+
+ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
+@@ -86,19 +93,26 @@ EXPORT_SYMBOL(crc_t10dif);
+
+ static int __init crc_t10dif_mod_init(void)
+ {
++ struct crypto_shash *tfm;
++
++ INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
+ crypto_register_notifier(&crc_t10dif_nb);
+- crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
+- if (IS_ERR(crct10dif_tfm)) {
++ mutex_lock(&crc_t10dif_mutex);
++ tfm = crypto_alloc_shash("crct10dif", 0, 0);
++ if (IS_ERR(tfm)) {
+ static_key_slow_inc(&crct10dif_fallback);
+- crct10dif_tfm = NULL;
++ tfm = NULL;
+ }
++ RCU_INIT_POINTER(crct10dif_tfm, tfm);
++ mutex_unlock(&crc_t10dif_mutex);
+ return 0;
+ }
+
+ static void __exit crc_t10dif_mod_fini(void)
+ {
+ crypto_unregister_notifier(&crc_t10dif_nb);
+- crypto_free_shash(crct10dif_tfm);
++ cancel_work_sync(&crct10dif_rehash_work);
++ crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
+ }
+
+ module_init(crc_t10dif_mod_init);
+@@ -106,11 +120,27 @@ module_exit(crc_t10dif_mod_fini);
+
+ static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
+ {
++ struct crypto_shash *tfm;
++ const char *name;
++ int len;
++
+ if (static_key_false(&crct10dif_fallback))
+ return sprintf(buffer, "fallback\n");
+
+- return sprintf(buffer, "%s\n",
+- crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
++ rcu_read_lock();
++ tfm = rcu_dereference(crct10dif_tfm);
++ if (!tfm) {
++ len = sprintf(buffer, "init\n");
++ goto unlock;
++ }
++
++ name = crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
++ len = sprintf(buffer, "%s\n", name);
++
++unlock:
++ rcu_read_unlock();
++
++ return len;
+ }
+
+ module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
+diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
+index c60409138e13..ccf05719b1ad 100644
+--- a/lib/dynamic_debug.c
++++ b/lib/dynamic_debug.c
+@@ -87,22 +87,22 @@ static struct { unsigned flag:8; char opt_char; } opt_array[] = {
+ { _DPRINTK_FLAGS_NONE, '_' },
+ };
+
++struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; };
++
+ /* format a string into buf[] which describes the _ddebug's flags */
+-static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
+- size_t maxlen)
++static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb)
+ {
+- char *p = buf;
++ char *p = fb->buf;
+ int i;
+
+- BUG_ON(maxlen < 6);
+ for (i = 0; i < ARRAY_SIZE(opt_array); ++i)
+- if (dp->flags & opt_array[i].flag)
++ if (flags & opt_array[i].flag)
+ *p++ = opt_array[i].opt_char;
+- if (p == buf)
++ if (p == fb->buf)
+ *p++ = '_';
+ *p = '\0';
+
+- return buf;
++ return fb->buf;
+ }
+
+ #define vpr_info(fmt, ...) \
+@@ -144,7 +144,7 @@ static int ddebug_change(const struct ddebug_query *query,
+ struct ddebug_table *dt;
+ unsigned int newflags;
+ unsigned int nfound = 0;
+- char flagbuf[10];
++ struct flagsbuf fbuf;
+
+ /* search for matching ddebugs */
+ mutex_lock(&ddebug_lock);
+@@ -201,8 +201,7 @@ static int ddebug_change(const struct ddebug_query *query,
+ vpr_info("changed %s:%d [%s]%s =%s\n",
+ trim_prefix(dp->filename), dp->lineno,
+ dt->mod_name, dp->function,
+- ddebug_describe_flags(dp, flagbuf,
+- sizeof(flagbuf)));
++ ddebug_describe_flags(dp->flags, &fbuf));
+ }
+ }
+ mutex_unlock(&ddebug_lock);
+@@ -816,7 +815,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
+ {
+ struct ddebug_iter *iter = m->private;
+ struct _ddebug *dp = p;
+- char flagsbuf[10];
++ struct flagsbuf flags;
+
+ vpr_info("called m=%p p=%p\n", m, p);
+
+@@ -829,7 +828,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p)
+ seq_printf(m, "%s:%u [%s]%s =%s \"",
+ trim_prefix(dp->filename), dp->lineno,
+ iter->table->mod_name, dp->function,
+- ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf)));
++ ddebug_describe_flags(dp->flags, &flags));
+ seq_escape(m, dp->format, "\t\r\n\"");
+ seq_puts(m, "\"\n");
+
+diff --git a/lib/kobject.c b/lib/kobject.c
+index 83198cb37d8d..386873bdd51c 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -599,14 +599,7 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(kobject_move);
+
+-/**
+- * kobject_del() - Unlink kobject from hierarchy.
+- * @kobj: object.
+- *
+- * This is the function that should be called to delete an object
+- * successfully added via kobject_add().
+- */
+-void kobject_del(struct kobject *kobj)
++static void __kobject_del(struct kobject *kobj)
+ {
+ struct kernfs_node *sd;
+ const struct kobj_type *ktype;
+@@ -625,9 +618,23 @@ void kobject_del(struct kobject *kobj)
+
+ kobj->state_in_sysfs = 0;
+ kobj_kset_leave(kobj);
+- kobject_put(kobj->parent);
+ kobj->parent = NULL;
+ }
++
++/**
++ * kobject_del() - Unlink kobject from hierarchy.
++ * @kobj: object.
++ *
++ * This is the function that should be called to delete an object
++ * successfully added via kobject_add().
++ */
++void kobject_del(struct kobject *kobj)
++{
++ struct kobject *parent = kobj->parent;
++
++ __kobject_del(kobj);
++ kobject_put(parent);
++}
+ EXPORT_SYMBOL(kobject_del);
+
+ /**
+@@ -663,6 +670,7 @@ EXPORT_SYMBOL(kobject_get_unless_zero);
+ */
+ static void kobject_cleanup(struct kobject *kobj)
+ {
++ struct kobject *parent = kobj->parent;
+ struct kobj_type *t = get_ktype(kobj);
+ const char *name = kobj->name;
+
+@@ -684,7 +692,10 @@ static void kobject_cleanup(struct kobject *kobj)
+ if (kobj->state_in_sysfs) {
+ pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n",
+ kobject_name(kobj), kobj);
+- kobject_del(kobj);
++ __kobject_del(kobj);
++ } else {
++ /* avoid dropping the parent reference unnecessarily */
++ parent = NULL;
+ }
+
+ if (t && t->release) {
+@@ -698,6 +709,8 @@ static void kobject_cleanup(struct kobject *kobj)
+ pr_debug("kobject: '%s': free name\n", name);
+ kfree_const(name);
+ }
++
++ kobject_put(parent);
+ }
+
+ #ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+diff --git a/mm/mmap.c b/mm/mmap.c
+index ea1ba2db4f4f..a3584a90c55c 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -3172,6 +3172,7 @@ void exit_mmap(struct mm_struct *mm)
+ if (vma->vm_flags & VM_ACCOUNT)
+ nr_accounted += vma_pages(vma);
+ vma = remove_vma(vma);
++ cond_resched();
+ }
+ vm_unacct_memory(nr_accounted);
+ }
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index 4febc82a7c76..52fb6d6d6d58 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -50,6 +50,7 @@ static bool enable_6lowpan;
+ /* We are listening incoming connections via this channel
+ */
+ static struct l2cap_chan *listen_chan;
++static DEFINE_MUTEX(set_lock);
+
+ struct lowpan_peer {
+ struct list_head list;
+@@ -1070,12 +1071,14 @@ static void do_enable_set(struct work_struct *work)
+
+ enable_6lowpan = set_enable->flag;
+
++ mutex_lock(&set_lock);
+ if (listen_chan) {
+ l2cap_chan_close(listen_chan, 0);
+ l2cap_chan_put(listen_chan);
+ }
+
+ listen_chan = bt_6lowpan_listen();
++ mutex_unlock(&set_lock);
+
+ kfree(set_enable);
+ }
+@@ -1127,11 +1130,13 @@ static ssize_t lowpan_control_write(struct file *fp,
+ if (ret == -EINVAL)
+ return ret;
+
++ mutex_lock(&set_lock);
+ if (listen_chan) {
+ l2cap_chan_close(listen_chan, 0);
+ l2cap_chan_put(listen_chan);
+ listen_chan = NULL;
+ }
++ mutex_unlock(&set_lock);
+
+ if (conn) {
+ struct lowpan_peer *peer;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 33a232974374..991ab80234ce 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3337,6 +3337,16 @@ static void sock_inuse_add(struct net *net, int val)
+ }
+ #endif
+
++static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
++{
++ if (!twsk_prot)
++ return;
++ kfree(twsk_prot->twsk_slab_name);
++ twsk_prot->twsk_slab_name = NULL;
++ kmem_cache_destroy(twsk_prot->twsk_slab);
++ twsk_prot->twsk_slab = NULL;
++}
++
+ static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
+ {
+ if (!rsk_prot)
+@@ -3407,7 +3417,7 @@ int proto_register(struct proto *prot, int alloc_slab)
+ prot->slab_flags,
+ NULL);
+ if (prot->twsk_prot->twsk_slab == NULL)
+- goto out_free_timewait_sock_slab_name;
++ goto out_free_timewait_sock_slab;
+ }
+ }
+
+@@ -3415,15 +3425,15 @@ int proto_register(struct proto *prot, int alloc_slab)
+ ret = assign_proto_idx(prot);
+ if (ret) {
+ mutex_unlock(&proto_list_mutex);
+- goto out_free_timewait_sock_slab_name;
++ goto out_free_timewait_sock_slab;
+ }
+ list_add(&prot->node, &proto_list);
+ mutex_unlock(&proto_list_mutex);
+ return ret;
+
+-out_free_timewait_sock_slab_name:
++out_free_timewait_sock_slab:
+ if (alloc_slab && prot->twsk_prot)
+- kfree(prot->twsk_prot->twsk_slab_name);
++ tw_prot_cleanup(prot->twsk_prot);
+ out_free_request_sock_slab:
+ if (alloc_slab) {
+ req_prot_cleanup(prot->rsk_prot);
+@@ -3447,12 +3457,7 @@ void proto_unregister(struct proto *prot)
+ prot->slab = NULL;
+
+ req_prot_cleanup(prot->rsk_prot);
+-
+- if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
+- kmem_cache_destroy(prot->twsk_prot->twsk_slab);
+- kfree(prot->twsk_prot->twsk_slab_name);
+- prot->twsk_prot->twsk_slab = NULL;
+- }
++ tw_prot_cleanup(prot->twsk_prot);
+ }
+ EXPORT_SYMBOL(proto_unregister);
+
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 5e486895d67c..9745c52f49ca 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -284,6 +284,57 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
+ ipv6_only_sock(sk), true, false);
+ }
+
++void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
++ struct sock *sk)
++{
++ kuid_t uid = sock_i_uid(sk);
++ bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
++
++ if (hlist_empty(&tb->owners)) {
++ tb->fastreuse = reuse;
++ if (sk->sk_reuseport) {
++ tb->fastreuseport = FASTREUSEPORT_ANY;
++ tb->fastuid = uid;
++ tb->fast_rcv_saddr = sk->sk_rcv_saddr;
++ tb->fast_ipv6_only = ipv6_only_sock(sk);
++ tb->fast_sk_family = sk->sk_family;
++#if IS_ENABLED(CONFIG_IPV6)
++ tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
++#endif
++ } else {
++ tb->fastreuseport = 0;
++ }
++ } else {
++ if (!reuse)
++ tb->fastreuse = 0;
++ if (sk->sk_reuseport) {
++ /* We didn't match or we don't have fastreuseport set on
++ * the tb, but we have sk_reuseport set on this socket
++ * and we know that there are no bind conflicts with
++ * this socket in this tb, so reset our tb's reuseport
++ * settings so that any subsequent sockets that match
++ * our current socket will be put on the fast path.
++ *
++ * If we reset we need to set FASTREUSEPORT_STRICT so we
++ * do extra checking for all subsequent sk_reuseport
++ * socks.
++ */
++ if (!sk_reuseport_match(tb, sk)) {
++ tb->fastreuseport = FASTREUSEPORT_STRICT;
++ tb->fastuid = uid;
++ tb->fast_rcv_saddr = sk->sk_rcv_saddr;
++ tb->fast_ipv6_only = ipv6_only_sock(sk);
++ tb->fast_sk_family = sk->sk_family;
++#if IS_ENABLED(CONFIG_IPV6)
++ tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
++#endif
++ }
++ } else {
++ tb->fastreuseport = 0;
++ }
++ }
++}
++
+ /* Obtain a reference to a local port for the given sock,
+ * if snum is zero it means select any available local port.
+ * We try to allocate an odd port (and leave even ports for connect())
+@@ -296,7 +347,6 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
+ struct inet_bind_hashbucket *head;
+ struct net *net = sock_net(sk);
+ struct inet_bind_bucket *tb = NULL;
+- kuid_t uid = sock_i_uid(sk);
+ int l3mdev;
+
+ l3mdev = inet_sk_bound_l3mdev(sk);
+@@ -333,49 +383,8 @@ tb_found:
+ goto fail_unlock;
+ }
+ success:
+- if (hlist_empty(&tb->owners)) {
+- tb->fastreuse = reuse;
+- if (sk->sk_reuseport) {
+- tb->fastreuseport = FASTREUSEPORT_ANY;
+- tb->fastuid = uid;
+- tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+- tb->fast_ipv6_only = ipv6_only_sock(sk);
+- tb->fast_sk_family = sk->sk_family;
+-#if IS_ENABLED(CONFIG_IPV6)
+- tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+-#endif
+- } else {
+- tb->fastreuseport = 0;
+- }
+- } else {
+- if (!reuse)
+- tb->fastreuse = 0;
+- if (sk->sk_reuseport) {
+- /* We didn't match or we don't have fastreuseport set on
+- * the tb, but we have sk_reuseport set on this socket
+- * and we know that there are no bind conflicts with
+- * this socket in this tb, so reset our tb's reuseport
+- * settings so that any subsequent sockets that match
+- * our current socket will be put on the fast path.
+- *
+- * If we reset we need to set FASTREUSEPORT_STRICT so we
+- * do extra checking for all subsequent sk_reuseport
+- * socks.
+- */
+- if (!sk_reuseport_match(tb, sk)) {
+- tb->fastreuseport = FASTREUSEPORT_STRICT;
+- tb->fastuid = uid;
+- tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+- tb->fast_ipv6_only = ipv6_only_sock(sk);
+- tb->fast_sk_family = sk->sk_family;
+-#if IS_ENABLED(CONFIG_IPV6)
+- tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+-#endif
+- }
+- } else {
+- tb->fastreuseport = 0;
+- }
+- }
++ inet_csk_update_fastreuse(tb, sk);
++
+ if (!inet_csk(sk)->icsk_bind_hash)
+ inet_bind_hash(sk, tb, port);
+ WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 2bbaaf0c7176..006a34b18537 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -163,6 +163,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
+ return -ENOMEM;
+ }
+ }
++ inet_csk_update_fastreuse(tb, child);
+ }
+ inet_bind_hash(child, tb, port);
+ spin_unlock(&head->lock);
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 0902cb32bbad..c83a5d05aeaa 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -307,24 +307,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
+ struct ctl_table tbl = { .maxlen = ((TCP_FASTOPEN_KEY_LENGTH *
+ 2 * TCP_FASTOPEN_KEY_MAX) +
+ (TCP_FASTOPEN_KEY_MAX * 5)) };
+- struct tcp_fastopen_context *ctx;
+- u32 user_key[TCP_FASTOPEN_KEY_MAX * 4];
+- __le32 key[TCP_FASTOPEN_KEY_MAX * 4];
++ u32 user_key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u32)];
++ __le32 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(__le32)];
+ char *backup_data;
+- int ret, i = 0, off = 0, n_keys = 0;
++ int ret, i = 0, off = 0, n_keys;
+
+ tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
+ if (!tbl.data)
+ return -ENOMEM;
+
+- rcu_read_lock();
+- ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
+- if (ctx) {
+- n_keys = tcp_fastopen_context_len(ctx);
+- memcpy(&key[0], &ctx->key[0], TCP_FASTOPEN_KEY_LENGTH * n_keys);
+- }
+- rcu_read_unlock();
+-
++ n_keys = tcp_fastopen_get_cipher(net, NULL, (u64 *)key);
+ if (!n_keys) {
+ memset(&key[0], 0, TCP_FASTOPEN_KEY_LENGTH);
+ n_keys = 1;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 840901154210..01ddfb4156e4 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3527,22 +3527,14 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
+ return 0;
+
+ case TCP_FASTOPEN_KEY: {
+- __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
+- struct tcp_fastopen_context *ctx;
+- unsigned int key_len = 0;
++ u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
++ unsigned int key_len;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+- rcu_read_lock();
+- ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
+- if (ctx) {
+- key_len = tcp_fastopen_context_len(ctx) *
+- TCP_FASTOPEN_KEY_LENGTH;
+- memcpy(&key[0], &ctx->key[0], key_len);
+- }
+- rcu_read_unlock();
+-
++ key_len = tcp_fastopen_get_cipher(net, icsk, key) *
++ TCP_FASTOPEN_KEY_LENGTH;
+ len = min_t(unsigned int, len, key_len);
+ if (put_user(len, optlen))
+ return -EFAULT;
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index a915ade0c818..a9971e41f31b 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -108,6 +108,29 @@ out:
+ return err;
+ }
+
++int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
++ u64 *key)
++{
++ struct tcp_fastopen_context *ctx;
++ int n_keys = 0, i;
++
++ rcu_read_lock();
++ if (icsk)
++ ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
++ else
++ ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
++ if (ctx) {
++ n_keys = tcp_fastopen_context_len(ctx);
++ for (i = 0; i < n_keys; i++) {
++ put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
++ put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
++ }
++ }
++ rcu_read_unlock();
++
++ return n_keys;
++}
++
+ static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
+ struct sk_buff *syn,
+ const siphash_key_t *key,
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 8b80ab794a92..64a05906cc0e 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -2061,14 +2061,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
+
+ conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
+ if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+- bool uses_ct = false, resched = false;
++ bool old_ct = false, resched = false;
+
+ if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+ unlikely(!atomic_read(&cp->dest->weight))) {
+ resched = true;
+- uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++ old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
+ } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
+- uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++ old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
+ if (!atomic_read(&cp->n_control)) {
+ resched = true;
+ } else {
+@@ -2076,15 +2076,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
+ * that uses conntrack while it is still
+ * referenced by controlled connection(s).
+ */
+- resched = !uses_ct;
++ resched = !old_ct;
+ }
+ }
+
+ if (resched) {
++ if (!old_ct)
++ cp->flags &= ~IP_VS_CONN_F_NFCT;
+ if (!atomic_read(&cp->n_control))
+ ip_vs_conn_expire_now(cp);
+ __ip_vs_conn_put(cp);
+- if (uses_ct)
++ if (old_ct)
+ return NF_DROP;
+ cp = NULL;
+ }
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index ba5ffd3badd3..b5c867fe3232 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -332,10 +332,13 @@ static int rawsock_create(struct net *net, struct socket *sock,
+ if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW))
+ return -ESOCKTNOSUPPORT;
+
+- if (sock->type == SOCK_RAW)
++ if (sock->type == SOCK_RAW) {
++ if (!capable(CAP_NET_RAW))
++ return -EPERM;
+ sock->ops = &rawsock_raw_ops;
+- else
++ } else {
+ sock->ops = &rawsock_ops;
++ }
+
+ sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
+ if (!sk)
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 1d63ab3a878a..7735340c892e 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -941,6 +941,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
+ }
+
+ static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
++ __releases(&pkc->blk_fill_in_prog_lock)
+ {
+ struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
+ atomic_dec(&pkc->blk_fill_in_prog);
+@@ -988,6 +989,7 @@ static void prb_fill_curr_block(char *curr,
+ struct tpacket_kbdq_core *pkc,
+ struct tpacket_block_desc *pbd,
+ unsigned int len)
++ __acquires(&pkc->blk_fill_in_prog_lock)
+ {
+ struct tpacket3_hdr *ppd;
+
+@@ -2285,8 +2287,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ if (do_vnet &&
+ virtio_net_hdr_from_skb(skb, h.raw + macoff -
+ sizeof(struct virtio_net_hdr),
+- vio_le(), true, 0))
++ vio_le(), true, 0)) {
++ if (po->tp_version == TPACKET_V3)
++ prb_clear_blk_fill_status(&po->rx_ring);
+ goto drop_n_account;
++ }
+
+ if (po->tp_version <= TPACKET_V2) {
+ packet_increment_rx_head(po, &po->rx_ring);
+@@ -2392,7 +2397,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ __clear_bit(slot_id, po->rx_ring.rx_owner_map);
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sk->sk_data_ready(sk);
+- } else {
++ } else if (po->tp_version == TPACKET_V3) {
+ prb_clear_blk_fill_status(&po->rx_ring);
+ }
+
+diff --git a/net/socket.c b/net/socket.c
+index 432800b39ddb..d1a0264401b7 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -485,7 +485,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
+ if (f.file) {
+ sock = sock_from_file(f.file, err);
+ if (likely(sock)) {
+- *fput_needed = f.flags;
++ *fput_needed = f.flags & FDPUT_FPUT;
+ return sock;
+ }
+ fdput(f);
+diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
+index 683755d95075..78ad41656996 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
++++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
+@@ -584,7 +584,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
+ buf->head[0].iov_len);
+ memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
+ buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+- buf->len = len - GSS_KRB5_TOK_HDR_LEN + headskip;
++ buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);
+
+ /* Trim off the trailing "extra count" and checksum blob */
+ xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index fd91274e834d..3645cd241d3e 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -949,7 +949,6 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
+
+ maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
+ pad = priv_len - buf->len;
+- buf->len -= pad;
+ /* The upper layers assume the buffer is aligned on 4-byte boundaries.
+ * In the krb5p case, at least, the data ends up offset, so we need to
+ * move it around. */
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+index 066af6b2eb01..0bb3f0dca80d 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+@@ -677,7 +677,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
+ struct svc_rdma_read_info *info,
+ __be32 *p)
+ {
+- unsigned int i;
+ int ret;
+
+ ret = -EINVAL;
+@@ -700,12 +699,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
+ info->ri_chunklen += rs_length;
+ }
+
+- /* Pages under I/O have been copied to head->rc_pages.
+- * Prevent their premature release by svc_xprt_release() .
+- */
+- for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
+- rqstp->rq_pages[i] = NULL;
+-
+ return ret;
+ }
+
+@@ -800,6 +793,26 @@ out:
+ return ret;
+ }
+
++/* Pages under I/O have been copied to head->rc_pages. Ensure they
++ * are not released by svc_xprt_release() until the I/O is complete.
++ *
++ * This has to be done after all Read WRs are constructed to properly
++ * handle a page that is part of I/O on behalf of two different RDMA
++ * segments.
++ *
++ * Do this only if I/O has been posted. Otherwise, we do indeed want
++ * svc_xprt_release() to clean things up properly.
++ */
++static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
++ const unsigned int start,
++ const unsigned int num_pages)
++{
++ unsigned int i;
++
++ for (i = start; i < num_pages + start; i++)
++ rqstp->rq_pages[i] = NULL;
++}
++
+ /**
+ * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
+ * @rdma: controlling RDMA transport
+@@ -853,6 +866,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
+ ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
+ if (ret < 0)
+ goto out_err;
++ svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
+ return 0;
+
+ out_err:
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 1adeb1c0473b..25fca390cdcf 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -549,7 +549,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
+ {
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct iov_iter msg_iter;
+- char *kaddr = kmap(page);
++ char *kaddr;
+ struct kvec iov;
+ int rc;
+
+@@ -564,6 +564,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
+ goto out;
+ }
+
++ kaddr = kmap(page);
+ iov.iov_base = kaddr + offset;
+ iov.iov_len = size;
+ iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
+diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c
+index 2d4b717726b6..34b3fca788e8 100644
+--- a/samples/bpf/fds_example.c
++++ b/samples/bpf/fds_example.c
+@@ -30,6 +30,8 @@
+ #define BPF_M_MAP 1
+ #define BPF_M_PROG 2
+
++char bpf_log_buf[BPF_LOG_BUF_SIZE];
++
+ static void usage(void)
+ {
+ printf("Usage: fds_example [...]\n");
+@@ -57,7 +59,6 @@ static int bpf_prog_create(const char *object)
+ BPF_EXIT_INSN(),
+ };
+ size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn);
+- char bpf_log_buf[BPF_LOG_BUF_SIZE];
+ struct bpf_object *obj;
+ int prog_fd;
+
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index 7225107a9aaf..e59022b3f125 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -434,6 +434,11 @@ static int arm_is_fake_mcount(Elf32_Rel const *rp)
+ return 1;
+ }
+
++static int arm64_is_fake_mcount(Elf64_Rel const *rp)
++{
++ return ELF64_R_TYPE(w(rp->r_info)) != R_AARCH64_CALL26;
++}
++
+ /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
+ * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
+ * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
+@@ -547,6 +552,7 @@ static int do_file(char const *const fname)
+ make_nop = make_nop_arm64;
+ rel_type_nop = R_AARCH64_NONE;
+ ideal_nop = ideal_nop4_arm64;
++ is_fake_mcount64 = arm64_is_fake_mcount;
+ break;
+ case EM_IA_64: reltype = R_IA64_IMM64; break;
+ case EM_MIPS: /* reltype: e_class */ break;
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index be469fce19e1..8173982e00ab 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -362,6 +362,7 @@ static inline void ima_free_modsig(struct modsig *modsig)
+ #ifdef CONFIG_IMA_LSM_RULES
+
+ #define security_filter_rule_init security_audit_rule_init
++#define security_filter_rule_free security_audit_rule_free
+ #define security_filter_rule_match security_audit_rule_match
+
+ #else
+@@ -372,6 +373,10 @@ static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr,
+ return -EINVAL;
+ }
+
++static inline void security_filter_rule_free(void *lsmrule)
++{
++}
++
+ static inline int security_filter_rule_match(u32 secid, u32 field, u32 op,
+ void *lsmrule)
+ {
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index 558a7607bf93..e725d4187271 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -254,7 +254,7 @@ static void ima_lsm_free_rule(struct ima_rule_entry *entry)
+ int i;
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+- kfree(entry->lsm[i].rule);
++ security_filter_rule_free(entry->lsm[i].rule);
+ kfree(entry->lsm[i].args_p);
+ }
+ kfree(entry);
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index 840a192e9337..9c4308077574 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -884,7 +884,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ }
+
+ ret = sscanf(rule, "%d", &maplevel);
+- if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
++ if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL)
+ goto out;
+
+ rule += SMK_DIGITLEN;
+@@ -905,6 +905,10 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+
+ for (i = 0; i < catlen; i++) {
+ rule += SMK_DIGITLEN;
++ if (rule > data + count) {
++ rc = -EOVERFLOW;
++ goto out;
++ }
+ ret = sscanf(rule, "%u", &cat);
+ if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM)
+ goto out;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index ea25b8d0350d..88629906f314 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4391,6 +4391,7 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
+ {
+ struct alc_spec *spec = codec->spec;
+
++ spec->micmute_led_polarity = 1;
+ alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->init_amp = ALC_INIT_DEFAULT;
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 41b83ecaf008..914b75c23d1b 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -680,10 +680,11 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
+ regmap_write(sai->regmap, FSL_SAI_RCSR(ofs), 0);
+
+ regmap_update_bits(sai->regmap, FSL_SAI_TCR1(ofs),
+- FSL_SAI_CR1_RFW_MASK,
++ FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth),
+ sai->soc_data->fifo_depth - FSL_SAI_MAXBURST_TX);
+ regmap_update_bits(sai->regmap, FSL_SAI_RCR1(ofs),
+- FSL_SAI_CR1_RFW_MASK, FSL_SAI_MAXBURST_RX - 1);
++ FSL_SAI_CR1_RFW_MASK(sai->soc_data->fifo_depth),
++ FSL_SAI_MAXBURST_RX - 1);
+
+ snd_soc_dai_init_dma_data(cpu_dai, &sai->dma_params_tx,
+ &sai->dma_params_rx);
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index 76b15deea80c..6aba7d28f5f3 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -94,7 +94,7 @@
+ #define FSL_SAI_CSR_FRDE BIT(0)
+
+ /* SAI Transmit and Receive Configuration 1 Register */
+-#define FSL_SAI_CR1_RFW_MASK 0x1f
++#define FSL_SAI_CR1_RFW_MASK(x) ((x) - 1)
+
+ /* SAI Transmit and Receive Configuration 2 Register */
+ #define FSL_SAI_CR2_SYNC BIT(30)
+diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
+index adf416a49b48..60fb87495050 100644
+--- a/sound/soc/intel/boards/bxt_rt298.c
++++ b/sound/soc/intel/boards/bxt_rt298.c
+@@ -556,6 +556,7 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
+ /* broxton audio machine driver for SPT + RT298S */
+ static struct snd_soc_card broxton_rt298 = {
+ .name = "broxton-rt298",
++ .owner = THIS_MODULE,
+ .dai_link = broxton_rt298_dais,
+ .num_links = ARRAY_SIZE(broxton_rt298_dais),
+ .controls = broxton_controls,
+@@ -571,6 +572,7 @@ static struct snd_soc_card broxton_rt298 = {
+
+ static struct snd_soc_card geminilake_rt298 = {
+ .name = "geminilake-rt298",
++ .owner = THIS_MODULE,
+ .dai_link = broxton_rt298_dais,
+ .num_links = ARRAY_SIZE(broxton_rt298_dais),
+ .controls = broxton_controls,
+diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
+index 1f698adde506..7126344017fa 100644
+--- a/sound/soc/meson/axg-card.c
++++ b/sound/soc/meson/axg-card.c
+@@ -266,7 +266,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
+
+ lb = &card->dai_link[*index + 1];
+
+- lb->name = kasprintf(GFP_KERNEL, "%s-lb", pad->name);
++ lb->name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-lb", pad->name);
+ if (!lb->name)
+ return -ENOMEM;
+
+diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c
+index 358c8c0d861c..f7e8e9da68a0 100644
+--- a/sound/soc/meson/axg-tdm-formatter.c
++++ b/sound/soc/meson/axg-tdm-formatter.c
+@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks);
+ static int axg_tdm_formatter_enable(struct axg_tdm_formatter *formatter)
+ {
+ struct axg_tdm_stream *ts = formatter->stream;
+- bool invert = formatter->drv->quirks->invert_sclk;
++ bool invert;
+ int ret;
+
+ /* Do nothing if the formatter is already enabled */
+@@ -96,11 +96,12 @@ static int axg_tdm_formatter_enable(struct axg_tdm_formatter *formatter)
+ return ret;
+
+ /*
+- * If sclk is inverted, invert it back and provide the inversion
+- * required by the formatter
++ * If sclk is inverted, it means the bit should be latched on the
++ * rising edge which is what our HW expects. If not, we need to
++ * invert it before the formatter.
+ */
+- invert ^= axg_tdm_sclk_invert(ts->iface->fmt);
+- ret = clk_set_phase(formatter->sclk, invert ? 180 : 0);
++ invert = axg_tdm_sclk_invert(ts->iface->fmt);
++ ret = clk_set_phase(formatter->sclk, invert ? 0 : 180);
+ if (ret)
+ return ret;
+
+diff --git a/sound/soc/meson/axg-tdm-formatter.h b/sound/soc/meson/axg-tdm-formatter.h
+index 9ef98e955cb2..a1f0dcc0ff13 100644
+--- a/sound/soc/meson/axg-tdm-formatter.h
++++ b/sound/soc/meson/axg-tdm-formatter.h
+@@ -16,7 +16,6 @@ struct snd_kcontrol;
+
+ struct axg_tdm_formatter_hw {
+ unsigned int skew_offset;
+- bool invert_sclk;
+ };
+
+ struct axg_tdm_formatter_ops {
+diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
+index d51f3344be7c..e25336f73912 100644
+--- a/sound/soc/meson/axg-tdm-interface.c
++++ b/sound/soc/meson/axg-tdm-interface.c
+@@ -119,18 +119,25 @@ static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ {
+ struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+
+- /* These modes are not supported */
+- if (fmt & (SND_SOC_DAIFMT_CBS_CFM | SND_SOC_DAIFMT_CBM_CFS)) {
++ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
++ case SND_SOC_DAIFMT_CBS_CFS:
++ if (!iface->mclk) {
++ dev_err(dai->dev, "cpu clock master: mclk missing\n");
++ return -ENODEV;
++ }
++ break;
++
++ case SND_SOC_DAIFMT_CBM_CFM:
++ break;
++
++ case SND_SOC_DAIFMT_CBS_CFM:
++ case SND_SOC_DAIFMT_CBM_CFS:
+ dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n");
++ /* Fall-through */
++ default:
+ return -EINVAL;
+ }
+
+- /* If the TDM interface is the clock master, it requires mclk */
+- if (!iface->mclk && (fmt & SND_SOC_DAIFMT_CBS_CFS)) {
+- dev_err(dai->dev, "cpu clock master: mclk missing\n");
+- return -ENODEV;
+- }
+-
+ iface->fmt = fmt;
+ return 0;
+ }
+@@ -319,7 +326,8 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream,
+ if (ret)
+ return ret;
+
+- if (iface->fmt & SND_SOC_DAIFMT_CBS_CFS) {
++ if ((iface->fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
++ SND_SOC_DAIFMT_CBS_CFS) {
+ ret = axg_tdm_iface_set_sclk(dai, params);
+ if (ret)
+ return ret;
+diff --git a/sound/soc/meson/axg-tdmin.c b/sound/soc/meson/axg-tdmin.c
+index 973d4c02ef8d..88ed95ae886b 100644
+--- a/sound/soc/meson/axg-tdmin.c
++++ b/sound/soc/meson/axg-tdmin.c
+@@ -228,15 +228,29 @@ static const struct axg_tdm_formatter_driver axg_tdmin_drv = {
+ .regmap_cfg = &axg_tdmin_regmap_cfg,
+ .ops = &axg_tdmin_ops,
+ .quirks = &(const struct axg_tdm_formatter_hw) {
+- .invert_sclk = false,
+ .skew_offset = 2,
+ },
+ };
+
++static const struct axg_tdm_formatter_driver g12a_tdmin_drv = {
++ .component_drv = &axg_tdmin_component_drv,
++ .regmap_cfg = &axg_tdmin_regmap_cfg,
++ .ops = &axg_tdmin_ops,
++ .quirks = &(const struct axg_tdm_formatter_hw) {
++ .skew_offset = 3,
++ },
++};
++
+ static const struct of_device_id axg_tdmin_of_match[] = {
+ {
+ .compatible = "amlogic,axg-tdmin",
+ .data = &axg_tdmin_drv,
++ }, {
++ .compatible = "amlogic,g12a-tdmin",
++ .data = &g12a_tdmin_drv,
++ }, {
++ .compatible = "amlogic,sm1-tdmin",
++ .data = &g12a_tdmin_drv,
+ }, {}
+ };
+ MODULE_DEVICE_TABLE(of, axg_tdmin_of_match);
+diff --git a/sound/soc/meson/axg-tdmout.c b/sound/soc/meson/axg-tdmout.c
+index 418ec314b37d..3ceabddae629 100644
+--- a/sound/soc/meson/axg-tdmout.c
++++ b/sound/soc/meson/axg-tdmout.c
+@@ -238,7 +238,6 @@ static const struct axg_tdm_formatter_driver axg_tdmout_drv = {
+ .regmap_cfg = &axg_tdmout_regmap_cfg,
+ .ops = &axg_tdmout_ops,
+ .quirks = &(const struct axg_tdm_formatter_hw) {
+- .invert_sclk = true,
+ .skew_offset = 1,
+ },
+ };
+@@ -248,7 +247,6 @@ static const struct axg_tdm_formatter_driver g12a_tdmout_drv = {
+ .regmap_cfg = &axg_tdmout_regmap_cfg,
+ .ops = &axg_tdmout_ops,
+ .quirks = &(const struct axg_tdm_formatter_hw) {
+- .invert_sclk = true,
+ .skew_offset = 2,
+ },
+ };
+@@ -309,7 +307,6 @@ static const struct axg_tdm_formatter_driver sm1_tdmout_drv = {
+ .regmap_cfg = &axg_tdmout_regmap_cfg,
+ .ops = &axg_tdmout_ops,
+ .quirks = &(const struct axg_tdm_formatter_hw) {
+- .invert_sclk = true,
+ .skew_offset = 2,
+ },
+ };
+diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c
+index ea0fe9a09f3f..71410116add1 100644
+--- a/sound/soc/sof/nocodec.c
++++ b/sound/soc/sof/nocodec.c
+@@ -14,6 +14,7 @@
+
+ static struct snd_soc_card sof_nocodec_card = {
+ .name = "nocodec", /* the sof- prefix is added by the core */
++ .owner = THIS_MODULE
+ };
+
+ static int sof_nocodec_bes_setup(struct device *dev,
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index f39f23e3525d..d8ec5caf464d 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -133,6 +133,7 @@ struct snd_usb_substream {
+ unsigned int tx_length_quirk:1; /* add length specifier to transfers */
+ unsigned int fmt_type; /* USB audio format type (1-3) */
+ unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */
++ unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */
+
+ unsigned int running: 1; /* running status */
+
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index d39bf5b648d1..49f0dc0e3e4d 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -184,6 +184,7 @@ static const struct rc_config {
+ { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */
+ { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
+ { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
++ { USB_ID(0x041e, 0x3263), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */
+ { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
+ };
+
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index d11d00efc574..7b41f9748978 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -1417,6 +1417,12 @@ static void retire_capture_urb(struct snd_usb_substream *subs,
+ // continue;
+ }
+ bytes = urb->iso_frame_desc[i].actual_length;
++ if (subs->stream_offset_adj > 0) {
++ unsigned int adj = min(subs->stream_offset_adj, bytes);
++ cp += adj;
++ bytes -= adj;
++ subs->stream_offset_adj -= adj;
++ }
+ frames = bytes / stride;
+ if (!subs->txfr_quirk)
+ bytes = frames * stride;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 562179492a33..1573229d8cf4 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3570,6 +3570,62 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ }
+ }
+ },
++{
++ /*
++ * PIONEER DJ DDJ-RB
++ * PCM is 4 channels out, 2 dummy channels in @ 44.1 fixed
++ * The feedback for the output is the dummy input.
++ */
++ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e),
++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_COMPOSITE,
++ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 0,
++ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
++ .data = &(const struct audioformat) {
++ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
++ .channels = 4,
++ .iface = 0,
++ .altsetting = 1,
++ .altset_idx = 1,
++ .endpoint = 0x01,
++ .ep_attr = USB_ENDPOINT_XFER_ISOC|
++ USB_ENDPOINT_SYNC_ASYNC,
++ .rates = SNDRV_PCM_RATE_44100,
++ .rate_min = 44100,
++ .rate_max = 44100,
++ .nr_rates = 1,
++ .rate_table = (unsigned int[]) { 44100 }
++ }
++ },
++ {
++ .ifnum = 0,
++ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
++ .data = &(const struct audioformat) {
++ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
++ .channels = 2,
++ .iface = 0,
++ .altsetting = 1,
++ .altset_idx = 1,
++ .endpoint = 0x82,
++ .ep_attr = USB_ENDPOINT_XFER_ISOC|
++ USB_ENDPOINT_SYNC_ASYNC|
++ USB_ENDPOINT_USAGE_IMPLICIT_FB,
++ .rates = SNDRV_PCM_RATE_44100,
++ .rate_min = 44100,
++ .rate_max = 44100,
++ .nr_rates = 1,
++ .rate_table = (unsigned int[]) { 44100 }
++ }
++ },
++ {
++ .ifnum = -1
++ }
++ }
++ }
++},
+
+ #define ALC1220_VB_DESKTOP(vend, prod) { \
+ USB_DEVICE(vend, prod), \
+@@ -3623,7 +3679,13 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
+ * with.
+ */
+ {
+- USB_DEVICE(0x534d, 0x2109),
++ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
++ USB_DEVICE_ID_MATCH_INT_CLASS |
++ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
++ .idVendor = 0x534d,
++ .idProduct = 0x2109,
++ .bInterfaceClass = USB_CLASS_AUDIO,
++ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "MacroSilicon",
+ .product_name = "MS2109",
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index a8bb953cc468..a756f50d9f07 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1432,6 +1432,9 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+ case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
+ set_format_emu_quirk(subs, fmt);
+ break;
++ case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
++ subs->stream_offset_adj = 2;
++ break;
+ }
+ }
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 11785f9652ad..d01edd5da6cf 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -94,6 +94,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as,
+ subs->tx_length_quirk = as->chip->tx_length_quirk;
+ subs->speed = snd_usb_get_speed(subs->dev);
+ subs->pkt_offset_adj = 0;
++ subs->stream_offset_adj = 0;
+
+ snd_usb_set_pcm_ops(as->pcm, stream);
+
+diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
+index 9a9376d1d3df..66765f970bc5 100644
+--- a/tools/bpf/bpftool/btf.c
++++ b/tools/bpf/bpftool/btf.c
+@@ -510,7 +510,7 @@ static int do_dump(int argc, char **argv)
+ goto done;
+ }
+ if (!btf) {
+- err = ENOENT;
++ err = -ENOENT;
+ p_err("can't find btf with ID (%u)", btf_id);
+ goto done;
+ }
+diff --git a/tools/build/Build.include b/tools/build/Build.include
+index 9ec01f4454f9..585486e40995 100644
+--- a/tools/build/Build.include
++++ b/tools/build/Build.include
+@@ -74,7 +74,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
+ # dependencies in the cmd file
+ if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \
+ @set -e; \
+- $(echo-cmd) $(cmd_$(1)) && $(dep-cmd))
++ $(echo-cmd) $(cmd_$(1)); \
++ $(dep-cmd))
+
+ # if_changed - execute command if any prerequisite is newer than
+ # target, or command line has changed
+diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
+index a2e8c9da7fa5..d50cc05df495 100644
+--- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c
++++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
+@@ -19,6 +19,7 @@
+ #include <limits.h>
+ #include <sys/time.h>
+ #include <sys/syscall.h>
++#include <sys/sysinfo.h>
+ #include <sys/types.h>
+ #include <sys/shm.h>
+ #include <linux/futex.h>
+@@ -104,8 +105,9 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
+
+ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
+ {
+- int pid;
+- cpu_set_t cpuset;
++ int pid, ncpus;
++ cpu_set_t *cpuset;
++ size_t size;
+
+ pid = fork();
+ if (pid == -1) {
+@@ -116,14 +118,23 @@ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
+ if (pid)
+ return;
+
+- CPU_ZERO(&cpuset);
+- CPU_SET(cpu, &cpuset);
++ ncpus = get_nprocs();
++ size = CPU_ALLOC_SIZE(ncpus);
++ cpuset = CPU_ALLOC(ncpus);
++ if (!cpuset) {
++ perror("malloc");
++ exit(1);
++ }
++ CPU_ZERO_S(size, cpuset);
++ CPU_SET_S(cpu, size, cpuset);
+
+- if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
++ if (sched_setaffinity(0, size, cpuset)) {
+ perror("sched_setaffinity");
++ CPU_FREE(cpuset);
+ exit(1);
+ }
+
++ CPU_FREE(cpuset);
+ fn(arg);
+
+ exit(0);
+diff --git a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
+index f52ed92b53e7..00dc32c0ed75 100755
+--- a/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
++++ b/tools/testing/selftests/powerpc/eeh/eeh-functions.sh
+@@ -5,12 +5,17 @@ pe_ok() {
+ local dev="$1"
+ local path="/sys/bus/pci/devices/$dev/eeh_pe_state"
+
+- if ! [ -e "$path" ] ; then
++ # if a driver doesn't support the error handling callbacks then the
++ # device is recovered by removing and re-probing it. This causes the
++ # sysfs directory to disappear so read the PE state once and squash
++ # any potential error messages
++ local eeh_state="$(cat $path 2>/dev/null)"
++ if [ -z "$eeh_state" ]; then
+ return 1;
+ fi
+
+- local fw_state="$(cut -d' ' -f1 < $path)"
+- local sw_state="$(cut -d' ' -f2 < $path)"
++ local fw_state="$(echo $eeh_state | cut -d' ' -f1)"
++ local sw_state="$(echo $eeh_state | cut -d' ' -f2)"
+
+ # If EEH_PE_ISOLATED or EEH_PE_RECOVERING are set then the PE is in an
+ # error state or being recovered. Either way, not ok.
+diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
+index c02d24835db4..176102eca994 100644
+--- a/tools/testing/selftests/powerpc/utils.c
++++ b/tools/testing/selftests/powerpc/utils.c
+@@ -16,6 +16,7 @@
+ #include <string.h>
+ #include <sys/ioctl.h>
+ #include <sys/stat.h>
++#include <sys/sysinfo.h>
+ #include <sys/types.h>
+ #include <sys/utsname.h>
+ #include <unistd.h>
+@@ -88,28 +89,40 @@ void *get_auxv_entry(int type)
+
+ int pick_online_cpu(void)
+ {
+- cpu_set_t mask;
+- int cpu;
++ int ncpus, cpu = -1;
++ cpu_set_t *mask;
++ size_t size;
++
++ ncpus = get_nprocs_conf();
++ size = CPU_ALLOC_SIZE(ncpus);
++ mask = CPU_ALLOC(ncpus);
++ if (!mask) {
++ perror("malloc");
++ return -1;
++ }
+
+- CPU_ZERO(&mask);
++ CPU_ZERO_S(size, mask);
+
+- if (sched_getaffinity(0, sizeof(mask), &mask)) {
++ if (sched_getaffinity(0, size, mask)) {
+ perror("sched_getaffinity");
+- return -1;
++ goto done;
+ }
+
+ /* We prefer a primary thread, but skip 0 */
+- for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
+- if (CPU_ISSET(cpu, &mask))
+- return cpu;
++ for (cpu = 8; cpu < ncpus; cpu += 8)
++ if (CPU_ISSET_S(cpu, size, mask))
++ goto done;
+
+ /* Search for anything, but in reverse */
+- for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
+- if (CPU_ISSET(cpu, &mask))
+- return cpu;
++ for (cpu = ncpus - 1; cpu >= 0; cpu--)
++ if (CPU_ISSET_S(cpu, size, mask))
++ goto done;
+
+ printf("No cpus in affinity mask?!\n");
+- return -1;
++
++done:
++ CPU_FREE(mask);
++ return cpu;
+ }
+
+ bool is_ppc64le(void)
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 96bbda4f10fc..19c7351eeb74 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -177,7 +177,7 @@ struct seccomp_metadata {
+ #define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
+ #define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
+ struct seccomp_notif_resp)
+-#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64)
++#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
+
+ struct seccomp_notif {
+ __u64 id;