From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Tue, 12 Feb 2019 20:53:52 +0000 (UTC)
Message-ID: <1550004809.1a31279af4f26ab88a68bf8bb958c47c92600c64.mpagano@gentoo>
commit: 1a31279af4f26ab88a68bf8bb958c47c92600c64
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Feb 12 20:53:29 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Feb 12 20:53:29 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1a31279a
proj/linux-patches: Linux patch 4.19.21
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1020_linux-4.19.21.patch | 9498 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 9502 insertions(+)
diff --git a/0000_README b/0000_README
index b213e93..ff7bed3 100644
--- a/0000_README
+++ b/0000_README
@@ -123,6 +123,10 @@ Patch: 1019_linux-4.19.20.patch
From: http://www.kernel.org
Desc: Linux 4.19.20
+Patch: 1020_linux-4.19.21.patch
+From: http://www.kernel.org
+Desc: Linux 4.19.21
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1020_linux-4.19.21.patch b/1020_linux-4.19.21.patch
new file mode 100644
index 0000000..5b26e96
--- /dev/null
+++ b/1020_linux-4.19.21.patch
@@ -0,0 +1,9498 @@
+diff --git a/Makefile b/Makefile
+index f1859811dca1..ba5f14d38d8e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 20
++SUBLEVEL = 21
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
+index df1227613d48..c2ece0b91885 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
+@@ -13,7 +13,7 @@
+ bootargs = "console=ttyS4,115200 earlyprintk";
+ };
+
+- memory {
++ memory@80000000 {
+ reg = <0x80000000 0x40000000>;
+ };
+
+diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
+index 7a291de02543..22dade6393d0 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
+@@ -13,7 +13,7 @@
+ bootargs = "earlyprintk";
+ };
+
+- memory {
++ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
+index d598b6391362..024e52a6cd0f 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
+@@ -14,7 +14,7 @@
+ bootargs = "console=ttyS4,115200 earlyprintk";
+ };
+
+- memory {
++ memory@80000000 {
+ reg = <0x80000000 0x40000000>;
+ };
+
+@@ -322,4 +322,3 @@
+ &adc {
+ status = "okay";
+ };
+-
+diff --git a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
+index 43ed13963d35..33d704541de6 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
+@@ -17,7 +17,7 @@
+ bootargs = "console=ttyS4,115200 earlyprintk";
+ };
+
+- memory {
++ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+index 6f258b50eb44..502a361d1fe9 100644
+--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
++++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+@@ -274,20 +274,16 @@
+ read-only;
+ };
+ /*
+- * Between the boot loader and the rootfs is the kernel
+- * in a custom Storlink format flashed from the boot
+- * menu. The rootfs is in squashfs format.
++ * This firmware image contains the kernel catenated
++ * with the squashfs root filesystem. For some reason
++ * this is called "upgrade" on the vendor system.
+ */
+- partition@1800c0 {
+- label = "rootfs";
+- reg = <0x001800c0 0x01dbff40>;
+- read-only;
+- };
+- partition@1f40000 {
++ partition@40000 {
+ label = "upgrade";
+- reg = <0x01f40000 0x00040000>;
++ reg = <0x00040000 0x01f40000>;
+ read-only;
+ };
++ /* RGDB, Residental Gateway Database? */
+ partition@1f80000 {
+ label = "rgdb";
+ reg = <0x01f80000 0x00040000>;
+diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
+index 469cce2c0357..6e80254c4562 100644
+--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
++++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
+@@ -477,6 +477,15 @@
+ };
+
+ &gpio1 {
++ gpio-line-names = "", "", "", "",
++ "", "", "", "",
++ "", "hp-amp-shutdown-b", "", "",
++ "", "", "", "",
++ "", "", "", "",
++ "", "", "", "",
++ "", "", "", "",
++ "", "", "", "";
++
+ unused-sd3-wp-gpio {
+ /*
+ * See pinctrl_esdhc1 below for more details on this
+@@ -501,9 +510,6 @@
+ hpa1: amp@60 {
+ compatible = "ti,tpa6130a2";
+ reg = <0x60>;
+- pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_ampgpio>;
+- power-gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ Vdd-supply = <®_3p3v>;
+ };
+
+@@ -677,7 +683,10 @@
+ };
+
+ &iomuxc {
+- pinctrl_ampgpio: ampgpiogrp {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
++
++ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX51_PAD_GPIO1_9__GPIO1_9 0x5e
+ >;
+diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
+index 766bbb8495b6..47e5b63339d1 100644
+--- a/arch/arm/boot/dts/mmp2.dtsi
++++ b/arch/arm/boot/dts/mmp2.dtsi
+@@ -220,12 +220,15 @@
+ status = "disabled";
+ };
+
+- twsi2: i2c@d4025000 {
++ twsi2: i2c@d4031000 {
+ compatible = "mrvl,mmp-twsi";
+- reg = <0xd4025000 0x1000>;
+- interrupts = <58>;
++ reg = <0xd4031000 0x1000>;
++ interrupt-parent = <&intcmux17>;
++ interrupts = <0>;
+ clocks = <&soc_clocks MMP2_CLK_TWSI1>;
+ resets = <&soc_clocks MMP2_CLK_TWSI1>;
++ #address-cells = <1>;
++ #size-cells = <0>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
+index 490726b52216..9dc7ec7655cb 100644
+--- a/arch/arm/boot/dts/omap4-sdp.dts
++++ b/arch/arm/boot/dts/omap4-sdp.dts
+@@ -33,6 +33,7 @@
+ gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>; /* gpio line 48 */
+ enable-active-high;
+ regulator-boot-on;
++ startup-delay-us = <25000>;
+ };
+
+ vbat: fixedregulator-vbat {
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 0978282d5fc2..f574a5e0d589 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -693,6 +693,21 @@ void smp_send_stop(void)
+ pr_warn("SMP: failed to stop secondary CPUs\n");
+ }
+
++/* In case panic() and panic() called at the same time on CPU1 and CPU2,
++ * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop()
++ * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online,
++ * kdump fails. So split out the panic_smp_self_stop() and add
++ * set_cpu_online(smp_processor_id(), false).
++ */
++void panic_smp_self_stop(void)
++{
++ pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
++ smp_processor_id());
++ set_cpu_online(smp_processor_id(), false);
++ while (1)
++ cpu_relax();
++}
++
+ /*
+ * not supported here
+ */
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index cd65ea4e9c54..ec3789ba17b8 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -2397,7 +2397,7 @@ static int __init _init(struct omap_hwmod *oh, void *data)
+ * a stub; implementing this properly requires iclk autoidle usecounting in
+ * the clock code. No return value.
+ */
+-static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
++static void _setup_iclk_autoidle(struct omap_hwmod *oh)
+ {
+ struct omap_hwmod_ocp_if *os;
+
+@@ -2428,7 +2428,7 @@ static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
+ * reset. Returns 0 upon success or a negative error code upon
+ * failure.
+ */
+-static int __init _setup_reset(struct omap_hwmod *oh)
++static int _setup_reset(struct omap_hwmod *oh)
+ {
+ int r;
+
+@@ -2489,7 +2489,7 @@ static int __init _setup_reset(struct omap_hwmod *oh)
+ *
+ * No return value.
+ */
+-static void __init _setup_postsetup(struct omap_hwmod *oh)
++static void _setup_postsetup(struct omap_hwmod *oh)
+ {
+ u8 postsetup_state;
+
+diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
+index c5c0ab8ac9f9..024c1fbcc55a 100644
+--- a/arch/arm/mach-pxa/cm-x300.c
++++ b/arch/arm/mach-pxa/cm-x300.c
+@@ -558,7 +558,7 @@ static struct pxa3xx_u2d_platform_data cm_x300_u2d_platform_data = {
+ .exit = cm_x300_u2d_exit,
+ };
+
+-static void cm_x300_init_u2d(void)
++static void __init cm_x300_init_u2d(void)
+ {
+ pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data);
+ }
+diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
+index 9e132b3e48c6..9960ea158829 100644
+--- a/arch/arm/mach-pxa/littleton.c
++++ b/arch/arm/mach-pxa/littleton.c
+@@ -184,7 +184,7 @@ static struct pxafb_mach_info littleton_lcd_info = {
+ .lcd_conn = LCD_COLOR_TFT_16BPP,
+ };
+
+-static void littleton_init_lcd(void)
++static void __init littleton_init_lcd(void)
+ {
+ pxa_set_fb_info(NULL, &littleton_lcd_info);
+ }
+diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
+index e3851795d6d7..68a536de542d 100644
+--- a/arch/arm/mach-pxa/zeus.c
++++ b/arch/arm/mach-pxa/zeus.c
+@@ -559,7 +559,7 @@ static struct pxaohci_platform_data zeus_ohci_platform_data = {
+ .flags = ENABLE_PORT_ALL | POWER_SENSE_LOW,
+ };
+
+-static void zeus_register_ohci(void)
++static void __init zeus_register_ohci(void)
+ {
+ /* Port 2 is shared between host and client interface. */
+ UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
+diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
+index 35b2e50f17fb..49bb9a020a09 100644
+--- a/arch/arm64/include/asm/io.h
++++ b/arch/arm64/include/asm/io.h
+@@ -106,7 +106,23 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
+ }
+
+ /* IO barriers */
+-#define __iormb() rmb()
++#define __iormb(v) \
++({ \
++ unsigned long tmp; \
++ \
++ rmb(); \
++ \
++ /* \
++ * Create a dummy control dependency from the IO read to any \
++ * later instructions. This ensures that a subsequent call to \
++ * udelay() will be ordered due to the ISB in get_cycles(). \
++ */ \
++ asm volatile("eor %0, %1, %1\n" \
++ "cbnz %0, ." \
++ : "=r" (tmp) : "r" ((unsigned long)(v)) \
++ : "memory"); \
++})
++
+ #define __iowmb() wmb()
+
+ #define mmiowb() do { } while (0)
+@@ -131,10 +147,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access.
+ */
+-#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
+-#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+-#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+-#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; })
++#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; })
++#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
++#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
++#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
+
+ #define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
+ #define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
+@@ -185,9 +201,9 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
+ /*
+ * io{read,write}{16,32,64}be() macros
+ */
+-#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+-#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+-#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
++#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; })
++#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; })
++#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; })
+
+ #define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+ #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
+diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
+index 98c4ce55d9c3..ad64d2c92ef5 100644
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -130,7 +130,7 @@ struct user_sve_header {
+
+ /* Offset from the start of struct user_sve_header to the register data */
+ #define SVE_PT_REGS_OFFSET \
+- ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
++ ((sizeof(struct user_sve_header) + (SVE_VQ_BYTES - 1)) \
+ / SVE_VQ_BYTES * SVE_VQ_BYTES)
+
+ /*
+diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
+index 1175f5827ae1..295951f3172e 100644
+--- a/arch/arm64/kernel/entry-ftrace.S
++++ b/arch/arm64/kernel/entry-ftrace.S
+@@ -79,7 +79,6 @@
+ .macro mcount_get_lr reg
+ ldr \reg, [x29]
+ ldr \reg, [\reg, #8]
+- mcount_adjust_addr \reg, \reg
+ .endm
+
+ .macro mcount_get_lr_addr reg
+diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
+index 65af3f6ba81c..84328afa3a55 100644
+--- a/arch/mips/boot/dts/img/boston.dts
++++ b/arch/mips/boot/dts/img/boston.dts
+@@ -141,6 +141,12 @@
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+
++ eg20t_phub@2,0,0 {
++ compatible = "pci8086,8801";
++ reg = <0x00020000 0 0 0 0>;
++ intel,eg20t-prefetch = <0>;
++ };
++
+ eg20t_mac@2,0,1 {
+ compatible = "pci8086,8802";
+ reg = <0x00020100 0 0 0 0>;
+diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
+index e9cc62cfac99..ff50aeb1a933 100644
+--- a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
++++ b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
+@@ -4,8 +4,6 @@
+
+ struct jz4740_mmc_platform_data {
+ int gpio_power;
+- int gpio_card_detect;
+- int gpio_read_only;
+ unsigned card_detect_active_low:1;
+ unsigned read_only_active_low:1;
+ unsigned power_active_low:1;
+diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
+index c05dcf5ab414..273ef58f4d43 100644
+--- a/arch/mips/include/uapi/asm/inst.h
++++ b/arch/mips/include/uapi/asm/inst.h
+@@ -369,8 +369,8 @@ enum mm_32a_minor_op {
+ mm_ext_op = 0x02c,
+ mm_pool32axf_op = 0x03c,
+ mm_srl32_op = 0x040,
++ mm_srlv32_op = 0x050,
+ mm_sra_op = 0x080,
+- mm_srlv32_op = 0x090,
+ mm_rotr_op = 0x0c0,
+ mm_lwxs_op = 0x118,
+ mm_addu32_op = 0x150,
+diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
+index af0c8ace0141..705593d40d12 100644
+--- a/arch/mips/jz4740/board-qi_lb60.c
++++ b/arch/mips/jz4740/board-qi_lb60.c
+@@ -43,7 +43,6 @@
+ #include "clock.h"
+
+ /* GPIOs */
+-#define QI_LB60_GPIO_SD_CD JZ_GPIO_PORTD(0)
+ #define QI_LB60_GPIO_SD_VCC_EN_N JZ_GPIO_PORTD(2)
+
+ #define QI_LB60_GPIO_KEYOUT(x) (JZ_GPIO_PORTC(10) + (x))
+@@ -386,12 +385,18 @@ static struct platform_device qi_lb60_gpio_keys = {
+ };
+
+ static struct jz4740_mmc_platform_data qi_lb60_mmc_pdata = {
+- .gpio_card_detect = QI_LB60_GPIO_SD_CD,
+- .gpio_read_only = -1,
+ .gpio_power = QI_LB60_GPIO_SD_VCC_EN_N,
+ .power_active_low = 1,
+ };
+
++static struct gpiod_lookup_table qi_lb60_mmc_gpio_table = {
++ .dev_id = "jz4740-mmc.0",
++ .table = {
++ GPIO_LOOKUP("GPIOD", 0, "cd", GPIO_ACTIVE_HIGH),
++ { },
++ },
++};
++
+ /* beeper */
+ static struct pwm_lookup qi_lb60_pwm_lookup[] = {
+ PWM_LOOKUP("jz4740-pwm", 4, "pwm-beeper", NULL, 0,
+@@ -500,6 +505,7 @@ static int __init qi_lb60_init_platform_devices(void)
+ gpiod_add_lookup_table(&qi_lb60_audio_gpio_table);
+ gpiod_add_lookup_table(&qi_lb60_nand_gpio_table);
+ gpiod_add_lookup_table(&qi_lb60_spigpio_gpio_table);
++ gpiod_add_lookup_table(&qi_lb60_mmc_gpio_table);
+
+ spi_register_board_info(qi_lb60_spi_board_info,
+ ARRAY_SIZE(qi_lb60_spi_board_info));
+diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
+index 1f9cb0e3c79a..613d61763433 100644
+--- a/arch/mips/ralink/Kconfig
++++ b/arch/mips/ralink/Kconfig
+@@ -38,6 +38,7 @@ choice
+
+ config SOC_MT7620
+ bool "MT7620/8"
++ select CPU_MIPSR2_IRQ_VI
+ select HW_HAS_PCI
+
+ config SOC_MT7621
+diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile
+index 6b6855852223..7c5c15ad854a 100644
+--- a/arch/nds32/mm/Makefile
++++ b/arch/nds32/mm/Makefile
+@@ -4,4 +4,8 @@ obj-y := extable.o tlb.o \
+
+ obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
+ obj-$(CONFIG_HIGHMEM) += highmem.o
+-CFLAGS_proc-n13.o += -fomit-frame-pointer
++
++ifdef CONFIG_FUNCTION_TRACER
++CFLAGS_REMOVE_proc.o = $(CC_FLAGS_FTRACE)
++endif
++CFLAGS_proc.o += -fomit-frame-pointer
+diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
+index 1e7a33592e29..15bc07a31c46 100644
+--- a/arch/powerpc/include/asm/fadump.h
++++ b/arch/powerpc/include/asm/fadump.h
+@@ -200,7 +200,7 @@ struct fad_crash_memory_ranges {
+ unsigned long long size;
+ };
+
+-extern int is_fadump_boot_memory_area(u64 addr, ulong size);
++extern int is_fadump_memory_area(u64 addr, ulong size);
+ extern int early_init_dt_scan_fw_dump(unsigned long node,
+ const char *uname, int depth, void *data);
+ extern int fadump_reserve_mem(void);
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index bac225bb7f64..23bea99bf8d5 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -63,7 +63,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
+ #endif
+
+ #define access_ok(type, addr, size) \
+- (__chk_user_ptr(addr), \
++ (__chk_user_ptr(addr), (void)(type), \
+ __access_ok((__force unsigned long)(addr), (size), get_fs()))
+
+ /*
+diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
+index a711d22339ea..c02c95287a5f 100644
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -118,13 +118,19 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
+
+ /*
+ * If fadump is registered, check if the memory provided
+- * falls within boot memory area.
++ * falls within boot memory area and reserved memory area.
+ */
+-int is_fadump_boot_memory_area(u64 addr, ulong size)
++int is_fadump_memory_area(u64 addr, ulong size)
+ {
++ u64 d_start = fw_dump.reserve_dump_area_start;
++ u64 d_end = d_start + fw_dump.reserve_dump_area_size;
++
+ if (!fw_dump.dump_registered)
+ return 0;
+
++ if (((addr + size) > d_start) && (addr <= d_end))
++ return 1;
++
+ return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size;
+ }
+
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 07ae018e550e..53016c753f3c 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -296,6 +296,10 @@ SECTIONS
+ #ifdef CONFIG_PPC32
+ .data : AT(ADDR(.data) - LOAD_OFFSET) {
+ DATA_DATA
++#ifdef CONFIG_UBSAN
++ *(.data..Lubsan_data*)
++ *(.data..Lubsan_type*)
++#endif
+ *(.data.rel*)
+ *(SDATA_MAIN)
+ *(.sdata2)
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index eba5756d5b41..79b79408d92e 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -543,8 +543,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ #ifdef CONFIG_PPC_BOOK3S_64
+ case KVM_CAP_SPAPR_TCE:
+ case KVM_CAP_SPAPR_TCE_64:
+- /* fallthrough */
++ r = 1;
++ break;
+ case KVM_CAP_SPAPR_TCE_VFIO:
++ r = !!cpu_has_feature(CPU_FTR_HVMODE);
++ break;
+ case KVM_CAP_PPC_RTAS:
+ case KVM_CAP_PPC_FIXUP_HCALL:
+ case KVM_CAP_PPC_ENABLE_HCALL:
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index d51cf5f4e45e..365526ee29b8 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -221,7 +221,9 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
+ static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
+ unsigned long address)
+ {
+- if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
++ /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
++ if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
++ DSISR_PROTFAULT))) {
+ printk_ratelimited(KERN_CRIT "kernel tried to execute"
+ " exec-protected page (%lx) -"
+ "exploit attempt? (uid: %d)\n",
+diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
+index 177de814286f..6a2f65d3d088 100644
+--- a/arch/powerpc/perf/isa207-common.c
++++ b/arch/powerpc/perf/isa207-common.c
+@@ -226,8 +226,13 @@ void isa207_get_mem_weight(u64 *weight)
+ u64 mmcra = mfspr(SPRN_MMCRA);
+ u64 exp = MMCRA_THR_CTR_EXP(mmcra);
+ u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
++ u64 sier = mfspr(SPRN_SIER);
++ u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
+
+- *weight = mantissa << (2 * exp);
++ if (val == 0 || val == 7)
++ *weight = 0;
++ else
++ *weight = mantissa << (2 * exp);
+ }
+
+ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+index fe9691040f54..7639b2168755 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+@@ -299,7 +299,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ if (alloc_userspace_copy) {
+ offset = 0;
+ uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+- levels, tce_table_size, &offset,
++ tmplevels, tce_table_size, &offset,
+ &total_allocated_uas);
+ if (!uas)
+ goto free_tces_exit;
+diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
+index a0b20c03f078..e3010b14aea5 100644
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -272,6 +272,8 @@ int dlpar_detach_node(struct device_node *dn)
+ if (rc)
+ return rc;
+
++ of_node_put(dn);
++
+ return 0;
+ }
+
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index c1578f54c626..e4c658cda3a7 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -389,8 +389,11 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
+ phys_addr = lmb->base_addr;
+
+ #ifdef CONFIG_FA_DUMP
+- /* Don't hot-remove memory that falls in fadump boot memory area */
+- if (is_fadump_boot_memory_area(phys_addr, block_sz))
++ /*
++ * Don't hot-remove memory that falls in fadump boot memory area
++ * and memory that is reserved for capturing old kernel memory.
++ */
++ if (is_fadump_memory_area(phys_addr, block_sz))
+ return false;
+ #endif
+
+diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
+index 2bb1f3bb98ac..48c784f2101a 100644
+--- a/arch/s390/include/uapi/asm/zcrypt.h
++++ b/arch/s390/include/uapi/asm/zcrypt.h
+@@ -147,8 +147,8 @@ struct ica_xcRB {
+ * @cprb_len: CPRB header length [0x0020]
+ * @cprb_ver_id: CPRB version id. [0x04]
+ * @pad_000: Alignment pad bytes
+- * @flags: Admin cmd [0x80] or functional cmd [0x00]
+- * @func_id: Function id / subtype [0x5434]
++ * @flags: Admin bit [0x80], Special bit [0x20]
++ * @func_id: Function id / subtype [0x5434] "T4"
+ * @source_id: Source id [originator id]
+ * @target_id: Target id [usage/ctrl domain id]
+ * @ret_code: Return code
+diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
+index e59c577ed871..c70bc7809dda 100644
+--- a/arch/sh/boards/mach-kfr2r09/setup.c
++++ b/arch/sh/boards/mach-kfr2r09/setup.c
+@@ -25,7 +25,6 @@
+ #include <linux/memblock.h>
+ #include <linux/mfd/tmio.h>
+ #include <linux/mmc/host.h>
+-#include <linux/mtd/onenand.h>
+ #include <linux/mtd/physmap.h>
+ #include <linux/platform_data/lv5207lp.h>
+ #include <linux/platform_device.h>
+diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
+index 7485398d0737..9c04562310b3 100644
+--- a/arch/um/include/asm/pgtable.h
++++ b/arch/um/include/asm/pgtable.h
+@@ -197,12 +197,17 @@ static inline pte_t pte_mkold(pte_t pte)
+
+ static inline pte_t pte_wrprotect(pte_t pte)
+ {
+- pte_clear_bits(pte, _PAGE_RW);
++ if (likely(pte_get_bits(pte, _PAGE_RW)))
++ pte_clear_bits(pte, _PAGE_RW);
++ else
++ return pte;
+ return(pte_mknewprot(pte));
+ }
+
+ static inline pte_t pte_mkread(pte_t pte)
+ {
++ if (unlikely(pte_get_bits(pte, _PAGE_USER)))
++ return pte;
+ pte_set_bits(pte, _PAGE_USER);
+ return(pte_mknewprot(pte));
+ }
+@@ -221,6 +226,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
+
+ static inline pte_t pte_mkwrite(pte_t pte)
+ {
++ if (unlikely(pte_get_bits(pte, _PAGE_RW)))
++ return pte;
+ pte_set_bits(pte, _PAGE_RW);
+ return(pte_mknewprot(pte));
+ }
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 155fa4b53c56..d0b186264941 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3439,6 +3439,11 @@ static void free_excl_cntrs(int cpu)
+ }
+
+ static void intel_pmu_cpu_dying(int cpu)
++{
++ fini_debug_store_on_cpu(cpu);
++}
++
++static void intel_pmu_cpu_dead(int cpu)
+ {
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ struct intel_shared_regs *pc;
+@@ -3451,8 +3456,6 @@ static void intel_pmu_cpu_dying(int cpu)
+ }
+
+ free_excl_cntrs(cpu);
+-
+- fini_debug_store_on_cpu(cpu);
+ }
+
+ static void intel_pmu_sched_task(struct perf_event_context *ctx,
+@@ -3541,6 +3544,7 @@ static __initconst const struct x86_pmu core_pmu = {
+ .cpu_prepare = intel_pmu_cpu_prepare,
+ .cpu_starting = intel_pmu_cpu_starting,
+ .cpu_dying = intel_pmu_cpu_dying,
++ .cpu_dead = intel_pmu_cpu_dead,
+ };
+
+ static struct attribute *intel_pmu_attrs[];
+@@ -3581,6 +3585,8 @@ static __initconst const struct x86_pmu intel_pmu = {
+ .cpu_prepare = intel_pmu_cpu_prepare,
+ .cpu_starting = intel_pmu_cpu_starting,
+ .cpu_dying = intel_pmu_cpu_dying,
++ .cpu_dead = intel_pmu_cpu_dead,
++
+ .guest_get_msrs = intel_guest_get_msrs,
+ .sched_task = intel_pmu_sched_task,
+ };
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index c07bee31abe8..b10e04387f38 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
+ .id_table = snbep_uncore_pci_ids,
+ };
+
++#define NODE_ID_MASK 0x7
++
+ /*
+ * build pci bus to socket mapping
+ */
+@@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
+ err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
+ if (err)
+ break;
+- nodeid = config;
++ nodeid = config & NODE_ID_MASK;
+ /* get the Node ID mapping */
+ err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
+ if (err)
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 69dcdf195b61..fa2c93cb42a2 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -106,6 +106,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
+ #define user_insn(insn, output, input...) \
+ ({ \
+ int err; \
++ \
++ might_fault(); \
++ \
+ asm volatile(ASM_STAC "\n" \
+ "1:" #insn "\n\t" \
+ "2: " ASM_CLAC "\n" \
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 807d06a7acac..1e0c4c74195c 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -69,7 +69,7 @@ void __init check_bugs(void)
+ * identify_boot_cpu() initialized SMT support information, let the
+ * core code know.
+ */
+- cpu_smt_check_topology_early();
++ cpu_smt_check_topology();
+
+ if (!IS_ENABLED(CONFIG_SMP)) {
+ pr_info("CPU: ");
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index cdbedeb3f3db..f9e7096b1804 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -783,6 +783,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
+ quirk_no_way_out(i, m, regs);
+
+ if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
++ m->bank = i;
+ mce_read_aux(m, i);
+ *msg = tmp;
+ return 1;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index f1d3fe5a0c65..02ac8fa0cd6d 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5837,6 +5837,13 @@ static bool svm_cpu_has_accelerated_tpr(void)
+
+ static bool svm_has_emulated_msr(int index)
+ {
++ switch (index) {
++ case MSR_IA32_MCG_EXT_CTL:
++ return false;
++ default:
++ break;
++ }
++
+ return true;
+ }
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 39a0e34ff676..0b2e13dd517b 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -27,6 +27,7 @@
+ #include <linux/mm.h>
+ #include <linux/highmem.h>
+ #include <linux/sched.h>
++#include <linux/sched/smt.h>
+ #include <linux/moduleparam.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/trace_events.h>
+@@ -8469,6 +8470,7 @@ static void free_nested(struct vcpu_vmx *vmx)
+ if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
+ return;
+
++ hrtimer_cancel(&vmx->nested.preemption_timer);
+ vmx->nested.vmxon = false;
+ vmx->nested.smm.vmxon = false;
+ free_vpid(vmx->nested.vpid02);
+@@ -11128,7 +11130,7 @@ static int vmx_vm_init(struct kvm *kvm)
+ * Warn upon starting the first VM in a potentially
+ * insecure environment.
+ */
+- if (cpu_smt_control == CPU_SMT_ENABLED)
++ if (sched_smt_active())
+ pr_warn_once(L1TF_MSG_SMT);
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
+ pr_warn_once(L1TF_MSG_L1D);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 5a9a3ebe8fba..3a7cf7c6b28a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4904,6 +4904,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+ {
+ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+
++ /*
++ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
++ * is returned, but our callers are not ready for that and they blindly
++ * call kvm_inject_page_fault. Ensure that they at least do not leak
++ * uninitialized kernel stack memory into cr2 and error code.
++ */
++ memset(exception, 0, sizeof(*exception));
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+ exception);
+ }
+diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
+index 526536c81ddc..ca1e8e6dccc8 100644
+--- a/arch/x86/pci/broadcom_bus.c
++++ b/arch/x86/pci/broadcom_bus.c
+@@ -50,8 +50,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
+ word1 = read_pci_config_16(bus, slot, func, 0xc0);
+ word2 = read_pci_config_16(bus, slot, func, 0xc2);
+ if (word1 != word2) {
+- res.start = (word1 << 16) | 0x0000;
+- res.end = (word2 << 16) | 0xffff;
++ res.start = ((resource_size_t) word1 << 16) | 0x0000;
++ res.end = ((resource_size_t) word2 << 16) | 0xffff;
+ res.flags = IORESOURCE_MEM;
+ update_res(info, res.start, res.end, res.flags, 0);
+ }
+diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
+index 1090528825ec..e46ae07bab05 100644
+--- a/arch/xtensa/boot/dts/xtfpga.dtsi
++++ b/arch/xtensa/boot/dts/xtfpga.dtsi
+@@ -103,7 +103,7 @@
+ };
+ };
+
+- spi0: spi-master@0d0a0000 {
++ spi0: spi@0d0a0000 {
+ compatible = "cdns,xtfpga-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 59e32623a7ce..0fb9586766a7 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -1056,7 +1056,8 @@ config CRYPTO_AES_TI
+ 8 for decryption), this implementation only uses just two S-boxes of
+ 256 bytes each, and attempts to eliminate data dependent latencies by
+ prefetching the entire table into the cache at the start of each
+- block.
++ block. Interrupts are also disabled to avoid races where cachelines
++ are evicted when the CPU is interrupted to do something else.
+
+ config CRYPTO_AES_586
+ tristate "AES cipher algorithms (i586)"
+diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
+index 03023b2290e8..1ff9785b30f5 100644
+--- a/crypto/aes_ti.c
++++ b/crypto/aes_ti.c
+@@ -269,6 +269,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ const u32 *rkp = ctx->key_enc + 4;
+ int rounds = 6 + ctx->key_length / 4;
+ u32 st0[4], st1[4];
++ unsigned long flags;
+ int round;
+
+ st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
+@@ -276,6 +277,12 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
+ st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
+
++ /*
++ * Temporarily disable interrupts to avoid races where cachelines are
++ * evicted when the CPU is interrupted to do something else.
++ */
++ local_irq_save(flags);
++
+ st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
+ st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
+ st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
+@@ -300,6 +307,8 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
+ put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
+ put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
++
++ local_irq_restore(flags);
+ }
+
+ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+@@ -308,6 +317,7 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ const u32 *rkp = ctx->key_dec + 4;
+ int rounds = 6 + ctx->key_length / 4;
+ u32 st0[4], st1[4];
++ unsigned long flags;
+ int round;
+
+ st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
+@@ -315,6 +325,12 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
+ st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
+
++ /*
++ * Temporarily disable interrupts to avoid races where cachelines are
++ * evicted when the CPU is interrupted to do something else.
++ */
++ local_irq_save(flags);
++
+ st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
+ st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
+ st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
+@@ -339,6 +355,8 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
+ put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
+ put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
++
++ local_irq_restore(flags);
+ }
+
+ static struct crypto_alg aes_alg = {
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 02c6fd9caff7..f008ba7c9ced 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -691,6 +691,8 @@ static void __ghes_panic(struct ghes *ghes)
+ {
+ __ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);
+
++ ghes_clear_estatus(ghes);
++
+ /* reboot to log the error! */
+ if (!panic_timeout)
+ panic_timeout = ghes_panic_timeout;
+diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
+index 9d52743080a4..c336784d0bcb 100644
+--- a/drivers/acpi/spcr.c
++++ b/drivers/acpi/spcr.c
+@@ -148,6 +148,13 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
+ }
+
+ switch (table->baud_rate) {
++ case 0:
++ /*
++ * SPCR 1.04 defines 0 as a preconfigured state of UART.
++ * Assume firmware or bootloader configures console correctly.
++ */
++ baud_rate = 0;
++ break;
+ case 3:
+ baud_rate = 9600;
+ break;
+@@ -196,6 +203,10 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
+ * UART so don't attempt to change to the baud rate state
+ * in the table because driver cannot calculate the dividers
+ */
++ baud_rate = 0;
++ }
++
++ if (!baud_rate) {
+ snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype,
+ table->serial_port.address);
+ } else {
+diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
+index 10ecb232245d..03867f539f3a 100644
+--- a/drivers/ata/sata_rcar.c
++++ b/drivers/ata/sata_rcar.c
+@@ -895,7 +895,9 @@ static int sata_rcar_probe(struct platform_device *pdev)
+ int ret = 0;
+
+ irq = platform_get_irq(pdev, 0);
+- if (irq <= 0)
++ if (irq < 0)
++ return irq;
++ if (!irq)
+ return -EINVAL;
+
+ priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL);
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 585e2e1c9c8f..e06a57936cc9 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -614,8 +614,10 @@ static void remove_probe_files(struct bus_type *bus)
+ static ssize_t uevent_store(struct device_driver *drv, const char *buf,
+ size_t count)
+ {
+- kobject_synth_uevent(&drv->p->kobj, buf, count);
+- return count;
++ int rc;
++
++ rc = kobject_synth_uevent(&drv->p->kobj, buf, count);
++ return rc ? rc : count;
+ }
+ static DRIVER_ATTR_WO(uevent);
+
+@@ -831,8 +833,10 @@ static void klist_devices_put(struct klist_node *n)
+ static ssize_t bus_uevent_store(struct bus_type *bus,
+ const char *buf, size_t count)
+ {
+- kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
+- return count;
++ int rc;
++
++ rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
++ return rc ? rc : count;
+ }
+ static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
+
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index 5d5b5988e88b..dd6a6850cb45 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].size_prop;
+
+- if (of_property_read_u32(np, propname, &this_leaf->size))
+- this_leaf->size = 0;
++ of_property_read_u32(np, propname, &this_leaf->size);
+ }
+
+ /* not cache_line_size() because that's a macro in include/linux/cache.h */
+@@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].nr_sets_prop;
+
+- if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
+- this_leaf->number_of_sets = 0;
++ of_property_read_u32(np, propname, &this_leaf->number_of_sets);
+ }
+
+ static void cache_associativity(struct cacheinfo *this_leaf)
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 04bbcd779e11..92e2c32c2227 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1067,8 +1067,14 @@ out:
+ static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
+- if (kobject_synth_uevent(&dev->kobj, buf, count))
++ int rc;
++
++ rc = kobject_synth_uevent(&dev->kobj, buf, count);
++
++ if (rc) {
+ dev_err(dev, "uevent: failed to send synthetic uevent\n");
++ return rc;
++ }
+
+ return count;
+ }
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 2607f859881a..7caa1adaf62a 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -926,9 +926,6 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+
+ drv = dev->driver;
+ if (drv) {
+- if (driver_allows_async_probing(drv))
+- async_synchronize_full();
+-
+ while (device_links_busy(dev)) {
+ device_unlock(dev);
+ if (parent && dev->bus->need_parent_lock)
+@@ -1034,6 +1031,9 @@ void driver_detach(struct device_driver *drv)
+ struct device_private *dev_prv;
+ struct device *dev;
+
++ if (driver_allows_async_probing(drv))
++ async_synchronize_full();
++
+ for (;;) {
+ spin_lock(&drv->p->klist_devices.k_lock);
+ if (list_empty(&drv->p->klist_devices.k_list)) {
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index f98a097e73f2..d68b52cf9225 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -24,8 +24,14 @@ struct devres_node {
+
+ struct devres {
+ struct devres_node node;
+- /* -- 3 pointers */
+- unsigned long long data[]; /* guarantee ull alignment */
++ /*
++ * Some archs want to perform DMA into kmalloc caches
++ * and need a guaranteed alignment larger than
++ * the alignment of a 64-bit integer.
++ * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
++ * buffer alignment as if it was allocated by plain kmalloc().
++ */
++ u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
+ };
+
+ struct devres_group {
+diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
+index b4f02768ba47..319fabdd63a3 100644
+--- a/drivers/block/drbd/drbd_nl.c
++++ b/drivers/block/drbd/drbd_nl.c
+@@ -668,14 +668,15 @@ drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int for
+ if (rv == SS_TWO_PRIMARIES) {
+ /* Maybe the peer is detected as dead very soon...
+ retry at most once more in this case. */
+- int timeo;
+- rcu_read_lock();
+- nc = rcu_dereference(connection->net_conf);
+- timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+- rcu_read_unlock();
+- schedule_timeout_interruptible(timeo);
+- if (try < max_tries)
++ if (try < max_tries) {
++ int timeo;
+ try = max_tries - 1;
++ rcu_read_lock();
++ nc = rcu_dereference(connection->net_conf);
++ timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
++ rcu_read_unlock();
++ schedule_timeout_interruptible(timeo);
++ }
+ continue;
+ }
+ if (rv < SS_SUCCESS) {
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 75f6b47169e6..cb919b964066 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -3364,7 +3364,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
+ enum drbd_conns rv = C_MASK;
+ enum drbd_disk_state mydisk;
+ struct net_conf *nc;
+- int hg, rule_nr, rr_conflict, tentative;
++ int hg, rule_nr, rr_conflict, tentative, always_asbp;
+
+ mydisk = device->state.disk;
+ if (mydisk == D_NEGOTIATING)
+@@ -3415,8 +3415,12 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
+
+ rcu_read_lock();
+ nc = rcu_dereference(peer_device->connection->net_conf);
++ always_asbp = nc->always_asbp;
++ rr_conflict = nc->rr_conflict;
++ tentative = nc->tentative;
++ rcu_read_unlock();
+
+- if (hg == 100 || (hg == -100 && nc->always_asbp)) {
++ if (hg == 100 || (hg == -100 && always_asbp)) {
+ int pcount = (device->state.role == R_PRIMARY)
+ + (peer_role == R_PRIMARY);
+ int forced = (hg == -100);
+@@ -3455,9 +3459,6 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
+ "Sync from %s node\n",
+ (hg < 0) ? "peer" : "this");
+ }
+- rr_conflict = nc->rr_conflict;
+- tentative = nc->tentative;
+- rcu_read_unlock();
+
+ if (hg == -100) {
+ /* FIXME this log message is not correct if we end up here
+@@ -4141,7 +4142,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
+ kfree(device->p_uuid);
+ device->p_uuid = p_uuid;
+
+- if (device->state.conn < C_CONNECTED &&
++ if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
+ device->state.disk < D_INCONSISTENT &&
+ device->state.role == R_PRIMARY &&
+ (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index f68e9baffad7..5d7024057540 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -45,6 +45,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
+ #define WAITING_FOR_GEN_CMD 0x04
+ #define WAITING_FOR_ANY -1
+
++#define VDC_MAX_RETRIES 10
++
+ static struct workqueue_struct *sunvdc_wq;
+
+ struct vdc_req_entry {
+@@ -431,6 +433,7 @@ static int __vdc_tx_trigger(struct vdc_port *port)
+ .end_idx = dr->prod,
+ };
+ int err, delay;
++ int retries = 0;
+
+ hdr.seq = dr->snd_nxt;
+ delay = 1;
+@@ -443,6 +446,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
+ udelay(delay);
+ if ((delay <<= 1) > 128)
+ delay = 128;
++ if (retries++ > VDC_MAX_RETRIES)
++ break;
+ } while (err == -EAGAIN);
+
+ if (err == -ENOTCONN)
+diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
+index 469541c1e51e..20907a0a043b 100644
+--- a/drivers/block/swim3.c
++++ b/drivers/block/swim3.c
+@@ -1026,7 +1026,11 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
+ struct swim3 __iomem *sw = fs->swim3;
+
+ mutex_lock(&swim3_mutex);
+- if (fs->ref_count > 0 && --fs->ref_count == 0) {
++ if (fs->ref_count > 0)
++ --fs->ref_count;
++ else if (fs->ref_count == -1)
++ fs->ref_count = 0;
++ if (fs->ref_count == 0) {
+ swim3_action(fs, MOTOR_OFF);
+ out_8(&sw->control_bic, 0xff);
+ swim3_select(fs, RELAX);
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index e19bf0a750cf..a65505db09e5 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -53,6 +53,11 @@ static size_t huge_class_size;
+
+ static void zram_free_page(struct zram *zram, size_t index);
+
++static int zram_slot_trylock(struct zram *zram, u32 index)
++{
++ return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].value);
++}
++
+ static void zram_slot_lock(struct zram *zram, u32 index)
+ {
+ bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
+@@ -401,7 +406,6 @@ static ssize_t backing_dev_store(struct device *dev,
+ goto out;
+
+ reset_bdev(zram);
+- spin_lock_init(&zram->bitmap_lock);
+
+ zram->old_block_size = old_block_size;
+ zram->bdev = bdev;
+@@ -445,29 +449,24 @@ out:
+
+ static unsigned long get_entry_bdev(struct zram *zram)
+ {
+- unsigned long entry;
+-
+- spin_lock(&zram->bitmap_lock);
++ unsigned long blk_idx = 1;
++retry:
+ /* skip 0 bit to confuse zram.handle = 0 */
+- entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
+- if (entry == zram->nr_pages) {
+- spin_unlock(&zram->bitmap_lock);
++ blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
++ if (blk_idx == zram->nr_pages)
+ return 0;
+- }
+
+- set_bit(entry, zram->bitmap);
+- spin_unlock(&zram->bitmap_lock);
++ if (test_and_set_bit(blk_idx, zram->bitmap))
++ goto retry;
+
+- return entry;
++ return blk_idx;
+ }
+
+ static void put_entry_bdev(struct zram *zram, unsigned long entry)
+ {
+ int was_set;
+
+- spin_lock(&zram->bitmap_lock);
+ was_set = test_and_clear_bit(entry, zram->bitmap);
+- spin_unlock(&zram->bitmap_lock);
+ WARN_ON_ONCE(!was_set);
+ }
+
+@@ -888,9 +887,10 @@ static ssize_t debug_stat_show(struct device *dev,
+
+ down_read(&zram->init_lock);
+ ret = scnprintf(buf, PAGE_SIZE,
+- "version: %d\n%8llu\n",
++ "version: %d\n%8llu %8llu\n",
+ version,
+- (u64)atomic64_read(&zram->stats.writestall));
++ (u64)atomic64_read(&zram->stats.writestall),
++ (u64)atomic64_read(&zram->stats.miss_free));
+ up_read(&zram->init_lock);
+
+ return ret;
+@@ -1402,10 +1402,14 @@ static void zram_slot_free_notify(struct block_device *bdev,
+
+ zram = bdev->bd_disk->private_data;
+
+- zram_slot_lock(zram, index);
++ atomic64_inc(&zram->stats.notify_free);
++ if (!zram_slot_trylock(zram, index)) {
++ atomic64_inc(&zram->stats.miss_free);
++ return;
++ }
++
+ zram_free_page(zram, index);
+ zram_slot_unlock(zram, index);
+- atomic64_inc(&zram->stats.notify_free);
+ }
+
+ static int zram_rw_page(struct block_device *bdev, sector_t sector,
+diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
+index 72c8584b6dff..d1095dfdffa8 100644
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -79,6 +79,7 @@ struct zram_stats {
+ atomic64_t pages_stored; /* no. of pages currently stored */
+ atomic_long_t max_used_pages; /* no. of maximum pages stored */
+ atomic64_t writestall; /* no. of write slow paths */
++ atomic64_t miss_free; /* no. of missed free */
+ };
+
+ struct zram {
+@@ -110,7 +111,6 @@ struct zram {
+ unsigned int old_block_size;
+ unsigned long *bitmap;
+ unsigned long nr_pages;
+- spinlock_t bitmap_lock;
+ #endif
+ #ifdef CONFIG_ZRAM_MEMORY_TRACKING
+ struct dentry *debugfs_dir;
+diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
+index ddbd8c6a0ceb..800132369134 100644
+--- a/drivers/bluetooth/hci_bcm.c
++++ b/drivers/bluetooth/hci_bcm.c
+@@ -907,6 +907,10 @@ static int bcm_get_resources(struct bcm_device *dev)
+
+ dev->clk = devm_clk_get(dev->dev, NULL);
+
++ /* Handle deferred probing */
++ if (dev->clk == ERR_PTR(-EPROBE_DEFER))
++ return PTR_ERR(dev->clk);
++
+ dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(dev->device_wakeup))
+diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
+index ae3a7537cf0f..72cd96a8eb19 100644
+--- a/drivers/cdrom/gdrom.c
++++ b/drivers/cdrom/gdrom.c
+@@ -889,6 +889,7 @@ static void __exit exit_gdrom(void)
+ platform_device_unregister(pd);
+ platform_driver_unregister(&gdrom_driver);
+ kfree(gd.toc);
++ kfree(gd.cd_info);
+ }
+
+ module_init(init_gdrom);
+diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
+index 15af423cc0c9..f5d54a64d33c 100644
+--- a/drivers/clk/imgtec/clk-boston.c
++++ b/drivers/clk/imgtec/clk-boston.c
+@@ -73,27 +73,32 @@ static void __init clk_boston_setup(struct device_node *np)
+ hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
+ if (IS_ERR(hw)) {
+ pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
+- return;
++ goto error;
+ }
+ onecell->hws[BOSTON_CLK_INPUT] = hw;
+
+ hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
+ if (IS_ERR(hw)) {
+ pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
+- return;
++ goto error;
+ }
+ onecell->hws[BOSTON_CLK_SYS] = hw;
+
+ hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
+ if (IS_ERR(hw)) {
+ pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
+- return;
++ goto error;
+ }
+ onecell->hws[BOSTON_CLK_CPU] = hw;
+
+ err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell);
+ if (err)
+ pr_err("failed to add DT provider: %d\n", err);
++
++ return;
++
++error:
++ kfree(onecell);
+ }
+
+ /*
+diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
+index eb6bcbf345a3..390e3e0ecc45 100644
+--- a/drivers/clk/imx/clk-imx6sl.c
++++ b/drivers/clk/imx/clk-imx6sl.c
+@@ -17,6 +17,8 @@
+
+ #include "clk.h"
+
++#define CCDR 0x4
++#define BM_CCM_CCDR_MMDC_CH0_MASK (1 << 17)
+ #define CCSR 0xc
+ #define BM_CCSR_PLL1_SW_CLK_SEL (1 << 2)
+ #define CACRR 0x10
+@@ -409,6 +411,10 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
+ clks[IMX6SL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
+ clks[IMX6SL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
+
++ /* Ensure the MMDC CH0 handshake is bypassed */
++ writel_relaxed(readl_relaxed(base + CCDR) |
++ BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
++
+ imx_check_clocks(clks, ARRAY_SIZE(clks));
+
+ clk_data.clks = clks;
+diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
+index 50060e895e7a..9d79ff857d83 100644
+--- a/drivers/clk/meson/meson8b.c
++++ b/drivers/clk/meson/meson8b.c
+@@ -583,7 +583,7 @@ static struct clk_regmap meson8b_cpu_scale_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 20,
+- .width = 9,
++ .width = 10,
+ .table = cpu_scale_table,
+ .flags = CLK_DIVIDER_ALLOW_ZERO,
+ },
+@@ -596,20 +596,27 @@ static struct clk_regmap meson8b_cpu_scale_div = {
+ },
+ };
+
++static u32 mux_table_cpu_scale_out_sel[] = { 0, 1, 3 };
+ static struct clk_regmap meson8b_cpu_scale_out_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 2,
++ .table = mux_table_cpu_scale_out_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_scale_out_sel",
+ .ops = &clk_regmap_mux_ro_ops,
++ /*
++ * NOTE: We are skipping the parent with value 0x2 (which is
++ * "cpu_div3") because it results in a duty cycle of 33% which
++ * makes the system unstable and can result in a lockup of the
++ * whole system.
++ */
+ .parent_names = (const char *[]) { "cpu_in_sel",
+ "cpu_div2",
+- "cpu_div3",
+ "cpu_scale_div" },
+- .num_parents = 4,
++ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ };
+@@ -627,7 +634,8 @@ static struct clk_regmap meson8b_cpu_clk = {
+ "cpu_scale_out_sel" },
+ .num_parents = 2,
+ .flags = (CLK_SET_RATE_PARENT |
+- CLK_SET_RATE_NO_REPARENT),
++ CLK_SET_RATE_NO_REPARENT |
++ CLK_IS_CRITICAL),
+ },
+ };
+
+diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+index 13eb5b23c5e7..c40d572a7602 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
++++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+@@ -366,10 +366,10 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
+ static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
+ "pll-audio-2x", "pll-audio" };
+ static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
+- 0x0b0, 16, 2, BIT(31), 0);
++ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+ static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
+- 0x0b4, 16, 2, BIT(31), 0);
++ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
+
+ /* TODO: the parent for most of the USB clocks is not known */
+ static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+@@ -446,7 +446,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
+ static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
+ 0x140, BIT(31), CLK_SET_RATE_PARENT);
+ static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x",
+- 0x140, BIT(30), 0);
++ 0x140, BIT(30), CLK_SET_RATE_PARENT);
+ static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
+ 0x144, BIT(31), 0);
+
+diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
+index db2ede565f1a..b44476a1b7ad 100644
+--- a/drivers/cpuidle/cpuidle-big_little.c
++++ b/drivers/cpuidle/cpuidle-big_little.c
+@@ -167,6 +167,7 @@ static int __init bl_idle_init(void)
+ {
+ int ret;
+ struct device_node *root = of_find_node_by_path("/");
++ const struct of_device_id *match_id;
+
+ if (!root)
+ return -ENODEV;
+@@ -174,7 +175,11 @@ static int __init bl_idle_init(void)
+ /*
+ * Initialize the driver just for a compliant set of machines
+ */
+- if (!of_match_node(compatible_machine_match, root))
++ match_id = of_match_node(compatible_machine_match, root);
++
++ of_node_put(root);
++
++ if (!match_id)
+ return -ENODEV;
+
+ if (!mcpm_is_available())
+diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
+index d2663a4e1f5e..a92a66b1ff46 100644
+--- a/drivers/crypto/ux500/cryp/cryp_core.c
++++ b/drivers/crypto/ux500/cryp/cryp_core.c
+@@ -556,7 +556,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
+ desc = dmaengine_prep_slave_sg(channel,
+ ctx->device->dma.sg_src,
+ ctx->device->dma.sg_src_len,
+- direction, DMA_CTRL_ACK);
++ DMA_MEM_TO_DEV, DMA_CTRL_ACK);
+ break;
+
+ case DMA_FROM_DEVICE:
+@@ -580,7 +580,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
+ desc = dmaengine_prep_slave_sg(channel,
+ ctx->device->dma.sg_dst,
+ ctx->device->dma.sg_dst_len,
+- direction,
++ DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK |
+ DMA_PREP_INTERRUPT);
+
+diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
+index 633321a8dd03..a0bb8a6eec3f 100644
+--- a/drivers/crypto/ux500/hash/hash_core.c
++++ b/drivers/crypto/ux500/hash/hash_core.c
+@@ -166,7 +166,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
+ __func__);
+ desc = dmaengine_prep_slave_sg(channel,
+ ctx->device->dma.sg, ctx->device->dma.sg_len,
+- direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
++ DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(ctx->device->dev,
+ "%s: dmaengine_prep_slave_sg() failed!\n", __func__);
+diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
+index 847f84a41a69..2b11d967acd0 100644
+--- a/drivers/dma/bcm2835-dma.c
++++ b/drivers/dma/bcm2835-dma.c
+@@ -415,38 +415,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
+ }
+ }
+
+-static int bcm2835_dma_abort(void __iomem *chan_base)
++static int bcm2835_dma_abort(struct bcm2835_chan *c)
+ {
+- unsigned long cs;
++ void __iomem *chan_base = c->chan_base;
+ long int timeout = 10000;
+
+- cs = readl(chan_base + BCM2835_DMA_CS);
+- if (!(cs & BCM2835_DMA_ACTIVE))
++ /*
++ * A zero control block address means the channel is idle.
++ * (The ACTIVE flag in the CS register is not a reliable indicator.)
++ */
++ if (!readl(chan_base + BCM2835_DMA_ADDR))
+ return 0;
+
+ /* Write 0 to the active bit - Pause the DMA */
+ writel(0, chan_base + BCM2835_DMA_CS);
+
+ /* Wait for any current AXI transfer to complete */
+- while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
++ while ((readl(chan_base + BCM2835_DMA_CS) &
++ BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
+ cpu_relax();
+- cs = readl(chan_base + BCM2835_DMA_CS);
+- }
+
+- /* We'll un-pause when we set of our next DMA */
++ /* Peripheral might be stuck and fail to signal AXI write responses */
+ if (!timeout)
+- return -ETIMEDOUT;
+-
+- if (!(cs & BCM2835_DMA_ACTIVE))
+- return 0;
+-
+- /* Terminate the control block chain */
+- writel(0, chan_base + BCM2835_DMA_NEXTCB);
+-
+- /* Abort the whole DMA */
+- writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
+- chan_base + BCM2835_DMA_CS);
++ dev_err(c->vc.chan.device->dev,
++ "failed to complete outstanding writes\n");
+
++ writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
+ return 0;
+ }
+
+@@ -485,8 +479,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+
+- /* Acknowledge interrupt */
+- writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
++ /*
++ * Clear the INT flag to receive further interrupts. Keep the channel
++ * active in case the descriptor is cyclic or in case the client has
++ * already terminated the descriptor and issued a new one. (May happen
++ * if this IRQ handler is threaded.) If the channel is finished, it
++ * will remain idle despite the ACTIVE flag being set.
++ */
++ writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
++ c->chan_base + BCM2835_DMA_CS);
+
+ d = c->desc;
+
+@@ -494,11 +495,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
+ if (d->cyclic) {
+ /* call the cyclic callback */
+ vchan_cyclic_callback(&d->vd);
+-
+- /* Keep the DMA engine running */
+- writel(BCM2835_DMA_ACTIVE,
+- c->chan_base + BCM2835_DMA_CS);
+- } else {
++ } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
+ vchan_cookie_complete(&c->desc->vd);
+ bcm2835_dma_start_desc(c);
+ }
+@@ -796,7 +793,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
+ unsigned long flags;
+- int timeout = 10000;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+@@ -806,27 +802,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+- /*
+- * Stop DMA activity: we assume the callback will not be called
+- * after bcm_dma_abort() returns (even if it does, it will see
+- * c->desc is NULL and exit.)
+- */
++ /* stop DMA activity */
+ if (c->desc) {
+ vchan_terminate_vdesc(&c->desc->vd);
+ c->desc = NULL;
+- bcm2835_dma_abort(c->chan_base);
+-
+- /* Wait for stopping */
+- while (--timeout) {
+- if (!(readl(c->chan_base + BCM2835_DMA_CS) &
+- BCM2835_DMA_ACTIVE))
+- break;
+-
+- cpu_relax();
+- }
+-
+- if (!timeout)
+- dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
++ bcm2835_dma_abort(c);
+ }
+
+ vchan_get_all_descriptors(&c->vc, &head);
+diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
+index 75b6ff0415ee..118d371a2a4a 100644
+--- a/drivers/dma/imx-dma.c
++++ b/drivers/dma/imx-dma.c
+@@ -617,7 +617,7 @@ static void imxdma_tasklet(unsigned long data)
+ {
+ struct imxdma_channel *imxdmac = (void *)data;
+ struct imxdma_engine *imxdma = imxdmac->imxdma;
+- struct imxdma_desc *desc;
++ struct imxdma_desc *desc, *next_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imxdma->lock, flags);
+@@ -647,10 +647,10 @@ static void imxdma_tasklet(unsigned long data)
+ list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
+
+ if (!list_empty(&imxdmac->ld_queue)) {
+- desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
+- node);
++ next_desc = list_first_entry(&imxdmac->ld_queue,
++ struct imxdma_desc, node);
+ list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
+- if (imxdma_xfer_desc(desc) < 0)
++ if (imxdma_xfer_desc(next_desc) < 0)
+ dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
+ __func__, imxdmac->channel);
+ }
+diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
+index c74a88b65039..73de6a6179fc 100644
+--- a/drivers/dma/xilinx/zynqmp_dma.c
++++ b/drivers/dma/xilinx/zynqmp_dma.c
+@@ -163,7 +163,7 @@ struct zynqmp_dma_desc_ll {
+ u32 ctrl;
+ u64 nxtdscraddr;
+ u64 rsvd;
+-}; __aligned(64)
++};
+
+ /**
+ * struct zynqmp_dma_desc_sw - Per Transaction structure
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 9336ffdf6e2c..fceaafd67ec6 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -318,7 +318,12 @@ EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
+ static efi_status_t
+ check_var_size(u32 attributes, unsigned long size)
+ {
+- const struct efivar_operations *fops = __efivars->ops;
++ const struct efivar_operations *fops;
++
++ if (!__efivars)
++ return EFI_UNSUPPORTED;
++
++ fops = __efivars->ops;
+
+ if (!fops->query_variable_store)
+ return EFI_UNSUPPORTED;
+@@ -329,7 +334,12 @@ check_var_size(u32 attributes, unsigned long size)
+ static efi_status_t
+ check_var_size_nonblocking(u32 attributes, unsigned long size)
+ {
+- const struct efivar_operations *fops = __efivars->ops;
++ const struct efivar_operations *fops;
++
++ if (!__efivars)
++ return EFI_UNSUPPORTED;
++
++ fops = __efivars->ops;
+
+ if (!fops->query_variable_store)
+ return EFI_UNSUPPORTED;
+@@ -429,13 +439,18 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
+ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool duplicates, struct list_head *head)
+ {
+- const struct efivar_operations *ops = __efivars->ops;
++ const struct efivar_operations *ops;
+ unsigned long variable_name_size = 1024;
+ efi_char16_t *variable_name;
+ efi_status_t status;
+ efi_guid_t vendor_guid;
+ int err = 0;
+
++ if (!__efivars)
++ return -EFAULT;
++
++ ops = __efivars->ops;
++
+ variable_name = kzalloc(variable_name_size, GFP_KERNEL);
+ if (!variable_name) {
+ printk(KERN_ERR "efivars: Memory allocation failed.\n");
+@@ -583,12 +598,14 @@ static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
+ */
+ int __efivar_entry_delete(struct efivar_entry *entry)
+ {
+- const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+
+- status = ops->set_variable(entry->var.VariableName,
+- &entry->var.VendorGuid,
+- 0, 0, NULL);
++ if (!__efivars)
++ return -EINVAL;
++
++ status = __efivars->ops->set_variable(entry->var.VariableName,
++ &entry->var.VendorGuid,
++ 0, 0, NULL);
+
+ return efi_status_to_err(status);
+ }
+@@ -607,12 +624,17 @@ EXPORT_SYMBOL_GPL(__efivar_entry_delete);
+ */
+ int efivar_entry_delete(struct efivar_entry *entry)
+ {
+- const struct efivar_operations *ops = __efivars->ops;
++ const struct efivar_operations *ops;
+ efi_status_t status;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
++ if (!__efivars) {
++ up(&efivars_lock);
++ return -EINVAL;
++ }
++ ops = __efivars->ops;
+ status = ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ 0, 0, NULL);
+@@ -650,13 +672,19 @@ EXPORT_SYMBOL_GPL(efivar_entry_delete);
+ int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
+ unsigned long size, void *data, struct list_head *head)
+ {
+- const struct efivar_operations *ops = __efivars->ops;
++ const struct efivar_operations *ops;
+ efi_status_t status;
+ efi_char16_t *name = entry->var.VariableName;
+ efi_guid_t vendor = entry->var.VendorGuid;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
++
++ if (!__efivars) {
++ up(&efivars_lock);
++ return -EINVAL;
++ }
++ ops = __efivars->ops;
+ if (head && efivar_entry_find(name, vendor, head, false)) {
+ up(&efivars_lock);
+ return -EEXIST;
+@@ -687,12 +715,17 @@ static int
+ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
+ u32 attributes, unsigned long size, void *data)
+ {
+- const struct efivar_operations *ops = __efivars->ops;
++ const struct efivar_operations *ops;
+ efi_status_t status;
+
+ if (down_trylock(&efivars_lock))
+ return -EBUSY;
+
++ if (!__efivars) {
++ up(&efivars_lock);
++ return -EINVAL;
++ }
++
+ status = check_var_size_nonblocking(attributes,
+ size + ucs2_strsize(name, 1024));
+ if (status != EFI_SUCCESS) {
+@@ -700,6 +733,7 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
+ return -ENOSPC;
+ }
+
++ ops = __efivars->ops;
+ status = ops->set_variable_nonblocking(name, &vendor, attributes,
+ size, data);
+
+@@ -727,9 +761,13 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
+ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
+ bool block, unsigned long size, void *data)
+ {
+- const struct efivar_operations *ops = __efivars->ops;
++ const struct efivar_operations *ops;
+ efi_status_t status;
+
++ if (!__efivars)
++ return -EINVAL;
++
++ ops = __efivars->ops;
+ if (!ops->query_variable_store)
+ return -ENOSYS;
+
+@@ -829,13 +867,18 @@ EXPORT_SYMBOL_GPL(efivar_entry_find);
+ */
+ int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
+ {
+- const struct efivar_operations *ops = __efivars->ops;
++ const struct efivar_operations *ops;
+ efi_status_t status;
+
+ *size = 0;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
++ if (!__efivars) {
++ up(&efivars_lock);
++ return -EINVAL;
++ }
++ ops = __efivars->ops;
+ status = ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid, NULL, size, NULL);
+ up(&efivars_lock);
+@@ -861,12 +904,14 @@ EXPORT_SYMBOL_GPL(efivar_entry_size);
+ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data)
+ {
+- const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+
+- status = ops->get_variable(entry->var.VariableName,
+- &entry->var.VendorGuid,
+- attributes, size, data);
++ if (!__efivars)
++ return -EINVAL;
++
++ status = __efivars->ops->get_variable(entry->var.VariableName,
++ &entry->var.VendorGuid,
++ attributes, size, data);
+
+ return efi_status_to_err(status);
+ }
+@@ -882,14 +927,19 @@ EXPORT_SYMBOL_GPL(__efivar_entry_get);
+ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data)
+ {
+- const struct efivar_operations *ops = __efivars->ops;
+ efi_status_t status;
+
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+- status = ops->get_variable(entry->var.VariableName,
+- &entry->var.VendorGuid,
+- attributes, size, data);
++
++ if (!__efivars) {
++ up(&efivars_lock);
++ return -EINVAL;
++ }
++
++ status = __efivars->ops->get_variable(entry->var.VariableName,
++ &entry->var.VendorGuid,
++ attributes, size, data);
+ up(&efivars_lock);
+
+ return efi_status_to_err(status);
+@@ -921,7 +971,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_get);
+ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ unsigned long *size, void *data, bool *set)
+ {
+- const struct efivar_operations *ops = __efivars->ops;
++ const struct efivar_operations *ops;
+ efi_char16_t *name = entry->var.VariableName;
+ efi_guid_t *vendor = &entry->var.VendorGuid;
+ efi_status_t status;
+@@ -940,6 +990,11 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ if (down_interruptible(&efivars_lock))
+ return -EINTR;
+
++ if (!__efivars) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ /*
+ * Ensure that the available space hasn't shrunk below the safe level
+ */
+@@ -956,6 +1011,8 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ }
+ }
+
++ ops = __efivars->ops;
++
+ status = ops->set_variable(name, vendor, attributes, *size, data);
+ if (status != EFI_SUCCESS) {
+ err = efi_status_to_err(status);
+diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
+index 68e4b2b98c8f..7a42c194b944 100644
+--- a/drivers/fpga/altera-cvp.c
++++ b/drivers/fpga/altera-cvp.c
+@@ -403,6 +403,7 @@ static int altera_cvp_probe(struct pci_dev *pdev,
+ struct altera_cvp_conf *conf;
+ struct fpga_manager *mgr;
+ u16 cmd, val;
++ u32 regval;
+ int ret;
+
+ /*
+@@ -416,6 +417,14 @@ static int altera_cvp_probe(struct pci_dev *pdev,
+ return -ENODEV;
+ }
+
++ pci_read_config_dword(pdev, VSE_CVP_STATUS, &regval);
++ if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
++ dev_err(&pdev->dev,
++ "CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
++ regval);
++ return -ENODEV;
++ }
++
+ conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+@@ -471,7 +480,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
+ return 0;
+
+ err_unmap:
+- pci_iounmap(pdev, conf->map);
++ if (conf->map)
++ pci_iounmap(pdev, conf->map);
+ pci_release_region(pdev, CVP_BAR);
+ err_disable:
+ cmd &= ~PCI_COMMAND_MEMORY;
+@@ -486,7 +496,8 @@ static void altera_cvp_remove(struct pci_dev *pdev)
+ u16 cmd;
+
+ fpga_mgr_unregister(mgr);
+- pci_iounmap(pdev, conf->map);
++ if (conf->map)
++ pci_iounmap(pdev, conf->map);
+ pci_release_region(pdev, CVP_BAR);
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd &= ~PCI_COMMAND_MEMORY;
+diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
+index d72af6f6cdbd..00e954f22bc9 100644
+--- a/drivers/gpio/gpio-mt7621.c
++++ b/drivers/gpio/gpio-mt7621.c
+@@ -244,6 +244,8 @@ mediatek_gpio_bank_probe(struct device *dev,
+ rg->chip.of_xlate = mediatek_gpio_xlate;
+ rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
+ dev_name(dev), bank);
++ if (!rg->chip.label)
++ return -ENOMEM;
+
+ ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
+ if (ret < 0) {
+@@ -295,6 +297,7 @@ mediatek_gpio_probe(struct platform_device *pdev)
+ struct device_node *np = dev->of_node;
+ struct mtk *mtk;
+ int i;
++ int ret;
+
+ mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
+ if (!mtk)
+@@ -309,8 +312,11 @@ mediatek_gpio_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, mtk);
+ mediatek_gpio_irq_chip.name = dev_name(dev);
+
+- for (i = 0; i < MTK_BANK_CNT; i++)
+- mediatek_gpio_bank_probe(dev, np, i);
++ for (i = 0; i < MTK_BANK_CNT; i++) {
++ ret = mediatek_gpio_bank_probe(dev, np, i);
++ if (ret)
++ return ret;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index b3ab6c428423..fd713326dcfc 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -2279,6 +2279,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
+ unsigned long flags;
+ unsigned offset;
+
++ if (label) {
++ label = kstrdup_const(label, GFP_KERNEL);
++ if (!label)
++ return -ENOMEM;
++ }
++
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ /* NOTE: gpio_request() can be called in early boot,
+@@ -2289,6 +2295,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
+ desc_set_label(desc, label ? : "?");
+ status = 0;
+ } else {
++ kfree_const(label);
+ status = -EBUSY;
+ goto done;
+ }
+@@ -2305,6 +2312,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
+
+ if (status < 0) {
+ desc_set_label(desc, NULL);
++ kfree_const(label);
+ clear_bit(FLAG_REQUESTED, &desc->flags);
+ goto done;
+ }
+@@ -2400,6 +2408,7 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
+ chip->free(chip, gpio_chip_hwgpio(desc));
+ spin_lock_irqsave(&gpio_lock, flags);
+ }
++ kfree_const(desc->label);
+ desc_set_label(desc, NULL);
+ clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ clear_bit(FLAG_REQUESTED, &desc->flags);
+@@ -3221,11 +3230,19 @@ EXPORT_SYMBOL_GPL(gpiod_cansleep);
+ * @desc: gpio to set the consumer name on
+ * @name: the new consumer name
+ */
+-void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
++int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+ {
+- VALIDATE_DESC_VOID(desc);
+- /* Just overwrite whatever the previous name was */
+- desc->label = name;
++ VALIDATE_DESC(desc);
++ if (name) {
++ name = kstrdup_const(name, GFP_KERNEL);
++ if (!name)
++ return -ENOMEM;
++ }
++
++ kfree_const(desc->label);
++ desc_set_label(desc, name);
++
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 7c89785fd731..23a7ef97afdd 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -324,7 +324,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
+ {
+ enum gpio_result gpio_result;
+ uint32_t clock_pin = 0;
+-
++ uint8_t retry = 0;
+ struct ddc *ddc;
+
+ enum connector_id connector_id =
+@@ -353,11 +353,22 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
+ return present;
+ }
+
+- /* Read GPIO: DP sink is present if both clock and data pins are zero */
+- /* [anaumov] in DAL2, there was no check for GPIO failure */
+-
+- gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+- ASSERT(gpio_result == GPIO_RESULT_OK);
++ /*
++ * Read GPIO: DP sink is present if both clock and data pins are zero
++ *
++ * [W/A] When plugging/unplugging the DP cable, some customer boards
++ * emit one short pulse on clk_pin (1V, < 1ms), so DP gets configured as
++ * HDMI/DVI and the monitor cannot light up. Retry up to 3 times;
++ * a real passive dongle needs an additional 3 ms to be detected.
++ */
++ do {
++ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
++ ASSERT(gpio_result == GPIO_RESULT_OK);
++ if (clock_pin)
++ udelay(1000);
++ else
++ break;
++ } while (retry++ < 3);
+
+ present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index a7553b6d59c2..05840f5bddd5 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2240,7 +2240,8 @@ static void get_active_converter_info(
+ translate_dpcd_max_bpc(
+ hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
+
+- link->dpcd_caps.dongle_caps.extendedCapValid = true;
++ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
++ link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ }
+
+ break;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index ea6beccfd89d..87bf422f16be 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1917,6 +1917,8 @@ enum dc_status resource_map_pool_resources(
+ }
+ */
+
++ calculate_phy_pix_clks(stream);
++
+ /* acquire new resources */
+ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 0941f3c689bc..580e7e82034f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1268,10 +1268,19 @@ static void program_scaler(const struct dc *dc,
+ pipe_ctx->plane_res.scl_data.lb_params.depth,
+ &pipe_ctx->stream->bit_depth_params);
+
+- if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
++ if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
++ /*
++ * The way 420 is packed, 2 channels carry Y component, 1 channel
++ * alternates between Cb and Cr, so both channels need the pixel
++ * value for Y
++ */
++ if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ color.color_r_cr = color.color_g_y;
++
+ pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
+ pipe_ctx->stream_res.tg,
+ &color);
++ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
+ &pipe_ctx->plane_res.scl_data);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index cfcc54f2ce65..4058b59d9bea 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1190,7 +1190,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
+ tf = plane_state->in_transfer_func;
+
+ if (plane_state->gamma_correction &&
+- !plane_state->gamma_correction->is_identity
++ !dpp_base->ctx->dc->debug.always_use_regamma
++ && !plane_state->gamma_correction->is_identity
+ && dce_use_lut(plane_state->format))
+ dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
+
+@@ -2120,6 +2121,15 @@ static void dcn10_blank_pixel_data(
+ color_space = stream->output_color_space;
+ color_space_to_black_color(dc, color_space, &black_color);
+
++ /*
++ * The way 420 is packed, 2 channels carry Y component, 1 channel
++ * alternates between Cb and Cr, so both channels need the pixel
++ * value for Y
++ */
++ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
++ black_color.color_r_cr = black_color.color_g_y;
++
++
+ if (stream_res->tg->funcs->set_blank_color)
+ stream_res->tg->funcs->set_blank_color(
+ stream_res->tg,
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+index 5b67f575cd34..45629f26dbc2 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+@@ -1528,8 +1528,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+ efuse = efuse >> 24;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+- min = 1000;
+- max = 2300;
++ if (hwmgr->is_kicker) {
++ min = 1200;
++ max = 2500;
++ } else {
++ min = 1000;
++ max = 2300;
++ }
++ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
++ if (hwmgr->is_kicker) {
++ min = 900;
++ max = 2100;
++ } else {
++ min = 1100;
++ max = 2100;
++ }
+ } else {
+ min = 1100;
+ max = 2100;
+diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
+index f77bff5aa307..23397c08be11 100644
+--- a/drivers/gpu/drm/drm_atomic_helper.c
++++ b/drivers/gpu/drm/drm_atomic_helper.c
+@@ -3192,7 +3192,7 @@ EXPORT_SYMBOL(drm_atomic_helper_suspend);
+ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
+ struct drm_modeset_acquire_ctx *ctx)
+ {
+- int i;
++ int i, ret;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct drm_connector *connector;
+@@ -3211,7 +3211,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
+ for_each_new_connector_in_state(state, connector, new_conn_state, i)
+ state->connectors[i].old_state = connector->state;
+
+- return drm_atomic_commit(state);
++ ret = drm_atomic_commit(state);
++
++ state->acquire_ctx = NULL;
++
++ return ret;
+ }
+ EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
+
+diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
+index ba8cfe65c65b..e2f775d1c112 100644
+--- a/drivers/gpu/drm/drm_bufs.c
++++ b/drivers/gpu/drm/drm_bufs.c
+@@ -36,6 +36,8 @@
+ #include <drm/drmP.h>
+ #include "drm_legacy.h"
+
++#include <linux/nospec.h>
++
+ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+ struct drm_local_map *map)
+ {
+@@ -1417,6 +1419,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
+ idx, dma->buf_count - 1);
+ return -EINVAL;
+ }
++ idx = array_index_nospec(idx, dma->buf_count);
+ buf = dma->buflist[idx];
+ if (buf->file_priv != file_priv) {
+ DRM_ERROR("Process %d freeing buffer not owned\n",
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+index 14fc7c2a6bb7..c9962a36b86b 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+@@ -331,7 +331,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+ if (hw_ctl && hw_ctl->ops.get_flush_register)
+ flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+- if (flush_register == 0)
++ if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+ -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+index 41bec570c518..31205625c734 100644
+--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+@@ -17,7 +17,7 @@
+ * | |
+ * | |
+ * +---------+ | +----------+ | +----+
+- * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
++ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ * +---------+ | +----------+ | +----+
+ * | |
+ * | | dsi0_pll_by_2_bit_clk
+@@ -25,7 +25,7 @@
+ * | | +----+ | |\ dsi0_pclk_mux
+ * | |--| /2 |--o--| \ |
+ * | | +----+ | \ | +---------+
+- * | --------------| |--o--| div_7_4 |-- dsi0pll
++ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ * |------------------------------| / +---------+
+ * | +-----+ | /
+ * -----------| /4? |--o----------|/
+@@ -690,7 +690,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
+
+ hws[num++] = hw;
+
+- snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
++ snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+ /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+@@ -739,7 +739,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
+
+ hws[num++] = hw;
+
+- snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
++ snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
+
+ /* PIX CLK DIV : DIV_CTRL_7_4*/
+diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+index 3105965fc260..5a485489a1e2 100644
+--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
++++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+@@ -147,7 +147,7 @@ static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
+ }
+
+ static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
+- u8 *buff, u8 buff_size)
++ u8 *buff, u16 buff_size)
+ {
+ u32 i;
+ int ret;
+diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+index d5240b777a8f..adcdf946c365 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
++++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+@@ -168,6 +168,13 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
+ goto err_assert_reset;
+ }
+
++ /*
++ * At least on H6, some registers have some bits set by default
++ * which may cause issues. Clear them here.
++ */
++ writel(0, regs + TCON_TOP_PORT_SEL_REG);
++ writel(0, regs + TCON_TOP_GATE_SRC_REG);
++
+ /*
+ * TCON TOP has two muxes, which select parent clock for each TCON TV
+ * channel clock. Parent could be either TCON TV or TVE clock. For now
+diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
+index 54d96518a131..a08766d39eab 100644
+--- a/drivers/gpu/drm/v3d/v3d_bo.c
++++ b/drivers/gpu/drm/v3d/v3d_bo.c
+@@ -293,6 +293,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
+ bo->resv = attach->dmabuf->resv;
+
+ bo->sgt = sgt;
++ obj->import_attach = attach;
+ v3d_bo_get_pages(bo);
+
+ v3d_mmu_insert_ptes(bo);
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index 629f40424bba..ab39315c9078 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -315,12 +315,14 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
+ vc4_get_scaling_mode(vc4_state->src_h[1],
+ vc4_state->crtc_h);
+
+- /* YUV conversion requires that horizontal scaling be enabled,
+- * even on a plane that's otherwise 1:1. Looks like only PPF
+- * works in that case, so let's pick that one.
++ /* YUV conversion requires that horizontal scaling be enabled
++ * on the UV plane even if vc4_get_scaling_mode() returned
++ * VC4_SCALING_NONE (which can happen when the down-scaling
++ * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
++ * case.
+ */
+- if (vc4_state->is_unity)
+- vc4_state->x_scaling[0] = VC4_SCALING_PPF;
++ if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
++ vc4_state->x_scaling[1] = VC4_SCALING_PPF;
+ } else {
+ vc4_state->is_yuv = false;
+ vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
+index 0e5620f76ee0..6887db878b38 100644
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -471,31 +471,31 @@ static int __init vgem_init(void)
+ if (!vgem_device)
+ return -ENOMEM;
+
+- ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
+- if (ret)
+- goto out_free;
+-
+ vgem_device->platform =
+ platform_device_register_simple("vgem", -1, NULL, 0);
+ if (IS_ERR(vgem_device->platform)) {
+ ret = PTR_ERR(vgem_device->platform);
+- goto out_fini;
++ goto out_free;
+ }
+
+ dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
+ DMA_BIT_MASK(64));
++ ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
++ &vgem_device->platform->dev);
++ if (ret)
++ goto out_unregister;
+
+ /* Final step: expose the device/driver to userspace */
+ ret = drm_dev_register(&vgem_device->drm, 0);
+ if (ret)
+- goto out_unregister;
++ goto out_fini;
+
+ return 0;
+
+-out_unregister:
+- platform_device_unregister(vgem_device->platform);
+ out_fini:
+ drm_dev_fini(&vgem_device->drm);
++out_unregister:
++ platform_device_unregister(vgem_device->platform);
+ out_free:
+ kfree(vgem_device);
+ return ret;
+diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
+index f4081962784c..91653adc41cc 100644
+--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
++++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
+@@ -1524,7 +1524,7 @@ unlock:
+ EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
+
+ /* Abort any active or pending conversions for this context */
+-void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
++static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+ {
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+@@ -1551,7 +1551,7 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+
+ need_abort = (run_count || active_run);
+
+- ctx->aborting = need_abort;
++ ctx->aborting = true;
+
+ spin_unlock_irqrestore(&chan->irqlock, flags);
+
+@@ -1572,7 +1572,11 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+ dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
+ force_abort(ctx);
+ }
++}
+
++void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
++{
++ __ipu_image_convert_abort(ctx);
+ ctx->aborting = false;
+ }
+ EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
+@@ -1586,7 +1590,7 @@ void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
+ bool put_res;
+
+ /* make sure no runs are hanging around */
+- ipu_image_convert_abort(ctx);
++ __ipu_image_convert_abort(ctx);
+
+ dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
+ chan->ic_task, ctx);
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index b48100236df8..ebc9ffde41e9 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -30,6 +30,7 @@
+
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
++#include <linux/kfifo.h>
+ #include <linux/sched/signal.h>
+ #include <linux/export.h>
+ #include <linux/slab.h>
+@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
+ /* enqueue string to 'events' ring buffer */
+ void hid_debug_event(struct hid_device *hdev, char *buf)
+ {
+- unsigned i;
+ struct hid_debug_list *list;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdev->debug_list_lock, flags);
+- list_for_each_entry(list, &hdev->debug_list, node) {
+- for (i = 0; buf[i]; i++)
+- list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
+- buf[i];
+- list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
+- }
++ list_for_each_entry(list, &hdev->debug_list, node)
++ kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
+ spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
+
+ wake_up_interruptible(&hdev->debug_wait);
+@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
+ hid_debug_event(hdev, buf);
+
+ kfree(buf);
+- wake_up_interruptible(&hdev->debug_wait);
+-
++ wake_up_interruptible(&hdev->debug_wait);
+ }
+ EXPORT_SYMBOL_GPL(hid_dump_input);
+
+@@ -1088,8 +1083,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
+ goto out;
+ }
+
+- if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
+- err = -ENOMEM;
++ err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
++ if (err) {
+ kfree(list);
+ goto out;
+ }
+@@ -1109,77 +1104,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+ {
+ struct hid_debug_list *list = file->private_data;
+- int ret = 0, len;
++ int ret = 0, copied;
+ DECLARE_WAITQUEUE(wait, current);
+
+ mutex_lock(&list->read_mutex);
+- while (ret == 0) {
+- if (list->head == list->tail) {
+- add_wait_queue(&list->hdev->debug_wait, &wait);
+- set_current_state(TASK_INTERRUPTIBLE);
+-
+- while (list->head == list->tail) {
+- if (file->f_flags & O_NONBLOCK) {
+- ret = -EAGAIN;
+- break;
+- }
+- if (signal_pending(current)) {
+- ret = -ERESTARTSYS;
+- break;
+- }
++ if (kfifo_is_empty(&list->hid_debug_fifo)) {
++ add_wait_queue(&list->hdev->debug_wait, &wait);
++ set_current_state(TASK_INTERRUPTIBLE);
++
++ while (kfifo_is_empty(&list->hid_debug_fifo)) {
++ if (file->f_flags & O_NONBLOCK) {
++ ret = -EAGAIN;
++ break;
++ }
+
+- if (!list->hdev || !list->hdev->debug) {
+- ret = -EIO;
+- set_current_state(TASK_RUNNING);
+- goto out;
+- }
++ if (signal_pending(current)) {
++ ret = -ERESTARTSYS;
++ break;
++ }
+
+- /* allow O_NONBLOCK from other threads */
+- mutex_unlock(&list->read_mutex);
+- schedule();
+- mutex_lock(&list->read_mutex);
+- set_current_state(TASK_INTERRUPTIBLE);
++ /* if list->hdev is NULL we cannot remove_wait_queue().
++ * if list->hdev->debug is 0 then hid_debug_unregister()
++ * was already called and list->hdev is being destroyed.
++ * if we add remove_wait_queue() here we can hit a race.
++ */
++ if (!list->hdev || !list->hdev->debug) {
++ ret = -EIO;
++ set_current_state(TASK_RUNNING);
++ goto out;
+ }
+
+- set_current_state(TASK_RUNNING);
+- remove_wait_queue(&list->hdev->debug_wait, &wait);
++ /* allow O_NONBLOCK from other threads */
++ mutex_unlock(&list->read_mutex);
++ schedule();
++ mutex_lock(&list->read_mutex);
++ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+- if (ret)
+- goto out;
++ __set_current_state(TASK_RUNNING);
++ remove_wait_queue(&list->hdev->debug_wait, &wait);
+
+- /* pass the ringbuffer contents to userspace */
+-copy_rest:
+- if (list->tail == list->head)
++ if (ret)
+ goto out;
+- if (list->tail > list->head) {
+- len = list->tail - list->head;
+- if (len > count)
+- len = count;
+-
+- if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
+- ret = -EFAULT;
+- goto out;
+- }
+- ret += len;
+- list->head += len;
+- } else {
+- len = HID_DEBUG_BUFSIZE - list->head;
+- if (len > count)
+- len = count;
+-
+- if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
+- ret = -EFAULT;
+- goto out;
+- }
+- list->head = 0;
+- ret += len;
+- count -= len;
+- if (count > 0)
+- goto copy_rest;
+- }
+-
+ }
++
++ /* pass the fifo content to userspace, locking is not needed with only
++ * one concurrent reader and one concurrent writer
++ */
++ ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
++ if (ret)
++ goto out;
++ ret = copied;
+ out:
+ mutex_unlock(&list->read_mutex);
+ return ret;
+@@ -1190,7 +1165,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
+ struct hid_debug_list *list = file->private_data;
+
+ poll_wait(file, &list->hdev->debug_wait, wait);
+- if (list->head != list->tail)
++ if (!kfifo_is_empty(&list->hid_debug_fifo))
+ return EPOLLIN | EPOLLRDNORM;
+ if (!list->hdev->debug)
+ return EPOLLERR | EPOLLHUP;
+@@ -1205,7 +1180,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
+ spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
+ list_del(&list->node);
+ spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
+- kfree(list->hid_debug_buf);
++ kfifo_free(&list->hid_debug_fifo);
+ kfree(list);
+
+ return 0;
+@@ -1256,4 +1231,3 @@ void hid_debug_exit(void)
+ {
+ debugfs_remove_recursive(hid_debug_root);
+ }
+-
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index 643b6eb54442..eacc76d2ab96 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -743,7 +743,9 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
+ data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd;
+ data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd;
+ data_pointer->led_mute.dev = dev;
+- led_classdev_register(dev, &data_pointer->led_mute);
++ ret = led_classdev_register(dev, &data_pointer->led_mute);
++ if (ret < 0)
++ goto err;
+
+ data_pointer->led_micmute.name = name_micmute;
+ data_pointer->led_micmute.brightness_get =
+@@ -751,7 +753,11 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
+ data_pointer->led_micmute.brightness_set =
+ lenovo_led_brightness_set_tpkbd;
+ data_pointer->led_micmute.dev = dev;
+- led_classdev_register(dev, &data_pointer->led_micmute);
++ ret = led_classdev_register(dev, &data_pointer->led_micmute);
++ if (ret < 0) {
++ led_classdev_unregister(&data_pointer->led_mute);
++ goto err;
++ }
+
+ lenovo_features_set_tpkbd(hdev);
+
+diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
+index 08e3945a6fbf..0e30fa00204c 100644
+--- a/drivers/hwmon/lm80.c
++++ b/drivers/hwmon/lm80.c
+@@ -360,9 +360,11 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
+ struct i2c_client *client = data->client;
+ unsigned long min, val;
+ u8 reg;
+- int err = kstrtoul(buf, 10, &val);
+- if (err < 0)
+- return err;
++ int rv;
++
++ rv = kstrtoul(buf, 10, &val);
++ if (rv < 0)
++ return rv;
+
+ /* Save fan_min */
+ mutex_lock(&data->update_lock);
+@@ -390,8 +392,11 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
+ return -EINVAL;
+ }
+
+- reg = (lm80_read_value(client, LM80_REG_FANDIV) &
+- ~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1)));
++ rv = lm80_read_value(client, LM80_REG_FANDIV);
++ if (rv < 0)
++ return rv;
++ reg = (rv & ~(3 << (2 * (nr + 1))))
++ | (data->fan_div[nr] << (2 * (nr + 1)));
+ lm80_write_value(client, LM80_REG_FANDIV, reg);
+
+ /* Restore fan_min */
+@@ -623,6 +628,7 @@ static int lm80_probe(struct i2c_client *client,
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct lm80_data *data;
++ int rv;
+
+ data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
+ if (!data)
+@@ -635,8 +641,14 @@ static int lm80_probe(struct i2c_client *client,
+ lm80_init_client(client);
+
+ /* A few vars need to be filled upon startup */
+- data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+- data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
++ rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
++ if (rv < 0)
++ return rv;
++ data->fan[f_min][0] = rv;
++ rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
++ if (rv < 0)
++ return rv;
++ data->fan[f_min][1] = rv;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, lm80_groups);
+diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
+index 51d34959709b..fb5bac079e83 100644
+--- a/drivers/i2c/busses/i2c-axxia.c
++++ b/drivers/i2c/busses/i2c-axxia.c
+@@ -296,22 +296,7 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
+ i2c_int_disable(idev, MST_STATUS_TFL);
+ }
+
+- if (status & MST_STATUS_SCC) {
+- /* Stop completed */
+- i2c_int_disable(idev, ~MST_STATUS_TSS);
+- complete(&idev->msg_complete);
+- } else if (status & MST_STATUS_SNS) {
+- /* Transfer done */
+- i2c_int_disable(idev, ~MST_STATUS_TSS);
+- if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
+- axxia_i2c_empty_rx_fifo(idev);
+- complete(&idev->msg_complete);
+- } else if (status & MST_STATUS_TSS) {
+- /* Transfer timeout */
+- idev->msg_err = -ETIMEDOUT;
+- i2c_int_disable(idev, ~MST_STATUS_TSS);
+- complete(&idev->msg_complete);
+- } else if (unlikely(status & MST_STATUS_ERR)) {
++ if (unlikely(status & MST_STATUS_ERR)) {
+ /* Transfer error */
+ i2c_int_disable(idev, ~0);
+ if (status & MST_STATUS_AL)
+@@ -328,6 +313,21 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
+ readl(idev->base + MST_TX_BYTES_XFRD),
+ readl(idev->base + MST_TX_XFER));
+ complete(&idev->msg_complete);
++ } else if (status & MST_STATUS_SCC) {
++ /* Stop completed */
++ i2c_int_disable(idev, ~MST_STATUS_TSS);
++ complete(&idev->msg_complete);
++ } else if (status & MST_STATUS_SNS) {
++ /* Transfer done */
++ i2c_int_disable(idev, ~MST_STATUS_TSS);
++ if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
++ axxia_i2c_empty_rx_fifo(idev);
++ complete(&idev->msg_complete);
++ } else if (status & MST_STATUS_TSS) {
++ /* Transfer timeout */
++ idev->msg_err = -ETIMEDOUT;
++ i2c_int_disable(idev, ~MST_STATUS_TSS);
++ complete(&idev->msg_complete);
+ }
+
+ out:
+diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
+index 818cab14e87c..ddcfb6d349d1 100644
+--- a/drivers/i2c/busses/i2c-sh_mobile.c
++++ b/drivers/i2c/busses/i2c-sh_mobile.c
+@@ -800,6 +800,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
+ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
+ { .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
++ { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
+ { .compatible = "renesas,iic-r8a7791", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config },
+@@ -808,6 +809,7 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
+ { .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config },
++ { .compatible = "renesas,iic-r8a77990", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
+ {},
+diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
+index af53a1084ee5..471caa5323e4 100644
+--- a/drivers/iio/accel/kxcjk-1013.c
++++ b/drivers/iio/accel/kxcjk-1013.c
+@@ -1490,6 +1490,7 @@ static const struct acpi_device_id kx_acpi_match[] = {
+ {"KXCJ1008", KXCJ91008},
+ {"KXCJ9000", KXCJ91008},
+ {"KIOX000A", KXCJ91008},
++ {"KIOX010A", KXCJ91008}, /* KXCJ91008 inside the display of a 2-in-1 */
+ {"KXTJ1009", KXTJ21009},
+ {"SMO8500", KXCJ91008},
+ { },
+diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
+index da2d16dfa63e..5dd104cf0939 100644
+--- a/drivers/iio/adc/meson_saradc.c
++++ b/drivers/iio/adc/meson_saradc.c
+@@ -587,8 +587,11 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
+ struct clk_init_data init;
+ const char *clk_parents[1];
+
+- init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_div",
+- indio_dev->dev.of_node);
++ init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_div",
++ dev_name(indio_dev->dev.parent));
++ if (!init.name)
++ return -ENOMEM;
++
+ init.flags = 0;
+ init.ops = &clk_divider_ops;
+ clk_parents[0] = __clk_get_name(priv->clkin);
+@@ -606,8 +609,11 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
+ if (WARN_ON(IS_ERR(priv->adc_div_clk)))
+ return PTR_ERR(priv->adc_div_clk);
+
+- init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_en",
+- indio_dev->dev.of_node);
++ init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_en",
++ dev_name(indio_dev->dev.parent));
++ if (!init.name)
++ return -ENOMEM;
++
+ init.flags = CLK_SET_RATE_PARENT;
+ init.ops = &clk_gate_ops;
+ clk_parents[0] = __clk_get_name(priv->adc_div_clk);
+diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
+index 9bd63abb2dfe..6f013a565353 100644
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -1157,6 +1157,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
++ rvt_qp_wqe_unreserve(qp, wqe);
+ s_last = qp->s_last;
+ trace_hfi1_qp_send_completion(qp, wqe, s_last);
+ if (++s_last >= qp->s_size)
+@@ -1209,6 +1210,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ u32 s_last;
+
+ rvt_put_swqe(wqe);
++ rvt_qp_wqe_unreserve(qp, wqe);
+ s_last = qp->s_last;
+ trace_hfi1_qp_send_completion(qp, wqe, s_last);
+ if (++s_last >= qp->s_size)
+diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
+index 5f56f3c1b4c4..62a3832a1ebb 100644
+--- a/drivers/infiniband/hw/hfi1/ruc.c
++++ b/drivers/infiniband/hw/hfi1/ruc.c
+@@ -278,6 +278,8 @@ send:
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
++ if (wqe->length > qp->r_len)
++ goto inv_err;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+@@ -445,7 +447,10 @@ op_err:
+ goto err;
+
+ inv_err:
+- send_status = IB_WC_REM_INV_REQ_ERR;
++ send_status =
++ sqp->ibqp.qp_type == IB_QPT_RC ?
++ IB_WC_REM_INV_REQ_ERR :
++ IB_WC_SUCCESS;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
+index f8a7de795beb..563f71e6d1d3 100644
+--- a/drivers/infiniband/hw/qib/qib_ruc.c
++++ b/drivers/infiniband/hw/qib/qib_ruc.c
+@@ -274,6 +274,8 @@ again:
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
++ if (wqe->length > qp->r_len)
++ goto inv_err;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+@@ -434,7 +436,10 @@ op_err:
+ goto err;
+
+ inv_err:
+- send_status = IB_WC_REM_INV_REQ_ERR;
++ send_status =
++ sqp->ibqp.qp_type == IB_QPT_RC ?
++ IB_WC_REM_INV_REQ_ERR :
++ IB_WC_SUCCESS;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index bee0dfb7b93b..34c9aa76a7bd 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -438,7 +438,14 @@ static int iommu_init_device(struct device *dev)
+
+ dev_data->alias = get_alias(dev);
+
+- if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
++ /*
++ * By default we use passthrough mode for IOMMUv2 capable device.
++ * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
++ * invalid address), we ignore the capability for the device so
++ * it'll be forced to go into translation mode.
++ */
++ if ((iommu_pass_through || !amd_iommu_force_isolation) &&
++ dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
+ struct amd_iommu *iommu;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index 3e02aace38b1..9ae3678844eb 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -586,7 +586,11 @@ struct arm_smmu_device {
+
+ struct arm_smmu_strtab_cfg strtab_cfg;
+
+- u32 sync_count;
++ /* Hi16xx adds an extra 32 bits of goodness to its MSI payload */
++ union {
++ u32 sync_count;
++ u64 padding;
++ };
+
+ /* IOMMU core code handle */
+ struct iommu_device iommu;
+@@ -684,7 +688,13 @@ static void queue_inc_cons(struct arm_smmu_queue *q)
+ u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
+
+ q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
+- writel(q->cons, q->cons_reg);
++
++ /*
++ * Ensure that all CPU accesses (reads and writes) to the queue
++ * are complete before we update the cons pointer.
++ */
++ mb();
++ writel_relaxed(q->cons, q->cons_reg);
+ }
+
+ static int queue_sync_prod(struct arm_smmu_queue *q)
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index e7cbf4fcf61d..ce119cb279c3 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -118,6 +118,7 @@ enum arm_smmu_implementation {
+ GENERIC_SMMU,
+ ARM_MMU500,
+ CAVIUM_SMMUV2,
++ QCOM_SMMUV2,
+ };
+
+ struct arm_smmu_s2cr {
+@@ -1912,6 +1913,7 @@ ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
+ ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
+ ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
+ ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
++ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2);
+
+ static const struct of_device_id arm_smmu_of_match[] = {
+ { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
+@@ -1920,6 +1922,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
+ { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
+ { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
+ { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
++ { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
+ { },
+ };
+ MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index cf3abb8d284f..4c2246fe5dbe 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -93,9 +93,14 @@ struct its_device;
+ * The ITS structure - contains most of the infrastructure, with the
+ * top-level MSI domain, the command queue, the collections, and the
+ * list of devices writing to it.
++ *
++ * dev_alloc_lock has to be taken for device allocations, while the
++ * spinlock must be taken to parse data structures such as the device
++ * list.
+ */
+ struct its_node {
+ raw_spinlock_t lock;
++ struct mutex dev_alloc_lock;
+ struct list_head entry;
+ void __iomem *base;
+ phys_addr_t phys_base;
+@@ -152,6 +157,7 @@ struct its_device {
+ void *itt;
+ u32 nr_ites;
+ u32 device_id;
++ bool shared;
+ };
+
+ static struct {
+@@ -2290,6 +2296,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
+ struct its_device *its_dev;
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
++ int err = 0;
+
+ /*
+ * We ignore "dev" entierely, and rely on the dev_id that has
+@@ -2312,6 +2319,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
+ return -EINVAL;
+ }
+
++ mutex_lock(&its->dev_alloc_lock);
+ its_dev = its_find_device(its, dev_id);
+ if (its_dev) {
+ /*
+@@ -2319,18 +2327,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
+ * another alias (PCI bridge of some sort). No need to
+ * create the device.
+ */
++ its_dev->shared = true;
+ pr_debug("Reusing ITT for devID %x\n", dev_id);
+ goto out;
+ }
+
+ its_dev = its_create_device(its, dev_id, nvec, true);
+- if (!its_dev)
+- return -ENOMEM;
++ if (!its_dev) {
++ err = -ENOMEM;
++ goto out;
++ }
+
+ pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
+ out:
++ mutex_unlock(&its->dev_alloc_lock);
+ info->scratchpad[0].ptr = its_dev;
+- return 0;
++ return err;
+ }
+
+ static struct msi_domain_ops its_msi_domain_ops = {
+@@ -2434,6 +2446,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
++ struct its_node *its = its_dev->its;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+@@ -2448,8 +2461,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ irq_domain_reset_irq_data(data);
+ }
+
+- /* If all interrupts have been freed, start mopping the floor */
+- if (bitmap_empty(its_dev->event_map.lpi_map,
++ mutex_lock(&its->dev_alloc_lock);
++
++ /*
++ * If all interrupts have been freed, start mopping the
++ * floor. This is conditioned on the device not being shared.
++ */
++ if (!its_dev->shared &&
++ bitmap_empty(its_dev->event_map.lpi_map,
+ its_dev->event_map.nr_lpis)) {
+ its_lpi_free(its_dev->event_map.lpi_map,
+ its_dev->event_map.lpi_base,
+@@ -2461,6 +2480,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ its_free_device(its_dev);
+ }
+
++ mutex_unlock(&its->dev_alloc_lock);
++
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ }
+
+@@ -3385,6 +3406,7 @@ static int __init its_probe_one(struct resource *res,
+ }
+
+ raw_spin_lock_init(&its->lock);
++ mutex_init(&its->dev_alloc_lock);
+ INIT_LIST_HEAD(&its->entry);
+ INIT_LIST_HEAD(&its->its_device_list);
+ typer = gic_read_typer(its_base + GITS_TYPER);
+diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
+index 8e5b03161b2f..64a63711fd95 100644
+--- a/drivers/isdn/hisax/hfc_pci.c
++++ b/drivers/isdn/hisax/hfc_pci.c
+@@ -1170,11 +1170,13 @@ HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
+ if (cs->debug & L1_DEB_LAPD)
+ debugl1(cs, "-> PH_REQUEST_PULL");
+ #endif
++ spin_lock_irqsave(&cs->lock, flags);
+ if (!cs->tx_skb) {
+ test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
+ st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
+ } else
+ test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
++ spin_unlock_irqrestore(&cs->lock, flags);
+ break;
+ case (HW_RESET | REQUEST):
+ spin_lock_irqsave(&cs->lock, flags);
+diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
+index 2940cdc87af1..95be6e36c7dd 100644
+--- a/drivers/lightnvm/pblk-core.c
++++ b/drivers/lightnvm/pblk-core.c
+@@ -1252,15 +1252,22 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
+
+ ret = pblk_line_alloc_bitmaps(pblk, line);
+ if (ret)
+- return ret;
++ goto fail;
+
+ if (!pblk_line_init_bb(pblk, line, 0)) {
+- list_add(&line->list, &l_mg->free_list);
+- return -EINTR;
++ ret = -EINTR;
++ goto fail;
+ }
+
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
+ return 0;
++
++fail:
++ spin_lock(&l_mg->free_lock);
++ list_add(&line->list, &l_mg->free_list);
++ spin_unlock(&l_mg->free_lock);
++
++ return ret;
+ }
+
+ void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
+diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
+index 879227d584e7..c3e038d4b22e 100644
+--- a/drivers/lightnvm/pblk-write.c
++++ b/drivers/lightnvm/pblk-write.c
+@@ -158,9 +158,11 @@ static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
+ w_ctx = &entry->w_ctx;
+
+ /* Check if the lba has been overwritten */
+- ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
+- if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
+- w_ctx->lba = ADDR_EMPTY;
++ if (w_ctx->lba != ADDR_EMPTY) {
++ ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
++ if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
++ w_ctx->lba = ADDR_EMPTY;
++ }
+
+ /* Mark up the entry as submittable again */
+ flags = READ_ONCE(w_ctx->flags);
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 811427e53126..7033a2880771 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1208,7 +1208,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ struct bio *split = bio_split(bio, max_sectors,
+ gfp, &conf->bio_split);
+ bio_chain(split, bio);
++ allow_barrier(conf);
+ generic_make_request(bio);
++ wait_barrier(conf);
+ bio = split;
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = max_sectors;
+@@ -1513,7 +1515,9 @@ retry_write:
+ struct bio *split = bio_split(bio, r10_bio->sectors,
+ GFP_NOIO, &conf->bio_split);
+ bio_chain(split, bio);
++ allow_barrier(conf);
+ generic_make_request(bio);
++ wait_barrier(conf);
+ bio = split;
+ r10_bio->master_bio = bio;
+ }
+diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
+index 82af97430e5b..63c9ac2c6a5f 100644
+--- a/drivers/media/i2c/Kconfig
++++ b/drivers/media/i2c/Kconfig
+@@ -61,6 +61,7 @@ config VIDEO_TDA1997X
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on SND_SOC
+ select SND_PCM
++ select HDMI
+ ---help---
+ V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
+
+@@ -610,6 +611,7 @@ config VIDEO_IMX274
+ tristate "Sony IMX274 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
++ select REGMAP_I2C
+ ---help---
+ This is a V4L2 sensor driver for the Sony IMX274
+ CMOS image sensor.
+diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
+index 5b008b0002c0..aa8b04cfed0f 100644
+--- a/drivers/media/i2c/ad9389b.c
++++ b/drivers/media/i2c/ad9389b.c
+@@ -578,7 +578,7 @@ static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
++ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
+index f3899cc84e27..88349b5053cc 100644
+--- a/drivers/media/i2c/adv7511.c
++++ b/drivers/media/i2c/adv7511.c
+@@ -130,7 +130,7 @@ static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+- V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
++ V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
+ ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
+index c78698199ac5..f01964c36ad5 100644
+--- a/drivers/media/i2c/adv7604.c
++++ b/drivers/media/i2c/adv7604.c
+@@ -766,7 +766,7 @@ static const struct v4l2_dv_timings_cap adv7604_timings_cap_analog = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
++ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+@@ -777,7 +777,7 @@ static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
++ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
+index 71fe56565f75..bb43a75ed6d0 100644
+--- a/drivers/media/i2c/adv7842.c
++++ b/drivers/media/i2c/adv7842.c
+@@ -663,7 +663,7 @@ static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
++ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+@@ -674,7 +674,7 @@ static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
++ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
+index ff25ea9aca48..26070fb6ce4e 100644
+--- a/drivers/media/i2c/tc358743.c
++++ b/drivers/media/i2c/tc358743.c
+@@ -59,7 +59,7 @@ static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+ /* Pixel clock from REF_01 p. 20. Min/max height/width are unknown */
+- V4L2_INIT_BT_TIMINGS(1, 10000, 1, 10000, 0, 165000000,
++ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 13000000, 165000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE |
+diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
+index 498ad2368cbc..f5ee28058ea2 100644
+--- a/drivers/media/i2c/ths8200.c
++++ b/drivers/media/i2c/ths8200.c
+@@ -49,7 +49,7 @@ static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
++ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1080, 25000000, 148500000,
+ V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
+ };
+
+diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
+index 06d29d8f6be8..f27d294dcbef 100644
+--- a/drivers/media/i2c/video-i2c.c
++++ b/drivers/media/i2c/video-i2c.c
+@@ -510,7 +510,12 @@ static const struct v4l2_ioctl_ops video_i2c_ioctl_ops = {
+
+ static void video_i2c_release(struct video_device *vdev)
+ {
+- kfree(video_get_drvdata(vdev));
++ struct video_i2c_data *data = video_get_drvdata(vdev);
++
++ v4l2_device_unregister(&data->v4l2_dev);
++ mutex_destroy(&data->lock);
++ mutex_destroy(&data->queue_lock);
++ kfree(data);
+ }
+
+ static int video_i2c_probe(struct i2c_client *client,
+@@ -608,10 +613,6 @@ static int video_i2c_remove(struct i2c_client *client)
+ struct video_i2c_data *data = i2c_get_clientdata(client);
+
+ video_unregister_device(&data->vdev);
+- v4l2_device_unregister(&data->v4l2_dev);
+-
+- mutex_destroy(&data->lock);
+- mutex_destroy(&data->queue_lock);
+
+ return 0;
+ }
+diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
+index d26c2d85a009..d20d3df5778b 100644
+--- a/drivers/media/platform/coda/coda-bit.c
++++ b/drivers/media/platform/coda/coda-bit.c
+@@ -991,16 +991,15 @@ static int coda_start_encoding(struct coda_ctx *ctx)
+ else
+ coda_write(dev, CODA_STD_H264,
+ CODA_CMD_ENC_SEQ_COD_STD);
+- if (ctx->params.h264_deblk_enabled) {
+- value = ((ctx->params.h264_deblk_alpha &
+- CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
+- CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
+- ((ctx->params.h264_deblk_beta &
+- CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
+- CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
+- } else {
+- value = 1 << CODA_264PARAM_DISABLEDEBLK_OFFSET;
+- }
++ value = ((ctx->params.h264_disable_deblocking_filter_idc &
++ CODA_264PARAM_DISABLEDEBLK_MASK) <<
++ CODA_264PARAM_DISABLEDEBLK_OFFSET) |
++ ((ctx->params.h264_slice_alpha_c0_offset_div2 &
++ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
++ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
++ ((ctx->params.h264_slice_beta_offset_div2 &
++ CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
++ CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
+ coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
+ break;
+ case V4L2_PIX_FMT_JPEG:
+diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
+index bf7b8417c27f..19d92edcc981 100644
+--- a/drivers/media/platform/coda/coda-common.c
++++ b/drivers/media/platform/coda/coda-common.c
+@@ -1793,14 +1793,13 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
+ ctx->params.h264_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+- ctx->params.h264_deblk_alpha = ctrl->val;
++ ctx->params.h264_slice_alpha_c0_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+- ctx->params.h264_deblk_beta = ctrl->val;
++ ctx->params.h264_slice_beta_offset_div2 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+- ctx->params.h264_deblk_enabled = (ctrl->val ==
+- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
++ ctx->params.h264_disable_deblocking_filter_idc = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ /* TODO: switch between baseline and constrained baseline */
+@@ -1882,13 +1881,13 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, 0, 15, 1, 0);
++ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0);
+ v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
+- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, 0, 15, 1, 0);
++ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0,
+- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
++ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
++ 0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
+diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
+index 19ac0b9dc6eb..2469ca1dc598 100644
+--- a/drivers/media/platform/coda/coda.h
++++ b/drivers/media/platform/coda/coda.h
+@@ -115,9 +115,9 @@ struct coda_params {
+ u8 h264_inter_qp;
+ u8 h264_min_qp;
+ u8 h264_max_qp;
+- u8 h264_deblk_enabled;
+- u8 h264_deblk_alpha;
+- u8 h264_deblk_beta;
++ u8 h264_disable_deblocking_filter_idc;
++ s8 h264_slice_alpha_c0_offset_div2;
++ s8 h264_slice_beta_offset_div2;
+ u8 h264_profile_idc;
+ u8 h264_level_idc;
+ u8 mpeg4_intra_qp;
+diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
+index 5e7b00a97671..e675e38f3475 100644
+--- a/drivers/media/platform/coda/coda_regs.h
++++ b/drivers/media/platform/coda/coda_regs.h
+@@ -292,7 +292,7 @@
+ #define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET 8
+ #define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK 0x0f
+ #define CODA_264PARAM_DISABLEDEBLK_OFFSET 6
+-#define CODA_264PARAM_DISABLEDEBLK_MASK 0x01
++#define CODA_264PARAM_DISABLEDEBLK_MASK 0x03
+ #define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET 5
+ #define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_MASK 0x01
+ #define CODA_264PARAM_CHROMAQPOFFSET_OFFSET 0
+diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
+index 18c035ef84cf..df1ae6b5c854 100644
+--- a/drivers/media/platform/davinci/vpbe.c
++++ b/drivers/media/platform/davinci/vpbe.c
+@@ -740,7 +740,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
+ def_output);
+- return ret;
++ goto fail_kfree_amp;
+ }
+
+ printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
+@@ -748,12 +748,15 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
+ def_mode);
+- return ret;
++ goto fail_kfree_amp;
+ }
+ vpbe_dev->initialized = 1;
+ /* TBD handling of bootargs for default output and mode */
+ return 0;
+
++fail_kfree_amp:
++ mutex_lock(&vpbe_dev->lock);
++ kfree(vpbe_dev->amp);
+ fail_kfree_encoders:
+ kfree(vpbe_dev->encoders);
+ fail_dev_unregister:
+diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
+index 3e73e9db781f..7c025045ea90 100644
+--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
+@@ -41,25 +41,27 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
+ node = of_parse_phandle(dev->of_node, "mediatek,larb", 0);
+ if (!node) {
+ mtk_v4l2_err("no mediatek,larb found");
+- return -1;
++ return -ENODEV;
+ }
+ pdev = of_find_device_by_node(node);
++ of_node_put(node);
+ if (!pdev) {
+ mtk_v4l2_err("no mediatek,larb device found");
+- return -1;
++ return -ENODEV;
+ }
+ pm->larbvenc = &pdev->dev;
+
+ node = of_parse_phandle(dev->of_node, "mediatek,larb", 1);
+ if (!node) {
+ mtk_v4l2_err("no mediatek,larb found");
+- return -1;
++ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(node);
++ of_node_put(node);
+ if (!pdev) {
+ mtk_v4l2_err("no mediatek,larb device found");
+- return -1;
++ return -ENODEV;
+ }
+
+ pm->larbvenclt = &pdev->dev;
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 8b2c16dd58bd..0f218afdadaa 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1956,6 +1956,8 @@ void rc_unregister_device(struct rc_dev *dev)
+ rc_free_rx_device(dev);
+
+ mutex_lock(&dev->lock);
++ if (dev->users && dev->close)
++ dev->close(dev);
+ dev->registered = false;
+ mutex_unlock(&dev->lock);
+
+diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
+index 76382c858c35..1246d69ba187 100644
+--- a/drivers/memstick/core/memstick.c
++++ b/drivers/memstick/core/memstick.c
+@@ -18,6 +18,7 @@
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/pm_runtime.h>
+
+ #define DRIVER_NAME "memstick"
+
+@@ -436,6 +437,7 @@ static void memstick_check(struct work_struct *work)
+ struct memstick_dev *card;
+
+ dev_dbg(&host->dev, "memstick_check started\n");
++ pm_runtime_get_noresume(host->dev.parent);
+ mutex_lock(&host->lock);
+ if (!host->card) {
+ if (memstick_power_on(host))
+@@ -479,6 +481,7 @@ out_power_off:
+ host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
+
+ mutex_unlock(&host->lock);
++ pm_runtime_put(host->dev.parent);
+ dev_dbg(&host->dev, "memstick_check finished\n");
+ }
+
+diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
+index 0d3b7473bc21..5301302fb531 100644
+--- a/drivers/mmc/host/bcm2835.c
++++ b/drivers/mmc/host/bcm2835.c
+@@ -286,6 +286,7 @@ static void bcm2835_reset(struct mmc_host *mmc)
+
+ if (host->dma_chan)
+ dmaengine_terminate_sync(host->dma_chan);
++ host->dma_chan = NULL;
+ bcm2835_reset_internal(host);
+ }
+
+@@ -772,6 +773,8 @@ static void bcm2835_finish_command(struct bcm2835_host *host)
+
+ if (!(sdhsts & SDHSTS_CRC7_ERROR) ||
+ (host->cmd->opcode != MMC_SEND_OP_COND)) {
++ u32 edm, fsm;
++
+ if (sdhsts & SDHSTS_CMD_TIME_OUT) {
+ host->cmd->error = -ETIMEDOUT;
+ } else {
+@@ -780,6 +783,13 @@ static void bcm2835_finish_command(struct bcm2835_host *host)
+ bcm2835_dumpregs(host);
+ host->cmd->error = -EILSEQ;
+ }
++ edm = readl(host->ioaddr + SDEDM);
++ fsm = edm & SDEDM_FSM_MASK;
++ if (fsm == SDEDM_FSM_READWAIT ||
++ fsm == SDEDM_FSM_WRITESTART1)
++ /* Kick the FSM out of its wait */
++ writel(edm | SDEDM_FORCE_DATA_MODE,
++ host->ioaddr + SDEDM);
+ bcm2835_finish_request(host);
+ return;
+ }
+@@ -837,6 +847,8 @@ static void bcm2835_timeout(struct work_struct *work)
+ dev_err(dev, "timeout waiting for hardware interrupt.\n");
+ bcm2835_dumpregs(host);
+
++ bcm2835_reset(host->mmc);
++
+ if (host->data) {
+ host->data->error = -ETIMEDOUT;
+ bcm2835_finish_data(host);
+diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
+index 993386c9ea50..864338e308e2 100644
+--- a/drivers/mmc/host/jz4740_mmc.c
++++ b/drivers/mmc/host/jz4740_mmc.c
+@@ -983,17 +983,17 @@ static int jz4740_mmc_request_gpios(struct mmc_host *mmc,
+ if (!pdata->read_only_active_low)
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+
+- if (gpio_is_valid(pdata->gpio_card_detect)) {
+- ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
+- if (ret)
+- return ret;
+- }
++ /*
++ * Get optional card detect and write protect GPIOs,
++ * only back out on probe deferral.
++ */
++ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
++ if (ret == -EPROBE_DEFER)
++ return ret;
+
+- if (gpio_is_valid(pdata->gpio_read_only)) {
+- ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only);
+- if (ret)
+- return ret;
+- }
++ ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
++ if (ret == -EPROBE_DEFER)
++ return ret;
+
+ return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power,
+ "MMC read only", true, pdata->power_active_low);
+diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
+index 2cfec33178c1..9841b447ccde 100644
+--- a/drivers/mmc/host/meson-mx-sdio.c
++++ b/drivers/mmc/host/meson-mx-sdio.c
+@@ -596,6 +596,9 @@ static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#fixed_factor",
+ dev_name(host->controller_dev));
++ if (!init.name)
++ return -ENOMEM;
++
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ init.parent_names = &clk_fixed_factor_parent;
+@@ -612,6 +615,9 @@ static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
+ clk_div_parent = __clk_get_name(host->fixed_factor_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#div", dev_name(host->controller_dev));
++ if (!init.name)
++ return -ENOMEM;
++
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &clk_div_parent;
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 9cb7554a463d..a7bf8515116f 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -526,8 +526,12 @@ static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ val = ESDHC_CLOCK_STABLE;
+- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
+- if (ktime_after(ktime_get(), timeout)) {
++ while (1) {
++ bool timedout = ktime_after(ktime_get(), timeout);
++
++ if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
++ break;
++ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ break;
+@@ -591,8 +595,12 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) {
+- if (ktime_after(ktime_get(), timeout)) {
++ while (1) {
++ bool timedout = ktime_after(ktime_get(), timeout);
++
++ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
++ break;
++ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ return;
+diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
+index d264391616f9..d02f5cf76b3d 100644
+--- a/drivers/mmc/host/sdhci-omap.c
++++ b/drivers/mmc/host/sdhci-omap.c
+@@ -220,8 +220,12 @@ static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+- while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) {
+- if (WARN_ON(ktime_after(ktime_get(), timeout)))
++ while (1) {
++ bool timedout = ktime_after(ktime_get(), timeout);
++
++ if (sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)
++ break;
++ if (WARN_ON(timedout))
+ return;
+ usleep_range(5, 10);
+ }
+@@ -653,8 +657,12 @@ static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+- while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) {
+- if (WARN_ON(ktime_after(ktime_get(), timeout)))
++ while (1) {
++ bool timedout = ktime_after(ktime_get(), timeout);
++
++ if (sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)
++ break;
++ if (WARN_ON(timedout))
+ return;
+ usleep_range(5, 10);
+ }
+diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
+index c335052d0c02..caccedc836dc 100644
+--- a/drivers/mmc/host/sdhci-xenon-phy.c
++++ b/drivers/mmc/host/sdhci-xenon-phy.c
+@@ -357,9 +357,13 @@ static int xenon_emmc_phy_enable_dll(struct sdhci_host *host)
+
+ /* Wait max 32 ms */
+ timeout = ktime_add_ms(ktime_get(), 32);
+- while (!(sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) &
+- XENON_DLL_LOCK_STATE)) {
+- if (ktime_after(ktime_get(), timeout)) {
++ while (1) {
++ bool timedout = ktime_after(ktime_get(), timeout);
++
++ if (sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) &
++ XENON_DLL_LOCK_STATE)
++ break;
++ if (timedout) {
+ dev_err(mmc_dev(host->mmc), "Wait for DLL Lock time-out\n");
+ return -ETIMEDOUT;
+ }
+diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
+index 4d0791f6ec23..a0b5089b3274 100644
+--- a/drivers/mmc/host/sdhci-xenon.c
++++ b/drivers/mmc/host/sdhci-xenon.c
+@@ -34,9 +34,13 @@ static int xenon_enable_internal_clk(struct sdhci_host *host)
+ sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL);
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+- while (!((reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+- & SDHCI_CLOCK_INT_STABLE)) {
+- if (ktime_after(ktime_get(), timeout)) {
++ while (1) {
++ bool timedout = ktime_after(ktime_get(), timeout);
++
++ reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++ if (reg & SDHCI_CLOCK_INT_STABLE)
++ break;
++ if (timedout) {
+ dev_err(mmc_dev(host->mmc), "Internal clock never stabilised.\n");
+ return -ETIMEDOUT;
+ }
+diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+index 5200e4bdce93..ea243840ee0f 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
++++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ {
+ struct mv88e6xxx_chip *chip = dev_id;
+ struct mv88e6xxx_atu_entry entry;
++ int spid;
+ int err;
+ u16 val;
+
+@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ if (err)
+ goto out;
+
++ spid = entry.state;
++
+ if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
+ dev_err_ratelimited(chip->dev,
+ "ATU age out violation for %pM\n",
+@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+
+ if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+ dev_err_ratelimited(chip->dev,
+- "ATU member violation for %pM portvec %x\n",
+- entry.mac, entry.portvec);
+- chip->ports[entry.portvec].atu_member_violation++;
++ "ATU member violation for %pM portvec %x spid %d\n",
++ entry.mac, entry.portvec, spid);
++ chip->ports[spid].atu_member_violation++;
+ }
+
+ if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
+ dev_err_ratelimited(chip->dev,
+- "ATU miss violation for %pM portvec %x\n",
+- entry.mac, entry.portvec);
+- chip->ports[entry.portvec].atu_miss_violation++;
++ "ATU miss violation for %pM portvec %x spid %d\n",
++ entry.mac, entry.portvec, spid);
++ chip->ports[spid].atu_miss_violation++;
+ }
+
+ if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
+ dev_err_ratelimited(chip->dev,
+- "ATU full violation for %pM portvec %x\n",
+- entry.mac, entry.portvec);
+- chip->ports[entry.portvec].atu_full_violation++;
++ "ATU full violation for %pM portvec %x spid %d\n",
++ entry.mac, entry.portvec, spid);
++ chip->ports[spid].atu_full_violation++;
+ }
+ mutex_unlock(&chip->reg_lock);
+
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+index c965e65d07db..9939ccaeb125 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+@@ -262,6 +262,8 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
+ AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
+ HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
+ 10, 1000U);
++ if (err)
++ return err;
+ }
+
+ if (self->rbl_enabled)
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 7b6859e4924e..fc16b2b0d0e9 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -519,7 +519,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+ {
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+- u32 reg;
+
+ wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
+ wol->wolopts = priv->wolopts;
+@@ -527,11 +526,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
+ if (!(priv->wolopts & WAKE_MAGICSECURE))
+ return;
+
+- /* Return the programmed SecureOn password */
+- reg = umac_readl(priv, UMAC_PSW_MS);
+- put_unaligned_be16(reg, &wol->sopass[0]);
+- reg = umac_readl(priv, UMAC_PSW_LS);
+- put_unaligned_be32(reg, &wol->sopass[2]);
++ memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
+ }
+
+ static int bcm_sysport_set_wol(struct net_device *dev,
+@@ -547,13 +542,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
+ if (wol->wolopts & ~supported)
+ return -EINVAL;
+
+- /* Program the SecureOn password */
+- if (wol->wolopts & WAKE_MAGICSECURE) {
+- umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
+- UMAC_PSW_MS);
+- umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
+- UMAC_PSW_LS);
+- }
++ if (wol->wolopts & WAKE_MAGICSECURE)
++ memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
+
+ /* Flag the device and relevant IRQ as wakeup capable */
+ if (wol->wolopts) {
+@@ -2588,13 +2578,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
+ unsigned int index, i = 0;
+ u32 reg;
+
+- /* Password has already been programmed */
+ reg = umac_readl(priv, UMAC_MPD_CTRL);
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
+ reg |= MPD_EN;
+ reg &= ~PSW_EN;
+- if (priv->wolopts & WAKE_MAGICSECURE)
++ if (priv->wolopts & WAKE_MAGICSECURE) {
++ /* Program the SecureOn password */
++ umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
++ UMAC_PSW_MS);
++ umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
++ UMAC_PSW_LS);
+ reg |= PSW_EN;
++ }
+ umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+ if (priv->wolopts & WAKE_FILTER) {
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
+index 046c6c1d97fd..36e0adf5c9b8 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.h
++++ b/drivers/net/ethernet/broadcom/bcmsysport.h
+@@ -12,6 +12,7 @@
+ #define __BCM_SYSPORT_H
+
+ #include <linux/bitmap.h>
++#include <linux/ethtool.h>
+ #include <linux/if_vlan.h>
+ #include <linux/net_dim.h>
+
+@@ -776,6 +777,7 @@ struct bcm_sysport_priv {
+ unsigned int crc_fwd:1;
+ u16 rev;
+ u32 wolopts;
++ u8 sopass[SOPASS_MAX];
+ unsigned int wol_irq_disabled:1;
+
+ /* MIB related fields */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index e2d92548226a..034f57500f00 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6073,23 +6073,26 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
+ int bnxt_reserve_rings(struct bnxt *bp)
+ {
+ int tcs = netdev_get_num_tc(bp->dev);
++ bool reinit_irq = false;
+ int rc;
+
+ if (!bnxt_need_reserve_rings(bp))
+ return 0;
+
+- rc = __bnxt_reserve_rings(bp);
+- if (rc) {
+- netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
+- return rc;
+- }
+ if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+- rc = bnxt_init_int_mode(bp);
++ reinit_irq = true;
++ }
++ rc = __bnxt_reserve_rings(bp);
++ if (reinit_irq) {
++ if (!rc)
++ rc = bnxt_init_int_mode(bp);
+ bnxt_ulp_irq_restart(bp, rc);
+- if (rc)
+- return rc;
++ }
++ if (rc) {
++ netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
++ return rc;
+ }
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ netdev_err(bp->dev, "tx ring reservation failure\n");
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index 60641e202534..9a7f70db20c7 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
+ * csum is correct or is zero.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+- tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
++ tcp_udp_csum_ok && outer_csum_ok &&
++ (ipv4_csum_ok || ipv6)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = encap;
+ }
+diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
+index bc6eb30aa20f..41c6fa200e74 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
++++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
+@@ -928,7 +928,7 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+ hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
+
+ /* Create element to be added to the driver hash table */
+- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
++ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
+index 40705938eecc..f75b9c11b2d2 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
++++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
+@@ -553,7 +553,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+ hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
+
+ /* Create element to be added to the driver hash table */
+- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
++ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
+ if (!hash_entry)
+ return -ENOMEM;
+ hash_entry->addr = addr;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index b7b2f8254ce1..0ccfa6a84535 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2691,6 +2691,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+
+ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
+ {
++#define HNS3_VECTOR_PF_MAX_NUM 64
++
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hnae3_vector_info *vector;
+@@ -2703,6 +2705,8 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
+ /* RSS size, cpu online and vector_num should be the same */
+ /* Should consider 2p/4p later */
+ vector_num = min_t(u16, num_online_cpus(), tqp_num);
++ vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
++
+ vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
+ GFP_KERNEL);
+ if (!vector)
+@@ -2760,12 +2764,12 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
+
+ hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+
+- if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
+- (void)irq_set_affinity_hint(
+- priv->tqp_vector[i].vector_irq,
+- NULL);
+- free_irq(priv->tqp_vector[i].vector_irq,
+- &priv->tqp_vector[i]);
++ if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
++ irq_set_affinity_notifier(tqp_vector->vector_irq,
++ NULL);
++ irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
++ free_irq(tqp_vector->vector_irq, tqp_vector);
++ tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
+ }
+
+ priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 7a80652e2500..f84e2c2d02c0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -122,6 +122,7 @@ enum i40e_state_t {
+ __I40E_MDD_EVENT_PENDING,
+ __I40E_VFLR_EVENT_PENDING,
+ __I40E_RESET_RECOVERY_PENDING,
++ __I40E_TIMEOUT_RECOVERY_PENDING,
+ __I40E_MISC_IRQ_REQUESTED,
+ __I40E_RESET_INTR_RECEIVED,
+ __I40E_REINIT_REQUESTED,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index ed9d3fc4aaba..41fa22c562c1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -336,6 +336,10 @@ static void i40e_tx_timeout(struct net_device *netdev)
+ (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
+ return; /* don't do any new action before the next timeout */
+
++ /* don't kick off another recovery if one is already pending */
++ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
++ return;
++
+ if (tx_ring) {
+ head = i40e_get_head(tx_ring);
+ /* Read interrupt register */
+@@ -9566,6 +9570,7 @@ end_core_reset:
+ clear_bit(__I40E_RESET_FAILED, pf->state);
+ clear_recovery:
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
++ clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
+ }
+
+ /**
+@@ -12011,6 +12016,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
+ ether_addr_copy(netdev->dev_addr, mac_addr);
+ ether_addr_copy(netdev->perm_addr, mac_addr);
+
++ /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
++ netdev->neigh_priv_len = sizeof(u32) * 4;
++
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* Setup netdev TC information */
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 3f047bb43348..db1543bca701 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -4333,8 +4333,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
+ if (!vsi->netdev)
+ return;
+
+- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+- napi_enable(&vsi->q_vectors[q_idx]->napi);
++ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
++ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
++
++ if (q_vector->rx.ring || q_vector->tx.ring)
++ napi_enable(&q_vector->napi);
++ }
+ }
+
+ /**
+@@ -4817,8 +4821,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
+ if (!vsi->netdev)
+ return;
+
+- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+- napi_disable(&vsi->q_vectors[q_idx]->napi);
++ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
++ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
++
++ if (q_vector->rx.ring || q_vector->tx.ring)
++ napi_disable(&q_vector->napi);
++ }
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 0796cef96fa3..ffaa6e031632 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -8770,9 +8770,11 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ rtnl_unlock();
+
+ #ifdef CONFIG_PM
+- retval = pci_save_state(pdev);
+- if (retval)
+- return retval;
++ if (!runtime) {
++ retval = pci_save_state(pdev);
++ if (retval)
++ return retval;
++ }
+ #endif
+
+ status = rd32(E1000_STATUS);
+diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
+index 9c08c3650c02..15dea48e0195 100644
+--- a/drivers/net/ethernet/marvell/skge.c
++++ b/drivers/net/ethernet/marvell/skge.c
+@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ memset(p, 0, regs->len);
+ memcpy_fromio(p, io, B3_RAM_ADDR);
+
+- memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+- regs->len - B3_RI_WTO_R1);
++ if (regs->len > B3_RI_WTO_R1) {
++ memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
++ regs->len - B3_RI_WTO_R1);
++ }
+ }
+
+ /* Wake on Lan only supported on Yukon chips with rev 1 or above */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 7365899c3ac9..944f21f99d43 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1758,7 +1758,7 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
+
+ static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+ {
+- return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
++ return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
+ }
+
+ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 8262f093fec4..d3f794d4fb96 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -707,6 +707,8 @@ static u32 mlx5e_get_fcs(const struct sk_buff *skb)
+ return __get_unaligned_cpu32(fcs_bytes);
+ }
+
++#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
++
+ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+@@ -725,6 +727,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ return;
+ }
+
++ /* CQE csum doesn't cover padding octets in short ethernet
++ * frames. And the pad field is appended prior to calculating
++ * and appending the FCS field.
++ *
++ * Detecting these padded frames requires verifying and parsing
++ * IP headers, so we simply force all those small frames to be
++ * CHECKSUM_UNNECESSARY even if they are not padded.
++ */
++ if (short_frame(skb->len))
++ goto csum_unnecessary;
++
+ if (likely(is_last_ethertype_ip(skb, &network_depth))) {
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+@@ -744,6 +757,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ return;
+ }
+
++csum_unnecessary:
+ if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index 6dacaeba2fbf..0b03d65474e9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
++#ifdef CONFIG_MLX5_EN_IPSEC
++ struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
++#endif
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
+ mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
++#ifdef CONFIG_MLX5_EN_IPSEC
++ wqe->eth = cur_eth;
++#endif
+ }
+
+ /* fill wqe */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index 3f767cde4c1d..54f1a40a68ed 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -511,14 +511,14 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
+ ktime_to_ns(ktime_get_real()));
+
+ /* Calculate period in seconds to call the overflow watchdog - to make
+- * sure counter is checked at least once every wrap around.
++ * sure counter is checked at least twice every wrap around.
+ * The period is calculated as the minimum between max HW cycles count
+ * (The clock source mask) and max amount of cycles that can be
+ * multiplied by clock multiplier where the result doesn't exceed
+ * 64bits.
+ */
+ overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+- overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
++ overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
+
+ ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
+ frac, &frac);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index b5e9f664fc66..563ce3fedab4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -640,18 +640,19 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
+ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+ {
+ struct mlx5_priv *priv = &mdev->priv;
+- int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
++ int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
++ int irq = pci_irq_vector(mdev->pdev, vecidx);
+
+- if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
++ if (!zalloc_cpumask_var(&priv->irq_info[vecidx].mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
+- priv->irq_info[i].mask);
++ priv->irq_info[vecidx].mask);
+
+ if (IS_ENABLED(CONFIG_SMP) &&
+- irq_set_affinity_hint(irq, priv->irq_info[i].mask))
++ irq_set_affinity_hint(irq, priv->irq_info[vecidx].mask))
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+
+ return 0;
+@@ -659,11 +660,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+
+ static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+ {
++ int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+ struct mlx5_priv *priv = &mdev->priv;
+- int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
++ int irq = pci_irq_vector(mdev->pdev, vecidx);
+
+ irq_set_affinity_hint(irq, NULL);
+- free_cpumask_var(priv->irq_info[i].mask);
++ free_cpumask_var(priv->irq_info[vecidx].mask);
+ }
+
+ static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index de821a9fdfaf..d64cd8d44d83 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -4235,6 +4235,25 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
+ dev_put(mlxsw_sp_port->dev);
+ }
+
++static void
++mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
++ struct net_device *lag_dev)
++{
++ struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
++ struct net_device *upper_dev;
++ struct list_head *iter;
++
++ if (netif_is_bridge_port(lag_dev))
++ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
++
++ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
++ if (!netif_is_bridge_port(upper_dev))
++ continue;
++ br_dev = netdev_master_upper_dev_get(upper_dev);
++ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
++ }
++}
++
+ static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+ {
+ char sldr_pl[MLXSW_REG_SLDR_LEN];
+@@ -4427,6 +4446,10 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+
+ /* Any VLANs configured on the port are no longer valid */
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
++ /* Make the LAG and its directly linked uppers leave bridges they
++ * are members of
++ */
++ mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
+
+ if (lag->ref_count == 1)
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+index e171513bb32a..30931a2c025b 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+@@ -95,8 +95,9 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
+ return -EIO;
+
+- max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
+- if (rulei->priority > max_priority)
++ /* Priority range is 1..cap_kvd_size-1. */
++ max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
++ if (rulei->priority >= max_priority)
+ return -EINVAL;
+
+ /* Unlike in TC, in HW, higher number means higher priority. */
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 0d9ea37c5d21..cdec48bcc6ad 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -282,30 +282,6 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
+ kfree(bridge_port);
+ }
+
+-static bool
+-mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
+- bridge_port)
+-{
+- struct net_device *dev = bridge_port->dev;
+- struct mlxsw_sp *mlxsw_sp;
+-
+- if (is_vlan_dev(dev))
+- mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
+- else
+- mlxsw_sp = mlxsw_sp_lower_get(dev);
+-
+- /* In case ports were pulled from out of a bridged LAG, then
+- * it's possible the reference count isn't zero, yet the bridge
+- * port should be destroyed, as it's no longer an upper of ours.
+- */
+- if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
+- return true;
+- else if (bridge_port->ref_count == 0)
+- return true;
+- else
+- return false;
+-}
+-
+ static struct mlxsw_sp_bridge_port *
+ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
+ struct net_device *brport_dev)
+@@ -343,8 +319,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
+ {
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+- bridge_port->ref_count--;
+- if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
++ if (--bridge_port->ref_count != 0)
+ return;
+ bridge_device = bridge_port->bridge_device;
+ mlxsw_sp_bridge_port_destroy(bridge_port);
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index 9319d84bf49f..d84501441edd 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -8100,6 +8100,8 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
+ start += 3;
+
+ prop_len = niu_pci_eeprom_read(np, start + 4);
++ if (prop_len < 0)
++ return prop_len;
+ err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
+ if (err < 0)
+ return err;
+@@ -8144,8 +8146,12 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: Reading in property [%s] len[%d]\n",
+ namebuf, prop_len);
+- for (i = 0; i < prop_len; i++)
+- *prop_buf++ = niu_pci_eeprom_read(np, off + i);
++ for (i = 0; i < prop_len; i++) {
++ err = niu_pci_eeprom_read(np, off + i);
++ if (err >= 0)
++ *prop_buf = err;
++ ++prop_buf;
++ }
+ }
+
+ start += len;
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index 29aa8d772b0c..59b3f1fbabd4 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -896,14 +896,14 @@ static void decode_txts(struct dp83640_private *dp83640,
+ struct phy_txts *phy_txts)
+ {
+ struct skb_shared_hwtstamps shhwtstamps;
++ struct dp83640_skb_info *skb_info;
+ struct sk_buff *skb;
+- u64 ns;
+ u8 overflow;
++ u64 ns;
+
+ /* We must already have the skb that triggered this. */
+-
++again:
+ skb = skb_dequeue(&dp83640->tx_queue);
+-
+ if (!skb) {
+ pr_debug("have timestamp but tx_queue empty\n");
+ return;
+@@ -918,6 +918,11 @@ static void decode_txts(struct dp83640_private *dp83640,
+ }
+ return;
+ }
++ skb_info = (struct dp83640_skb_info *)skb->cb;
++ if (time_after(jiffies, skb_info->tmo)) {
++ kfree_skb(skb);
++ goto again;
++ }
+
+ ns = phy2txts(phy_txts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+@@ -1470,6 +1475,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
+ static void dp83640_txtstamp(struct phy_device *phydev,
+ struct sk_buff *skb, int type)
+ {
++ struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
+ struct dp83640_private *dp83640 = phydev->priv;
+
+ switch (dp83640->hwts_tx_en) {
+@@ -1482,6 +1488,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
+ /* fall through */
+ case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
++ skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
+ skb_queue_tail(&dp83640->tx_queue, skb);
+ break;
+
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index d71be15c8c69..73813c7afa49 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -868,8 +868,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
+
+ /* SGMII-to-Copper mode initialization */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+- u32 pause;
+-
+ /* Select page 18 */
+ err = marvell_set_page(phydev, 18);
+ if (err < 0)
+@@ -892,16 +890,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ if (err < 0)
+ return err;
+-
+- /* There appears to be a bug in the 88e1512 when used in
+- * SGMII to copper mode, where the AN advertisement register
+- * clears the pause bits each time a negotiation occurs.
+- * This means we can never be truely sure what was advertised,
+- * so disable Pause support.
+- */
+- pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- phydev->supported &= ~pause;
+- phydev->advertising &= ~pause;
+ }
+
+ return m88e1318_config_init(phydev);
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index f2d01cb6f958..6e971628bb50 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1295,6 +1295,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->net->features |= NETIF_F_RXCSUM;
+
+ dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
++ set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
+
+ smsc95xx_init_mac_address(dev);
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 42feaa4d2916..c88ee376a2eb 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -502,6 +502,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
+ struct bpf_prog *xdp_prog;
+ struct send_queue *sq;
+ unsigned int len;
++ int packets = 0;
++ int bytes = 0;
+ int drops = 0;
+ int kicks = 0;
+ int ret, err;
+@@ -525,10 +527,18 @@ static int virtnet_xdp_xmit(struct net_device *dev,
+
+ /* Free up any pending old buffers before queueing new ones. */
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+- if (likely(is_xdp_frame(ptr)))
+- xdp_return_frame(ptr_to_xdp(ptr));
+- else
+- napi_consume_skb(ptr, false);
++ if (likely(is_xdp_frame(ptr))) {
++ struct xdp_frame *frame = ptr_to_xdp(ptr);
++
++ bytes += frame->len;
++ xdp_return_frame(frame);
++ } else {
++ struct sk_buff *skb = ptr;
++
++ bytes += skb->len;
++ napi_consume_skb(skb, false);
++ }
++ packets++;
+ }
+
+ for (i = 0; i < n; i++) {
+@@ -548,6 +558,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
+ }
+ out:
+ u64_stats_update_begin(&sq->stats.syncp);
++ sq->stats.bytes += bytes;
++ sq->stats.packets += packets;
+ sq->stats.xdp_tx += n;
+ sq->stats.xdp_tx_drops += drops;
+ sq->stats.kicks += kicks;
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index c40cd129afe7..5210cffb5344 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -532,6 +532,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ .hw_ops = &wcn3990_ops,
+ .decap_align_bytes = 1,
+ .num_peers = TARGET_HL_10_TLV_NUM_PEERS,
++ .n_cipher_suites = 8,
+ .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
+ .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
+ .target_64bit = true,
+diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
+index 21ba20981a80..0fca44e91a71 100644
+--- a/drivers/net/wireless/ath/ath9k/ath9k.h
++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
+@@ -272,7 +272,7 @@ struct ath_node {
+ #endif
+ u8 key_idx[4];
+
+- u32 ackto;
++ int ackto;
+ struct list_head list;
+ };
+
+diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
+index 7334c9b09e82..6e236a485431 100644
+--- a/drivers/net/wireless/ath/ath9k/dynack.c
++++ b/drivers/net/wireless/ath/ath9k/dynack.c
+@@ -29,9 +29,13 @@
+ * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
+ *
+ */
+-static inline u32 ath_dynack_ewma(u32 old, u32 new)
++static inline int ath_dynack_ewma(int old, int new)
+ {
+- return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV;
++ if (old > 0)
++ return (new * (EWMA_DIV - EWMA_LEVEL) +
++ old * EWMA_LEVEL) / EWMA_DIV;
++ else
++ return new;
+ }
+
+ /**
+@@ -82,10 +86,10 @@ static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac)
+ */
+ static void ath_dynack_compute_ackto(struct ath_hw *ah)
+ {
+- struct ath_node *an;
+- u32 to = 0;
+- struct ath_dynack *da = &ah->dynack;
+ struct ath_common *common = ath9k_hw_common(ah);
++ struct ath_dynack *da = &ah->dynack;
++ struct ath_node *an;
++ int to = 0;
+
+ list_for_each_entry(an, &da->nodes, list)
+ if (an->ackto > to)
+@@ -144,7 +148,8 @@ static void ath_dynack_compute_to(struct ath_hw *ah)
+ an->ackto = ath_dynack_ewma(an->ackto,
+ ackto);
+ ath_dbg(ath9k_hw_common(ah), DYNACK,
+- "%pM to %u\n", dst, an->ackto);
++ "%pM to %d [%u]\n", dst,
++ an->ackto, ackto);
+ if (time_is_before_jiffies(da->lto)) {
+ ath_dynack_compute_ackto(ah);
+ da->lto = jiffies + COMPUTE_TO;
+@@ -166,10 +171,12 @@ static void ath_dynack_compute_to(struct ath_hw *ah)
+ * @ah: ath hw
+ * @skb: socket buffer
+ * @ts: tx status info
++ * @sta: station pointer
+ *
+ */
+ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
+- struct ath_tx_status *ts)
++ struct ath_tx_status *ts,
++ struct ieee80211_sta *sta)
+ {
+ u8 ridx;
+ struct ieee80211_hdr *hdr;
+@@ -177,7 +184,7 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+- if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled)
++ if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ return;
+
+ spin_lock_bh(&da->qlock);
+@@ -187,11 +194,19 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
+ /* late ACK */
+ if (ts->ts_status & ATH9K_TXERR_XRETRY) {
+ if (ieee80211_is_assoc_req(hdr->frame_control) ||
+- ieee80211_is_assoc_resp(hdr->frame_control)) {
++ ieee80211_is_assoc_resp(hdr->frame_control) ||
++ ieee80211_is_auth(hdr->frame_control)) {
+ ath_dbg(common, DYNACK, "late ack\n");
++
+ ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2);
+ ath9k_hw_set_ack_timeout(ah, LATEACK_TO);
+ ath9k_hw_set_cts_timeout(ah, LATEACK_TO);
++ if (sta) {
++ struct ath_node *an;
++
++ an = (struct ath_node *)sta->drv_priv;
++ an->ackto = -1;
++ }
+ da->lto = jiffies + LATEACK_DELAY;
+ }
+
+@@ -251,7 +266,7 @@ void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+- if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled)
++ if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1))
+ return;
+
+ spin_lock_bh(&da->qlock);
+diff --git a/drivers/net/wireless/ath/ath9k/dynack.h b/drivers/net/wireless/ath/ath9k/dynack.h
+index 6d7bef976742..cf60224d40df 100644
+--- a/drivers/net/wireless/ath/ath9k/dynack.h
++++ b/drivers/net/wireless/ath/ath9k/dynack.h
+@@ -86,7 +86,8 @@ void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an);
+ void ath_dynack_init(struct ath_hw *ah);
+ void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts);
+ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
+- struct ath_tx_status *ts);
++ struct ath_tx_status *ts,
++ struct ieee80211_sta *sta);
+ #else
+ static inline void ath_dynack_init(struct ath_hw *ah) {}
+ static inline void ath_dynack_node_init(struct ath_hw *ah,
+@@ -97,7 +98,8 @@ static inline void ath_dynack_sample_ack_ts(struct ath_hw *ah,
+ struct sk_buff *skb, u32 ts) {}
+ static inline void ath_dynack_sample_tx_ts(struct ath_hw *ah,
+ struct sk_buff *skb,
+- struct ath_tx_status *ts) {}
++ struct ath_tx_status *ts,
++ struct ieee80211_sta *sta) {}
+ #endif
+
+ #endif /* DYNACK_H */
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 43b6c8508e49..4b7a7fc2a0fe 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -629,7 +629,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ if (bf == bf->bf_lastbf)
+ ath_dynack_sample_tx_ts(sc->sc_ah,
+ bf->bf_mpdu,
+- ts);
++ ts, sta);
+ }
+
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
+@@ -773,7 +773,8 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
+ memcpy(info->control.rates, bf->rates,
+ sizeof(info->control.rates));
+ ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
+- ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
++ ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts,
++ sta);
+ }
+ ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
+ } else
+diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
+index 7debed6bec06..a0fe8cbad104 100644
+--- a/drivers/net/wireless/ath/wil6210/main.c
++++ b/drivers/net/wireless/ath/wil6210/main.c
+@@ -995,10 +995,13 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
+
+ wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
+
+- /* Clear MAC link up */
+- wil_s(wil, RGF_HP_CTRL, BIT(15));
+- wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
+- wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
++ if (wil->hw_version < HW_VER_TALYN) {
++ /* Clear MAC link up */
++ wil_s(wil, RGF_HP_CTRL, BIT(15));
++ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0,
++ BIT_HPAL_PERST_FROM_PAD);
++ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
++ }
+
+ wil_halt_cpu(wil);
+
+diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
+index 6a7943e487fb..75c8aa297107 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx.c
++++ b/drivers/net/wireless/ath/wil6210/txrx.c
+@@ -1313,6 +1313,8 @@ found:
+ wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
+ wil_set_da_for_vring(wil, skb2, i);
+ wil_tx_ring(wil, vif, v2, skb2);
++ /* successful call to wil_tx_ring takes skb2 ref */
++ dev_kfree_skb_any(skb2);
+ } else {
+ wil_err(wil, "skb_copy failed\n");
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+index 55594c93b014..47dbd2d3e3b4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+@@ -442,7 +442,7 @@ struct iwl_he_backoff_conf {
+ * Support for Nss x BW (or RU) matrix:
+ * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+- * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
++ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
+ * i.e. QAM_th1 < QAM_th2 such if TX uses QAM_tx:
+ * QAM_tx < QAM_th1 --> PPE=0us
+ * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 9a764af30f36..0f357e8c4f94 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -1997,7 +1997,13 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
+ if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
+
+- /* If PPE Thresholds exist, parse them into a FW-familiar format */
++ /*
++ * Initialize the PPE thresholds to "None" (7), as described in Table
++ * 9-262ac of 80211.ax/D3.0.
++ */
++ memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
++
++ /* If PPE Thresholds exist, parse them into a FW-familiar format. */
+ if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ u8 nss = (sta->he_cap.ppe_thres[0] &
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+index 8169d1450b3b..d1c1a8069c7e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+@@ -98,8 +98,12 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
+ {
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
++ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ u8 supp = 0;
+
++ if (he_cap && he_cap->has_he)
++ return 0;
++
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
+index 374cc655c11d..16e6b6970e28 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
+@@ -799,7 +799,7 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
+
+ /* enable detection*/
+ mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+- mt76_wr(dev, 0x212c, 0x0c350001);
++ mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001);
+ }
+
+ void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
+@@ -842,7 +842,11 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
+ mt76_wr(dev, MT_BBP(DFS, 0), 0);
+ /* clear detector status */
+ mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+- mt76_wr(dev, 0x212c, 0);
++ if (mt76_chip(&dev->mt76) == 0x7610 ||
++ mt76_chip(&dev->mt76) == 0x7630)
++ mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081);
++ else
++ mt76_wr(dev, MT_BBP(IBI, 11), 0);
+
+ mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+ mt76_rmw_field(dev, MT_INT_TIMER_EN,
+diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
+index 67213f11acbd..0a9eac93dd01 100644
+--- a/drivers/net/wireless/st/cw1200/scan.c
++++ b/drivers/net/wireless/st/cw1200/scan.c
+@@ -78,6 +78,10 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
+ if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
+ return -EINVAL;
+
++ /* will be unlocked in cw1200_scan_work() */
++ down(&priv->scan.lock);
++ mutex_lock(&priv->conf_mutex);
++
+ frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
+ req->ie_len);
+ if (!frame.skb)
+@@ -86,19 +90,15 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
+ if (req->ie_len)
+ skb_put_data(frame.skb, req->ie, req->ie_len);
+
+- /* will be unlocked in cw1200_scan_work() */
+- down(&priv->scan.lock);
+- mutex_lock(&priv->conf_mutex);
+-
+ ret = wsm_set_template_frame(priv, &frame);
+ if (!ret) {
+ /* Host want to be the probe responder. */
+ ret = wsm_set_probe_responder(priv, true);
+ }
+ if (ret) {
++ dev_kfree_skb(frame.skb);
+ mutex_unlock(&priv->conf_mutex);
+ up(&priv->scan.lock);
+- dev_kfree_skb(frame.skb);
+ return ret;
+ }
+
+@@ -120,10 +120,9 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
+ ++priv->scan.n_ssids;
+ }
+
+- mutex_unlock(&priv->conf_mutex);
+-
+ if (frame.skb)
+ dev_kfree_skb(frame.skb);
++ mutex_unlock(&priv->conf_mutex);
+ queue_work(priv->workqueue, &priv->scan.work);
+ return 0;
+ }
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 31ff03dbeb83..f3433bf47b10 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -191,12 +191,12 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+ if (IS_ERR(opp_table))
+ return 0;
+
+- count = opp_table->regulator_count;
+-
+ /* Regulator may not be required for the device */
+- if (!count)
++ if (!opp_table->regulators)
+ goto put_opp_table;
+
++ count = opp_table->regulator_count;
++
+ uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
+ if (!uV)
+ goto put_opp_table;
+@@ -976,6 +976,9 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
+ struct regulator *reg;
+ int i;
+
++ if (!opp_table->regulators)
++ return true;
++
+ for (i = 0; i < opp_table->regulator_count; i++) {
+ reg = opp_table->regulators[i];
+
+@@ -1263,7 +1266,7 @@ static int _allocate_set_opp_data(struct opp_table *opp_table)
+ struct dev_pm_set_opp_data *data;
+ int len, count = opp_table->regulator_count;
+
+- if (WARN_ON(!count))
++ if (WARN_ON(!opp_table->regulators))
+ return -EINVAL;
+
+ /* space for set_opp_data */
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 975050a69494..3826b444298c 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -66,6 +66,7 @@ struct imx6_pcie {
+ #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
+
+ /* PCIe Root Complex registers (memory-mapped) */
++#define PCIE_RC_IMX6_MSI_CAP 0x50
+ #define PCIE_RC_LCR 0x7c
+ #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
+ #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
+@@ -682,6 +683,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
+ struct resource *dbi_base;
+ struct device_node *node = dev->of_node;
+ int ret;
++ u16 val;
+
+ imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
+ if (!imx6_pcie)
+@@ -816,6 +818,14 @@ static int imx6_pcie_probe(struct platform_device *pdev)
+ if (ret < 0)
+ return ret;
+
++ if (pci_msi_enabled()) {
++ val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
++ PCI_MSI_FLAGS);
++ val |= PCI_MSI_FLAGS_ENABLE;
++ dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
++ val);
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index 54a8b30dda38..37d0c15c9eeb 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -800,6 +800,7 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
+ {
+ int ret;
+ int nr_idxs;
++ unsigned int event_flags;
+ struct switchtec_ioctl_event_ctl ctl;
+
+ if (copy_from_user(&ctl, uctl, sizeof(ctl)))
+@@ -821,7 +822,9 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
+ else
+ return -EINVAL;
+
++ event_flags = ctl.flags;
+ for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
++ ctl.flags = event_flags;
+ ret = event_ctl(stdev, &ctl);
+ if (ret < 0)
+ return ret;
+diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
+index 54ec278d2fc4..e1a77b2de78a 100644
+--- a/drivers/perf/arm_spe_pmu.c
++++ b/drivers/perf/arm_spe_pmu.c
+@@ -927,6 +927,11 @@ static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
+
+ idx = atomic_inc_return(&pmu_idx);
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
++ if (!name) {
++ dev_err(dev, "failed to allocate name for pmu %d\n", idx);
++ return -ENOMEM;
++ }
++
+ return perf_pmu_register(&spe_pmu->pmu, name, -1);
+ }
+
+diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
+index d4dcd39b8d76..881078ff73f6 100644
+--- a/drivers/phy/allwinner/phy-sun4i-usb.c
++++ b/drivers/phy/allwinner/phy-sun4i-usb.c
+@@ -126,6 +126,7 @@ struct sun4i_usb_phy_cfg {
+ bool dedicated_clocks;
+ bool enable_pmu_unk1;
+ bool phy0_dual_route;
++ int missing_phys;
+ };
+
+ struct sun4i_usb_phy_data {
+@@ -646,6 +647,9 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
+ if (args->args[0] >= data->cfg->num_phys)
+ return ERR_PTR(-ENODEV);
+
++ if (data->cfg->missing_phys & BIT(args->args[0]))
++ return ERR_PTR(-ENODEV);
++
+ return data->phys[args->args[0]].phy;
+ }
+
+@@ -741,6 +745,9 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
+ struct sun4i_usb_phy *phy = data->phys + i;
+ char name[16];
+
++ if (data->cfg->missing_phys & BIT(i))
++ continue;
++
+ snprintf(name, sizeof(name), "usb%d_vbus", i);
+ phy->vbus = devm_regulator_get_optional(dev, name);
+ if (IS_ERR(phy->vbus)) {
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+index fa530913a2c8..08925d24180b 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -90,7 +90,7 @@ struct bcm2835_pinctrl {
+ struct gpio_chip gpio_chip;
+ struct pinctrl_gpio_range gpio_range;
+
+- spinlock_t irq_lock[BCM2835_NUM_BANKS];
++ raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
+ };
+
+ /* pins are just named GPIO0..GPIO53 */
+@@ -461,10 +461,10 @@ static void bcm2835_gpio_irq_enable(struct irq_data *data)
+ unsigned bank = GPIO_REG_OFFSET(gpio);
+ unsigned long flags;
+
+- spin_lock_irqsave(&pc->irq_lock[bank], flags);
++ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
+ set_bit(offset, &pc->enabled_irq_map[bank]);
+ bcm2835_gpio_irq_config(pc, gpio, true);
+- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
++ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+ }
+
+ static void bcm2835_gpio_irq_disable(struct irq_data *data)
+@@ -476,12 +476,12 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
+ unsigned bank = GPIO_REG_OFFSET(gpio);
+ unsigned long flags;
+
+- spin_lock_irqsave(&pc->irq_lock[bank], flags);
++ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
+ bcm2835_gpio_irq_config(pc, gpio, false);
+ /* Clear events that were latched prior to clearing event sources */
+ bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
+ clear_bit(offset, &pc->enabled_irq_map[bank]);
+- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
++ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+ }
+
+ static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc,
+@@ -584,7 +584,7 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+ unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&pc->irq_lock[bank], flags);
++ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
+
+ if (test_bit(offset, &pc->enabled_irq_map[bank]))
+ ret = __bcm2835_gpio_irq_set_type_enabled(pc, gpio, type);
+@@ -596,7 +596,7 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+ else
+ irq_set_handler_locked(data, handle_level_irq);
+
+- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
++ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+
+ return ret;
+ }
+@@ -1047,7 +1047,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
+ for_each_set_bit(offset, &events, 32)
+ bcm2835_gpio_wr(pc, GPEDS0 + i * 4, BIT(offset));
+
+- spin_lock_init(&pc->irq_lock[i]);
++ raw_spin_lock_init(&pc->irq_lock[i]);
+ }
+
+ err = gpiochip_add_data(&pc->gpio_chip, pc);
+diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
+index 86466173114d..e482672e833a 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson8.c
++++ b/drivers/pinctrl/meson/pinctrl-meson8.c
+@@ -807,7 +807,9 @@ static const char * const gpio_groups[] = {
+ "BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
+ "BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
+ "BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
++};
+
++static const char * const gpio_aobus_groups[] = {
+ "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
+ "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
+ "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
+@@ -1030,6 +1032,7 @@ static struct meson_pmx_func meson8_cbus_functions[] = {
+ };
+
+ static struct meson_pmx_func meson8_aobus_functions[] = {
++ FUNCTION(gpio_aobus),
+ FUNCTION(uart_ao),
+ FUNCTION(remote),
+ FUNCTION(i2c_slave_ao),
+diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
+index 647ad15d5c3c..91cffc051055 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
++++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
+@@ -646,16 +646,18 @@ static const char * const gpio_groups[] = {
+ "BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
+ "BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
+
+- "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
+- "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
+- "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
+- "GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N",
+-
+ "DIF_0_P", "DIF_0_N", "DIF_1_P", "DIF_1_N",
+ "DIF_2_P", "DIF_2_N", "DIF_3_P", "DIF_3_N",
+ "DIF_4_P", "DIF_4_N"
+ };
+
++static const char * const gpio_aobus_groups[] = {
++ "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
++ "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
++ "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
++ "GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N"
++};
++
+ static const char * const sd_a_groups[] = {
+ "sd_d0_a", "sd_d1_a", "sd_d2_a", "sd_d3_a", "sd_clk_a",
+ "sd_cmd_a"
+@@ -871,6 +873,7 @@ static struct meson_pmx_func meson8b_cbus_functions[] = {
+ };
+
+ static struct meson_pmx_func meson8b_aobus_functions[] = {
++ FUNCTION(gpio_aobus),
+ FUNCTION(uart_ao),
+ FUNCTION(uart_ao_b),
+ FUNCTION(i2c_slave_ao),
+diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
+index cbf58a10113d..4d87d75b9c6e 100644
+--- a/drivers/pinctrl/pinctrl-sx150x.c
++++ b/drivers/pinctrl/pinctrl-sx150x.c
+@@ -1166,7 +1166,6 @@ static int sx150x_probe(struct i2c_client *client,
+ }
+
+ /* Register GPIO controller */
+- pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
+ pctl->gpio.base = -1;
+ pctl->gpio.ngpio = pctl->data->npins;
+ pctl->gpio.get_direction = sx150x_gpio_get_direction;
+@@ -1180,6 +1179,10 @@ static int sx150x_probe(struct i2c_client *client,
+ pctl->gpio.of_node = dev->of_node;
+ #endif
+ pctl->gpio.can_sleep = true;
++ pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
++ if (!pctl->gpio.label)
++ return -ENOMEM;
++
+ /*
+ * Setting multiple pins is not safe when all pins are not
+ * handled by the same regmap register. The oscio pin (present
+@@ -1200,13 +1203,15 @@ static int sx150x_probe(struct i2c_client *client,
+
+ /* Add Interrupt support if an irq is specified */
+ if (client->irq > 0) {
+- pctl->irq_chip.name = devm_kstrdup(dev, client->name,
+- GFP_KERNEL);
+ pctl->irq_chip.irq_mask = sx150x_irq_mask;
+ pctl->irq_chip.irq_unmask = sx150x_irq_unmask;
+ pctl->irq_chip.irq_set_type = sx150x_irq_set_type;
+ pctl->irq_chip.irq_bus_lock = sx150x_irq_bus_lock;
+ pctl->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
++ pctl->irq_chip.name = devm_kstrdup(dev, client->name,
++ GFP_KERNEL);
++ if (!pctl->irq_chip.name)
++ return -ENOMEM;
+
+ pctl->irq.masked = ~0;
+ pctl->irq.sense = 0;
+diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
+index b6fd4838f60f..e5d5b1adb5a9 100644
+--- a/drivers/platform/chrome/cros_ec_proto.c
++++ b/drivers/platform/chrome/cros_ec_proto.c
+@@ -575,6 +575,7 @@ static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
+
+ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
+ {
++ u8 event_type;
+ u32 host_event;
+ int ret;
+
+@@ -594,11 +595,22 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
+ return ret;
+
+ if (wake_event) {
++ event_type = ec_dev->event_data.event_type;
+ host_event = cros_ec_get_host_event(ec_dev);
+
+- /* Consider non-host_event as wake event */
+- *wake_event = !host_event ||
+- !!(host_event & ec_dev->host_event_wake_mask);
++ /*
++ * Sensor events need to be parsed by the sensor sub-device.
++ * Defer them, and don't report the wakeup here.
++ */
++ if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
++ *wake_event = false;
++ /* Masked host-events should not count as wake events. */
++ else if (host_event &&
++ !(host_event & ec_dev->host_event_wake_mask))
++ *wake_event = false;
++ /* Consider all other events as wake events. */
++ else
++ *wake_event = true;
+ }
+
+ return ret;
+diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
+index d89936c93ba0..78b4aa4410fb 100644
+--- a/drivers/platform/x86/mlx-platform.c
++++ b/drivers/platform/x86/mlx-platform.c
+@@ -83,12 +83,12 @@
+ #define MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET 0xe7
+ #define MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET 0xe8
+ #define MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET 0xe9
+-#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xea
+-#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xeb
+-#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xec
+-#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xed
+-#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xee
+-#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xef
++#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xeb
++#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xec
++#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xed
++#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xee
++#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xef
++#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xf0
+ #define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
+ #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
+ #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
+diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
+index 2012551d93e0..796eeffdf93b 100644
+--- a/drivers/ptp/ptp_chardev.c
++++ b/drivers/ptp/ptp_chardev.c
+@@ -228,7 +228,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
+ pct->sec = ts.tv_sec;
+ pct->nsec = ts.tv_nsec;
+ pct++;
+- ptp->info->gettime64(ptp->info, &ts);
++ err = ptp->info->gettime64(ptp->info, &ts);
++ if (err)
++ goto out;
+ pct->sec = ts.tv_sec;
+ pct->nsec = ts.tv_nsec;
+ pct++;
+@@ -281,6 +283,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
+ break;
+ }
+
++out:
+ kfree(sysoff);
+ return err;
+ }
+diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
+index 7eacc1c4b3b1..c64903a5978f 100644
+--- a/drivers/ptp/ptp_clock.c
++++ b/drivers/ptp/ptp_clock.c
+@@ -253,8 +253,10 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
+ ptp, ptp->pin_attr_groups,
+ "ptp%d", ptp->index);
+- if (IS_ERR(ptp->dev))
++ if (IS_ERR(ptp->dev)) {
++ err = PTR_ERR(ptp->dev);
+ goto no_device;
++ }
+
+ /* Register a new PPS source. */
+ if (info->pps) {
+@@ -265,6 +267,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ pps.owner = info->owner;
+ ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
+ if (!ptp->pps_source) {
++ err = -EINVAL;
+ pr_err("failed to register pps source\n");
+ goto no_pps;
+ }
+diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
+index 6f7ebc1dbe10..2e1a27bd97d1 100644
+--- a/drivers/s390/crypto/zcrypt_error.h
++++ b/drivers/s390/crypto/zcrypt_error.h
+@@ -52,6 +52,7 @@ struct error_hdr {
+ #define REP82_ERROR_FORMAT_FIELD 0x29
+ #define REP82_ERROR_INVALID_COMMAND 0x30
+ #define REP82_ERROR_MALFORMED_MSG 0x40
++#define REP82_ERROR_INVALID_SPECIAL_CMD 0x41
+ #define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
+ #define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
+ #define REP82_ERROR_WORD_ALIGNMENT 0x60
+@@ -90,6 +91,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
+ case REP88_ERROR_MESSAGE_MALFORMD:
+ case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
+ case REP82_ERROR_INVALID_DOMAIN_PENDING:
++ case REP82_ERROR_INVALID_SPECIAL_CMD:
+ // REP88_ERROR_INVALID_KEY // '82' CEX2A
+ // REP88_ERROR_OPERAND // '84' CEX2A
+ // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
+diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
+index 1391e5f35918..702da909cee5 100644
+--- a/drivers/scsi/aic94xx/aic94xx_init.c
++++ b/drivers/scsi/aic94xx/aic94xx_init.c
+@@ -281,7 +281,7 @@ static ssize_t asd_show_dev_rev(struct device *dev,
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ asd_dev_rev[asd_ha->revision_id]);
+ }
+-static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
++static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
+
+ static ssize_t asd_show_dev_bios_build(struct device *dev,
+ struct device_attribute *attr,char *buf)
+@@ -478,7 +478,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
+ {
+ int err;
+
+- err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
++ err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
+ if (err)
+ return err;
+
+@@ -500,13 +500,13 @@ err_update_bios:
+ err_biosb:
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
+ err_rev:
+- device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
++ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
+ return err;
+ }
+
+ static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
+ {
+- device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
++ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
+diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
+index 6637116529aa..f987c40c47a1 100644
+--- a/drivers/scsi/cxlflash/main.c
++++ b/drivers/scsi/cxlflash/main.c
+@@ -3694,6 +3694,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
+ host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
+
+ cfg = shost_priv(host);
++ cfg->state = STATE_PROBING;
+ cfg->host = host;
+ rc = alloc_mem(cfg);
+ if (rc) {
+@@ -3782,6 +3783,7 @@ out:
+ return rc;
+
+ out_remove:
++ cfg->state = STATE_PROBED;
+ cxlflash_remove(pdev);
+ goto out;
+ }
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 687ff61bba9f..3922b17e2ea3 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -492,7 +492,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
+ hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
+ hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
+-
++ hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
+ /* used for 12G negotiate */
+ hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
+ }
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 4dda969e947c..0d214e6b8e9a 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -242,6 +242,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
+ icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
+ if (elscmd == ELS_CMD_FLOGI)
+ icmd->ulpTimeout = FF_DEF_RATOV * 2;
++ else if (elscmd == ELS_CMD_LOGO)
++ icmd->ulpTimeout = phba->fc_ratov;
+ else
+ icmd->ulpTimeout = phba->fc_ratov * 2;
+ } else {
+@@ -2682,16 +2684,15 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ goto out;
+ }
+
++ /* The LOGO will not be retried on failure. A LOGO was
++ * issued to the remote rport and a ACC or RJT or no Answer are
++ * all acceptable. Note the failure and move forward with
++ * discovery. The PLOGI will retry.
++ */
+ if (irsp->ulpStatus) {
+- /* Check for retry */
+- if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+- /* ELS command is being retried */
+- skip_recovery = 1;
+- goto out;
+- }
+ /* LOGO failed */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+- "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
++ "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
+ ndlp->nlp_DID, irsp->ulpStatus,
+ irsp->un.ulpWord[4]);
+ /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
+@@ -2737,7 +2738,8 @@ out:
+ * For any other port type, the rpi is unregistered as an implicit
+ * LOGO.
+ */
+- if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
++ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
++ skip_recovery == 0) {
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+@@ -2770,6 +2772,8 @@ out:
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the LOGO ELS command.
+ *
++ * Callers of this routine are expected to unregister the RPI first
++ *
+ * Return code
+ * 0 - successfully issued logo
+ * 1 - failed to issue logo
+@@ -2811,22 +2815,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ "Issue LOGO: did:x%x",
+ ndlp->nlp_DID, 0, 0);
+
+- /*
+- * If we are issuing a LOGO, we may try to recover the remote NPort
+- * by issuing a PLOGI later. Even though we issue ELS cmds by the
+- * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
+- * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
+- * for that ELS cmd. To avoid this situation, lets get rid of the
+- * RPI right now, before any ELS cmds are sent.
+- */
+- spin_lock_irq(shost->host_lock);
+- ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+- spin_unlock_irq(shost->host_lock);
+- if (lpfc_unreg_rpi(vport, ndlp)) {
+- lpfc_els_free_iocb(phba, elsiocb);
+- return 0;
+- }
+-
+ phba->fc_stat.elsXmitLOGO++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
+ spin_lock_irq(shost->host_lock);
+@@ -2834,7 +2822,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
+ spin_unlock_irq(shost->host_lock);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+-
+ if (rc == IOCB_ERROR) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_LOGO_SND;
+@@ -2842,6 +2829,11 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
++
++ spin_lock_irq(shost->host_lock);
++ ndlp->nlp_prev_state = ndlp->nlp_state;
++ spin_unlock_irq(shost->host_lock);
++ lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+ return 0;
+ }
+
+@@ -5701,6 +5693,9 @@ error:
+ stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
+ stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+
++ if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
++ stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
++
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitLSRJT++;
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+@@ -9502,7 +9497,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+ "rport in state 0x%x\n", ndlp->nlp_state);
+ return;
+ }
+- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
++ lpfc_printf_log(phba, KERN_ERR,
++ LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
+ "3094 Start rport recovery on shost id 0x%x "
+ "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+ "flags 0x%x\n",
+@@ -9515,8 +9511,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
++ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+- lpfc_issue_els_logo(vport, ndlp, 0);
+- lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
++ lpfc_unreg_rpi(vport, ndlp);
+ }
+
+diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
+index bd9bce9d9974..a6619fd8238c 100644
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -836,7 +836,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
++ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
++ spin_unlock_irq(shost->host_lock);
+ return 0;
+ }
+
+@@ -851,7 +853,10 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ return 1;
+ }
+ }
++
++ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_NPR_ADISC;
++ spin_unlock_irq(shost->host_lock);
+ lpfc_unreg_rpi(vport, ndlp);
+ return 0;
+ }
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 53133cfd420f..622832e55211 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -9809,6 +9809,7 @@ static void scsih_remove(struct pci_dev *pdev)
+
+ /* release all the volumes */
+ _scsih_ir_shutdown(ioc);
++ sas_remove_host(shost);
+ list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
+ list) {
+ if (raid_device->starget) {
+@@ -9851,7 +9852,6 @@ static void scsih_remove(struct pci_dev *pdev)
+ ioc->sas_hba.num_phys = 0;
+ }
+
+- sas_remove_host(shost);
+ mpt3sas_base_detach(ioc);
+ spin_lock(&gioc_lock);
+ list_del(&ioc->list);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+index f8cc2677c1cd..20d36061c217 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+@@ -834,10 +834,13 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
+ mpt3sas_phy->phy_belongs_to_port = 0;
+- sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
++ if (!ioc->remove_host)
++ sas_port_delete_phy(mpt3sas_port->port,
++ mpt3sas_phy->phy);
+ list_del(&mpt3sas_phy->port_siblings);
+ }
+- sas_port_delete(mpt3sas_port->port);
++ if (!ioc->remove_host)
++ sas_port_delete(mpt3sas_port->port);
+ kfree(mpt3sas_port);
+ }
+
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 8c1a232ac6bf..3781e8109dd7 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -653,6 +653,7 @@ struct bmic_host_wellness_driver_version {
+ u8 driver_version_tag[2];
+ __le16 driver_version_length;
+ char driver_version[32];
++ u8 dont_write_tag[2];
+ u8 end_tag[2];
+ };
+
+@@ -682,6 +683,8 @@ static int pqi_write_driver_version_to_host_wellness(
+ strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
+ sizeof(buffer->driver_version) - 1);
+ buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
++ buffer->dont_write_tag[0] = 'D';
++ buffer->dont_write_tag[1] = 'W';
+ buffer->end_tag[0] = 'Z';
+ buffer->end_tag[1] = 'Z';
+
+@@ -1181,6 +1184,9 @@ static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
+ if (rc)
+ goto out;
+
++ if (vpd->page_code != CISS_VPD_LV_STATUS)
++ goto out;
++
+ page_length = offsetof(struct ciss_vpd_logical_volume_status,
+ volume_status) + vpd->page_length;
+ if (page_length < sizeof(*vpd))
+diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
+index 5141bd4c9f06..ca7dfb3a520f 100644
+--- a/drivers/scsi/smartpqi/smartpqi_sis.c
++++ b/drivers/scsi/smartpqi/smartpqi_sis.c
+@@ -59,7 +59,7 @@
+
+ #define SIS_CTRL_KERNEL_UP 0x80
+ #define SIS_CTRL_KERNEL_PANIC 0x100
+-#define SIS_CTRL_READY_TIMEOUT_SECS 30
++#define SIS_CTRL_READY_TIMEOUT_SECS 180
+ #define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90
+ #define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
+
+diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
+index 14185451901d..bf9123f727e8 100644
+--- a/drivers/soc/bcm/brcmstb/common.c
++++ b/drivers/soc/bcm/brcmstb/common.c
+@@ -31,13 +31,17 @@ static const struct of_device_id brcmstb_machine_match[] = {
+
+ bool soc_is_brcmstb(void)
+ {
++ const struct of_device_id *match;
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return false;
+
+- return of_match_node(brcmstb_machine_match, root) != NULL;
++ match = of_match_node(brcmstb_machine_match, root);
++ of_node_put(root);
++
++ return match != NULL;
+ }
+
+ u32 brcmstb_get_family_id(void)
+diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
+index cd8f41351add..7bfb154d6fa5 100644
+--- a/drivers/soc/tegra/common.c
++++ b/drivers/soc/tegra/common.c
+@@ -22,11 +22,15 @@ static const struct of_device_id tegra_machine_match[] = {
+
+ bool soc_is_tegra(void)
+ {
++ const struct of_device_id *match;
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return false;
+
+- return of_match_node(tegra_machine_match, root) != NULL;
++ match = of_match_node(tegra_machine_match, root);
++ of_node_put(root);
++
++ return match != NULL;
+ }
+diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
+index 367b39fe46e5..e6313c54e3ad 100644
+--- a/drivers/staging/erofs/internal.h
++++ b/drivers/staging/erofs/internal.h
+@@ -39,7 +39,7 @@
+ #define debugln(x, ...) ((void)0)
+
+ #define dbg_might_sleep() ((void)0)
+-#define DBG_BUGON(...) ((void)0)
++#define DBG_BUGON(x) ((void)(x))
+ #endif
+
+ #ifdef CONFIG_EROFS_FAULT_INJECTION
+diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/staging/fsl-dpaa2/rtc/rtc.c
+index 0d52cb85441f..318a33c2f7a7 100644
+--- a/drivers/staging/fsl-dpaa2/rtc/rtc.c
++++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
+@@ -142,7 +142,10 @@ static int rtc_probe(struct fsl_mc_device *mc_dev)
+
+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
+ if (err) {
+- dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
++ if (err == -ENXIO)
++ err = -EPROBE_DEFER;
++ else
++ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_exit;
+ }
+
+diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
+index b736275c10f5..6a48ad067a8b 100644
+--- a/drivers/staging/iio/adc/ad7280a.c
++++ b/drivers/staging/iio/adc/ad7280a.c
+@@ -256,7 +256,9 @@ static int ad7280_read(struct ad7280_state *st, unsigned int devaddr,
+ if (ret)
+ return ret;
+
+- __ad7280_read32(st, &tmp);
++ ret = __ad7280_read32(st, &tmp);
++ if (ret)
++ return ret;
+
+ if (ad7280_check_crc(st, tmp))
+ return -EIO;
+@@ -294,7 +296,9 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned int devaddr,
+
+ ad7280_delay(st);
+
+- __ad7280_read32(st, &tmp);
++ ret = __ad7280_read32(st, &tmp);
++ if (ret)
++ return ret;
+
+ if (ad7280_check_crc(st, tmp))
+ return -EIO;
+@@ -327,7 +331,9 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned int cnt,
+ ad7280_delay(st);
+
+ for (i = 0; i < cnt; i++) {
+- __ad7280_read32(st, &tmp);
++ ret = __ad7280_read32(st, &tmp);
++ if (ret)
++ return ret;
+
+ if (ad7280_check_crc(st, tmp))
+ return -EIO;
+@@ -370,7 +376,10 @@ static int ad7280_chain_setup(struct ad7280_state *st)
+ return ret;
+
+ for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
+- __ad7280_read32(st, &val);
++ ret = __ad7280_read32(st, &val);
++ if (ret)
++ return ret;
++
+ if (val == 0)
+ return n - 1;
+
+diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
+index 16d72072c076..8bcb5d5de749 100644
+--- a/drivers/staging/iio/adc/ad7780.c
++++ b/drivers/staging/iio/adc/ad7780.c
+@@ -87,12 +87,16 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
+ long m)
+ {
+ struct ad7780_state *st = iio_priv(indio_dev);
++ int voltage_uv;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ return ad_sigma_delta_single_conversion(indio_dev, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+- *val = st->int_vref_mv * st->gain;
++ voltage_uv = regulator_get_voltage(st->reg);
++ if (voltage_uv < 0)
++ return voltage_uv;
++ *val = (voltage_uv / 1000) * st->gain;
+ *val2 = chan->scan_type.realbits - 1;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
+index 59586947a936..51cda9151412 100644
+--- a/drivers/staging/iio/resolver/ad2s90.c
++++ b/drivers/staging/iio/resolver/ad2s90.c
+@@ -85,7 +85,12 @@ static int ad2s90_probe(struct spi_device *spi)
+ /* need 600ns between CS and the first falling edge of SCLK */
+ spi->max_speed_hz = 830000;
+ spi->mode = SPI_MODE_3;
+- spi_setup(spi);
++ ret = spi_setup(spi);
++
++ if (ret < 0) {
++ dev_err(&spi->dev, "spi_setup failed!\n");
++ return ret;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
+index c85a805a1243..a497ec197872 100644
+--- a/drivers/staging/pi433/pi433_if.c
++++ b/drivers/staging/pi433/pi433_if.c
+@@ -1255,6 +1255,10 @@ static int pi433_probe(struct spi_device *spi)
+
+ /* create cdev */
+ device->cdev = cdev_alloc();
++ if (!device->cdev) {
++ dev_dbg(device->dev, "allocation of cdev failed");
++ goto cdev_failed;
++ }
+ device->cdev->owner = THIS_MODULE;
+ cdev_init(device->cdev, &pi433_fops);
+ retval = cdev_add(device->cdev, device->devt, 1);
+diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
+index eac63aab8162..93742dbdee77 100644
+--- a/drivers/staging/speakup/spk_ttyio.c
++++ b/drivers/staging/speakup/spk_ttyio.c
+@@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch)
+ return;
+ }
+
+- speakup_tty->ops->send_xchar(speakup_tty, ch);
++ if (speakup_tty->ops->send_xchar)
++ speakup_tty->ops->send_xchar(speakup_tty, ch);
+ mutex_unlock(&speakup_tty_mutex);
+ }
+
+@@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
+ return;
+ }
+
+- speakup_tty->ops->tiocmset(speakup_tty, set, clear);
++ if (speakup_tty->ops->tiocmset)
++ speakup_tty->ops->tiocmset(speakup_tty, set, clear);
+ mutex_unlock(&speakup_tty_mutex);
+ }
+
+diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
+index df35fc01fd3e..43626e15703a 100644
+--- a/drivers/tee/optee/supp.c
++++ b/drivers/tee/optee/supp.c
+@@ -19,7 +19,7 @@
+ struct optee_supp_req {
+ struct list_head link;
+
+- bool busy;
++ bool in_queue;
+ u32 func;
+ u32 ret;
+ size_t num_params;
+@@ -54,7 +54,6 @@ void optee_supp_release(struct optee_supp *supp)
+
+ /* Abort all request retrieved by supplicant */
+ idr_for_each_entry(&supp->idr, req, id) {
+- req->busy = false;
+ idr_remove(&supp->idr, id);
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ complete(&req->c);
+@@ -63,6 +62,7 @@ void optee_supp_release(struct optee_supp *supp)
+ /* Abort all queued requests */
+ list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
+ list_del(&req->link);
++ req->in_queue = false;
+ req->ret = TEEC_ERROR_COMMUNICATION;
+ complete(&req->c);
+ }
+@@ -103,6 +103,7 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ /* Insert the request in the request list */
+ mutex_lock(&supp->mutex);
+ list_add_tail(&req->link, &supp->reqs);
++ req->in_queue = true;
+ mutex_unlock(&supp->mutex);
+
+ /* Tell an eventual waiter there's a new request */
+@@ -130,9 +131,10 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
+ * will serve all requests in a timely manner and
+ * interrupting then wouldn't make sense.
+ */
+- interruptable = !req->busy;
+- if (!req->busy)
++ if (req->in_queue) {
+ list_del(&req->link);
++ req->in_queue = false;
++ }
+ }
+ mutex_unlock(&supp->mutex);
+
+@@ -176,7 +178,7 @@ static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
+ return ERR_PTR(-ENOMEM);
+
+ list_del(&req->link);
+- req->busy = true;
++ req->in_queue = false;
+
+ return req;
+ }
+@@ -318,7 +320,6 @@ static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
+ if ((num_params - nm) != req->num_params)
+ return ERR_PTR(-EINVAL);
+
+- req->busy = false;
+ idr_remove(&supp->idr, id);
+ supp->req_id = -1;
+ *num_meta = nm;
+diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
+index 23ad4f9f2143..24b006a95142 100644
+--- a/drivers/thermal/broadcom/bcm2835_thermal.c
++++ b/drivers/thermal/broadcom/bcm2835_thermal.c
+@@ -27,6 +27,8 @@
+ #include <linux/platform_device.h>
+ #include <linux/thermal.h>
+
++#include "../thermal_hwmon.h"
++
+ #define BCM2835_TS_TSENSCTL 0x00
+ #define BCM2835_TS_TSENSSTAT 0x04
+
+@@ -275,6 +277,15 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, tz);
+
++ /*
++ * Thermal_zone doesn't enable hwmon as default,
++ * enable it here
++ */
++ tz->tzp->no_hwmon = false;
++ err = thermal_add_hwmon_sysfs(tz);
++ if (err)
++ goto err_tz;
++
+ bcm2835_thermal_debugfs(pdev);
+
+ return 0;
+diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
+index bf1c628d4a7a..e22fc60ad36d 100644
+--- a/drivers/thermal/thermal-generic-adc.c
++++ b/drivers/thermal/thermal-generic-adc.c
+@@ -26,7 +26,7 @@ struct gadc_thermal_info {
+
+ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
+ {
+- int temp, adc_hi, adc_lo;
++ int temp, temp_hi, temp_lo, adc_hi, adc_lo;
+ int i;
+
+ for (i = 0; i < gti->nlookup_table; i++) {
+@@ -36,13 +36,17 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
+
+ if (i == 0) {
+ temp = gti->lookup_table[0];
+- } else if (i >= (gti->nlookup_table - 1)) {
++ } else if (i >= gti->nlookup_table) {
+ temp = gti->lookup_table[2 * (gti->nlookup_table - 1)];
+ } else {
+ adc_hi = gti->lookup_table[2 * i - 1];
+ adc_lo = gti->lookup_table[2 * i + 1];
+- temp = gti->lookup_table[2 * i];
+- temp -= ((val - adc_lo) * 1000) / (adc_hi - adc_lo);
++
++ temp_hi = gti->lookup_table[2 * i - 2];
++ temp_lo = gti->lookup_table[2 * i];
++
++ temp = temp_hi + mult_frac(temp_lo - temp_hi, val - adc_hi,
++ adc_lo - adc_hi);
+ }
+
+ return temp;
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 441778100887..bf9721fc2824 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -451,16 +451,20 @@ static void update_temperature(struct thermal_zone_device *tz)
+ tz->last_temperature, tz->temperature);
+ }
+
+-static void thermal_zone_device_reset(struct thermal_zone_device *tz)
++static void thermal_zone_device_init(struct thermal_zone_device *tz)
+ {
+ struct thermal_instance *pos;
+-
+ tz->temperature = THERMAL_TEMP_INVALID;
+- tz->passive = 0;
+ list_for_each_entry(pos, &tz->thermal_instances, tz_node)
+ pos->initialized = false;
+ }
+
++static void thermal_zone_device_reset(struct thermal_zone_device *tz)
++{
++ tz->passive = 0;
++ thermal_zone_device_init(tz);
++}
++
+ void thermal_zone_device_update(struct thermal_zone_device *tz,
+ enum thermal_notify_event event)
+ {
+@@ -1502,7 +1506,7 @@ static int thermal_pm_notify(struct notifier_block *nb,
+ case PM_POST_SUSPEND:
+ atomic_set(&in_suspend, 0);
+ list_for_each_entry(tz, &thermal_tz_list, node) {
+- thermal_zone_device_reset(tz);
++ thermal_zone_device_init(tz);
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h
+index 019f6f88224e..a160b9d62dd0 100644
+--- a/drivers/thermal/thermal_hwmon.h
++++ b/drivers/thermal/thermal_hwmon.h
+@@ -19,13 +19,13 @@
+ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
+ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
+ #else
+-static int
++static inline int
+ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+ {
+ return 0;
+ }
+
+-static void
++static inline void
+ thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
+ {
+ }
+diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
+index 2241ceae7d7f..aa99edb4dff7 100644
+--- a/drivers/thermal/thermal_sysfs.c
++++ b/drivers/thermal/thermal_sysfs.c
+@@ -712,11 +712,14 @@ cur_state_store(struct device *dev, struct device_attribute *attr,
+ if ((long)state < 0)
+ return -EINVAL;
+
++ mutex_lock(&cdev->lock);
++
+ result = cdev->ops->set_cur_state(cdev, state);
+- if (result)
+- return result;
+- thermal_cooling_device_stats_update(cdev, state);
+- return count;
++ if (!result)
++ thermal_cooling_device_stats_update(cdev, state);
++
++ mutex_unlock(&cdev->lock);
++ return result ? result : count;
+ }
+
+ static struct device_attribute
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index f80a300b5d68..48bd694a5fa1 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -3420,6 +3420,11 @@ static int
+ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
+ {
+ int num_iomem, num_port, first_port = -1, i;
++ int rc;
++
++ rc = serial_pci_is_class_communication(dev);
++ if (rc)
++ return rc;
+
+ /*
+ * Should we try to make guesses for multiport serial devices later?
+@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
+
+ board = &pci_boards[ent->driver_data];
+
+- rc = serial_pci_is_class_communication(dev);
+- if (rc)
+- return rc;
+-
+ rc = serial_pci_is_blacklisted(dev);
+ if (rc)
+ return rc;
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 3f8d1274fc85..7d030c2e42ff 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1477,6 +1477,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ else
+ cr1 &= ~UARTCR1_PT;
+ }
++ } else {
++ cr1 &= ~UARTCR1_PE;
+ }
+
+ /* ask the core to calculate the divisor */
+@@ -1688,6 +1690,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ else
+ ctrl &= ~UARTCTRL_PT;
+ }
++ } else {
++ ctrl &= ~UARTCTRL_PE;
+ }
+
+ /* ask the core to calculate the divisor */
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 2f8fa184aafa..c6058b52d5d5 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1365,11 +1365,14 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
+ wr_regl(port, S3C2410_ULCON, ulcon);
+ wr_regl(port, S3C2410_UBRDIV, quot);
+
++ port->status &= ~UPSTAT_AUTOCTS;
++
+ umcon = rd_regl(port, S3C2410_UMCON);
+ if (termios->c_cflag & CRTSCTS) {
+ umcon |= S3C2410_UMCOM_AFC;
+ /* Disable RTS when RX FIFO contains 63 bytes */
+ umcon &= ~S3C2412_UMCON_AFC_8;
++ port->status = UPSTAT_AUTOCTS;
+ } else {
+ umcon &= ~S3C2410_UMCOM_AFC;
+ }
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 7fe679413188..f0b354b65a0e 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty)
+ struct uart_port *port;
+ unsigned long flags;
+
++ if (!state)
++ return;
++
+ port = uart_port_lock(state, flags);
+ __uart_start(tty);
+ uart_port_unlock(port, flags);
+@@ -727,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty)
+ upstat_t mask = UPSTAT_SYNC_FIFO;
+ struct uart_port *port;
+
++ if (!state)
++ return;
++
+ port = uart_port_ref(state);
+ if (!port)
+ return;
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index effba6ce0caa..859b173e3b82 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -1331,7 +1331,7 @@ static void sci_tx_dma_release(struct sci_port *s)
+ dma_release_channel(chan);
+ }
+
+-static void sci_submit_rx(struct sci_port *s)
++static int sci_submit_rx(struct sci_port *s, bool port_lock_held)
+ {
+ struct dma_chan *chan = s->chan_rx;
+ struct uart_port *port = &s->port;
+@@ -1359,19 +1359,22 @@ static void sci_submit_rx(struct sci_port *s)
+ s->active_rx = s->cookie_rx[0];
+
+ dma_async_issue_pending(chan);
+- return;
++ return 0;
+
+ fail:
++ /* Switch to PIO */
++ if (!port_lock_held)
++ spin_lock_irqsave(&port->lock, flags);
+ if (i)
+ dmaengine_terminate_async(chan);
+ for (i = 0; i < 2; i++)
+ s->cookie_rx[i] = -EINVAL;
+ s->active_rx = -EINVAL;
+- /* Switch to PIO */
+- spin_lock_irqsave(&port->lock, flags);
+ s->chan_rx = NULL;
+ sci_start_rx(port);
+- spin_unlock_irqrestore(&port->lock, flags);
++ if (!port_lock_held)
++ spin_unlock_irqrestore(&port->lock, flags);
++ return -EAGAIN;
+ }
+
+ static void work_fn_tx(struct work_struct *work)
+@@ -1491,7 +1494,7 @@ static enum hrtimer_restart rx_timer_fn(struct hrtimer *t)
+ }
+
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
+- sci_submit_rx(s);
++ sci_submit_rx(s, true);
+
+ /* Direct new serial port interrupts back to CPU */
+ scr = serial_port_in(port, SCSCR);
+@@ -1617,7 +1620,7 @@ static void sci_request_dma(struct uart_port *port)
+ s->chan_rx_saved = s->chan_rx = chan;
+
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
+- sci_submit_rx(s);
++ sci_submit_rx(s, false);
+ }
+ }
+
+@@ -1666,8 +1669,10 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
+ disable_irq_nosync(irq);
+ scr |= SCSCR_RDRQE;
+ } else {
++ if (sci_submit_rx(s, false) < 0)
++ goto handle_pio;
++
+ scr &= ~SCSCR_RIE;
+- sci_submit_rx(s);
+ }
+ serial_port_out(port, SCSCR, scr);
+ /* Clear current interrupt */
+@@ -1679,6 +1684,8 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
+
+ return IRQ_HANDLED;
+ }
++
++handle_pio:
+ #endif
+
+ if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) {
+@@ -1914,7 +1921,7 @@ out_nomem:
+
+ static void sci_free_irq(struct sci_port *port)
+ {
+- int i;
++ int i, j;
+
+ /*
+ * Intentionally in reverse order so we iterate over the muxed
+@@ -1930,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port)
+ if (unlikely(irq < 0))
+ continue;
+
++ /* Check if already freed (irq was muxed) */
++ for (j = 0; j < i; j++)
++ if (port->irqs[j] == irq)
++ j = i + 1;
++ if (j > i)
++ continue;
++
+ free_irq(port->irqs[i], port);
+ kfree(port->irqstr[i]);
+
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index cc62707c0251..3adff4da2ee1 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1111,6 +1111,16 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ USB_PORT_FEAT_ENABLE);
+ }
+
++ /*
++ * Add debounce if USB3 link is in polling/link training state.
++ * Link will automatically transition to Enabled state after
++ * link training completes.
++ */
++ if (hub_is_superspeed(hdev) &&
++ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_POLLING))
++ need_debounce_delay = true;
++
+ /* Clear status-change flags; we'll debounce later */
+ if (portchange & USB_PORT_STAT_C_CONNECTION) {
+ need_debounce_delay = true;
+diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
+index ef2c199e6059..dff2c6e8d797 100644
+--- a/drivers/usb/dwc2/params.c
++++ b/drivers/usb/dwc2/params.c
+@@ -71,6 +71,13 @@ static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
+ p->power_down = false;
+ }
+
++static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
++{
++ struct dwc2_core_params *p = &hsotg->params;
++
++ p->power_down = 0;
++}
++
+ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
+ {
+ struct dwc2_core_params *p = &hsotg->params;
+@@ -151,7 +158,8 @@ const struct of_device_id dwc2_of_match_table[] = {
+ { .compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params },
+ { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
+ { .compatible = "snps,dwc2" },
+- { .compatible = "samsung,s3c6400-hsotg" },
++ { .compatible = "samsung,s3c6400-hsotg",
++ .data = dwc2_set_s3c6400_params },
+ { .compatible = "amlogic,meson8-usb",
+ .data = dwc2_set_amlogic_params },
+ { .compatible = "amlogic,meson8b-usb",
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index d8bf9307901e..0db90f6f4aa8 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -912,8 +912,6 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
+ struct usb_gadget *gadget = &dwc->gadget;
+ enum usb_device_speed speed = gadget->speed;
+
+- dwc3_ep_inc_enq(dep);
+-
+ trb->size = DWC3_TRB_SIZE_LENGTH(length);
+ trb->bpl = lower_32_bits(dma);
+ trb->bph = upper_32_bits(dma);
+@@ -983,16 +981,20 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
+ usb_endpoint_type(dep->endpoint.desc));
+ }
+
+- /* always enable Continue on Short Packet */
++ /*
++ * Enable Continue on Short Packet
++ * when endpoint is not a stream capable
++ */
+ if (usb_endpoint_dir_out(dep->endpoint.desc)) {
+- trb->ctrl |= DWC3_TRB_CTRL_CSP;
++ if (!dep->stream_capable)
++ trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
+ if (short_not_ok)
+ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+ }
+
+ if ((!no_interrupt && !chain) ||
+- (dwc3_calc_trbs_left(dep) == 0))
++ (dwc3_calc_trbs_left(dep) == 1))
+ trb->ctrl |= DWC3_TRB_CTRL_IOC;
+
+ if (chain)
+@@ -1003,6 +1005,8 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
+
+ trb->ctrl |= DWC3_TRB_CTRL_HWO;
+
++ dwc3_ep_inc_enq(dep);
++
+ trace_dwc3_prepare_trb(dep, trb);
+ }
+
+@@ -1106,7 +1110,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ unsigned int rem = length % maxp;
+
+- if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) {
++ if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb *trb;
+
+diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
+index f22714cce070..f27c5cbe285c 100644
+--- a/drivers/usb/dwc3/trace.h
++++ b/drivers/usb/dwc3/trace.h
+@@ -251,9 +251,11 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
+ s = "2x ";
+ break;
+ case 3:
++ default:
+ s = "3x ";
+ break;
+ }
++ break;
+ default:
+ s = "";
+ } s; }),
+diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
+index 660878a19505..b77f3126580e 100644
+--- a/drivers/usb/gadget/udc/net2272.c
++++ b/drivers/usb/gadget/udc/net2272.c
+@@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
+ #if defined(PLX_PCI_RDK2)
+ /* see if PCI int for us by checking irqstat */
+ intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
+- if (!intcsr & (1 << NET2272_PCI_IRQ)) {
++ if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
+ spin_unlock(&dev->lock);
+ return IRQ_NONE;
+ }
+diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
+index d045d8458f81..48d10a61e271 100644
+--- a/drivers/usb/mtu3/mtu3_core.c
++++ b/drivers/usb/mtu3/mtu3_core.c
+@@ -578,8 +578,10 @@ static void mtu3_regs_init(struct mtu3 *mtu)
+ if (mtu->is_u3_ip) {
+ /* disable LGO_U1/U2 by default */
+ mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL,
+- SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE |
+ SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE);
++ /* enable accept LGO_U1/U2 link command from host */
++ mtu3_setbits(mbase, U3D_LINK_POWER_CONTROL,
++ SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE);
+ /* device responses to u3_exit from host automatically */
+ mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
+ /* automatically build U2 link when U3 detect fail */
+diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
+index 25216e79cd6e..3c464d8ae023 100644
+--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
++++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
+@@ -336,9 +336,9 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu,
+
+ lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
+ if (set)
+- lpc |= SW_U1_ACCEPT_ENABLE;
++ lpc |= SW_U1_REQUEST_ENABLE;
+ else
+- lpc &= ~SW_U1_ACCEPT_ENABLE;
++ lpc &= ~SW_U1_REQUEST_ENABLE;
+ mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
+
+ mtu->u1_enable = !!set;
+@@ -351,9 +351,9 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu,
+
+ lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
+ if (set)
+- lpc |= SW_U2_ACCEPT_ENABLE;
++ lpc |= SW_U2_REQUEST_ENABLE;
+ else
+- lpc &= ~SW_U2_ACCEPT_ENABLE;
++ lpc &= ~SW_U2_REQUEST_ENABLE;
+ mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
+
+ mtu->u2_enable = !!set;
+diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
+index 23a0df79ef21..403eb97915f8 100644
+--- a/drivers/usb/musb/musb_dsps.c
++++ b/drivers/usb/musb/musb_dsps.c
+@@ -181,9 +181,11 @@ static void dsps_musb_enable(struct musb *musb)
+
+ musb_writel(reg_base, wrp->epintr_set, epmask);
+ musb_writel(reg_base, wrp->coreintr_set, coremask);
+- /* start polling for ID change in dual-role idle mode */
+- if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
+- musb->port_mode == MUSB_OTG)
++ /*
++ * start polling for runtime PM active and idle,
++ * and for ID change in dual-role idle mode.
++ */
++ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
+ dsps_mod_timer(glue, -1);
+ }
+
+@@ -227,8 +229,13 @@ static int dsps_check_status(struct musb *musb, void *unused)
+
+ switch (musb->xceiv->otg->state) {
+ case OTG_STATE_A_WAIT_VRISE:
+- dsps_mod_timer_optional(glue);
+- break;
++ if (musb->port_mode == MUSB_HOST) {
++ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
++ dsps_mod_timer_optional(glue);
++ break;
++ }
++ /* fall through */
++
+ case OTG_STATE_A_WAIT_BCON:
+ /* keep VBUS on for host-only mode */
+ if (musb->port_mode == MUSB_HOST) {
+@@ -249,6 +256,10 @@ static int dsps_check_status(struct musb *musb, void *unused)
+ musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
++
++ if (musb->port_mode == MUSB_PERIPHERAL)
++ skip_session = 1;
++
+ if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
+ musb_writeb(mregs, MUSB_DEVCTL,
+ MUSB_DEVCTL_SESSION);
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index eae8b1b1b45b..ffe462a657b1 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum)
+ }
+
+ if (request) {
+- u8 is_dma = 0;
+- bool short_packet = false;
+
+ trace_musb_req_tx(req);
+
+ if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+- is_dma = 1;
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
+ MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
+@@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
+ */
+ if ((request->zero && request->length)
+ && (request->length % musb_ep->packet_sz == 0)
+- && (request->actual == request->length))
+- short_packet = true;
++ && (request->actual == request->length)) {
+
+- if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
+- (is_dma && (!dma->desired_mode ||
+- (request->actual &
+- (musb_ep->packet_sz - 1)))))
+- short_packet = true;
+-
+- if (short_packet) {
+ /*
+ * On DMA completion, FIFO may not be
+ * available yet...
+diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
+index a688f7f87829..5fc6825745f2 100644
+--- a/drivers/usb/musb/musbhsdma.c
++++ b/drivers/usb/musb/musbhsdma.c
+@@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ /* completed */
+- if ((devctl & MUSB_DEVCTL_HM)
+- && (musb_channel->transmit)
+- && ((channel->desired_mode == 0)
+- || (channel->actual_len &
+- (musb_channel->max_packet_sz - 1)))
+- ) {
++ if (musb_channel->transmit &&
++ (!channel->desired_mode ||
++ (channel->actual_len %
++ musb_channel->max_packet_sz))) {
+ u8 epnum = musb_channel->epnum;
+ int offset = musb->io.ep_offset(epnum,
+ MUSB_TXCSR);
+@@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
+ */
+ musb_ep_select(mbase, epnum);
+ txcsr = musb_readw(mbase, offset);
+- txcsr &= ~(MUSB_TXCSR_DMAENAB
++ if (channel->desired_mode == 1) {
++ txcsr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_AUTOSET);
+- musb_writew(mbase, offset, txcsr);
+- /* Send out the packet */
+- txcsr &= ~MUSB_TXCSR_DMAMODE;
++ musb_writew(mbase, offset, txcsr);
++ /* Send out the packet */
++ txcsr &= ~MUSB_TXCSR_DMAMODE;
++ txcsr |= MUSB_TXCSR_DMAENAB;
++ }
+ txcsr |= MUSB_TXCSR_TXPKTRDY;
+ musb_writew(mbase, offset, txcsr);
+ }
+diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
+index 27bdb7222527..f5f0568d8533 100644
+--- a/drivers/usb/phy/phy-am335x.c
++++ b/drivers/usb/phy/phy-am335x.c
+@@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
+ if (ret)
+ return ret;
+
+- ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
+- if (ret)
+- return ret;
+ am_phy->usb_phy_gen.phy.init = am335x_init;
+ am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
+
+@@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
+ device_set_wakeup_enable(dev, false);
+ phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
+
+- return 0;
++ return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
+ }
+
+ static int am335x_phy_remove(struct platform_device *pdev)
+diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
+index 4310df46639d..b0792585d5bc 100644
+--- a/drivers/usb/renesas_usbhs/common.c
++++ b/drivers/usb/renesas_usbhs/common.c
+@@ -457,6 +457,10 @@ static int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
+ * platform functions
+ */
+ static const struct of_device_id usbhs_of_match[] = {
++ {
++ .compatible = "renesas,usbhs-r8a774c0",
++ .data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
++ },
+ {
+ .compatible = "renesas,usbhs-r8a7790",
+ .data = (void *)USBHS_TYPE_RCAR_GEN2,
+diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
+index ff561073ee4e..42f909618f04 100644
+--- a/drivers/video/fbdev/clps711x-fb.c
++++ b/drivers/video/fbdev/clps711x-fb.c
+@@ -287,14 +287,17 @@ static int clps711x_fb_probe(struct platform_device *pdev)
+ }
+
+ ret = of_get_fb_videomode(disp, &cfb->mode, OF_USE_NATIVE_MODE);
+- if (ret)
++ if (ret) {
++ of_node_put(disp);
+ goto out_fb_release;
++ }
+
+ of_property_read_u32(disp, "ac-prescale", &cfb->ac_prescale);
+ cfb->cmap_invert = of_property_read_bool(disp, "cmap-invert");
+
+ ret = of_property_read_u32(disp, "bits-per-pixel",
+ &info->var.bits_per_pixel);
++ of_node_put(disp);
+ if (ret)
+ goto out_fb_release;
+
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 75ebbbf0a1fb..5d961e3ac66e 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -3066,7 +3066,7 @@ static int fbcon_fb_unbind(int idx)
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map[i] != idx &&
+ con2fb_map[i] != -1) {
+- new_idx = i;
++ new_idx = con2fb_map[i];
+ break;
+ }
+ }
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index 20405421a5ed..77cee99fc36c 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -435,7 +435,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+ image->dx += image->width + 8;
+ }
+ } else if (rotate == FB_ROTATE_UD) {
+- for (x = 0; x < num; x++) {
++ u32 dx = image->dx;
++
++ for (x = 0; x < num && image->dx <= dx; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dx -= image->width + 8;
+ }
+@@ -447,7 +449,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+ image->dy += image->height + 8;
+ }
+ } else if (rotate == FB_ROTATE_CCW) {
+- for (x = 0; x < num; x++) {
++ u32 dy = image->dy;
++
++ for (x = 0; x < num && image->dy <= dy; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dy -= image->height + 8;
+ }
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index 3093655c7b92..1475ed5ffcde 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -1312,7 +1312,7 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
+ return -EINVAL;
+ }
+
+- if (f32bit)
++ if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
+ ret = vbg_hgcm_call32(gdev, client_id,
+ call->function, call->timeout_ms,
+ VBG_IOCTL_HGCM_CALL_PARMS32(call),
+diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
+index 88d81feba4e6..d01efd342dc0 100644
+--- a/drivers/watchdog/renesas_wdt.c
++++ b/drivers/watchdog/renesas_wdt.c
+@@ -77,12 +77,17 @@ static int rwdt_init_timeout(struct watchdog_device *wdev)
+ static int rwdt_start(struct watchdog_device *wdev)
+ {
+ struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
++ u8 val;
+
+ pm_runtime_get_sync(wdev->parent);
+
+- rwdt_write(priv, 0, RWTCSRB);
+- rwdt_write(priv, priv->cks, RWTCSRA);
++ /* Stop the timer before we modify any register */
++ val = readb_relaxed(priv->base + RWTCSRA) & ~RWTCSRA_TME;
++ rwdt_write(priv, val, RWTCSRA);
++
+ rwdt_init_timeout(wdev);
++ rwdt_write(priv, priv->cks, RWTCSRA);
++ rwdt_write(priv, 0, RWTCSRB);
+
+ while (readb_relaxed(priv->base + RWTCSRA) & RWTCSRA_WRFLG)
+ cpu_relax();
+diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
+index 7cde3f46ad26..d0078cbb718b 100644
+--- a/fs/binfmt_script.c
++++ b/fs/binfmt_script.c
+@@ -42,10 +42,14 @@ static int load_script(struct linux_binprm *bprm)
+ fput(bprm->file);
+ bprm->file = NULL;
+
+- bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
+- if ((cp = strchr(bprm->buf, '\n')) == NULL)
+- cp = bprm->buf+BINPRM_BUF_SIZE-1;
++ for (cp = bprm->buf+2;; cp++) {
++ if (cp >= bprm->buf + BINPRM_BUF_SIZE)
++ return -ENOEXEC;
++ if (!*cp || (*cp == '\n'))
++ break;
++ }
+ *cp = '\0';
++
+ while (cp > bprm->buf) {
+ cp--;
+ if ((*cp == ' ') || (*cp == '\t'))
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index 7177d1d33584..45f5cf9cd203 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -29,6 +29,7 @@ enum {
+ BTRFS_INODE_IN_DELALLOC_LIST,
+ BTRFS_INODE_READDIO_NEED_LOCK,
+ BTRFS_INODE_HAS_PROPS,
++ BTRFS_INODE_SNAPSHOT_FLUSH,
+ };
+
+ /* in memory btrfs inode */
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 2cddfe7806a4..82682da5a40d 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3155,7 +3155,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ struct inode *inode, u64 new_size,
+ u32 min_type);
+
+-int btrfs_start_delalloc_inodes(struct btrfs_root *root);
++int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
+ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr);
+ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+ unsigned int extra_bits,
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 4dd6faab02bb..79f82f2ec4d5 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3928,12 +3928,25 @@ static int extent_write_cache_pages(struct address_space *mapping,
+ range_whole = 1;
+ scanned = 1;
+ }
+- if (wbc->sync_mode == WB_SYNC_ALL)
++
++ /*
++ * We do the tagged writepage as long as the snapshot flush bit is set
++ * and we are the first one who do the filemap_flush() on this inode.
++ *
++ * The nr_to_write == LONG_MAX is needed to make sure other flushers do
++ * not race in and drop the bit.
++ */
++ if (range_whole && wbc->nr_to_write == LONG_MAX &&
++ test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
++ &BTRFS_I(inode)->runtime_flags))
++ wbc->tagged_writepages = 1;
++
++ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+ tag = PAGECACHE_TAG_TOWRITE;
+ else
+ tag = PAGECACHE_TAG_DIRTY;
+ retry:
+- if (wbc->sync_mode == WB_SYNC_ALL)
++ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+ tag_pages_for_writeback(mapping, index, end);
+ done_index = index;
+ while (!done && !nr_to_write_done && (index <= end) &&
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 83b3a626c796..59f361f7d0c1 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -10005,7 +10005,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
+ * some fairly slow code that needs optimization. This walks the list
+ * of all the inodes with pending delalloc and forces them to disk.
+ */
+-static int start_delalloc_inodes(struct btrfs_root *root, int nr)
++static int start_delalloc_inodes(struct btrfs_root *root, int nr, bool snapshot)
+ {
+ struct btrfs_inode *binode;
+ struct inode *inode;
+@@ -10033,6 +10033,9 @@ static int start_delalloc_inodes(struct btrfs_root *root, int nr)
+ }
+ spin_unlock(&root->delalloc_lock);
+
++ if (snapshot)
++ set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
++ &binode->runtime_flags);
+ work = btrfs_alloc_delalloc_work(inode);
+ if (!work) {
+ iput(inode);
+@@ -10066,7 +10069,7 @@ out:
+ return ret;
+ }
+
+-int btrfs_start_delalloc_inodes(struct btrfs_root *root)
++int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret;
+@@ -10074,7 +10077,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root)
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
+ return -EROFS;
+
+- ret = start_delalloc_inodes(root, -1);
++ ret = start_delalloc_inodes(root, -1, true);
+ if (ret > 0)
+ ret = 0;
+ return ret;
+@@ -10103,7 +10106,7 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr)
+ &fs_info->delalloc_roots);
+ spin_unlock(&fs_info->delalloc_root_lock);
+
+- ret = start_delalloc_inodes(root, nr);
++ ret = start_delalloc_inodes(root, nr, false);
+ btrfs_put_fs_root(root);
+ if (ret < 0)
+ goto out;
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index c9152155fcbf..8bf9cce11213 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -778,7 +778,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ wait_event(root->subv_writers->wait,
+ percpu_counter_sum(&root->subv_writers->counter) == 0);
+
+- ret = btrfs_start_delalloc_inodes(root);
++ ret = btrfs_start_delalloc_snapshot(root);
+ if (ret)
+ goto dec_and_free;
+
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 0ee1cd4b56fb..285f64f2de5f 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -850,6 +850,35 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ return ERR_PTR(-EEXIST);
+ }
+
++ /*
++ * We are going to replace the device path for a given devid,
++ * make sure it's the same device if the device is mounted
++ */
++ if (device->bdev) {
++ struct block_device *path_bdev;
++
++ path_bdev = lookup_bdev(path);
++ if (IS_ERR(path_bdev)) {
++ mutex_unlock(&fs_devices->device_list_mutex);
++ return ERR_CAST(path_bdev);
++ }
++
++ if (device->bdev != path_bdev) {
++ bdput(path_bdev);
++ mutex_unlock(&fs_devices->device_list_mutex);
++ btrfs_warn_in_rcu(device->fs_info,
++ "duplicate device fsid:devid for %pU:%llu old:%s new:%s",
++ disk_super->fsid, devid,
++ rcu_str_deref(device->name), path);
++ return ERR_PTR(-EEXIST);
++ }
++ bdput(path_bdev);
++ btrfs_info_in_rcu(device->fs_info,
++ "device fsid %pU devid %llu moved old:%s new:%s",
++ disk_super->fsid, devid,
++ rcu_str_deref(device->name), path);
++ }
++
+ name = rcu_string_strdup(path, GFP_NOFS);
+ if (!name) {
+ mutex_unlock(&fs_devices->device_list_mutex);
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index e169e1a5fd35..3925a7bfc74d 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -655,7 +655,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
+ /* scan and find it */
+ int i;
+ char *cur_ent;
+- char *end_of_smb = cfile->srch_inf.ntwrk_buf_start +
++ char *end_of_smb;
++
++ if (cfile->srch_inf.ntwrk_buf_start == NULL) {
++ cifs_dbg(VFS, "ntwrk_buf_start is NULL during readdir\n");
++ return -EIO;
++ }
++
++ end_of_smb = cfile->srch_inf.ntwrk_buf_start +
+ server->ops->calc_smb_size(
+ cfile->srch_inf.ntwrk_buf_start,
+ server);
+diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
+index 562fa8c3edff..47ee66d70109 100644
+--- a/fs/dlm/ast.c
++++ b/fs/dlm/ast.c
+@@ -292,6 +292,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
+ flush_workqueue(ls->ls_callback_wq);
+ }
+
++#define MAX_CB_QUEUE 25
++
+ void dlm_callback_resume(struct dlm_ls *ls)
+ {
+ struct dlm_lkb *lkb, *safe;
+@@ -302,15 +304,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
+ if (!ls->ls_callback_wq)
+ return;
+
++more:
+ mutex_lock(&ls->ls_cb_mutex);
+ list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
+ list_del_init(&lkb->lkb_cb_list);
+ queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
+ count++;
++ if (count == MAX_CB_QUEUE)
++ break;
+ }
+ mutex_unlock(&ls->ls_cb_mutex);
+
+ if (count)
+ log_rinfo(ls, "dlm_callback_resume %d", count);
++ if (count == MAX_CB_QUEUE) {
++ count = 0;
++ cond_resched();
++ goto more;
++ }
+ }
+
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 42bbe6824b4b..58f48ea0db23 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1154,7 +1154,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
+ * semantics). All the events that happen during that period of time are
+ * chained in ep->ovflist and requeued later on.
+ */
+- if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
++ if (ep->ovflist != EP_UNACTIVE_PTR) {
+ if (epi->next == EP_UNACTIVE_PTR) {
+ epi->next = ep->ovflist;
+ ep->ovflist = epi;
+diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
+index 111824199a88..b9fe937a3c70 100644
+--- a/fs/f2fs/acl.c
++++ b/fs/f2fs/acl.c
+@@ -352,12 +352,14 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
+ return PTR_ERR(p);
+
+ clone = f2fs_acl_clone(p, GFP_NOFS);
+- if (!clone)
+- goto no_mem;
++ if (!clone) {
++ ret = -ENOMEM;
++ goto release_acl;
++ }
+
+ ret = f2fs_acl_create_masq(clone, mode);
+ if (ret < 0)
+- goto no_mem_clone;
++ goto release_clone;
+
+ if (ret == 0)
+ posix_acl_release(clone);
+@@ -371,11 +373,11 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
+
+ return 0;
+
+-no_mem_clone:
++release_clone:
+ posix_acl_release(clone);
+-no_mem:
++release_acl:
+ posix_acl_release(p);
+- return -ENOMEM;
++ return ret;
+ }
+
+ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 11f28342f641..08314fb42652 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2259,6 +2259,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
+ bool locked = false;
+ struct extent_info ei = {0,0,0};
+ int err = 0;
++ int flag;
+
+ /*
+ * we already allocated all the blocks, so we don't need to get
+@@ -2268,9 +2269,15 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
+ !is_inode_flag_set(inode, FI_NO_PREALLOC))
+ return 0;
+
++ /* f2fs_lock_op avoids race between write CP and convert_inline_page */
++ if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
++ flag = F2FS_GET_BLOCK_DEFAULT;
++ else
++ flag = F2FS_GET_BLOCK_PRE_AIO;
++
+ if (f2fs_has_inline_data(inode) ||
+ (pos & PAGE_MASK) >= i_size_read(inode)) {
+- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
++ __do_map_lock(sbi, flag, true);
+ locked = true;
+ }
+ restart:
+@@ -2308,6 +2315,7 @@ restart:
+ f2fs_put_dnode(&dn);
+ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
+ true);
++ WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
+ locked = true;
+ goto restart;
+ }
+@@ -2321,7 +2329,7 @@ out:
+ f2fs_put_dnode(&dn);
+ unlock_out:
+ if (locked)
+- __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
++ __do_map_lock(sbi, flag, false);
+ return err;
+ }
+
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index ecb735142276..42aef5c94927 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -2613,10 +2613,19 @@ static inline bool is_dot_dotdot(const struct qstr *str)
+
+ static inline bool f2fs_may_extent_tree(struct inode *inode)
+ {
+- if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++
++ if (!test_opt(sbi, EXTENT_CACHE) ||
+ is_inode_flag_set(inode, FI_NO_EXTENT))
+ return false;
+
++ /*
++ * for recovered files during mount do not create extents
++ * if shrinker is not registered.
++ */
++ if (list_empty(&sbi->s_list))
++ return false;
++
+ return S_ISREG(inode->i_mode);
+ }
+
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 5474aaa274b9..fd36aa6569dc 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -220,6 +220,9 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
+
+ trace_f2fs_sync_file_enter(inode);
+
++ if (S_ISDIR(inode->i_mode))
++ goto go_write;
++
+ /* if fdatasync is triggered, let's do in-place-update */
+ if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
+ set_inode_flag(inode, FI_NEED_IPU);
+diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
+index 36cfd816c160..29042e6d5126 100644
+--- a/fs/f2fs/shrinker.c
++++ b/fs/f2fs/shrinker.c
+@@ -138,6 +138,6 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
+ f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
+
+ spin_lock(&f2fs_list_lock);
+- list_del(&sbi->s_list);
++ list_del_init(&sbi->s_list);
+ spin_unlock(&f2fs_list_lock);
+ }
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 338138b34993..c9639ef0e8d5 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1039,9 +1039,6 @@ static void f2fs_put_super(struct super_block *sb)
+ f2fs_write_checkpoint(sbi, &cpc);
+ }
+
+- /* f2fs_write_checkpoint can update stat informaion */
+- f2fs_destroy_stats(sbi);
+-
+ /*
+ * normally superblock is clean, so we need to release this.
+ * In addition, EIO will skip do checkpoint, we need this as well.
+@@ -1061,6 +1058,12 @@ static void f2fs_put_super(struct super_block *sb)
+ iput(sbi->node_inode);
+ iput(sbi->meta_inode);
+
++ /*
++ * iput() can update stat information, if f2fs_write_checkpoint()
++ * above failed with error.
++ */
++ f2fs_destroy_stats(sbi);
++
+ /* destroy f2fs internal modules */
+ f2fs_destroy_node_manager(sbi);
+ f2fs_destroy_segment_manager(sbi);
+@@ -2980,30 +2983,30 @@ try_onemore:
+
+ f2fs_build_gc_manager(sbi);
+
++ err = f2fs_build_stats(sbi);
++ if (err)
++ goto free_nm;
++
+ /* get an inode for node space */
+ sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
+ if (IS_ERR(sbi->node_inode)) {
+ f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
+ err = PTR_ERR(sbi->node_inode);
+- goto free_nm;
++ goto free_stats;
+ }
+
+- err = f2fs_build_stats(sbi);
+- if (err)
+- goto free_node_inode;
+-
+ /* read root inode and dentry */
+ root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
+ if (IS_ERR(root)) {
+ f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
+ err = PTR_ERR(root);
+- goto free_stats;
++ goto free_node_inode;
+ }
+ if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
+ !root->i_size || !root->i_nlink) {
+ iput(root);
+ err = -EINVAL;
+- goto free_stats;
++ goto free_node_inode;
+ }
+
+ sb->s_root = d_make_root(root); /* allocate root dentry */
+@@ -3121,12 +3124,12 @@ free_sysfs:
+ free_root_inode:
+ dput(sb->s_root);
+ sb->s_root = NULL;
+-free_stats:
+- f2fs_destroy_stats(sbi);
+ free_node_inode:
+ f2fs_release_ino_entry(sbi, true);
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
+ iput(sbi->node_inode);
++free_stats:
++ f2fs_destroy_stats(sbi);
+ free_nm:
+ f2fs_destroy_node_manager(sbi);
+ free_sm:
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index bf0da0382c9e..baaed4d05b22 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1698,7 +1698,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
+ req->in.h.nodeid = outarg->nodeid;
+ req->in.numargs = 2;
+ req->in.argpages = 1;
+- req->page_descs[0].offset = offset;
+ req->end = fuse_retrieve_end;
+
+ index = outarg->offset >> PAGE_SHIFT;
+@@ -1713,6 +1712,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
+
+ this_num = min_t(unsigned, num, PAGE_SIZE - offset);
+ req->pages[req->num_pages] = page;
++ req->page_descs[req->num_pages].offset = offset;
+ req->page_descs[req->num_pages].length = this_num;
+ req->num_pages++;
+
+@@ -2032,8 +2032,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
+
+ ret = fuse_dev_do_write(fud, &cs, len);
+
++ pipe_lock(pipe);
+ for (idx = 0; idx < nbuf; idx++)
+ pipe_buf_release(pipe, &bufs[idx]);
++ pipe_unlock(pipe);
+
+ out:
+ kvfree(bufs);
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index fbd6978479cb..bd500c3b7858 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1778,7 +1778,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
+ spin_unlock(&fc->lock);
+
+ dec_wb_stat(&bdi->wb, WB_WRITEBACK);
+- dec_node_page_state(page, NR_WRITEBACK_TEMP);
++ dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
+ wb_writeout_inc(&bdi->wb);
+ fuse_writepage_free(fc, new_req);
+ fuse_request_free(new_req);
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index ac4b2f005778..5ef2c71348bd 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -2409,8 +2409,7 @@ static int nfs_compare_mount_options(const struct super_block *s, const struct n
+ goto Ebusy;
+ if (a->acdirmax != b->acdirmax)
+ goto Ebusy;
+- if (b->auth_info.flavor_len > 0 &&
+- clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
++ if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
+ goto Ebusy;
+ return 1;
+ Ebusy:
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 7fb9f7c667b1..899174c7a8ae 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1126,6 +1126,8 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
+ case 'Y':
+ case 'y':
+ case '1':
++ if (nn->nfsd_serv)
++ return -EBUSY;
+ nfsd4_end_grace(nn);
+ break;
+ default:
+diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
+index 99ee093182cb..cc9b32b9db7c 100644
+--- a/fs/ocfs2/Makefile
++++ b/fs/ocfs2/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-ccflags-y := -Ifs/ocfs2
++ccflags-y := -I$(src)
+
+ obj-$(CONFIG_OCFS2_FS) += \
+ ocfs2.o \
+diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
+index 1d098c3c00e0..9f8250df99f1 100644
+--- a/fs/ocfs2/buffer_head_io.c
++++ b/fs/ocfs2/buffer_head_io.c
+@@ -152,7 +152,6 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
+ #endif
+ }
+
+- clear_buffer_uptodate(bh);
+ get_bh(bh); /* for end_buffer_read_sync() */
+ bh->b_end_io = end_buffer_read_sync;
+ submit_bh(REQ_OP_READ, 0, bh);
+@@ -306,7 +305,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
+ continue;
+ }
+
+- clear_buffer_uptodate(bh);
+ get_bh(bh); /* for end_buffer_read_sync() */
+ if (validate)
+ set_buffer_needs_validate(bh);
+diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
+index bd1aab1f49a4..ef2854422a6e 100644
+--- a/fs/ocfs2/dlm/Makefile
++++ b/fs/ocfs2/dlm/Makefile
+@@ -1,4 +1,4 @@
+-ccflags-y := -Ifs/ocfs2
++ccflags-y := -I$(src)/..
+
+ obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
+
+diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
+index eed3db8c5b49..33431a0296a3 100644
+--- a/fs/ocfs2/dlmfs/Makefile
++++ b/fs/ocfs2/dlmfs/Makefile
+@@ -1,4 +1,4 @@
+-ccflags-y := -Ifs/ocfs2
++ccflags-y := -I$(src)/..
+
+ obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
+
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 5df554a9f9c9..ae796e10f68b 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1357,6 +1357,12 @@ reread:
+
+ iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
+ ICBTAG_FLAG_AD_MASK;
++ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
++ iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
++ iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
++ ret = -EIO;
++ goto out;
++ }
+ iinfo->i_unique = 0;
+ iinfo->i_lenEAttr = 0;
+ iinfo->i_lenExtents = 0;
+diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
+index 6fc5425b1474..2652d00842d6 100644
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -243,7 +243,7 @@ xfs_attr3_leaf_verify(
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_attr_leafblock *leaf = bp->b_addr;
+ struct xfs_attr_leaf_entry *entries;
+- uint16_t end;
++ uint32_t end; /* must be 32bit - see below */
+ int i;
+
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
+@@ -293,6 +293,11 @@ xfs_attr3_leaf_verify(
+ /*
+ * Quickly check the freemap information. Attribute data has to be
+ * aligned to 4-byte boundaries, and likewise for the free space.
++ *
++ * Note that for 64k block size filesystems, the freemap entries cannot
++ * overflow as they are only be16 fields. However, when checking end
++ * pointer of the freemap, we have to be careful to detect overflows and
++ * so use uint32_t for those checks.
+ */
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
+@@ -303,7 +308,9 @@ xfs_attr3_leaf_verify(
+ return __this_address;
+ if (ichdr.freemap[i].size & 0x3)
+ return __this_address;
+- end = ichdr.freemap[i].base + ichdr.freemap[i].size;
++
++ /* be care of 16 bit overflows here */
++ end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
+ if (end < ichdr.freemap[i].base)
+ return __this_address;
+ if (end > mp->m_attr_geo->blksize)
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index a47670332326..3a496ffe6551 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -1683,10 +1683,13 @@ xfs_bmap_add_extent_delay_real(
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
+ /*
+ * Filling in all of a previously delayed allocation extent.
+- * The right neighbor is contiguous, the left is not.
++ * The right neighbor is contiguous, the left is not. Take care
++ * with delay -> unwritten extent allocation here because the
++ * delalloc record we are overwriting is always written.
+ */
+ PREV.br_startblock = new->br_startblock;
+ PREV.br_blockcount += RIGHT.br_blockcount;
++ PREV.br_state = new->br_state;
+
+ xfs_iext_next(ifp, &bma->icur);
+ xfs_iext_remove(bma->ip, &bma->icur, state);
+diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
+index 34c6d7bd4d18..bbdae2b4559f 100644
+--- a/fs/xfs/libxfs/xfs_btree.c
++++ b/fs/xfs/libxfs/xfs_btree.c
+@@ -330,7 +330,7 @@ xfs_btree_sblock_verify_crc(
+
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
+- return __this_address;
++ return false;
+ return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
+ }
+
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 49f5f5896a43..b697866946d2 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -449,6 +449,7 @@ xfs_map_blocks(
+ }
+
+ wpc->imap = imap;
++ xfs_trim_extent_eof(&wpc->imap, ip);
+ trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
+ return 0;
+ allocate_blocks:
+@@ -459,6 +460,7 @@ allocate_blocks:
+ ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
+ imap.br_startoff + imap.br_blockcount <= cow_fsb);
+ wpc->imap = imap;
++ xfs_trim_extent_eof(&wpc->imap, ip);
+ trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
+ return 0;
+ }
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 6de8d90041ff..211b06e4702e 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -1175,9 +1175,9 @@ xfs_free_file_space(
+ * page could be mmap'd and iomap_zero_range doesn't do that for us.
+ * Writeback of the eof page will do this, albeit clumsily.
+ */
+- if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
++ if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
+ error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+- (offset + len) & ~PAGE_MASK, LLONG_MAX);
++ round_down(offset + len, PAGE_SIZE), LLONG_MAX);
+ }
+
+ return error;
+@@ -1824,6 +1824,12 @@ xfs_swap_extents(
+ if (error)
+ goto out_unlock;
+
++ if (xfs_inode_has_cow_data(tip)) {
++ error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
++ if (error)
++ return error;
++ }
++
+ /*
+ * Extent "swapping" with rmap requires a permanent reservation and
+ * a block reservation because it's really just a remap operation
+diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
+index 12d8455bfbb2..010db5f8fb00 100644
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -1233,9 +1233,23 @@ xfs_buf_iodone(
+ }
+
+ /*
+- * Requeue a failed buffer for writeback
++ * Requeue a failed buffer for writeback.
+ *
+- * Return true if the buffer has been re-queued properly, false otherwise
++ * We clear the log item failed state here as well, but we have to be careful
++ * about reference counts because the only active reference counts on the buffer
++ * may be the failed log items. Hence if we clear the log item failed state
++ * before queuing the buffer for IO we can release all active references to
++ * the buffer and free it, leading to use after free problems in
++ * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
++ * order we process them in - the buffer is locked, and we own the buffer list
++ * so nothing on them is going to change while we are performing this action.
++ *
++ * Hence we can safely queue the buffer for IO before we clear the failed log
++ * item state, therefore always having an active reference to the buffer and
++ * avoiding the transient zero-reference state that leads to use-after-free.
++ *
++ * Return true if the buffer was added to the buffer list, false if it was
++ * already on the buffer list.
+ */
+ bool
+ xfs_buf_resubmit_failed_buffers(
+@@ -1243,16 +1257,16 @@ xfs_buf_resubmit_failed_buffers(
+ struct list_head *buffer_list)
+ {
+ struct xfs_log_item *lip;
++ bool ret;
++
++ ret = xfs_buf_delwri_queue(bp, buffer_list);
+
+ /*
+- * Clear XFS_LI_FAILED flag from all items before resubmit
+- *
+- * XFS_LI_FAILED set/clear is protected by ail_lock, caller this
++ * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this
+ * function already have it acquired
+ */
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
+ xfs_clear_li_failed(lip);
+
+- /* Add this buffer back to the delayed write list */
+- return xfs_buf_delwri_queue(bp, buffer_list);
++ return ret;
+ }
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 0ef5ece5634c..bad90479ade2 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1616,7 +1616,7 @@ xfs_ioc_getbmap(
+ error = 0;
+ out_free_buf:
+ kmem_free(buf);
+- return 0;
++ return error;
+ }
+
+ struct getfsmap_info {
+diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
+index 73a1d77ec187..3091e4bc04ef 100644
+--- a/fs/xfs/xfs_qm_bhv.c
++++ b/fs/xfs/xfs_qm_bhv.c
+@@ -40,7 +40,7 @@ xfs_fill_statvfs_from_dquot(
+ statp->f_files = limit;
+ statp->f_ffree =
+ (statp->f_files > dqp->q_res_icount) ?
+- (statp->f_ffree - dqp->q_res_icount) : 0;
++ (statp->f_files - dqp->q_res_icount) : 0;
+ }
+ }
+
+diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
+index 42ea7bab9144..7088f44c0c59 100644
+--- a/fs/xfs/xfs_reflink.c
++++ b/fs/xfs/xfs_reflink.c
+@@ -302,6 +302,7 @@ xfs_reflink_reserve_cow(
+ if (error)
+ return error;
+
++ xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
+ trace_xfs_reflink_cow_alloc(ip, &got);
+ return 0;
+ }
+diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
+index 4e4423153071..740ac9674848 100644
+--- a/fs/xfs/xfs_stats.c
++++ b/fs/xfs/xfs_stats.c
+@@ -119,7 +119,7 @@ static int xqmstat_proc_show(struct seq_file *m, void *v)
+ int j;
+
+ seq_printf(m, "qm");
+- for (j = XFSSTAT_END_IBT_V2; j < XFSSTAT_END_XQMSTAT; j++)
++ for (j = XFSSTAT_END_REFCOUNT; j < XFSSTAT_END_XQMSTAT; j++)
+ seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
+ seq_putc(m, '\n');
+ return 0;
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 218df7f4d3e1..5041357d0297 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
+ #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+ extern enum cpuhp_smt_control cpu_smt_control;
+ extern void cpu_smt_disable(bool force);
+-extern void cpu_smt_check_topology_early(void);
+ extern void cpu_smt_check_topology(void);
+ #else
+ # define cpu_smt_control (CPU_SMT_ENABLED)
+ static inline void cpu_smt_disable(bool force) { }
+-static inline void cpu_smt_check_topology_early(void) { }
+ static inline void cpu_smt_check_topology(void) { }
+ #endif
+
+diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
+index 5972e4969197..eeae59d3ceb7 100644
+--- a/include/linux/genl_magic_struct.h
++++ b/include/linux/genl_magic_struct.h
+@@ -191,6 +191,7 @@ static inline void ct_assert_unique_operations(void)
+ {
+ switch (0) {
+ #include GENL_MAGIC_INCLUDE_FILE
++ case 0:
+ ;
+ }
+ }
+@@ -209,6 +210,7 @@ static inline void ct_assert_unique_top_level_attributes(void)
+ {
+ switch (0) {
+ #include GENL_MAGIC_INCLUDE_FILE
++ case 0:
+ ;
+ }
+ }
+@@ -218,7 +220,8 @@ static inline void ct_assert_unique_top_level_attributes(void)
+ static inline void ct_assert_unique_ ## s_name ## _attributes(void) \
+ { \
+ switch (0) { \
+- s_fields \
++ s_fields \
++ case 0: \
+ ; \
+ } \
+ }
+diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
+index 21ddbe440030..acc4279ad5e3 100644
+--- a/include/linux/gpio/consumer.h
++++ b/include/linux/gpio/consumer.h
+@@ -142,7 +142,7 @@ int gpiod_is_active_low(const struct gpio_desc *desc);
+ int gpiod_cansleep(const struct gpio_desc *desc);
+
+ int gpiod_to_irq(const struct gpio_desc *desc);
+-void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
++int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
+
+ /* Convert between the old gpio_ and new gpiod_ interfaces */
+ struct gpio_desc *gpio_to_desc(unsigned gpio);
+@@ -465,10 +465,12 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
+ return -EINVAL;
+ }
+
+-static inline void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
++static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
++ const char *name)
+ {
+ /* GPIO can never have been requested */
+ WARN_ON(1);
++ return -EINVAL;
+ }
+
+ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
+diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
+index 8663f216c563..2d6100edf204 100644
+--- a/include/linux/hid-debug.h
++++ b/include/linux/hid-debug.h
+@@ -24,7 +24,10 @@
+
+ #ifdef CONFIG_DEBUG_FS
+
++#include <linux/kfifo.h>
++
+ #define HID_DEBUG_BUFSIZE 512
++#define HID_DEBUG_FIFOSIZE 512
+
+ void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
+ void hid_dump_report(struct hid_device *, int , u8 *, int);
+@@ -37,11 +40,8 @@ void hid_debug_init(void);
+ void hid_debug_exit(void);
+ void hid_debug_event(struct hid_device *, char *);
+
+-
+ struct hid_debug_list {
+- char *hid_debug_buf;
+- int head;
+- int tail;
++ DECLARE_KFIFO_PTR(hid_debug_fifo, char);
+ struct fasync_struct *fasync;
+ struct hid_device *hdev;
+ struct list_head node;
+@@ -64,4 +64,3 @@ struct hid_debug_list {
+ #endif
+
+ #endif
+-
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index c926698040e0..a03d5e264e5e 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -694,7 +694,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
+ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+- void *data, int offset, unsigned long len);
++ void *data, unsigned int offset,
++ unsigned long len);
+ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ gpa_t gpa, unsigned long len);
+ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 88a041b73abf..bbcfe2e5fd91 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1321,7 +1321,7 @@ enum {
+ static inline const struct cpumask *
+ mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
+ {
+- return dev->priv.irq_info[vector].mask;
++ return dev->priv.irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
+ }
+
+ #endif /* MLX5_DRIVER_H */
+diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
+index ea8c93bbb0e0..e87f2d5b3cc6 100644
+--- a/include/sound/compress_driver.h
++++ b/include/sound/compress_driver.h
+@@ -171,7 +171,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
+ if (snd_BUG_ON(!stream))
+ return;
+
+- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
++ if (stream->direction == SND_COMPRESS_PLAYBACK)
++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
++ else
++ stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
++
+ wake_up(&stream->runtime->sleep);
+ }
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 1aa517908561..e578c3999970 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1743,7 +1743,7 @@ static int parse_cgroup_root_flags(char *data, unsigned int *root_flags)
+
+ *root_flags = 0;
+
+- if (!data)
++ if (!data || *data == '\0')
+ return 0;
+
+ while ((token = strsep(&data, ",")) != NULL) {
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 1699ff68c412..56f657adcf03 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -355,9 +355,6 @@ void __weak arch_smt_update(void) { }
+
+ #ifdef CONFIG_HOTPLUG_SMT
+ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+-EXPORT_SYMBOL_GPL(cpu_smt_control);
+-
+-static bool cpu_smt_available __read_mostly;
+
+ void __init cpu_smt_disable(bool force)
+ {
+@@ -375,25 +372,11 @@ void __init cpu_smt_disable(bool force)
+
+ /*
+ * The decision whether SMT is supported can only be done after the full
+- * CPU identification. Called from architecture code before non boot CPUs
+- * are brought up.
+- */
+-void __init cpu_smt_check_topology_early(void)
+-{
+- if (!topology_smt_supported())
+- cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+-}
+-
+-/*
+- * If SMT was disabled by BIOS, detect it here, after the CPUs have been
+- * brought online. This ensures the smt/l1tf sysfs entries are consistent
+- * with reality. cpu_smt_available is set to true during the bringup of non
+- * boot CPUs when a SMT sibling is detected. Note, this may overwrite
+- * cpu_smt_control's previous setting.
++ * CPU identification. Called from architecture code.
+ */
+ void __init cpu_smt_check_topology(void)
+ {
+- if (!cpu_smt_available)
++ if (!topology_smt_supported())
+ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+ }
+
+@@ -406,18 +389,10 @@ early_param("nosmt", smt_cmdline_disable);
+
+ static inline bool cpu_smt_allowed(unsigned int cpu)
+ {
+- if (topology_is_primary_thread(cpu))
++ if (cpu_smt_control == CPU_SMT_ENABLED)
+ return true;
+
+- /*
+- * If the CPU is not a 'primary' thread and the booted_once bit is
+- * set then the processor has SMT support. Store this information
+- * for the late check of SMT support in cpu_smt_check_topology().
+- */
+- if (per_cpu(cpuhp_state, cpu).booted_once)
+- cpu_smt_available = true;
+-
+- if (cpu_smt_control == CPU_SMT_ENABLED)
++ if (topology_is_primary_thread(cpu))
+ return true;
+
+ /*
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 65c0f1363788..94aa9ae0007a 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -535,6 +535,8 @@ return_normal:
+ arch_kgdb_ops.correct_hw_break();
+ if (trace_on)
+ tracing_on();
++ kgdb_info[cpu].debuggerinfo = NULL;
++ kgdb_info[cpu].task = NULL;
+ kgdb_info[cpu].exception_state &=
+ ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
+ kgdb_info[cpu].enter_kgdb--;
+@@ -667,6 +669,8 @@ kgdb_restore:
+ if (trace_on)
+ tracing_on();
+
++ kgdb_info[cpu].debuggerinfo = NULL;
++ kgdb_info[cpu].task = NULL;
+ kgdb_info[cpu].exception_state &=
+ ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
+ kgdb_info[cpu].enter_kgdb--;
+diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
+index 7921ae4fca8d..7e2379aa0a1e 100644
+--- a/kernel/debug/kdb/kdb_bt.c
++++ b/kernel/debug/kdb/kdb_bt.c
+@@ -186,7 +186,16 @@ kdb_bt(int argc, const char **argv)
+ kdb_printf("btc: cpu status: ");
+ kdb_parse("cpu\n");
+ for_each_online_cpu(cpu) {
+- sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
++ void *kdb_tsk = KDB_TSK(cpu);
++
++ /* If a CPU failed to round up we could be here */
++ if (!kdb_tsk) {
++ kdb_printf("WARNING: no task for cpu %ld\n",
++ cpu);
++ continue;
++ }
++
++ sprintf(buf, "btt 0x%px\n", kdb_tsk);
+ kdb_parse(buf);
+ touch_nmi_watchdog();
+ }
+diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
+index 15e1a7af5dd0..53a0df6e4d92 100644
+--- a/kernel/debug/kdb/kdb_debugger.c
++++ b/kernel/debug/kdb/kdb_debugger.c
+@@ -118,13 +118,6 @@ int kdb_stub(struct kgdb_state *ks)
+ kdb_bp_remove();
+ KDB_STATE_CLEAR(DOING_SS);
+ KDB_STATE_SET(PAGER);
+- /* zero out any offline cpu data */
+- for_each_present_cpu(i) {
+- if (!cpu_online(i)) {
+- kgdb_info[i].debuggerinfo = NULL;
+- kgdb_info[i].task = NULL;
+- }
+- }
+ if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
+ ks->pass_exception = 1;
+ KDB_FLAG_SET(CATASTROPHIC);
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 5d3cf407e374..51386d9105fa 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -724,6 +724,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
+ size = sizeof(struct ring_buffer);
+ size += nr_pages * sizeof(void *);
+
++ if (order_base_2(size) >= MAX_ORDER)
++ goto fail;
++
+ rb = kzalloc(size, GFP_KERNEL);
+ if (!rb)
+ goto fail;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index f89abca89513..d7c465fd687c 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2850,35 +2850,39 @@ retry_private:
+ * and BUG when futex_unlock_pi() interleaves with this.
+ *
+ * Therefore acquire wait_lock while holding hb->lock, but drop the
+- * latter before calling rt_mutex_start_proxy_lock(). This still fully
+- * serializes against futex_unlock_pi() as that does the exact same
+- * lock handoff sequence.
++ * latter before calling __rt_mutex_start_proxy_lock(). This
++ * interleaves with futex_unlock_pi() -- which does a similar lock
++ * handoff -- such that the latter can observe the futex_q::pi_state
++ * before __rt_mutex_start_proxy_lock() is done.
+ */
+ raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
+ spin_unlock(q.lock_ptr);
++ /*
++ * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
++ * such that futex_unlock_pi() is guaranteed to observe the waiter when
++ * it sees the futex_q::pi_state.
++ */
+ ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
+ raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
+
+ if (ret) {
+ if (ret == 1)
+ ret = 0;
+-
+- spin_lock(q.lock_ptr);
+- goto no_block;
++ goto cleanup;
+ }
+
+-
+ if (unlikely(to))
+ hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
+
+ ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
+
++cleanup:
+ spin_lock(q.lock_ptr);
+ /*
+- * If we failed to acquire the lock (signal/timeout), we must
++ * If we failed to acquire the lock (deadlock/signal/timeout), we must
+ * first acquire the hb->lock before removing the lock from the
+- * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
+- * wait lists consistent.
++ * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
++ * lists consistent.
+ *
+ * In particular; it is important that futex_unlock_pi() can not
+ * observe this inconsistency.
+@@ -3002,6 +3006,10 @@ retry:
+ * there is no point where we hold neither; and therefore
+ * wake_futex_pi() must observe a state consistent with what we
+ * observed.
++ *
++ * In particular; this forces __rt_mutex_start_proxy() to
++ * complete such that we're guaranteed to observe the
++ * rt_waiter. Also see the WARN in wake_futex_pi().
+ */
+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ spin_unlock(&hb->lock);
+diff --git a/kernel/hung_task.c b/kernel/hung_task.c
+index b9132d1269ef..9eca2371f189 100644
+--- a/kernel/hung_task.c
++++ b/kernel/hung_task.c
+@@ -33,7 +33,7 @@ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
+ * is disabled during the critical section. It also controls the size of
+ * the RCU grace period. So it needs to be upper-bound.
+ */
+-#define HUNG_TASK_BATCHING 1024
++#define HUNG_TASK_LOCK_BREAK (HZ / 10)
+
+ /*
+ * Zero means infinite timeout - no checking done:
+@@ -111,8 +111,11 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
+
+ trace_sched_process_hang(t);
+
+- if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
+- return;
++ if (sysctl_hung_task_panic) {
++ console_verbose();
++ hung_task_show_lock = true;
++ hung_task_call_panic = true;
++ }
+
+ /*
+ * Ok, the task did not get scheduled for more than 2 minutes,
+@@ -134,11 +137,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
+ }
+
+ touch_nmi_watchdog();
+-
+- if (sysctl_hung_task_panic) {
+- hung_task_show_lock = true;
+- hung_task_call_panic = true;
+- }
+ }
+
+ /*
+@@ -172,7 +170,7 @@ static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
+ static void check_hung_uninterruptible_tasks(unsigned long timeout)
+ {
+ int max_count = sysctl_hung_task_check_count;
+- int batch_count = HUNG_TASK_BATCHING;
++ unsigned long last_break = jiffies;
+ struct task_struct *g, *t;
+
+ /*
+@@ -187,10 +185,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
+ for_each_process_thread(g, t) {
+ if (!max_count--)
+ goto unlock;
+- if (!--batch_count) {
+- batch_count = HUNG_TASK_BATCHING;
++ if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
+ if (!rcu_lock_break(g, t))
+ goto unlock;
++ last_break = jiffies;
+ }
+ /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
+ if (t->state == TASK_UNINTERRUPTIBLE)
+diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
+index f4f29b9d90ee..e12cdf637c71 100644
+--- a/kernel/irq/affinity.c
++++ b/kernel/irq/affinity.c
+@@ -117,12 +117,11 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
+ */
+ if (numvecs <= nodes) {
+ for_each_node_mask(n, nodemsk) {
+- cpumask_copy(masks + curvec, node_to_cpumask[n]);
+- if (++done == numvecs)
+- break;
++ cpumask_or(masks + curvec, masks + curvec, node_to_cpumask[n]);
+ if (++curvec == last_affv)
+ curvec = affd->pre_vectors;
+ }
++ done = numvecs;
+ goto out;
+ }
+
+diff --git a/kernel/kcov.c b/kernel/kcov.c
+index 97959d7b77e2..c2277dbdbfb1 100644
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -112,7 +112,7 @@ void notrace __sanitizer_cov_trace_pc(void)
+ EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
+
+ #ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+-static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
++static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
+ {
+ struct task_struct *t;
+ u64 *area;
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 2823d4163a37..9562aaa2afdc 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+ rt_mutex_set_owner(lock, NULL);
+ }
+
++/**
++ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
++ * @lock: the rt_mutex to take
++ * @waiter: the pre-initialized rt_mutex_waiter
++ * @task: the task to prepare
++ *
++ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
++ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
++ *
++ * NOTE: does _NOT_ remove the @waiter on failure; must either call
++ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
++ *
++ * Returns:
++ * 0 - task blocked on lock
++ * 1 - acquired the lock for task, caller should wake it up
++ * <0 - error
++ *
++ * Special API call for PI-futex support.
++ */
+ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ struct task_struct *task)
+ {
+ int ret;
+
++ lockdep_assert_held(&lock->wait_lock);
++
+ if (try_to_take_rt_mutex(lock, task, NULL))
+ return 1;
+
+@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ ret = 0;
+ }
+
+- if (unlikely(ret))
+- remove_waiter(lock, waiter);
+-
+ debug_rt_mutex_print_deadlock(waiter);
+
+ return ret;
+@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ * @waiter: the pre-initialized rt_mutex_waiter
+ * @task: the task to prepare
+ *
++ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
++ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
++ *
++ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
++ * on failure.
++ *
+ * Returns:
+ * 0 - task blocked on lock
+ * 1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+- * Special API call for FUTEX_REQUEUE_PI support.
++ * Special API call for PI-futex support.
+ */
+ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+
+ raw_spin_lock_irq(&lock->wait_lock);
+ ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
++ if (unlikely(ret))
++ remove_waiter(lock, waiter);
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ return ret;
+@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+ * @lock: the rt_mutex we were woken on
+ * @waiter: the pre-initialized rt_mutex_waiter
+ *
+- * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
++ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
++ * rt_mutex_wait_proxy_lock().
+ *
+ * Unless we acquired the lock; we're still enqueued on the wait-list and can
+ * in fact still be granted ownership until we're removed. Therefore we can
+diff --git a/kernel/module.c b/kernel/module.c
+index 6746c85511fe..38bf28b5cc20 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -1207,8 +1207,10 @@ static ssize_t store_uevent(struct module_attribute *mattr,
+ struct module_kobject *mk,
+ const char *buffer, size_t count)
+ {
+- kobject_synth_uevent(&mk->kobj, buffer, count);
+- return count;
++ int rc;
++
++ rc = kobject_synth_uevent(&mk->kobj, buffer, count);
++ return rc ? rc : count;
+ }
+
+ struct module_attribute module_uevent =
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 7137bc343b4a..f7c375d1e601 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5932,6 +5932,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
+
+ #ifdef CONFIG_SCHED_SMT
+ DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
+
+ static inline void set_idle_cores(int cpu, int val)
+ {
+diff --git a/kernel/smp.c b/kernel/smp.c
+index d86eec5f51c1..084c8b3a2681 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -584,8 +584,6 @@ void __init smp_init(void)
+ num_nodes, (num_nodes > 1 ? "s" : ""),
+ num_cpus, (num_cpus > 1 ? "s" : ""));
+
+- /* Final decision about SMT support */
+- cpu_smt_check_topology();
+ /* Any cleanup work */
+ smp_cpus_done(setup_max_cpus);
+ }
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index cc02050fd0c4..32dea29d05a0 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -2767,6 +2767,8 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
+ bool neg;
+
+ left -= proc_skip_spaces(&p);
++ if (!left)
++ break;
+
+ err = proc_get_long(&p, &left, &val, &neg,
+ proc_wspace_sep,
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index f3b22f456fac..7846ce24ecc0 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -50,7 +50,9 @@ enum timekeeping_adv_mode {
+ static struct {
+ seqcount_t seq;
+ struct timekeeper timekeeper;
+-} tk_core ____cacheline_aligned;
++} tk_core ____cacheline_aligned = {
++ .seq = SEQCNT_ZERO(tk_core.seq),
++};
+
+ static DEFINE_RAW_SPINLOCK(timekeeper_lock);
+ static struct timekeeper shadow_timekeeper;
+diff --git a/lib/seq_buf.c b/lib/seq_buf.c
+index 11f2ae0f9099..6aabb609dd87 100644
+--- a/lib/seq_buf.c
++++ b/lib/seq_buf.c
+@@ -144,9 +144,13 @@ int seq_buf_puts(struct seq_buf *s, const char *str)
+
+ WARN_ON(s->size == 0);
+
++ /* Add 1 to len for the trailing null byte which must be there */
++ len += 1;
++
+ if (seq_buf_can_fit(s, len)) {
+ memcpy(s->buffer + s->len, str, len);
+- s->len += len;
++ /* Don't count the trailing null byte against the capacity */
++ s->len += len - 1;
+ return 0;
+ }
+ seq_buf_set_overflow(s);
+diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
+index 82ac39ce5310..aecc0996628f 100644
+--- a/lib/test_rhashtable.c
++++ b/lib/test_rhashtable.c
+@@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
+ static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
+ int cnt, bool slow)
+ {
+- struct rhltable rhlt;
++ struct rhltable *rhlt;
+ unsigned int i, ret;
+ const char *key;
+ int err = 0;
+
+- err = rhltable_init(&rhlt, &test_rht_params_dup);
+- if (WARN_ON(err))
++ rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
++ if (WARN_ON(!rhlt))
++ return -EINVAL;
++
++ err = rhltable_init(rhlt, &test_rht_params_dup);
++ if (WARN_ON(err)) {
++ kfree(rhlt);
+ return err;
++ }
+
+ for (i = 0; i < cnt; i++) {
+ rhl_test_objects[i].value.tid = i;
+- key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
++ key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
+ key += test_rht_params_dup.key_offset;
+
+ if (slow) {
+- err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
++ err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
+ &rhl_test_objects[i].list_node.rhead));
+ if (err == -EAGAIN)
+ err = 0;
+ } else
+- err = rhltable_insert(&rhlt,
++ err = rhltable_insert(rhlt,
+ &rhl_test_objects[i].list_node,
+ test_rht_params_dup);
+ if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
+ goto skip_print;
+ }
+
+- ret = print_ht(&rhlt);
++ ret = print_ht(rhlt);
+ WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
+
+ skip_print:
+- rhltable_destroy(&rhlt);
++ rhltable_destroy(rhlt);
++ kfree(rhlt);
+
+ return 0;
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 9e45553cabd6..a9de1dbb9a6c 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -294,6 +294,32 @@ EXPORT_SYMBOL(nr_online_nodes);
+ int page_group_by_mobility_disabled __read_mostly;
+
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
++/*
++ * During boot we initialize deferred pages on-demand, as needed, but once
++ * page_alloc_init_late() has finished, the deferred pages are all initialized,
++ * and we can permanently disable that path.
++ */
++static DEFINE_STATIC_KEY_TRUE(deferred_pages);
++
++/*
++ * Calling kasan_free_pages() only after deferred memory initialization
++ * has completed. Poisoning pages during deferred memory init will greatly
++ * lengthen the process and cause problem in large memory systems as the
++ * deferred pages initialization is done with interrupt disabled.
++ *
++ * Assuming that there will be no reference to those newly initialized
++ * pages before they are ever allocated, this should have no effect on
++ * KASAN memory tracking as the poison will be properly inserted at page
++ * allocation time. The only corner case is when pages are allocated by
++ * on-demand allocation and then freed again before the deferred pages
++ * initialization is done, but this is not likely to happen.
++ */
++static inline void kasan_free_nondeferred_pages(struct page *page, int order)
++{
++ if (!static_branch_unlikely(&deferred_pages))
++ kasan_free_pages(page, order);
++}
++
+ /* Returns true if the struct page for the pfn is uninitialised */
+ static inline bool __meminit early_page_uninitialised(unsigned long pfn)
+ {
+@@ -326,6 +352,8 @@ static inline bool update_defer_init(pg_data_t *pgdat,
+ return true;
+ }
+ #else
++#define kasan_free_nondeferred_pages(p, o) kasan_free_pages(p, o)
++
+ static inline bool early_page_uninitialised(unsigned long pfn)
+ {
+ return false;
+@@ -1030,7 +1058,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
+ arch_free_page(page, order);
+ kernel_poison_pages(page, 1 << order, 0);
+ kernel_map_pages(page, 1 << order, 0);
+- kasan_free_pages(page, order);
++ kasan_free_nondeferred_pages(page, order);
+
+ return true;
+ }
+@@ -1593,13 +1621,6 @@ static int __init deferred_init_memmap(void *data)
+ return 0;
+ }
+
+-/*
+- * During boot we initialize deferred pages on-demand, as needed, but once
+- * page_alloc_init_late() has finished, the deferred pages are all initialized,
+- * and we can permanently disable that path.
+- */
+-static DEFINE_STATIC_KEY_TRUE(deferred_pages);
+-
+ /*
+ * If this zone has deferred pages, try to grow it by initializing enough
+ * deferred pages to satisfy the allocation specified by order, rounded up to
+diff --git a/mm/percpu-km.c b/mm/percpu-km.c
+index 38de70ab1a0d..0f643dc2dc65 100644
+--- a/mm/percpu-km.c
++++ b/mm/percpu-km.c
+@@ -50,6 +50,7 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
+ const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
+ struct pcpu_chunk *chunk;
+ struct page *pages;
++ unsigned long flags;
+ int i;
+
+ chunk = pcpu_alloc_chunk(gfp);
+@@ -68,9 +69,9 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
+ chunk->data = pages;
+ chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
+
+- spin_lock_irq(&pcpu_lock);
++ spin_lock_irqsave(&pcpu_lock, flags);
+ pcpu_chunk_populated(chunk, 0, nr_pages, false);
+- spin_unlock_irq(&pcpu_lock);
++ spin_unlock_irqrestore(&pcpu_lock, flags);
+
+ pcpu_stats_chunk_alloc();
+ trace_percpu_create_chunk(chunk->base_addr);
+diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
+index 6eb837a47b5c..baaaeb2b2c42 100644
+--- a/net/dccp/ccid.h
++++ b/net/dccp/ccid.h
+@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
+ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
+ u8 pkt, u8 opt, u8 *val, u8 len)
+ {
+- if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
++ if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
+ return 0;
+ return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
+ }
+@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
+ static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
+ u8 pkt, u8 opt, u8 *val, u8 len)
+ {
+- if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
++ if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
+ return 0;
+ return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
+ }
+diff --git a/net/dsa/master.c b/net/dsa/master.c
+index c90ee3227dea..aae478d61101 100644
+--- a/net/dsa/master.c
++++ b/net/dsa/master.c
+@@ -158,6 +158,8 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
+ cpu_dp->orig_ethtool_ops = NULL;
+ }
+
++static struct lock_class_key dsa_master_addr_list_lock_key;
++
+ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+ {
+ /* If we use a tagging format that doesn't have an ethertype
+@@ -167,6 +169,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+ wmb();
+
+ dev->dsa_ptr = cpu_dp;
++ lockdep_set_class(&dev->addr_list_lock,
++ &dsa_master_addr_list_lock_key);
+
+ return dsa_master_ethtool_setup(dev);
+ }
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 1c45c1d6d241..b39720d0995d 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev)
+ static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
+ {
+ struct net_device *master = dsa_slave_to_master(dev);
+-
+- if (change & IFF_ALLMULTI)
+- dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
+- if (change & IFF_PROMISC)
+- dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
++ if (dev->flags & IFF_UP) {
++ if (change & IFF_ALLMULTI)
++ dev_set_allmulti(master,
++ dev->flags & IFF_ALLMULTI ? 1 : -1);
++ if (change & IFF_PROMISC)
++ dev_set_promiscuity(master,
++ dev->flags & IFF_PROMISC ? 1 : -1);
++ }
+ }
+
+ static void dsa_slave_set_rx_mode(struct net_device *dev)
+@@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
+ int ret;
+
+ /* Port's PHY and MAC both need to be EEE capable */
+- if (!dev->phydev && !dp->pl)
++ if (!dev->phydev || !dp->pl)
+ return -ENODEV;
+
+ if (!ds->ops->set_mac_eee)
+@@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
+ int ret;
+
+ /* Port's PHY and MAC both need to be EEE capable */
+- if (!dev->phydev && !dp->pl)
++ if (!dev->phydev || !dp->pl)
+ return -ENODEV;
+
+ if (!ds->ops->get_mac_eee)
+diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
+index 4a46df8441c9..f5b4febeaa25 100644
+--- a/net/ipv6/xfrm6_tunnel.c
++++ b/net/ipv6/xfrm6_tunnel.c
+@@ -144,6 +144,9 @@ static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
+ index = __xfrm6_tunnel_spi_check(net, spi);
+ if (index >= 0)
+ goto alloc_spi;
++
++ if (spi == XFRM6_TUNNEL_SPI_MAX)
++ break;
+ }
+ for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
+ index = __xfrm6_tunnel_spi_check(net, spi);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 5e2b4a41acf1..51ad330bf8e8 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -142,6 +142,9 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
+ /* allocate extra bitmaps */
+ if (status->chains)
+ len += 4 * hweight8(status->chains);
++ /* vendor presence bitmap */
++ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
++ len += 4;
+
+ if (ieee80211_have_rx_timestamp(status)) {
+ len = ALIGN(len, 8);
+@@ -197,8 +200,6 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
+ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
+ struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
+
+- /* vendor presence bitmap */
+- len += 4;
+ /* alignment for fixed 6-byte vendor data header */
+ len = ALIGN(len, 2);
+ /* vendor data header */
+diff --git a/net/rds/bind.c b/net/rds/bind.c
+index 762d2c6788a3..17c9d9f0c848 100644
+--- a/net/rds/bind.c
++++ b/net/rds/bind.c
+@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
+ __rds_create_bind_key(key, addr, port, scope_id);
+ rcu_read_lock();
+ rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
+- if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
+- rds_sock_addref(rs);
+- else
++ if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
++ !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
+ rs = NULL;
++
+ rcu_read_unlock();
+
+ rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
+diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
+index 816b19a78809..0374b0623c8b 100644
+--- a/net/rxrpc/recvmsg.c
++++ b/net/rxrpc/recvmsg.c
+@@ -596,6 +596,7 @@ error_requeue_call:
+ }
+ error_no_call:
+ release_sock(&rx->sk);
++error_trace:
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
+ return ret;
+
+@@ -604,7 +605,7 @@ wait_interrupted:
+ wait_error:
+ finish_wait(sk_sleep(&rx->sk), &wait);
+ call = NULL;
+- goto error_no_call;
++ goto error_trace;
+ }
+
+ /**
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 876393cf5ed6..e5e70cff5bb3 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -2045,7 +2045,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
+ struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+ struct sctp_transport *transport = NULL;
+ struct sctp_sndrcvinfo _sinfo, *sinfo;
+- struct sctp_association *asoc;
++ struct sctp_association *asoc, *tmp;
+ struct sctp_cmsgs cmsgs;
+ union sctp_addr *daddr;
+ bool new = false;
+@@ -2071,7 +2071,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
+
+ /* SCTP_SENDALL process */
+ if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
+- list_for_each_entry(asoc, &ep->asocs, asocs) {
++ list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
+ err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
+ msg_len);
+ if (err == 0)
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index 80e0ae5534ec..f24633114dfd 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count)
+ }
+ }
+
++static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
++{
++ size_t index = 0;
++
++ while (count--) {
++ if (elem == flex_array_get(fa, index))
++ break;
++ index++;
++ }
++
++ return index;
++}
++
+ /* Migrates chunks from stream queues to new stream queues if needed,
+ * but not across associations. Also, removes those chunks to streams
+ * higher than the new max.
+@@ -147,6 +160,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
+
+ if (stream->out) {
+ fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
++ if (stream->out_curr) {
++ size_t index = fa_index(stream->out, stream->out_curr,
++ stream->outcnt);
++
++ BUG_ON(index == stream->outcnt);
++ stream->out_curr = flex_array_get(out, index);
++ }
+ fa_free(stream->out);
+ }
+
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 488019766433..32556f480a60 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -624,6 +624,12 @@ static void tipc_node_timeout(struct timer_list *t)
+
+ __skb_queue_head_init(&xmitq);
+
++ /* Initial node interval to value larger (10 seconds), then it will be
++ * recalculated with link lowest tolerance
++ */
++ tipc_node_read_lock(n);
++ n->keepalive_intv = 10000;
++ tipc_node_read_unlock(n);
+ for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
+ tipc_node_read_lock(n);
+ le = &n->links[bearer_id];
+diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
+index 49b13553eaae..e8f1bd6b29b1 100644
+--- a/samples/livepatch/livepatch-shadow-fix1.c
++++ b/samples/livepatch/livepatch-shadow-fix1.c
+@@ -89,6 +89,11 @@ struct dummy *livepatch_fix1_dummy_alloc(void)
+ * pointer to handle resource release.
+ */
+ leak = kzalloc(sizeof(int), GFP_KERNEL);
++ if (!leak) {
++ kfree(d);
++ return NULL;
++ }
++
+ klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
+ shadow_leak_ctor, leak);
+
+diff --git a/samples/livepatch/livepatch-shadow-mod.c b/samples/livepatch/livepatch-shadow-mod.c
+index 4c54b250332d..4aa8a88d3cd6 100644
+--- a/samples/livepatch/livepatch-shadow-mod.c
++++ b/samples/livepatch/livepatch-shadow-mod.c
+@@ -118,6 +118,10 @@ noinline struct dummy *dummy_alloc(void)
+
+ /* Oops, forgot to save leak! */
+ leak = kzalloc(sizeof(int), GFP_KERNEL);
++ if (!leak) {
++ kfree(d);
++ return NULL;
++ }
+
+ pr_info("%s: dummy @ %p, expires @ %lx\n",
+ __func__, d, d->jiffies_expire);
+diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
+index 64220e36ce3b..98a7d63a723e 100755
+--- a/scripts/decode_stacktrace.sh
++++ b/scripts/decode_stacktrace.sh
+@@ -78,7 +78,7 @@ parse_symbol() {
+ fi
+
+ # Strip out the base of the path
+- code=${code//$basepath/""}
++ code=${code//^$basepath/""}
+
+ # In the case of inlines, move everything to same line
+ code=${code//$'\n'/' '}
+diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
+index 086d27223c0c..0aebd7565b03 100644
+--- a/scripts/gdb/linux/proc.py
++++ b/scripts/gdb/linux/proc.py
+@@ -41,7 +41,7 @@ class LxVersion(gdb.Command):
+
+ def invoke(self, arg, from_tty):
+ # linux_banner should contain a newline
+- gdb.write(gdb.parse_and_eval("linux_banner").string())
++ gdb.write(gdb.parse_and_eval("(char *)linux_banner").string())
+
+ LxVersion()
+
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 5a5b3780456f..5a77efd39b3f 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -1204,6 +1204,30 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
+ return 1;
+ }
+
++static inline int is_arm_mapping_symbol(const char *str)
++{
++ return str[0] == '$' && strchr("axtd", str[1])
++ && (str[2] == '\0' || str[2] == '.');
++}
++
++/*
++ * If there's no name there, ignore it; likewise, ignore it if it's
++ * one of the magic symbols emitted used by current ARM tools.
++ *
++ * Otherwise if find_symbols_between() returns those symbols, they'll
++ * fail the whitelist tests and cause lots of false alarms ... fixable
++ * only by merging __exit and __init sections into __text, bloating
++ * the kernel (which is especially evil on embedded platforms).
++ */
++static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
++{
++ const char *name = elf->strtab + sym->st_name;
++
++ if (!name || !strlen(name))
++ return 0;
++ return !is_arm_mapping_symbol(name);
++}
++
+ /**
+ * Find symbol based on relocation record info.
+ * In some cases the symbol supplied is a valid symbol so
+@@ -1229,6 +1253,8 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
+ continue;
+ if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
+ continue;
++ if (!is_valid_name(elf, sym))
++ continue;
+ if (sym->st_value == addr)
+ return sym;
+ /* Find a symbol nearby - addr are maybe negative */
+@@ -1247,30 +1273,6 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
+ return NULL;
+ }
+
+-static inline int is_arm_mapping_symbol(const char *str)
+-{
+- return str[0] == '$' && strchr("axtd", str[1])
+- && (str[2] == '\0' || str[2] == '.');
+-}
+-
+-/*
+- * If there's no name there, ignore it; likewise, ignore it if it's
+- * one of the magic symbols emitted used by current ARM tools.
+- *
+- * Otherwise if find_symbols_between() returns those symbols, they'll
+- * fail the whitelist tests and cause lots of false alarms ... fixable
+- * only by merging __exit and __init sections into __text, bloating
+- * the kernel (which is especially evil on embedded platforms).
+- */
+-static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
+-{
+- const char *name = elf->strtab + sym->st_name;
+-
+- if (!name || !strlen(name))
+- return 0;
+- return !is_arm_mapping_symbol(name);
+-}
+-
+ /*
+ * Find symbols before or equal addr and after addr - in the section sec.
+ * If we find two symbols with equal offset prefer one with a valid name.
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 70d3066e69fe..017c47eb795e 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -4333,6 +4333,12 @@ static int smack_key_permission(key_ref_t key_ref,
+ int request = 0;
+ int rc;
+
++ /*
++ * Validate requested permissions
++ */
++ if (perm & ~KEY_NEED_ALL)
++ return -EINVAL;
++
+ keyp = key_ref_to_ptr(key_ref);
+ if (keyp == NULL)
+ return -EINVAL;
+@@ -4356,10 +4362,10 @@ static int smack_key_permission(key_ref_t key_ref,
+ ad.a.u.key_struct.key = keyp->serial;
+ ad.a.u.key_struct.key_desc = keyp->description;
+ #endif
+- if (perm & KEY_NEED_READ)
+- request = MAY_READ;
++ if (perm & (KEY_NEED_READ | KEY_NEED_SEARCH | KEY_NEED_VIEW))
++ request |= MAY_READ;
+ if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR))
+- request = MAY_WRITE;
++ request |= MAY_WRITE;
+ rc = smk_access(tkp, keyp->security, request, &ad);
+ rc = smk_bu_note("key access", tkp, keyp->security, request, rc);
+ return rc;
+diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
+index d361bb77ca00..8db1890605f6 100644
+--- a/sound/pci/hda/hda_bind.c
++++ b/sound/pci/hda/hda_bind.c
+@@ -109,7 +109,8 @@ static int hda_codec_driver_probe(struct device *dev)
+ err = snd_hda_codec_build_controls(codec);
+ if (err < 0)
+ goto error_module;
+- if (codec->card->registered) {
++ /* only register after the bus probe finished; otherwise it's racy */
++ if (!codec->bus->bus_probing && codec->card->registered) {
+ err = snd_card_register(codec->card);
+ if (err < 0)
+ goto error_module;
+diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
+index 0d98bb9068b1..acacc1900265 100644
+--- a/sound/pci/hda/hda_codec.h
++++ b/sound/pci/hda/hda_codec.h
+@@ -68,6 +68,7 @@ struct hda_bus {
+ unsigned int response_reset:1; /* controller was reset */
+ unsigned int in_reset:1; /* during reset operation */
+ unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
++ unsigned int bus_probing :1; /* during probing process */
+
+ int primary_dig_out_type; /* primary digital out PCM type */
+ unsigned int mixer_assigned; /* codec addr for mixer name */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 1ddeebc373b3..1bb7613701ac 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2315,6 +2315,7 @@ static int azx_probe_continue(struct azx *chip)
+ int dev = chip->dev_index;
+ int err;
+
++ to_hda_bus(bus)->bus_probing = 1;
+ hda->probe_continued = 1;
+
+ /* bind with i915 if needed */
+@@ -2410,6 +2411,7 @@ i915_power_fail:
+ if (err < 0)
+ hda->init_failed = 1;
+ complete_all(&hda->probe_wait);
++ to_hda_bus(bus)->bus_probing = 0;
+ return err;
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index dbb38fe2da7d..9199d91d0a59 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -515,6 +515,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
+ }
+ }
+
++/* get a primary headphone pin if available */
++static hda_nid_t alc_get_hp_pin(struct alc_spec *spec)
++{
++ if (spec->gen.autocfg.hp_pins[0])
++ return spec->gen.autocfg.hp_pins[0];
++ if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
++ return spec->gen.autocfg.line_out_pins[0];
++ return 0;
++}
+
+ /*
+ * Realtek SSID verification
+@@ -725,9 +734,7 @@ do_sku:
+ * 15 : 1 --> enable the function "Mute internal speaker
+ * when the external headphone out jack is plugged"
+ */
+- if (!spec->gen.autocfg.hp_pins[0] &&
+- !(spec->gen.autocfg.line_out_pins[0] &&
+- spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
++ if (!alc_get_hp_pin(spec)) {
+ hda_nid_t nid;
+ tmp = (ass >> 11) & 0x3; /* HP to chassis */
+ nid = ports[tmp];
+@@ -2959,7 +2966,7 @@ static void alc282_restore_default_value(struct hda_codec *codec)
+ static void alc282_init(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp_pin_sense;
+ int coef78;
+
+@@ -2996,7 +3003,7 @@ static void alc282_init(struct hda_codec *codec)
+ static void alc282_shutup(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp_pin_sense;
+ int coef78;
+
+@@ -3074,14 +3081,9 @@ static void alc283_restore_default_value(struct hda_codec *codec)
+ static void alc283_init(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp_pin_sense;
+
+- if (!spec->gen.autocfg.hp_outs) {
+- if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
+- hp_pin = spec->gen.autocfg.line_out_pins[0];
+- }
+-
+ alc283_restore_default_value(codec);
+
+ if (!hp_pin)
+@@ -3115,14 +3117,9 @@ static void alc283_init(struct hda_codec *codec)
+ static void alc283_shutup(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp_pin_sense;
+
+- if (!spec->gen.autocfg.hp_outs) {
+- if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
+- hp_pin = spec->gen.autocfg.line_out_pins[0];
+- }
+-
+ if (!hp_pin) {
+ alc269_shutup(codec);
+ return;
+@@ -3156,7 +3153,7 @@ static void alc283_shutup(struct hda_codec *codec)
+ static void alc256_init(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp_pin_sense;
+
+ if (!hp_pin)
+@@ -3192,7 +3189,7 @@ static void alc256_init(struct hda_codec *codec)
+ static void alc256_shutup(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp_pin_sense;
+
+ if (!hp_pin) {
+@@ -3228,7 +3225,7 @@ static void alc256_shutup(struct hda_codec *codec)
+ static void alc225_init(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp1_pin_sense, hp2_pin_sense;
+
+ if (!hp_pin)
+@@ -3271,7 +3268,7 @@ static void alc225_init(struct hda_codec *codec)
+ static void alc225_shutup(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp1_pin_sense, hp2_pin_sense;
+
+ if (!hp_pin) {
+@@ -3315,7 +3312,7 @@ static void alc225_shutup(struct hda_codec *codec)
+ static void alc_default_init(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp_pin_sense;
+
+ if (!hp_pin)
+@@ -3344,7 +3341,7 @@ static void alc_default_init(struct hda_codec *codec)
+ static void alc_default_shutup(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ bool hp_pin_sense;
+
+ if (!hp_pin) {
+@@ -3376,7 +3373,7 @@ static void alc_default_shutup(struct hda_codec *codec)
+ static void alc294_hp_init(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+ int i, val;
+
+ if (!hp_pin)
+@@ -4780,7 +4777,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
+ struct alc_spec *spec = codec->spec;
+
+ hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
+- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
++ hda_nid_t hp_pin = alc_get_hp_pin(spec);
+
+ int new_headset_mode;
+
+@@ -5059,7 +5056,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
+ static void alc_shutup_dell_xps13(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+- int hp_pin = spec->gen.autocfg.hp_pins[0];
++ int hp_pin = alc_get_hp_pin(spec);
+
+ /* Prevent pop noises when headphones are plugged in */
+ snd_hda_codec_write(codec, hp_pin, 0,
+@@ -5152,7 +5149,7 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
+
+ if (action == HDA_FIXUP_ACT_PROBE) {
+ int mic_pin = find_ext_mic_pin(codec);
+- int hp_pin = spec->gen.autocfg.hp_pins[0];
++ int hp_pin = alc_get_hp_pin(spec);
+
+ if (snd_BUG_ON(!mic_pin || !hp_pin))
+ return;
+@@ -5575,6 +5572,7 @@ enum {
+ ALC294_FIXUP_ASUS_MIC,
+ ALC294_FIXUP_ASUS_HEADSET_MIC,
+ ALC294_FIXUP_ASUS_SPK,
++ ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -6499,6 +6497,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+ },
++ [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -6677,6 +6684,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
+diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
+index 6ec19fb4a934..2e75b5bc5f1d 100644
+--- a/sound/soc/fsl/Kconfig
++++ b/sound/soc/fsl/Kconfig
+@@ -221,7 +221,7 @@ config SND_SOC_PHYCORE_AC97
+
+ config SND_SOC_EUKREA_TLV320
+ tristate "Eukrea TLV320"
+- depends on ARCH_MXC && I2C
++ depends on ARCH_MXC && !ARM64 && I2C
+ select SND_SOC_TLV320AIC23_I2C
+ select SND_SOC_IMX_AUDMUX
+ select SND_SOC_IMX_SSI
+diff --git a/sound/soc/intel/atom/sst/sst_loader.c b/sound/soc/intel/atom/sst/sst_loader.c
+index 27413ebae956..b8c456753f01 100644
+--- a/sound/soc/intel/atom/sst/sst_loader.c
++++ b/sound/soc/intel/atom/sst/sst_loader.c
+@@ -354,14 +354,14 @@ static int sst_request_fw(struct intel_sst_drv *sst)
+ const struct firmware *fw;
+
+ retval = request_firmware(&fw, sst->firmware_name, sst->dev);
+- if (fw == NULL) {
+- dev_err(sst->dev, "fw is returning as null\n");
+- return -EINVAL;
+- }
+ if (retval) {
+ dev_err(sst->dev, "request fw failed %d\n", retval);
+ return retval;
+ }
++ if (fw == NULL) {
++ dev_err(sst->dev, "fw is returning as null\n");
++ return -EINVAL;
++ }
+ mutex_lock(&sst->sst_lock);
+ retval = sst_cache_and_parse_fw(sst, fw);
+ mutex_unlock(&sst->sst_lock);
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 7e93686a430a..d71e01954975 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1448,6 +1448,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ case 0x20b1: /* XMOS based devices */
+ case 0x152a: /* Thesycon devices */
+ case 0x25ce: /* Mytek devices */
++ case 0x2ab6: /* T+A devices */
+ if (fp->dsd_raw)
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ break;
+diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
+index f216b2f5c3d7..42a787856cd8 100644
+--- a/tools/build/Makefile.feature
++++ b/tools/build/Makefile.feature
+@@ -79,8 +79,8 @@ FEATURE_TESTS_EXTRA := \
+ cplus-demangle \
+ hello \
+ libbabeltrace \
+- liberty \
+- liberty-z \
++ libbfd-liberty \
++ libbfd-liberty-z \
+ libunwind-debug-frame \
+ libunwind-debug-frame-arm \
+ libunwind-debug-frame-aarch64 \
+diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
+index 0516259be70f..bf8a8ebcca1e 100644
+--- a/tools/build/feature/Makefile
++++ b/tools/build/feature/Makefile
+@@ -15,8 +15,8 @@ FILES= \
+ test-libbfd.bin \
+ test-disassembler-four-args.bin \
+ test-reallocarray.bin \
+- test-liberty.bin \
+- test-liberty-z.bin \
++ test-libbfd-liberty.bin \
++ test-libbfd-liberty-z.bin \
+ test-cplus-demangle.bin \
+ test-libelf.bin \
+ test-libelf-getphdrnum.bin \
+@@ -200,7 +200,7 @@ $(OUTPUT)test-libpython-version.bin:
+ $(BUILD)
+
+ $(OUTPUT)test-libbfd.bin:
+- $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
++ $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+
+ $(OUTPUT)test-disassembler-four-args.bin:
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
+@@ -208,10 +208,10 @@ $(OUTPUT)test-disassembler-four-args.bin:
+ $(OUTPUT)test-reallocarray.bin:
+ $(BUILD)
+
+-$(OUTPUT)test-liberty.bin:
++$(OUTPUT)test-libbfd-liberty.bin:
+ $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
+
+-$(OUTPUT)test-liberty-z.bin:
++$(OUTPUT)test-libbfd-liberty-z.bin:
+ $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -lz
+
+ $(OUTPUT)test-cplus-demangle.bin:
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index bbb2a8ef367c..d7e06fe0270e 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -1178,6 +1178,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
+ FILE *file;
+ char cmd[PATH_MAX];
+ char *mac_addr;
++ int str_len;
+
+ /*
+ * Set the configuration for the specified interface with
+@@ -1301,8 +1302,18 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
+ * invoke the external script to do its magic.
+ */
+
+- snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
+- "hv_set_ifconfig", if_file);
++ str_len = snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
++ "hv_set_ifconfig", if_file);
++ /*
++ * This is a little overcautious, but it's necessary to suppress some
++ * false warnings from gcc 8.0.1.
++ */
++ if (str_len <= 0 || (unsigned int)str_len >= sizeof(cmd)) {
++ syslog(LOG_ERR, "Cmd '%s' (len=%d) may be too long",
++ cmd, str_len);
++ return HV_E_FAIL;
++ }
++
+ if (system(cmd)) {
+ syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s",
+ cmd, errno, strerror(errno));
+diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
+index 60aa4ca8b2c5..7a0014794bff 100644
+--- a/tools/lib/bpf/bpf.c
++++ b/tools/lib/bpf/bpf.c
+@@ -77,6 +77,7 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
+ {
+ __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
+ union bpf_attr attr;
++ int ret;
+
+ memset(&attr, '\0', sizeof(attr));
+
+@@ -94,7 +95,15 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
+ attr.map_ifindex = create_attr->map_ifindex;
+ attr.inner_map_fd = create_attr->inner_map_fd;
+
+- return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
++ ret = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
++ if (ret < 0 && errno == EINVAL && create_attr->name) {
++ /* Retry the same syscall, but without the name.
++ * Pre v4.14 kernels don't support map names.
++ */
++ memset(attr.map_name, 0, sizeof(attr.map_name));
++ return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
++ }
++ return ret;
+ }
+
+ int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index f00ea77f5f08..849b3be15bd8 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -688,18 +688,20 @@ endif
+
+ ifeq ($(feature-libbfd), 1)
+ EXTLIBS += -lbfd
++else
++ # we are on a system that requires -liberty and (maybe) -lz
++ # to link against -lbfd; test each case individually here
+
+ # call all detections now so we get correct
+ # status in VF output
+- $(call feature_check,liberty)
+- $(call feature_check,liberty-z)
+- $(call feature_check,cplus-demangle)
++ $(call feature_check,libbfd-liberty)
++ $(call feature_check,libbfd-liberty-z)
+
+- ifeq ($(feature-liberty), 1)
+- EXTLIBS += -liberty
++ ifeq ($(feature-libbfd-liberty), 1)
++ EXTLIBS += -lbfd -liberty
+ else
+- ifeq ($(feature-liberty-z), 1)
+- EXTLIBS += -liberty -lz
++ ifeq ($(feature-libbfd-liberty-z), 1)
++ EXTLIBS += -lbfd -liberty -lz
+ endif
+ endif
+ endif
+@@ -709,24 +711,24 @@ ifdef NO_DEMANGLE
+ else
+ ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
+ EXTLIBS += -liberty
+- CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
+ else
+- ifneq ($(feature-libbfd), 1)
+- ifneq ($(feature-liberty), 1)
+- ifneq ($(feature-liberty-z), 1)
+- # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT
+- # or any of 'bfd iberty z' trinity
+- ifeq ($(feature-cplus-demangle), 1)
+- EXTLIBS += -liberty
+- CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
+- else
+- msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
+- CFLAGS += -DNO_DEMANGLE
+- endif
+- endif
++ ifeq ($(filter -liberty,$(EXTLIBS)),)
++ $(call feature_check,cplus-demangle)
++
++ # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT
++ # or any of 'bfd iberty z' trinity
++ ifeq ($(feature-cplus-demangle), 1)
++ EXTLIBS += -liberty
++ else
++ msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
++ CFLAGS += -DNO_DEMANGLE
+ endif
+ endif
+ endif
++
++ ifneq ($(filter -liberty,$(EXTLIBS)),)
++ CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
++ endif
+ endif
+
+ ifneq ($(filter -lbfd,$(EXTLIBS)),)
+diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
+index b32409a0e546..081353d7b095 100644
+--- a/tools/perf/arch/x86/util/kvm-stat.c
++++ b/tools/perf/arch/x86/util/kvm-stat.c
+@@ -156,7 +156,7 @@ int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
+ if (strstr(cpuid, "Intel")) {
+ kvm->exit_reasons = vmx_exit_reasons;
+ kvm->exit_reasons_isa = "VMX";
+- } else if (strstr(cpuid, "AMD")) {
++ } else if (strstr(cpuid, "AMD") || strstr(cpuid, "Hygon")) {
+ kvm->exit_reasons = svm_exit_reasons;
+ kvm->exit_reasons_isa = "SVM";
+ } else
+diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
+index ff9b60b99f52..44090a9a19f3 100644
+--- a/tools/perf/tests/attr.py
++++ b/tools/perf/tests/attr.py
+@@ -116,7 +116,7 @@ class Event(dict):
+ if not self.has_key(t) or not other.has_key(t):
+ continue
+ if not data_equal(self[t], other[t]):
+- log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
++ log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
+
+ # Test file description needs to have following sections:
+ # [config]
+diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
+index 699561fa512c..67bcbf876776 100644
+--- a/tools/perf/tests/evsel-tp-sched.c
++++ b/tools/perf/tests/evsel-tp-sched.c
+@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
+ return -1;
+ }
+
+- is_signed = !!(field->flags | FIELD_IS_SIGNED);
++ is_signed = !!(field->flags & FIELD_IS_SIGNED);
+ if (should_be_signed && !is_signed) {
+ pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
+ evsel->name, name, is_signed, should_be_signed);
+diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
+index bbed90e5d9bb..cee717a3794f 100644
+--- a/tools/perf/util/dso.c
++++ b/tools/perf/util/dso.c
+@@ -295,7 +295,7 @@ static int decompress_kmodule(struct dso *dso, const char *name,
+ unlink(tmpbuf);
+
+ if (pathname && (fd >= 0))
+- strncpy(pathname, tmpbuf, len);
++ strlcpy(pathname, tmpbuf, len);
+
+ return fd;
+ }
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 3cadc252dd89..bd9226bc5945 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -2636,6 +2636,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
+ struct perf_header *header = &session->header;
+ int fd = perf_data__fd(session->data);
+ struct stat st;
++ time_t stctime;
+ int ret, bit;
+
+ hd.fp = fp;
+@@ -2645,7 +2646,8 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
+ if (ret == -1)
+ return -1;
+
+- fprintf(fp, "# captured on : %s", ctime(&st.st_ctime));
++ stctime = st.st_ctime;
++ fprintf(fp, "# captured on : %s", ctime(&stctime));
+
+ fprintf(fp, "# header version : %u\n", header->version);
+ fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
+@@ -3521,7 +3523,7 @@ perf_event__synthesize_event_update_unit(struct perf_tool *tool,
+ if (ev == NULL)
+ return -ENOMEM;
+
+- strncpy(ev->data, evsel->unit, size);
++ strlcpy(ev->data, evsel->unit, size + 1);
+ err = process(tool, (union perf_event *)ev, NULL, NULL);
+ free(ev);
+ return err;
+diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
+index b76088fadf3d..6a6548890d5a 100644
+--- a/tools/perf/util/probe-file.c
++++ b/tools/perf/util/probe-file.c
+@@ -424,7 +424,7 @@ static int probe_cache__open(struct probe_cache *pcache, const char *target,
+
+ if (target && build_id_cache__cached(target)) {
+ /* This is a cached buildid */
+- strncpy(sbuildid, target, SBUILD_ID_SIZE);
++ strlcpy(sbuildid, target, SBUILD_ID_SIZE);
+ dir_name = build_id_cache__linkname(sbuildid, NULL, 0);
+ goto found;
+ }
+diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
+index ce501ba14b08..69f5f6142dcf 100644
+--- a/tools/perf/util/python.c
++++ b/tools/perf/util/python.c
+@@ -939,7 +939,8 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
+
+ file = PyFile_FromFile(fp, "perf", "r", NULL);
+ #else
+- file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1, NULL, NULL, NULL, 1);
++ file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1,
++ NULL, NULL, NULL, 0);
+ #endif
+ if (file == NULL)
+ goto free_list;
+diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
+index d2c78ffd9fee..aa7f8c11fbb7 100644
+--- a/tools/perf/util/s390-cpumsf.c
++++ b/tools/perf/util/s390-cpumsf.c
+@@ -499,7 +499,7 @@ static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts)
+ aux_ts = get_trailer_time(buf);
+ if (!aux_ts) {
+ pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n",
+- sfq->buffer->data_offset);
++ (s64)sfq->buffer->data_offset);
+ aux_ts = ~0ULL;
+ goto out;
+ }
+diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+index 84e2b648e622..2fa3c5757bcb 100755
+--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
++++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+@@ -585,9 +585,9 @@ current_max_cpu = 0
+
+ read_trace_data(filename)
+
+-clear_trace_file()
+-# Free the memory
+ if interval:
++ clear_trace_file()
++ # Free the memory
+ free_trace_buffer()
+
+ if graph_data_present == False:
+diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
+index 0ef68204c84b..d029cad08cbd 100644
+--- a/tools/testing/selftests/bpf/test_progs.c
++++ b/tools/testing/selftests/bpf/test_progs.c
+@@ -51,10 +51,10 @@ static struct {
+ struct iphdr iph;
+ struct tcphdr tcp;
+ } __packed pkt_v4 = {
+- .eth.h_proto = bpf_htons(ETH_P_IP),
++ .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+ .iph.ihl = 5,
+ .iph.protocol = 6,
+- .iph.tot_len = bpf_htons(MAGIC_BYTES),
++ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+ .tcp.urg_ptr = 123,
+ };
+
+@@ -64,9 +64,9 @@ static struct {
+ struct ipv6hdr iph;
+ struct tcphdr tcp;
+ } __packed pkt_v6 = {
+- .eth.h_proto = bpf_htons(ETH_P_IPV6),
++ .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+ .iph.nexthdr = 6,
+- .iph.payload_len = bpf_htons(MAGIC_BYTES),
++ .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+ .tcp.urg_ptr = 123,
+ };
+
+diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
+index dac7ceb1a677..08443a15e6be 100644
+--- a/virt/kvm/arm/mmio.c
++++ b/virt/kvm/arm/mmio.c
+@@ -117,6 +117,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
+ }
+
++ /*
++ * The MMIO instruction is emulated and should not be re-executed
++ * in the guest.
++ */
++ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
++
+ return 0;
+ }
+
+@@ -144,11 +150,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
+ vcpu->arch.mmio_decode.sign_extend = sign_extend;
+ vcpu->arch.mmio_decode.rt = rt;
+
+- /*
+- * The MMIO instruction is emulated and should not be re-executed
+- * in the guest.
+- */
+- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ return 0;
+ }
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index f986e31fa68c..0ffb02ff5234 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1959,7 +1959,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+
+ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+- void *data, int offset, unsigned long len)
++ void *data, unsigned int offset,
++ unsigned long len)
+ {
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int r;
+@@ -2912,8 +2913,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
+ if (ops->init)
+ ops->init(dev);
+
++ kvm_get_kvm(kvm);
+ ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
+ if (ret < 0) {
++ kvm_put_kvm(kvm);
+ mutex_lock(&kvm->lock);
+ list_del(&dev->vm_node);
+ mutex_unlock(&kvm->lock);
+@@ -2921,7 +2924,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
+ return ret;
+ }
+
+- kvm_get_kvm(kvm);
+ cd->fd = ret;
+ return 0;
+ }