public inbox for gentoo-commits@lists.gentoo.org
From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Fri, 19 Apr 2019 19:51:34 +0000 (UTC)
Message-ID: <1555703473.0f24bbd911eccbda14a4813938f48fd974f5bdb2.mpagano@gentoo>

commit:     0f24bbd911eccbda14a4813938f48fd974f5bdb2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 19 19:51:13 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 19 19:51:13 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0f24bbd9

Linux patch 4.19.35

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1034_linux-4.19.35.patch | 3735 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3739 insertions(+)

diff --git a/0000_README b/0000_README
index e752acc..fbfea55 100644
--- a/0000_README
+++ b/0000_README
@@ -179,6 +179,10 @@ Patch:  1033_linux-4.19.34.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.19.34
 
+Patch:  1034_linux-4.19.35.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.19.35
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1034_linux-4.19.35.patch b/1034_linux-4.19.35.patch
new file mode 100644
index 0000000..4caf535
--- /dev/null
+++ b/1034_linux-4.19.35.patch
@@ -0,0 +1,3735 @@
+diff --git a/Makefile b/Makefile
+index 8fdfe0af5862..f4229975b48c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 34
++SUBLEVEL = 35
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+@@ -483,7 +483,7 @@ endif
+ ifeq ($(cc-name),clang)
+ ifneq ($(CROSS_COMPILE),)
+ CLANG_FLAGS	:= --target=$(notdir $(CROSS_COMPILE:%-=%))
+-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
++GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
+ CLANG_FLAGS	+= --prefix=$(GCC_TOOLCHAIN_DIR)
+ GCC_TOOLCHAIN	:= $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+ endif
+diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
+index c87d01297a01..20bbb899b3b7 100644
+--- a/arch/arm/boot/dts/am335x-evm.dts
++++ b/arch/arm/boot/dts/am335x-evm.dts
+@@ -57,6 +57,24 @@
+ 		enable-active-high;
+ 	};
+ 
++	/* TPS79501 */
++	v1_8d_reg: fixedregulator-v1_8d {
++		compatible = "regulator-fixed";
++		regulator-name = "v1_8d";
++		vin-supply = <&vbat>;
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <1800000>;
++	};
++
++	/* TPS79501 */
++	v3_3d_reg: fixedregulator-v3_3d {
++		compatible = "regulator-fixed";
++		regulator-name = "v3_3d";
++		vin-supply = <&vbat>;
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++	};
++
+ 	matrix_keypad: matrix_keypad0 {
+ 		compatible = "gpio-matrix-keypad";
+ 		debounce-delay-ms = <5>;
+@@ -499,10 +517,10 @@
+ 		status = "okay";
+ 
+ 		/* Regulators */
+-		AVDD-supply = <&vaux2_reg>;
+-		IOVDD-supply = <&vaux2_reg>;
+-		DRVDD-supply = <&vaux2_reg>;
+-		DVDD-supply = <&vbat>;
++		AVDD-supply = <&v3_3d_reg>;
++		IOVDD-supply = <&v3_3d_reg>;
++		DRVDD-supply = <&v3_3d_reg>;
++		DVDD-supply = <&v1_8d_reg>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
+index bf1a40e45c97..ba589bc41a57 100644
+--- a/arch/arm/boot/dts/am335x-evmsk.dts
++++ b/arch/arm/boot/dts/am335x-evmsk.dts
+@@ -73,6 +73,24 @@
+ 		enable-active-high;
+ 	};
+ 
++	/* TPS79518 */
++	v1_8d_reg: fixedregulator-v1_8d {
++		compatible = "regulator-fixed";
++		regulator-name = "v1_8d";
++		vin-supply = <&vbat>;
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <1800000>;
++	};
++
++	/* TPS78633 */
++	v3_3d_reg: fixedregulator-v3_3d {
++		compatible = "regulator-fixed";
++		regulator-name = "v3_3d";
++		vin-supply = <&vbat>;
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++	};
++
+ 	leds {
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&user_leds_s0>;
+@@ -501,10 +519,10 @@
+ 		status = "okay";
+ 
+ 		/* Regulators */
+-		AVDD-supply = <&vaux2_reg>;
+-		IOVDD-supply = <&vaux2_reg>;
+-		DRVDD-supply = <&vaux2_reg>;
+-		DVDD-supply = <&vbat>;
++		AVDD-supply = <&v3_3d_reg>;
++		IOVDD-supply = <&v3_3d_reg>;
++		DRVDD-supply = <&v3_3d_reg>;
++		DVDD-supply = <&v1_8d_reg>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 0840ffb3205c..e6a36a792bae 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -70,7 +70,7 @@
+ 			compatible = "arm,cortex-a12";
+ 			reg = <0x501>;
+ 			resets = <&cru SRST_CORE1>;
+-			operating-points = <&cpu_opp_table>;
++			operating-points-v2 = <&cpu_opp_table>;
+ 			#cooling-cells = <2>; /* min followed by max */
+ 			clock-latency = <40000>;
+ 			clocks = <&cru ARMCLK>;
+@@ -80,7 +80,7 @@
+ 			compatible = "arm,cortex-a12";
+ 			reg = <0x502>;
+ 			resets = <&cru SRST_CORE2>;
+-			operating-points = <&cpu_opp_table>;
++			operating-points-v2 = <&cpu_opp_table>;
+ 			#cooling-cells = <2>; /* min followed by max */
+ 			clock-latency = <40000>;
+ 			clocks = <&cru ARMCLK>;
+@@ -90,7 +90,7 @@
+ 			compatible = "arm,cortex-a12";
+ 			reg = <0x503>;
+ 			resets = <&cru SRST_CORE3>;
+-			operating-points = <&cpu_opp_table>;
++			operating-points-v2 = <&cpu_opp_table>;
+ 			#cooling-cells = <2>; /* min followed by max */
+ 			clock-latency = <40000>;
+ 			clocks = <&cru ARMCLK>;
+diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
+index 1c01a6f843d8..28a2e45752fe 100644
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -518,7 +518,7 @@
+ #define PIN_PC9__GPIO			PINMUX_PIN(PIN_PC9, 0, 0)
+ #define PIN_PC9__FIQ			PINMUX_PIN(PIN_PC9, 1, 3)
+ #define PIN_PC9__GTSUCOMP		PINMUX_PIN(PIN_PC9, 2, 1)
+-#define PIN_PC9__ISC_D0			PINMUX_PIN(PIN_PC9, 2, 1)
++#define PIN_PC9__ISC_D0			PINMUX_PIN(PIN_PC9, 3, 1)
+ #define PIN_PC9__TIOA4			PINMUX_PIN(PIN_PC9, 4, 2)
+ #define PIN_PC10			74
+ #define PIN_PC10__GPIO			PINMUX_PIN(PIN_PC10, 0, 0)
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+index 5272e887a434..c142169a58fc 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+@@ -45,8 +45,7 @@
+ 
+ 	vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
+ 		compatible = "regulator-fixed";
+-		enable-active-high;
+-		gpio = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>;
++		gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&usb20_host_drv>;
+ 		regulator-name = "vcc_host1_5v";
+@@ -238,7 +237,7 @@
+ 
+ 	usb2 {
+ 		usb20_host_drv: usb20-host-drv {
+-			rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
++			rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index 3f5a2944300f..e065394360bb 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -1356,11 +1356,11 @@
+ 
+ 		sdmmc0 {
+ 			sdmmc0_clk: sdmmc0-clk {
+-				rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
++				rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
+ 			};
+ 
+ 			sdmmc0_cmd: sdmmc0-cmd {
+-				rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
++				rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
+ 			};
+ 
+ 			sdmmc0_dectn: sdmmc0-dectn {
+@@ -1372,14 +1372,14 @@
+ 			};
+ 
+ 			sdmmc0_bus1: sdmmc0-bus1 {
+-				rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
++				rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
+ 			};
+ 
+ 			sdmmc0_bus4: sdmmc0-bus4 {
+-				rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
+-						<1 RK_PA1 1 &pcfg_pull_up_4ma>,
+-						<1 RK_PA2 1 &pcfg_pull_up_4ma>,
+-						<1 RK_PA3 1 &pcfg_pull_up_4ma>;
++				rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
++						<1 RK_PA1 1 &pcfg_pull_up_8ma>,
++						<1 RK_PA2 1 &pcfg_pull_up_8ma>,
++						<1 RK_PA3 1 &pcfg_pull_up_8ma>;
+ 			};
+ 
+ 			sdmmc0_gpio: sdmmc0-gpio {
+@@ -1553,50 +1553,50 @@
+ 			rgmiim1_pins: rgmiim1-pins {
+ 				rockchip,pins =
+ 					/* mac_txclk */
+-					<1 RK_PB4 2 &pcfg_pull_none_12ma>,
++					<1 RK_PB4 2 &pcfg_pull_none_8ma>,
+ 					/* mac_rxclk */
+-					<1 RK_PB5 2 &pcfg_pull_none_2ma>,
++					<1 RK_PB5 2 &pcfg_pull_none_4ma>,
+ 					/* mac_mdio */
+-					<1 RK_PC3 2 &pcfg_pull_none_2ma>,
++					<1 RK_PC3 2 &pcfg_pull_none_4ma>,
+ 					/* mac_txen */
+-					<1 RK_PD1 2 &pcfg_pull_none_12ma>,
++					<1 RK_PD1 2 &pcfg_pull_none_8ma>,
+ 					/* mac_clk */
+-					<1 RK_PC5 2 &pcfg_pull_none_2ma>,
++					<1 RK_PC5 2 &pcfg_pull_none_4ma>,
+ 					/* mac_rxdv */
+-					<1 RK_PC6 2 &pcfg_pull_none_2ma>,
++					<1 RK_PC6 2 &pcfg_pull_none_4ma>,
+ 					/* mac_mdc */
+-					<1 RK_PC7 2 &pcfg_pull_none_2ma>,
++					<1 RK_PC7 2 &pcfg_pull_none_4ma>,
+ 					/* mac_rxd1 */
+-					<1 RK_PB2 2 &pcfg_pull_none_2ma>,
++					<1 RK_PB2 2 &pcfg_pull_none_4ma>,
+ 					/* mac_rxd0 */
+-					<1 RK_PB3 2 &pcfg_pull_none_2ma>,
++					<1 RK_PB3 2 &pcfg_pull_none_4ma>,
+ 					/* mac_txd1 */
+-					<1 RK_PB0 2 &pcfg_pull_none_12ma>,
++					<1 RK_PB0 2 &pcfg_pull_none_8ma>,
+ 					/* mac_txd0 */
+-					<1 RK_PB1 2 &pcfg_pull_none_12ma>,
++					<1 RK_PB1 2 &pcfg_pull_none_8ma>,
+ 					/* mac_rxd3 */
+-					<1 RK_PB6 2 &pcfg_pull_none_2ma>,
++					<1 RK_PB6 2 &pcfg_pull_none_4ma>,
+ 					/* mac_rxd2 */
+-					<1 RK_PB7 2 &pcfg_pull_none_2ma>,
++					<1 RK_PB7 2 &pcfg_pull_none_4ma>,
+ 					/* mac_txd3 */
+-					<1 RK_PC0 2 &pcfg_pull_none_12ma>,
++					<1 RK_PC0 2 &pcfg_pull_none_8ma>,
+ 					/* mac_txd2 */
+-					<1 RK_PC1 2 &pcfg_pull_none_12ma>,
++					<1 RK_PC1 2 &pcfg_pull_none_8ma>,
+ 
+ 					/* mac_txclk */
+-					<0 RK_PB0 1 &pcfg_pull_none>,
++					<0 RK_PB0 1 &pcfg_pull_none_8ma>,
+ 					/* mac_txen */
+-					<0 RK_PB4 1 &pcfg_pull_none>,
++					<0 RK_PB4 1 &pcfg_pull_none_8ma>,
+ 					/* mac_clk */
+-					<0 RK_PD0 1 &pcfg_pull_none>,
++					<0 RK_PD0 1 &pcfg_pull_none_4ma>,
+ 					/* mac_txd1 */
+-					<0 RK_PC0 1 &pcfg_pull_none>,
++					<0 RK_PC0 1 &pcfg_pull_none_8ma>,
+ 					/* mac_txd0 */
+-					<0 RK_PC1 1 &pcfg_pull_none>,
++					<0 RK_PC1 1 &pcfg_pull_none_8ma>,
+ 					/* mac_txd3 */
+-					<0 RK_PC7 1 &pcfg_pull_none>,
++					<0 RK_PC7 1 &pcfg_pull_none_8ma>,
+ 					/* mac_txd2 */
+-					<0 RK_PC6 1 &pcfg_pull_none>;
++					<0 RK_PC6 1 &pcfg_pull_none_8ma>;
+ 			};
+ 
+ 			rmiim1_pins: rmiim1-pins {
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index 07fe2479d310..b447b4db423a 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -30,8 +30,8 @@ do {									\
+ "	prfm	pstl1strm, %2\n"					\
+ "1:	ldxr	%w1, %2\n"						\
+ 	insn "\n"							\
+-"2:	stlxr	%w3, %w0, %2\n"						\
+-"	cbnz	%w3, 1b\n"						\
++"2:	stlxr	%w0, %w3, %2\n"						\
++"	cbnz	%w0, 1b\n"						\
+ "	dmb	ish\n"							\
+ "3:\n"									\
+ "	.pushsection .fixup,\"ax\"\n"					\
+@@ -50,30 +50,30 @@ do {									\
+ static inline int
+ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
+ {
+-	int oldval = 0, ret, tmp;
++	int oldval, ret, tmp;
+ 	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+ 
+ 	pagefault_disable();
+ 
+ 	switch (op) {
+ 	case FUTEX_OP_SET:
+-		__futex_atomic_op("mov	%w0, %w4",
++		__futex_atomic_op("mov	%w3, %w4",
+ 				  ret, oldval, uaddr, tmp, oparg);
+ 		break;
+ 	case FUTEX_OP_ADD:
+-		__futex_atomic_op("add	%w0, %w1, %w4",
++		__futex_atomic_op("add	%w3, %w1, %w4",
+ 				  ret, oldval, uaddr, tmp, oparg);
+ 		break;
+ 	case FUTEX_OP_OR:
+-		__futex_atomic_op("orr	%w0, %w1, %w4",
++		__futex_atomic_op("orr	%w3, %w1, %w4",
+ 				  ret, oldval, uaddr, tmp, oparg);
+ 		break;
+ 	case FUTEX_OP_ANDN:
+-		__futex_atomic_op("and	%w0, %w1, %w4",
++		__futex_atomic_op("and	%w3, %w1, %w4",
+ 				  ret, oldval, uaddr, tmp, ~oparg);
+ 		break;
+ 	case FUTEX_OP_XOR:
+-		__futex_atomic_op("eor	%w0, %w1, %w4",
++		__futex_atomic_op("eor	%w3, %w1, %w4",
+ 				  ret, oldval, uaddr, tmp, oparg);
+ 		break;
+ 	default:
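
The operand shuffle above matters because each asm block implements a fetch-and-op: return the old user-word value through *oval while atomically applying the operation. A rough user-space model of the FUTEX_OP_ADD case only, using the GCC/Clang __atomic builtins (no pagefault handling, and names invented for the sketch — the kernel variant works on a __user pointer with exception-table fixups):

#include <stdint.h>
#include <stdio.h>

/* Sketch: *oval receives the old value while the word becomes
 * old + oparg, in one atomic step. */
static int futex_atomic_add(uint32_t *uaddr, int oparg, int *oval)
{
	*oval = (int)__atomic_fetch_add(uaddr, (uint32_t)oparg, __ATOMIC_SEQ_CST);
	return 0;
}

int main(void)
{
	uint32_t word = 40;
	int old;

	futex_atomic_add(&word, 2, &old);
	printf("old=%d new=%u\n", old, word);	/* old=40 new=42 */
	return 0;
}
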
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index b9da093e0341..a0099be4311a 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -101,10 +101,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
+ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+ {
+ 	struct stackframe frame;
+-	int skip;
++	int skip = 0;
+ 
+ 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+ 
++	if (regs) {
++		if (user_mode(regs))
++			return;
++		skip = 1;
++	}
++
+ 	if (!tsk)
+ 		tsk = current;
+ 
+@@ -125,7 +131,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+ 	frame.graph = tsk->curr_ret_stack;
+ #endif
+ 
+-	skip = !!regs;
+ 	printk("Call trace:\n");
+ 	do {
+ 		/* skip until specified stack frame */
+@@ -175,15 +180,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
+ 		return ret;
+ 
+ 	print_modules();
+-	__show_regs(regs);
+ 	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
+ 		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+ 		 end_of_stack(tsk));
++	show_regs(regs);
+ 
+-	if (!user_mode(regs)) {
+-		dump_backtrace(regs, tsk);
++	if (!user_mode(regs))
+ 		dump_instr(KERN_EMERG, regs);
+-	}
+ 
+ 	return ret;
+ }
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 787e27964ab9..774c3e17c798 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -450,7 +450,7 @@ void __init arm64_memblock_init(void)
+ 		 * memory spans, randomize the linear region as well.
+ 		 */
+ 		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+-			range = range / ARM64_MEMSTART_ALIGN + 1;
++			range /= ARM64_MEMSTART_ALIGN;
+ 			memstart_addr -= ARM64_MEMSTART_ALIGN *
+ 					 ((range * memstart_offset_seed) >> 16);
+ 		}
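
The seed here is a 16-bit fixed-point fraction of the range, so the old "+ 1" let the computed offset consume the entire spare region while the fixed expression stays strictly inside it. A standalone sketch with invented sizes (1 GiB alignment, 4 GiB of spare room):

#include <stdio.h>

int main(void)
{
	unsigned long align = 1UL << 30;	/* stand-in for ARM64_MEMSTART_ALIGN */
	unsigned long spare = 4 * align;	/* spare room in the linear region */
	unsigned long seed  = 0xffff;		/* worst-case 16-bit seed */

	/* Old: range = spare/align + 1 can reach the full spare room. */
	unsigned long old_off = align * (((spare / align + 1) * seed) >> 16);
	/* New: range = spare/align keeps the offset strictly below it. */
	unsigned long new_off = align * (((spare / align) * seed) >> 16);

	printf("old: %lu GiB of %lu GiB spare\n", old_off >> 30, spare >> 30);
	printf("new: %lu GiB of %lu GiB spare\n", new_off >> 30, spare >> 30);
	return 0;
}
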
+diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
+index 2a27b275ab09..9ff033d261ab 100644
+--- a/arch/parisc/include/asm/ptrace.h
++++ b/arch/parisc/include/asm/ptrace.h
+@@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
+ 
+ static inline unsigned long regs_return_value(struct pt_regs *regs)
+ {
+-	return regs->gr[20];
++	return regs->gr[28];
+ }
+ 
+ static inline void instruction_pointer_set(struct pt_regs *regs,
+ 						unsigned long val)
+ {
+-        regs->iaoq[0] = val;
++	regs->iaoq[0] = val;
++	regs->iaoq[1] = val + 4;
+ }
+ 
+ /* Query offset/name of register from its name/offset */
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index eb39e7e380d7..841db71958cd 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
+ 
+ static int __init parisc_idle_init(void)
+ {
+-	const char *marker;
+-
+-	/* check QEMU/SeaBIOS marker in PAGE0 */
+-	marker = (char *) &PAGE0->pad0;
+-	running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
+-
+ 	if (!running_on_qemu)
+ 		cpu_idle_poll_ctrl(1);
+ 
+diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
+index 4e87c35c22b7..79c8b994e7d1 100644
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -399,6 +399,9 @@ void __init start_parisc(void)
+ 	int ret, cpunum;
+ 	struct pdc_coproc_cfg coproc_cfg;
+ 
++	/* check QEMU/SeaBIOS marker in PAGE0 */
++	running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
++
+ 	cpunum = smp_processor_id();
+ 
+ 	init_cpu_topology();
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index bbd1c73243d7..14b0f5b6a373 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -755,12 +755,25 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ 		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
+ 					   &uc_transact->uc_mcontext))
+ 			goto badframe;
+-	}
+-	else
+-	/* Fall through, for non-TM restore */
++	} else
+ #endif
+-	if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+-		goto badframe;
++	{
++		/*
++		 * Fall through, for non-TM restore
++		 *
++		 * Unset MSR[TS] on the thread regs since MSR from user
++		 * context does not have MSR active, and recheckpoint was
++		 * not called since restore_tm_sigcontexts() was not called
++		 * also.
++		 *
++		 * If not unsetting it, the code can RFID to userspace with
++		 * MSR[TS] set, but without CPU in the proper state,
++		 * causing a TM bad thing.
++		 */
++		current->thread.regs->msr &= ~MSR_TS_MASK;
++		if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
++			goto badframe;
++	}
+ 
+ 	if (restore_altstack(&uc->uc_stack))
+ 		goto badframe;
+diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
+index 8d25f8904c00..1dcde0fda435 100644
+--- a/arch/riscv/include/asm/syscall.h
++++ b/arch/riscv/include/asm/syscall.h
+@@ -78,10 +78,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ 	if (i == 0) {
+ 		args[0] = regs->orig_a0;
+ 		args++;
+-		i++;
+ 		n--;
++	} else {
++		i--;
+ 	}
+-	memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
++	memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
+ }
+ 
+ static inline void syscall_set_arguments(struct task_struct *task,
+@@ -93,10 +94,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
+         if (i == 0) {
+                 regs->orig_a0 = args[0];
+                 args++;
+-                i++;
+                 n--;
+-        }
+-	memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
++	} else {
++		i--;
++	}
++	memcpy(&regs->a1 + i, args, n * sizeof(regs->a1));
+ }
+ 
+ #endif	/* _ASM_RISCV_SYSCALL_H */
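
The fix above is pure pointer arithmetic: C already scales pointer offsets by the element size, so the extra sizeof() factor advanced the index roughly eightfold on 64-bit and read or wrote far past the intended register. A toy demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long regs[8] = {0, 1, 2, 3, 4, 5, 6, 7};
	unsigned long *a1 = &regs[1];	/* stands in for &regs->a1 */
	int i = 2;

	/* Correct: pointer arithmetic scales by sizeof(unsigned long). */
	printf("a1 + i -> regs[%d] = %lu\n", 1 + i, *(a1 + i));
	/* The removed form, a1 + i * sizeof(*a1), would have advanced
	 * i * 8 elements here and read out of bounds. */
	return 0;
}
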
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index c3d7ccd25381..5bfe2243a08f 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -47,7 +47,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
+ CPPFLAGS_vdso.lds += -P -C
+ 
+ VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
+-			-z max-page-size=4096 -z common-page-size=4096
++			-z max-page-size=4096
+ 
+ $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
+ 	$(call if_changed,vdso)
+@@ -98,7 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg
+ 
+ CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
+ VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
+-			   -z max-page-size=4096 -z common-page-size=4096
++			   -z max-page-size=4096
+ 
+ # x32-rebranded versions
+ vobjx32s-y := $(vobjs-y:.o=-x32.o)
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index c84584bb9402..3e5dd85b019a 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -3,10 +3,14 @@
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+ #include <asm/apicdef.h>
++#include <asm/nmi.h>
+ 
+ #include "../perf_event.h"
+ 
++static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
++
+ static __initconst const u64 amd_hw_cache_event_ids
+ 				[PERF_COUNT_HW_CACHE_MAX]
+ 				[PERF_COUNT_HW_CACHE_OP_MAX]
+@@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
+ 	}
+ }
+ 
++/*
++ * When a PMC counter overflows, an NMI is used to process the event and
++ * reset the counter. NMI latency can result in the counter being updated
++ * before the NMI can run, which can result in what appear to be spurious
++ * NMIs. This function is intended to wait for the NMI to run and reset
++ * the counter to avoid possible unhandled NMI messages.
++ */
++#define OVERFLOW_WAIT_COUNT	50
++
++static void amd_pmu_wait_on_overflow(int idx)
++{
++	unsigned int i;
++	u64 counter;
++
++	/*
++	 * Wait for the counter to be reset if it has overflowed. This loop
++	 * should exit very, very quickly, but just in case, don't wait
++	 * forever...
++	 */
++	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
++		rdmsrl(x86_pmu_event_addr(idx), counter);
++		if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
++			break;
++
++		/* Might be in IRQ context, so can't sleep */
++		udelay(1);
++	}
++}
++
++static void amd_pmu_disable_all(void)
++{
++	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++	int idx;
++
++	x86_pmu_disable_all();
++
++	/*
++	 * This shouldn't be called from NMI context, but add a safeguard here
++	 * to return, since if we're in NMI context we can't wait for an NMI
++	 * to reset an overflowed counter value.
++	 */
++	if (in_nmi())
++		return;
++
++	/*
++	 * Check each counter for overflow and wait for it to be reset by the
++	 * NMI if it has overflowed. This relies on the fact that all active
++	 * counters are always enabled when this function is called and
++	 * ARCH_PERFMON_EVENTSEL_INT is always set.
++	 */
++	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
++		if (!test_bit(idx, cpuc->active_mask))
++			continue;
++
++		amd_pmu_wait_on_overflow(idx);
++	}
++}
++
++static void amd_pmu_disable_event(struct perf_event *event)
++{
++	x86_pmu_disable_event(event);
++
++	/*
++	 * This can be called from NMI context (via x86_pmu_stop). The counter
++	 * may have overflowed, but either way, we'll never see it get reset
++	 * by the NMI if we're already in the NMI. And the NMI latency support
++	 * below will take care of any pending NMI that might have been
++	 * generated by the overflow.
++	 */
++	if (in_nmi())
++		return;
++
++	amd_pmu_wait_on_overflow(event->hw.idx);
++}
++
++/*
++ * Because of NMI latency, if multiple PMC counters are active or other sources
++ * of NMIs are received, the perf NMI handler can handle one or more overflowed
++ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
++ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
++ * back-to-back NMI support won't be active. This PMC handler needs to take into
++ * account that this can occur, otherwise this could result in unknown NMI
++ * messages being issued. Examples of this are PMC overflow while in the NMI
++ * handler when multiple PMCs are active or PMC overflow while handling some
++ * other source of an NMI.
++ *
++ * Attempt to mitigate this by using the number of active PMCs to determine
++ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
++ * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
++ * number of active PMCs or 2. The value of 2 is used in case an NMI does not
++ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
++ */
++static int amd_pmu_handle_irq(struct pt_regs *regs)
++{
++	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++	int active, handled;
++
++	/*
++	 * Obtain the active count before calling x86_pmu_handle_irq() since
++	 * it is possible that x86_pmu_handle_irq() may make a counter
++	 * inactive (through x86_pmu_stop).
++	 */
++	active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
++
++	/* Process any counter overflows */
++	handled = x86_pmu_handle_irq(regs);
++
++	/*
++	 * If a counter was handled, record the number of possible remaining
++	 * NMIs that can occur.
++	 */
++	if (handled) {
++		this_cpu_write(perf_nmi_counter,
++			       min_t(unsigned int, 2, active));
++
++		return handled;
++	}
++
++	if (!this_cpu_read(perf_nmi_counter))
++		return NMI_DONE;
++
++	this_cpu_dec(perf_nmi_counter);
++
++	return NMI_HANDLED;
++}
++
+ static struct event_constraint *
+ amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ 			  struct perf_event *event)
+@@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
+ 
+ static __initconst const struct x86_pmu amd_pmu = {
+ 	.name			= "AMD",
+-	.handle_irq		= x86_pmu_handle_irq,
+-	.disable_all		= x86_pmu_disable_all,
++	.handle_irq		= amd_pmu_handle_irq,
++	.disable_all		= amd_pmu_disable_all,
+ 	.enable_all		= x86_pmu_enable_all,
+ 	.enable			= x86_pmu_enable_event,
+-	.disable		= x86_pmu_disable_event,
++	.disable		= amd_pmu_disable_event,
+ 	.hw_config		= amd_pmu_hw_config,
+ 	.schedule_events	= x86_schedule_events,
+ 	.eventsel		= MSR_K7_EVNTSEL0,
+@@ -728,7 +858,7 @@ void amd_pmu_enable_virt(void)
+ 	cpuc->perf_ctr_virt_mask = 0;
+ 
+ 	/* Reload all events */
+-	x86_pmu_disable_all();
++	amd_pmu_disable_all();
+ 	x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+@@ -746,7 +876,7 @@ void amd_pmu_disable_virt(void)
+ 	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
+ 
+ 	/* Reload all events */
+-	x86_pmu_disable_all();
++	amd_pmu_disable_all();
+ 	x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
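
The overflow test in amd_pmu_wait_on_overflow() keys off the counter's top valid bit: perf arms counters with the negative of the sampling period, so the bit is set while armed and clear once the count wraps past zero until the NMI handler re-arms it. A small model, assuming the usual 48-bit AMD counter width:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned cntval_bits = 48;	/* typical AMD counter width */
	const uint64_t top = 1ULL << (cntval_bits - 1);

	/* Armed: programmed with -(sampling period), truncated to 48 bits. */
	uint64_t armed = ((1ULL << cntval_bits) - 100000);
	/* Overflowed: counted up past zero, not yet re-armed. */
	uint64_t wrapped = 42;

	printf("armed:   %s\n", (armed & top) ? "top bit set, stop waiting" : "clear");
	printf("wrapped: %s\n", (wrapped & top) ? "set" : "top bit clear, keep waiting");
	return 0;
}
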
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index a41554350893..c9625bff4328 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1328,8 +1328,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
+ 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ 	struct hw_perf_event *hwc = &event->hw;
+ 
+-	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
++	if (test_bit(hwc->idx, cpuc->active_mask)) {
+ 		x86_pmu.disable(event);
++		__clear_bit(hwc->idx, cpuc->active_mask);
+ 		cpuc->events[hwc->idx] = NULL;
+ 		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ 		hwc->state |= PERF_HES_STOPPED;
+@@ -1426,16 +1427,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
+ 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+ 
+ 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+-		if (!test_bit(idx, cpuc->active_mask)) {
+-			/*
+-			 * Though we deactivated the counter some cpus
+-			 * might still deliver spurious interrupts still
+-			 * in flight. Catch them:
+-			 */
+-			if (__test_and_clear_bit(idx, cpuc->running))
+-				handled++;
++		if (!test_bit(idx, cpuc->active_mask))
+ 			continue;
+-		}
+ 
+ 		event = cpuc->events[idx];
+ 
+diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
+index 9f645ba57dbb..33611a74bfff 100644
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -36,22 +36,17 @@
+  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+  */
+ 
+-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
+-/* Technically wrong, but this avoids compilation errors on some gcc
+-   versions. */
+-#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
+-#else
+-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
+-#endif
++#define RLONG_ADDR(x)			 "m" (*(volatile long *) (x))
++#define WBYTE_ADDR(x)			"+m" (*(volatile char *) (x))
+ 
+-#define ADDR				BITOP_ADDR(addr)
++#define ADDR				RLONG_ADDR(addr)
+ 
+ /*
+  * We do the locked ops that don't return the old value as
+  * a mask operation on a byte.
+  */
+ #define IS_IMMEDIATE(nr)		(__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr)	BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr)			(1 << ((nr) & 7))
+ 
+ /**
+@@ -79,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
+ 			: "memory");
+ 	} else {
+ 		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
+-			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
++			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
+ 	}
+ }
+ 
+@@ -94,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
+  */
+ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
+ {
+-	asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
++	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
+ }
+ 
+ /**
+@@ -116,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
+ 			: "iq" ((u8)~CONST_MASK(nr)));
+ 	} else {
+ 		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
+-			: BITOP_ADDR(addr)
+-			: "Ir" (nr));
++			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
+ 	}
+ }
+ 
+@@ -137,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
+ 
+ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
+ {
+-	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
++	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
+ }
+ 
+ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+@@ -145,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
+ 	bool negative;
+ 	asm volatile(LOCK_PREFIX "andb %2,%1"
+ 		CC_SET(s)
+-		: CC_OUT(s) (negative), ADDR
++		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
+ 		: "ir" ((char) ~(1 << nr)) : "memory");
+ 	return negative;
+ }
+@@ -161,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
+  * __clear_bit() is non-atomic and implies release semantics before the memory
+  * operation. It can be used for an unlock if no other CPUs can concurrently
+  * modify other bits in the word.
+- *
+- * No memory barrier is required here, because x86 cannot reorder stores past
+- * older loads. Same principle as spin_unlock.
+  */
+ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+ {
+-	barrier();
+ 	__clear_bit(nr, addr);
+ }
+ 
+@@ -182,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
+  */
+ static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
+ {
+-	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
++	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
+ }
+ 
+ /**
+@@ -202,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
+ 			: "iq" ((u8)CONST_MASK(nr)));
+ 	} else {
+ 		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
+-			: BITOP_ADDR(addr)
+-			: "Ir" (nr));
++			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
+ 	}
+ }
+ 
+@@ -249,8 +238,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
+ 
+ 	asm(__ASM_SIZE(bts) " %2,%1"
+ 	    CC_SET(c)
+-	    : CC_OUT(c) (oldbit), ADDR
+-	    : "Ir" (nr));
++	    : CC_OUT(c) (oldbit)
++	    : ADDR, "Ir" (nr) : "memory");
+ 	return oldbit;
+ }
+ 
+@@ -290,8 +279,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
+ 
+ 	asm volatile(__ASM_SIZE(btr) " %2,%1"
+ 		     CC_SET(c)
+-		     : CC_OUT(c) (oldbit), ADDR
+-		     : "Ir" (nr));
++		     : CC_OUT(c) (oldbit)
++		     : ADDR, "Ir" (nr) : "memory");
+ 	return oldbit;
+ }
+ 
+@@ -302,8 +291,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
+ 
+ 	asm volatile(__ASM_SIZE(btc) " %2,%1"
+ 		     CC_SET(c)
+-		     : CC_OUT(c) (oldbit), ADDR
+-		     : "Ir" (nr) : "memory");
++		     : CC_OUT(c) (oldbit)
++		     : ADDR, "Ir" (nr) : "memory");
+ 
+ 	return oldbit;
+ }
+@@ -335,7 +324,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
+ 	asm volatile(__ASM_SIZE(bt) " %2,%1"
+ 		     CC_SET(c)
+ 		     : CC_OUT(c) (oldbit)
+-		     : "m" (*(unsigned long *)addr), "Ir" (nr));
++		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
+ 
+ 	return oldbit;
+ }
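
The recurring change in this hunk swaps "+m" output operands for plain "m" inputs plus a "memory" clobber, which still forbids the compiler from caching the word in a register across the asm. A minimal x86-64-only toy in the same style (GCC/Clang inline asm, a non-atomic set_bit analogue):

#include <stdio.h>

static inline void toy_set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btsq %1,%0"
		     : /* no outputs: the store happens behind the compiler's back */
		     : "m" (*addr), "Ir" (nr)
		     : "memory");	/* so it must not cache *addr across the asm */
}

int main(void)
{
	unsigned long word = 0;

	toy_set_bit(3, &word);
	printf("word = %#lx\n", word);	/* 0x8 */
	return 0;
}
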
+diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
+index 55d392c6bd29..2fd165f1cffa 100644
+--- a/arch/x86/include/asm/string_32.h
++++ b/arch/x86/include/asm/string_32.h
+@@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
+  *	No 3D Now!
+  */
+ 
+-#if (__GNUC__ >= 4)
+ #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
+-#else
+-#define memcpy(t, f, n)				\
+-	(__builtin_constant_p((n))		\
+-	 ? __constant_memcpy((t), (f), (n))	\
+-	 : __memcpy((t), (f), (n)))
+-#endif
+ 
+ #endif
+ #endif /* !CONFIG_FORTIFY_SOURCE */
+@@ -282,12 +275,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
+ 
+ 	{
+ 		int d0, d1;
+-#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
+-		/* Workaround for broken gcc 4.0 */
+-		register unsigned long eax asm("%eax") = pattern;
+-#else
+ 		unsigned long eax = pattern;
+-#endif
+ 
+ 		switch (count % 4) {
+ 		case 0:
+@@ -321,15 +309,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
+ #define __HAVE_ARCH_MEMSET
+ extern void *memset(void *, int, size_t);
+ #ifndef CONFIG_FORTIFY_SOURCE
+-#if (__GNUC__ >= 4)
+ #define memset(s, c, count) __builtin_memset(s, c, count)
+-#else
+-#define memset(s, c, count)						\
+-	(__builtin_constant_p(c)					\
+-	 ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
+-				 (count))				\
+-	 : __memset((s), (c), (count)))
+-#endif
+ #endif /* !CONFIG_FORTIFY_SOURCE */
+ 
+ #define __HAVE_ARCH_MEMSET16
+diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
+index d33f92b9fa22..052a7a4ac025 100644
+--- a/arch/x86/include/asm/string_64.h
++++ b/arch/x86/include/asm/string_64.h
+@@ -32,21 +32,6 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
+ extern void *memcpy(void *to, const void *from, size_t len);
+ extern void *__memcpy(void *to, const void *from, size_t len);
+ 
+-#ifndef CONFIG_FORTIFY_SOURCE
+-#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
+-#define memcpy(dst, src, len)					\
+-({								\
+-	size_t __len = (len);					\
+-	void *__ret;						\
+-	if (__builtin_constant_p(len) && __len >= 64)		\
+-		__ret = __memcpy((dst), (src), __len);		\
+-	else							\
+-		__ret = __builtin_memcpy((dst), (src), __len);	\
+-	__ret;							\
+-})
+-#endif
+-#endif /* !CONFIG_FORTIFY_SOURCE */
+-
+ #define __HAVE_ARCH_MEMSET
+ void *memset(void *s, int c, size_t n);
+ void *__memset(void *s, int c, size_t n);
+diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
+index ef05bea7010d..6b5c710846f5 100644
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
+ 	__HYPERCALL_DECLS;
+ 	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ 
++	if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
++		return -EINVAL;
++
+ 	asm volatile(CALL_NOSPEC
+ 		     : __HYPERCALL_5PARAM
+ 		     : [thunk_target] "a" (&hypercall_page[call])
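
The added guard bounds-checks the hypercall number against the count of 32-byte entries in the one-page hypercall table before it is used as an indirect-branch index. A toy version of the same check (entry layout assumed for the sketch):

#include <stdio.h>

#define TOY_PAGE_SIZE 4096
struct hypercall_entry { char insns[32]; };	/* 32-byte slots assumed */

static long toy_hypercall(unsigned int call)
{
	if (call >= TOY_PAGE_SIZE / sizeof(struct hypercall_entry))
		return -22;	/* -EINVAL, as the hunk returns */
	/* ... would dispatch through the now-validated table index ... */
	return 0;
}

int main(void)
{
	printf("call 1:    %ld\n", toy_hypercall(1));
	printf("call 4096: %ld\n", toy_hypercall(4096));
	return 0;
}
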
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index b47541962012..6dc72804fe6e 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6398,11 +6398,11 @@ e_free:
+ 	return ret;
+ }
+ 
+-static int get_num_contig_pages(int idx, struct page **inpages,
+-				unsigned long npages)
++static unsigned long get_num_contig_pages(unsigned long idx,
++				struct page **inpages, unsigned long npages)
+ {
+ 	unsigned long paddr, next_paddr;
+-	int i = idx + 1, pages = 1;
++	unsigned long i = idx + 1, pages = 1;
+ 
+ 	/* find the number of contiguous pages starting from idx */
+ 	paddr = __sme_page_pa(inpages[idx]);
+@@ -6421,12 +6421,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
+ 
+ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+-	unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
++	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
+ 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ 	struct kvm_sev_launch_update_data params;
+ 	struct sev_data_launch_update_data *data;
+ 	struct page **inpages;
+-	int i, ret, pages;
++	int ret;
+ 
+ 	if (!sev_guest(kvm))
+ 		return -ENOTTY;
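
The type change guards the page counts against 32-bit truncation when a guest region is very large. A quick illustration on an LP64 system (region size invented):

#include <stdio.h>

int main(void)
{
	unsigned long npages = 1UL << 32;	/* 2^32 4-KiB pages = a 16 TiB region */
	int truncated = (int)npages;		/* the old counter type */

	printf("unsigned long: %lu pages\n", npages);
	printf("int:           %d pages\n", truncated);	/* truncates to 0 here */
	return 0;
}
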
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index f99f59625da5..6b6bcafd1d2c 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -11582,6 +11582,17 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
+ 	return 0;
+ }
+ 
++static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
++	int msr;
++
++	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
++		unsigned word = msr / BITS_PER_LONG;
++
++		msr_bitmap[word] = ~0;
++		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
++	}
++}
++
+ /*
+  * Merge L0's and L1's MSR bitmap, return false to indicate that
+  * we do not use the hardware.
+@@ -11623,39 +11634,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ 		return false;
+ 
+ 	msr_bitmap_l1 = (unsigned long *)kmap(page);
+-	if (nested_cpu_has_apic_reg_virt(vmcs12)) {
+-		/*
+-		 * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
+-		 * just lets the processor take the value from the virtual-APIC page;
+-		 * take those 256 bits directly from the L1 bitmap.
+-		 */
+-		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+-			unsigned word = msr / BITS_PER_LONG;
+-			msr_bitmap_l0[word] = msr_bitmap_l1[word];
+-			msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
+-		}
+-	} else {
+-		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+-			unsigned word = msr / BITS_PER_LONG;
+-			msr_bitmap_l0[word] = ~0;
+-			msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
+-		}
+-	}
+ 
+-	nested_vmx_disable_intercept_for_msr(
+-		msr_bitmap_l1, msr_bitmap_l0,
+-		X2APIC_MSR(APIC_TASKPRI),
+-		MSR_TYPE_W);
++	/*
++	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
++	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
++	 * the x2APIC MSR range and selectively disable them below.
++	 */
++	enable_x2apic_msr_intercepts(msr_bitmap_l0);
++
++	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
++		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
++			/*
++			 * L0 need not intercept reads for MSRs between 0x800
++			 * and 0x8ff, it just lets the processor take the value
++			 * from the virtual-APIC page; take those 256 bits
++			 * directly from the L1 bitmap.
++			 */
++			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
++				unsigned word = msr / BITS_PER_LONG;
++
++				msr_bitmap_l0[word] = msr_bitmap_l1[word];
++			}
++		}
+ 
+-	if (nested_cpu_has_vid(vmcs12)) {
+-		nested_vmx_disable_intercept_for_msr(
+-			msr_bitmap_l1, msr_bitmap_l0,
+-			X2APIC_MSR(APIC_EOI),
+-			MSR_TYPE_W);
+ 		nested_vmx_disable_intercept_for_msr(
+ 			msr_bitmap_l1, msr_bitmap_l0,
+-			X2APIC_MSR(APIC_SELF_IPI),
+-			MSR_TYPE_W);
++			X2APIC_MSR(APIC_TASKPRI),
++			MSR_TYPE_R | MSR_TYPE_W);
++
++		if (nested_cpu_has_vid(vmcs12)) {
++			nested_vmx_disable_intercept_for_msr(
++				msr_bitmap_l1, msr_bitmap_l0,
++				X2APIC_MSR(APIC_EOI),
++				MSR_TYPE_W);
++			nested_vmx_disable_intercept_for_msr(
++				msr_bitmap_l1, msr_bitmap_l0,
++				X2APIC_MSR(APIC_SELF_IPI),
++				MSR_TYPE_W);
++		}
+ 	}
+ 
+ 	if (spec_ctrl)
+@@ -12836,11 +12852,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
+ 	nested_cache_shadow_vmcs12(vcpu, vmcs12);
+ 
+ 	/*
+-	 * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
+-	 * by event injection, halt vcpu.
++	 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
++	 * awakened by event injection or by an NMI-window VM-exit or
++	 * by an interrupt-window VM-exit, halt the vcpu.
+ 	 */
+ 	if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
+-	    !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) {
++	    !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
++	    !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
++	    !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
++	      (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
+ 		vmx->nested.nested_run_pending = 0;
+ 		return kvm_vcpu_halt(vcpu);
+ 	}
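
The "eight 8-byte writes" in the new intercept-bitmap comment is plain arithmetic: 256 x2APIC MSRs need 256 read-intercept bits and 256 write-intercept bits, i.e. four 64-bit words each. Worked out:

#include <stdio.h>

int main(void)
{
	unsigned int msrs  = 0x8ff - 0x800 + 1;	/* 256 x2APIC MSRs */
	unsigned int words = msrs / 64;		/* bits per long on 64-bit */

	printf("%u MSRs -> %u read words + %u write words = %u long-sized writes\n",
	       msrs, words, words, 2 * words);
	return 0;
}
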
+diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
+index 0df4080fa20f..a94da7dd3eae 100644
+--- a/arch/xtensa/kernel/stacktrace.c
++++ b/arch/xtensa/kernel/stacktrace.c
+@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
+ 	return 1;
+ }
+ 
++/*
++ * level == 0 is for the return address from the caller of this function,
++ * not from this function itself.
++ */
+ unsigned long return_address(unsigned level)
+ {
+ 	struct return_addr_data r = {
+-		.skip = level + 1,
++		.skip = level,
+ 	};
+ 	walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+ 	return r.addr;
+diff --git a/block/bio.c b/block/bio.c
+index 55a5386fd431..3d757055305f 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1240,8 +1240,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+ 			}
+ 		}
+ 
+-		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
++		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
++			if (!map_data)
++				__free_page(page);
+ 			break;
++		}
+ 
+ 		len -= bytes;
+ 		offset = 0;
+diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
+index 78f9de260d5f..2f4641e5ecde 100644
+--- a/drivers/acpi/acpica/dsopcode.c
++++ b/drivers/acpi/acpica/dsopcode.c
+@@ -523,6 +523,10 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
+ 			  ACPI_FORMAT_UINT64(obj_desc->region.address),
+ 			  obj_desc->region.length));
+ 
++	status = acpi_ut_add_address_range(obj_desc->region.space_id,
++					   obj_desc->region.address,
++					   obj_desc->region.length, node);
++
+ 	/* Now the address and length are valid for this opregion */
+ 
+ 	obj_desc->region.flags |= AOPOBJ_DATA_VALID;
+diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
+index e10fec99a182..4424997ecf30 100644
+--- a/drivers/acpi/acpica/evgpe.c
++++ b/drivers/acpi/acpica/evgpe.c
+@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+ 
+ 	ACPI_FUNCTION_TRACE(ev_enable_gpe);
+ 
+-	/* Enable the requested GPE */
++	/* Clear the GPE status */
++	status = acpi_hw_clear_gpe(gpe_event_info);
++	if (ACPI_FAILURE(status))
++		return_ACPI_STATUS(status);
+ 
++	/* Enable the requested GPE */
+ 	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+ 	return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
+index 8638f43cfc3d..79d86da1c892 100644
+--- a/drivers/acpi/acpica/nsobject.c
++++ b/drivers/acpi/acpica/nsobject.c
+@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
+ 		}
+ 	}
+ 
++	if (obj_desc->common.type == ACPI_TYPE_REGION) {
++		acpi_ut_remove_address_range(obj_desc->region.space_id, node);
++	}
++
+ 	/* Clear the Node entry in all cases */
+ 
+ 	node->object = NULL;
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index 40728491f37b..1df9cb8e659e 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -343,7 +343,7 @@ config XILINX_HWICAP
+ 
+ config R3964
+ 	tristate "Siemens R3964 line discipline"
+-	depends on TTY
++	depends on TTY && BROKEN
+ 	---help---
+ 	  This driver allows synchronous communication with devices using the
+ 	  Siemens R3964 packet protocol. Unless you are dealing with special
+diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
+index 258c8d259ea1..f965845917e3 100644
+--- a/drivers/clk/meson/meson-aoclk.c
++++ b/drivers/clk/meson/meson-aoclk.c
+@@ -65,20 +65,15 @@ int meson_aoclkc_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	/* Populate regmap */
+-	for (clkid = 0; clkid < data->num_clks; clkid++)
++	/*
++	 * Populate regmap and register all clks
++	 */
++	for (clkid = 0; clkid < data->num_clks; clkid++) {
+ 		data->clks[clkid]->map = regmap;
+ 
+-	/* Register all clks */
+-	for (clkid = 0; clkid < data->hw_data->num; clkid++) {
+-		if (!data->hw_data->hws[clkid])
+-			continue;
+-
+ 		ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
+-		if (ret) {
+-			dev_err(dev, "Clock registration failed\n");
++		if (ret)
+ 			return ret;
+-		}
+ 	}
+ 
+ 	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
+index 00aad8164dec..542f31ce108f 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.c
++++ b/drivers/gpu/drm/i915/gvt/gtt.c
+@@ -1940,7 +1940,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
+  */
+ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
+ {
+-	atomic_dec(&mm->pincount);
++	atomic_dec_if_positive(&mm->pincount);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
+index 43aa058e29fc..663a7c9ca3d3 100644
+--- a/drivers/gpu/drm/i915/gvt/scheduler.c
++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
+@@ -1389,8 +1389,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+ 		intel_runtime_pm_put(dev_priv);
+ 	}
+ 
+-	if (ret && (vgpu_is_vm_unhealthy(ret))) {
+-		enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
++	if (ret) {
++		if (vgpu_is_vm_unhealthy(ret))
++			enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+ 		intel_vgpu_destroy_workload(workload);
+ 		return ERR_PTR(ret);
+ 	}
+diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
+index 9ef515df724b..54e767bd5ddb 100644
+--- a/drivers/gpu/drm/udl/udl_drv.c
++++ b/drivers/gpu/drm/udl/udl_drv.c
+@@ -51,6 +51,7 @@ static struct drm_driver driver = {
+ 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ 	.load = udl_driver_load,
+ 	.unload = udl_driver_unload,
++	.release = udl_driver_release,
+ 
+ 	/* gem hooks */
+ 	.gem_free_object_unlocked = udl_gem_free_object,
+diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
+index e9e9b1ff678e..4ae67d882eae 100644
+--- a/drivers/gpu/drm/udl/udl_drv.h
++++ b/drivers/gpu/drm/udl/udl_drv.h
+@@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
+ 
+ int udl_driver_load(struct drm_device *dev, unsigned long flags);
+ void udl_driver_unload(struct drm_device *dev);
++void udl_driver_release(struct drm_device *dev);
+ 
+ int udl_fbdev_init(struct drm_device *dev);
+ void udl_fbdev_cleanup(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
+index 1b014d92855b..19055dda3140 100644
+--- a/drivers/gpu/drm/udl/udl_main.c
++++ b/drivers/gpu/drm/udl/udl_main.c
+@@ -378,6 +378,12 @@ void udl_driver_unload(struct drm_device *dev)
+ 		udl_free_urb_list(dev);
+ 
+ 	udl_fbdev_cleanup(dev);
+-	udl_modeset_cleanup(dev);
+ 	kfree(udl);
+ }
++
++void udl_driver_release(struct drm_device *dev)
++{
++	udl_modeset_cleanup(dev);
++	drm_dev_fini(dev);
++	kfree(dev);
++}
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 81da17a42dc9..c7adaca2ab01 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -1755,6 +1755,7 @@ config SENSORS_VT8231
+ config SENSORS_W83773G
+ 	tristate "Nuvoton W83773G"
+ 	depends on I2C
++	select REGMAP_I2C
+ 	help
+ 	  If you say yes here you get support for the Nuvoton W83773G hardware
+ 	  monitoring chip.
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 96d5fb3f6199..bc6ef2303f0b 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -908,7 +908,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
+ static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
+ {
+ 	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
+-	       range2->logical_sector + range2->n_sectors > range2->logical_sector;
++	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
+ }
+ 
+ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
+@@ -954,8 +954,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
+ 		struct dm_integrity_range *last_range =
+ 			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
+ 		struct task_struct *last_range_task;
+-		if (!ranges_overlap(range, last_range))
+-			break;
+ 		last_range_task = last_range->task;
+ 		list_del(&last_range->wait_entry);
+ 		if (!add_new_range(ic, last_range, false)) {
+@@ -3174,7 +3172,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 			journal_watermark = val;
+ 		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
+ 			sync_msec = val;
+-		else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
++		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+ 			if (ic->meta_dev) {
+ 				dm_put_device(ti, ic->meta_dev);
+ 				ic->meta_dev = NULL;
+@@ -3193,17 +3191,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 				goto bad;
+ 			}
+ 			ic->sectors_per_block = val >> SECTOR_SHIFT;
+-		} else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
++		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+ 			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
+ 					    "Invalid internal_hash argument");
+ 			if (r)
+ 				goto bad;
+-		} else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
++		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+ 			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
+ 					    "Invalid journal_crypt argument");
+ 			if (r)
+ 				goto bad;
+-		} else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
++		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+ 			r = get_alg_and_key(opt_string, &ic->journal_mac_alg,  &ti->error,
+ 					    "Invalid journal_mac argument");
+ 			if (r)
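
The ranges_overlap() fix at the top of this file's changes restores the standard half-open interval test, which must mention both ranges; the broken form compared range2 against itself and was always true for non-empty ranges. The predicate in isolation:

#include <stdbool.h>
#include <stdio.h>

/* Two half-open ranges [s1, s1+n1) and [s2, s2+n2) overlap iff each
 * one starts before the other ends. */
static bool overlaps(unsigned long s1, unsigned long n1,
		     unsigned long s2, unsigned long n2)
{
	return s1 < s2 + n2 && s1 + n1 > s2;
}

int main(void)
{
	printf("%d\n", overlaps(0, 10, 5, 10));	/* 1: [0,10) vs [5,15) */
	printf("%d\n", overlaps(0, 10, 10, 5));	/* 0: adjacent, no overlap */
	return 0;
}
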
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 3d0e2c198f06..c7fe4789c40e 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1872,6 +1872,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
+ 	return true;
+ }
+ 
++static int device_requires_stable_pages(struct dm_target *ti,
++					struct dm_dev *dev, sector_t start,
++					sector_t len, void *data)
++{
++	struct request_queue *q = bdev_get_queue(dev->bdev);
++
++	return q && bdi_cap_stable_pages_required(q->backing_dev_info);
++}
++
++/*
++ * If any underlying device requires stable pages, a table must require
++ * them as well.  Only targets that support iterate_devices are considered:
++ * don't want error, zero, etc to require stable pages.
++ */
++static bool dm_table_requires_stable_pages(struct dm_table *t)
++{
++	struct dm_target *ti;
++	unsigned i;
++
++	for (i = 0; i < dm_table_get_num_targets(t); i++) {
++		ti = dm_table_get_target(t, i);
++
++		if (ti->type->iterate_devices &&
++		    ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
++			return true;
++	}
++
++	return false;
++}
++
+ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ 			       struct queue_limits *limits)
+ {
+@@ -1929,6 +1959,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ 
+ 	dm_table_verify_integrity(t);
+ 
++	/*
++	 * Some devices don't use blk_integrity but still want stable pages
++	 * because they do their own checksumming.
++	 */
++	if (dm_table_requires_stable_pages(t))
++		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
++	else
++		q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
++
+ 	/*
+ 	 * Determine whether or not this queue's I/O timings contribute
+ 	 * to the entropy pool, Only request-based targets use this.
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 07d2949a8746..42768fe92b41 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1007,15 +1007,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
+ 		return -EINVAL;
+ 	}
+ 
+-	/*
+-	 * BIO based queue uses its own splitting. When multipage bvecs
+-	 * is switched on, size of the incoming bio may be too big to
+-	 * be handled in some targets, such as crypt.
+-	 *
+-	 * When these targets are ready for the big bio, we can remove
+-	 * the limit.
+-	 */
+-	ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
++	ti->max_io_len = (uint32_t) len;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 0bd93bb7d1a2..581ad0a17d0c 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1092,6 +1092,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ 	tpa_info = &rxr->rx_tpa[agg_id];
+ 
+ 	if (unlikely(cons != rxr->rx_next_cons)) {
++		netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
++			    cons, rxr->rx_next_cons);
+ 		bnxt_sched_reset(bp, rxr);
+ 		return;
+ 	}
+@@ -1544,15 +1546,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+ 	}
+ 
+ 	cons = rxcmp->rx_cmp_opaque;
+-	rx_buf = &rxr->rx_buf_ring[cons];
+-	data = rx_buf->data;
+-	data_ptr = rx_buf->data_ptr;
+ 	if (unlikely(cons != rxr->rx_next_cons)) {
+ 		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+ 
++		netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
++			    cons, rxr->rx_next_cons);
+ 		bnxt_sched_reset(bp, rxr);
+ 		return rc1;
+ 	}
++	rx_buf = &rxr->rx_buf_ring[cons];
++	data = rx_buf->data;
++	data_ptr = rx_buf->data_ptr;
+ 	prefetch(data_ptr);
+ 
+ 	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+@@ -1569,11 +1573,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+ 
+ 	rx_buf->data = NULL;
+ 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
++		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
++
+ 		bnxt_reuse_rx_data(rxr, cons, data);
+ 		if (agg_bufs)
+ 			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+ 
+ 		rc = -EIO;
++		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
++			netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
++			bnxt_sched_reset(bp, rxr);
++		}
+ 		goto next_rx;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index c8704b1690eb..a475f36ddf8c 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1888,6 +1888,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
+ 	 */
+ 	adapter->state = VNIC_PROBED;
+ 
++	reinit_completion(&adapter->init_done);
+ 	rc = init_crq_queue(adapter);
+ 	if (rc) {
+ 		netdev_err(adapter->netdev,
+@@ -4569,7 +4570,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
+ 	old_num_rx_queues = adapter->req_rx_queues;
+ 	old_num_tx_queues = adapter->req_tx_queues;
+ 
+-	init_completion(&adapter->init_done);
++	reinit_completion(&adapter->init_done);
+ 	adapter->init_done_rc = 0;
+ 	ibmvnic_send_crq_init(adapter);
+ 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+@@ -4624,7 +4625,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
+ 
+ 	adapter->from_passive_init = false;
+ 
+-	init_completion(&adapter->init_done);
+ 	adapter->init_done_rc = 0;
+ 	ibmvnic_send_crq_init(adapter);
+ 	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+@@ -4703,6 +4703,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ 	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
+ 	INIT_LIST_HEAD(&adapter->rwi_list);
+ 	spin_lock_init(&adapter->rwi_lock);
++	init_completion(&adapter->init_done);
+ 	adapter->resetting = false;
+ 
+ 	adapter->mac_change_pending = false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+index eac245a93f91..4ab0d030b544 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+@@ -122,7 +122,9 @@ out:
+ 	return err;
+ }
+ 
+-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
++/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
++ * minimum speed value is 40Gbps
++ */
+ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
+ {
+ 	u32 speed;
+@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
+ 	int err;
+ 
+ 	err = mlx5e_port_linkspeed(priv->mdev, &speed);
+-	if (err) {
+-		mlx5_core_warn(priv->mdev, "cannot get port speed\n");
+-		return 0;
+-	}
++	if (err)
++		speed = SPEED_40000;
++	speed = max_t(u32, speed, SPEED_40000);
+ 
+ 	xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
+ 
+@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
+ }
+ 
+ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
+-				 u32 xoff, unsigned int mtu)
++				 u32 xoff, unsigned int max_mtu)
+ {
+ 	int i;
+ 
+@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
+ 		}
+ 
+ 		if (port_buffer->buffer[i].size <
+-		    (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
++		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+ 			return -ENOMEM;
+ 
+ 		port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
+-		port_buffer->buffer[i].xon  = port_buffer->buffer[i].xoff - mtu;
++		port_buffer->buffer[i].xon  =
++			port_buffer->buffer[i].xoff - max_mtu;
+ 	}
+ 
+ 	return 0;
+@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
+ 
+ /**
+  * update_buffer_lossy()
+- *   mtu: device's MTU
++ *   max_mtu: netdev's max_mtu
+  *   pfc_en: <input> current pfc configuration
+  *   buffer: <input> current prio to buffer mapping
+  *   xoff:   <input> xoff value
+@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
+  *     Return 0 if no error.
+  *     Set change to true if buffer configuration is modified.
+  */
+-static int update_buffer_lossy(unsigned int mtu,
++static int update_buffer_lossy(unsigned int max_mtu,
+ 			       u8 pfc_en, u8 *buffer, u32 xoff,
+ 			       struct mlx5e_port_buffer *port_buffer,
+ 			       bool *change)
+@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
+ 	}
+ 
+ 	if (changed) {
+-		err = update_xoff_threshold(port_buffer, xoff, mtu);
++		err = update_xoff_threshold(port_buffer, xoff, max_mtu);
+ 		if (err)
+ 			return err;
+ 
+@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
+ 	return 0;
+ }
+ 
++#define MINIMUM_MAX_MTU 9216
+ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 				    u32 change, unsigned int mtu,
+ 				    struct ieee_pfc *pfc,
+@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 	bool update_prio2buffer = false;
+ 	u8 buffer[MLX5E_MAX_PRIORITY];
+ 	bool update_buffer = false;
++	unsigned int max_mtu;
+ 	u32 total_used = 0;
+ 	u8 curr_pfc_en;
+ 	int err;
+ 	int i;
+ 
+ 	mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
++	max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
+ 
+ 	err = mlx5e_port_query_buffer(priv, &port_buffer);
+ 	if (err)
+@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 
+ 	if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
+ 		update_buffer = true;
+-		err = update_xoff_threshold(&port_buffer, xoff, mtu);
++		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 		if (err)
+ 			return err;
+ 
+-		err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
++		err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
+ 					  &port_buffer, &update_buffer);
+ 		if (err)
+ 			return err;
+@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 		if (err)
+ 			return err;
+ 
+-		err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
+-					  &port_buffer, &update_buffer);
++		err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
++					  xoff, &port_buffer, &update_buffer);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 			return -EINVAL;
+ 
+ 		update_buffer = true;
+-		err = update_xoff_threshold(&port_buffer, xoff, mtu);
++		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 	/* Need to update buffer configuration if xoff value is changed */
+ 	if (!update_buffer && xoff != priv->dcbx.xoff) {
+ 		update_buffer = true;
+-		err = update_xoff_threshold(&port_buffer, xoff, mtu);
++		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+ 		if (err)
+ 			return err;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+index db3278cc052b..124e4567a4ee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ 	if (err)
+ 		return err;
+ 
++	mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ 	list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
++	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ 
+ 	return 0;
+ }
+@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+ 		       struct mlx5e_tir *tir)
+ {
++	mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ 	mlx5_core_destroy_tir(mdev, tir->tirn);
+ 	list_del(&tir->list);
++	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ }
+ 
+ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+ 	}
+ 
+ 	INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
++	mutex_init(&mdev->mlx5e_res.td.list_lock);
+ 
+ 	return 0;
+ 
+@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
+ {
+ 	struct mlx5_core_dev *mdev = priv->mdev;
+ 	struct mlx5e_tir *tir;
+-	int err  = -ENOMEM;
++	int err  = 0;
+ 	u32 tirn = 0;
+ 	int inlen;
+ 	void *in;
+ 
+ 	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ 	in = kvzalloc(inlen, GFP_KERNEL);
+-	if (!in)
++	if (!in) {
++		err = -ENOMEM;
+ 		goto out;
++	}
+ 
+ 	if (enable_uc_lb)
+ 		MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
+@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
+ 
+ 	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ 
++	mutex_lock(&mdev->mlx5e_res.td.list_lock);
+ 	list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+ 		tirn = tir->tirn;
+ 		err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+@@ -168,6 +176,7 @@ out:
+ 	kvfree(in);
+ 	if (err)
+ 		netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
++	mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ 
+ 	return err;
+ }
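
The en_common.c hunks above put every addition, removal and traversal of the shared tirs_list behind a new mutex, since mlx5e_refresh_tirs() can walk the list concurrently with TIR create/destroy. The shape of that fix, reduced to a kernel-style sketch (kernel-module context assumed; the registry types are illustrative, not the driver's):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct tir_registry {
	struct mutex     lock;   /* protects 'items'; mutex_init() at setup */
	struct list_head items;
};

struct tir_entry {
	struct list_head list;
	u32 tirn;
};

static void registry_add(struct tir_registry *r, struct tir_entry *e)
{
	mutex_lock(&r->lock);
	list_add(&e->list, &r->items);
	mutex_unlock(&r->lock);
}

static void registry_for_each(struct tir_registry *r,
			      void (*fn)(struct tir_entry *))
{
	struct tir_entry *e;

	mutex_lock(&r->lock);
	list_for_each_entry(e, &r->items, list)
		fn(e);                  /* callback runs under the lock */
	mutex_unlock(&r->lock);
}
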
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+index 5cf5f2a9d51f..8de64e88c670 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ 	void *cmd;
+ 	int ret;
+ 
++	rcu_read_lock();
++	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
++	rcu_read_unlock();
++
++	if (!flow) {
++		WARN_ONCE(1, "Received NULL pointer for handle\n");
++		return -EINVAL;
++	}
++
+ 	buf = kzalloc(size, GFP_ATOMIC);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+ 	cmd = (buf + 1);
+ 
+-	rcu_read_lock();
+-	flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
+-	rcu_read_unlock();
+ 	mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+ 
+ 	MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
+@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
+ 	buf->complete = mlx_tls_kfree_complete;
+ 
+ 	ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
++	if (ret < 0)
++		kfree(buf);
+ 
+ 	return ret;
+ }
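
Two separate corrections sit in the fpga/tls hunk above: the idr_find() lookup is hoisted ahead of the allocation so a NULL flow is rejected before any memory is committed, and the command buffer is freed when the send fails, because on error the callee never took ownership. A compact kernel-style sketch of that ordering (rcu_read_lock(), idr_find(), kzalloc() and kfree() are real kernel APIs; the rest is illustrative):

/* kernel-context sketch: <linux/idr.h> and <linux/slab.h> assumed */
static int resync_one(struct idr *idr, u32 id, size_t size)
{
	struct flow_ctx *flow;      /* hypothetical payload type */
	void *buf;
	int ret;

	rcu_read_lock();
	flow = idr_find(idr, id);   /* may return NULL */
	rcu_read_unlock();
	if (!flow)
		return -EINVAL;     /* reject before allocating */

	buf = kzalloc(size, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	ret = send_cmd(flow, buf);  /* hypothetical async sender */
	if (ret < 0)
		kfree(buf);         /* on failure, ownership stays here */
	return ret;
}
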
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 563ce3fedab4..0e820cf92f8a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -162,26 +162,6 @@ static struct mlx5_profile profile[] = {
+ 			.size	= 8,
+ 			.limit	= 4
+ 		},
+-		.mr_cache[16]	= {
+-			.size	= 8,
+-			.limit	= 4
+-		},
+-		.mr_cache[17]	= {
+-			.size	= 8,
+-			.limit	= 4
+-		},
+-		.mr_cache[18]	= {
+-			.size	= 8,
+-			.limit	= 4
+-		},
+-		.mr_cache[19]	= {
+-			.size	= 4,
+-			.limit	= 2
+-		},
+-		.mr_cache[20]	= {
+-			.size	= 4,
+-			.limit	= 2
+-		},
+ 	},
+ };
+ 
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+index 18a09cdcd9c6..aa5869eb2e3f 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+@@ -225,7 +225,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	ret = dev_queue_xmit(skb);
+ 	nfp_repr_inc_tx_stats(netdev, len, ret);
+ 
+-	return ret;
++	return NETDEV_TX_OK;
+ }
+ 
+ static int nfp_repr_stop(struct net_device *netdev)
+@@ -329,6 +329,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
+ 
+ 	SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
+ 
++	netdev->priv_flags |= IFF_DISABLE_NETPOLL;
++
+ 	if (nfp_app_has_tc(app)) {
+ 		netdev->features |= NETIF_F_HW_TC;
+ 		netdev->hw_features |= NETIF_F_HW_TC;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 5f45ffeeecb4..7a50b911b180 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -28,6 +28,7 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/firmware.h>
+ #include <linux/prefetch.h>
++#include <linux/pci-aspm.h>
+ #include <linux/ipv6.h>
+ #include <net/ip6_checksum.h>
+ 
+@@ -5417,7 +5418,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
+ 	tp->cp_cmd |= PktCntrDisable | INTT_1;
+ 	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
+ 
+-	RTL_W16(tp, IntrMitigate, 0x5151);
++	RTL_W16(tp, IntrMitigate, 0x5100);
+ 
+ 	/* Work around for RxFIFO overflow. */
+ 	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+@@ -7324,6 +7325,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			return rc;
+ 	}
+ 
++	/* Disable ASPM completely, as it causes random devices to stop
++	 * working as well as full system hangs for some PCIe device users.
++	 */
++	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
++
+ 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
+ 	rc = pcim_enable_device(pdev);
+ 	if (rc < 0) {
+diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
+index 42d284669b03..31d8d83c25ac 100644
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -970,6 +970,7 @@ struct netvsc_device {
+ 
+ 	wait_queue_head_t wait_drain;
+ 	bool destroy;
++	bool tx_disable; /* if true, do not wake up queue again */
+ 
+ 	/* Receive buffer allocated by us but manages by NetVSP */
+ 	void *recv_buf;
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 1a942feab954..fb12b63439c6 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
+ 
+ 	init_waitqueue_head(&net_device->wait_drain);
+ 	net_device->destroy = false;
++	net_device->tx_disable = false;
+ 
+ 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
+ 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+@@ -716,7 +717,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
+ 	} else {
+ 		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
+ 
+-		if (netif_tx_queue_stopped(txq) &&
++		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
+ 		    (hv_get_avail_to_write_percent(&channel->outbound) >
+ 		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
+ 			netif_tx_wake_queue(txq);
+@@ -871,7 +872,8 @@ static inline int netvsc_send_pkt(
+ 	} else if (ret == -EAGAIN) {
+ 		netif_tx_stop_queue(txq);
+ 		ndev_ctx->eth_stats.stop_queue++;
+-		if (atomic_read(&nvchan->queue_sends) < 1) {
++		if (atomic_read(&nvchan->queue_sends) < 1 &&
++		    !net_device->tx_disable) {
+ 			netif_tx_wake_queue(txq);
+ 			ndev_ctx->eth_stats.wake_queue++;
+ 			ret = -ENOSPC;
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index c8320405c8f1..9d699bd5f715 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
+ 	rcu_read_unlock();
+ }
+ 
++static void netvsc_tx_enable(struct netvsc_device *nvscdev,
++			     struct net_device *ndev)
++{
++	nvscdev->tx_disable = false;
++	virt_wmb(); /* ensure queue wake up mechanism is on */
++
++	netif_tx_wake_all_queues(ndev);
++}
++
+ static int netvsc_open(struct net_device *net)
+ {
+ 	struct net_device_context *ndev_ctx = netdev_priv(net);
+@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
+ 	rdev = nvdev->extension;
+ 	if (!rdev->link_state) {
+ 		netif_carrier_on(net);
+-		netif_tx_wake_all_queues(net);
++		netvsc_tx_enable(nvdev, net);
+ 	}
+ 
+ 	if (vf_netdev) {
+@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
+ 	}
+ }
+ 
++static void netvsc_tx_disable(struct netvsc_device *nvscdev,
++			      struct net_device *ndev)
++{
++	if (nvscdev) {
++		nvscdev->tx_disable = true;
++		virt_wmb(); /* ensure txq will not wake up after stop */
++	}
++
++	netif_tx_disable(ndev);
++}
++
+ static int netvsc_close(struct net_device *net)
+ {
+ 	struct net_device_context *net_device_ctx = netdev_priv(net);
+@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
+ 	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+ 	int ret;
+ 
+-	netif_tx_disable(net);
++	netvsc_tx_disable(nvdev, net);
+ 
+ 	/* No need to close rndis filter if it is removed already */
+ 	if (!nvdev)
+@@ -918,7 +938,7 @@ static int netvsc_detach(struct net_device *ndev,
+ 
+ 	/* If device was up (receiving) then shutdown */
+ 	if (netif_running(ndev)) {
+-		netif_tx_disable(ndev);
++		netvsc_tx_disable(nvdev, ndev);
+ 
+ 		ret = rndis_filter_close(nvdev);
+ 		if (ret) {
+@@ -1899,7 +1919,7 @@ static void netvsc_link_change(struct work_struct *w)
+ 		if (rdev->link_state) {
+ 			rdev->link_state = false;
+ 			netif_carrier_on(net);
+-			netif_tx_wake_all_queues(net);
++			netvsc_tx_enable(net_device, net);
+ 		} else {
+ 			notify = true;
+ 		}
+@@ -1909,7 +1929,7 @@ static void netvsc_link_change(struct work_struct *w)
+ 		if (!rdev->link_state) {
+ 			rdev->link_state = true;
+ 			netif_carrier_off(net);
+-			netif_tx_stop_all_queues(net);
++			netvsc_tx_disable(net_device, net);
+ 		}
+ 		kfree(event);
+ 		break;
+@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
+ 		if (!rdev->link_state) {
+ 			rdev->link_state = true;
+ 			netif_carrier_off(net);
+-			netif_tx_stop_all_queues(net);
++			netvsc_tx_disable(net_device, net);
+ 			event->event = RNDIS_STATUS_MEDIA_CONNECT;
+ 			spin_lock_irqsave(&ndev_ctx->lock, flags);
+ 			list_add(&event->list, &ndev_ctx->reconfig_events);
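
The hyperv hunks above introduce a tx_disable latch that both wake sites in netvsc.c now consult, so a send-completion racing with netvsc_close() or netvsc_detach() can no longer re-wake a queue the control path just stopped; virt_wmb() publishes the latch before the queue state changes. A userspace C11 analogue of the latch-then-stop pattern, with release/acquire standing in for the kernel barrier (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct txq {
	atomic_bool stopped;
	atomic_bool tx_disable;   /* "never wake again" latch */
};

static void tx_disable(struct txq *q)
{
	/* publish the latch before the queue state changes */
	atomic_store_explicit(&q->tx_disable, true, memory_order_release);
	atomic_store_explicit(&q->stopped, true, memory_order_release);
}

/* completion path: wake only if stopped and waking is still allowed */
static bool maybe_wake(struct txq *q)
{
	if (!atomic_load_explicit(&q->stopped, memory_order_acquire))
		return false;
	if (atomic_load_explicit(&q->tx_disable, memory_order_acquire))
		return false;
	atomic_store_explicit(&q->stopped, false, memory_order_relaxed);
	return true;
}

If maybe_wake() observes the stopped state written by tx_disable(), the acquire load guarantees it also observes the latch, so it declines to wake.
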
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 74bebbdb4b15..9195f3476b1d 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
+ 	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
+ 	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
++	{QMI_FIXED_INTF(0x2020, 0x2031, 4)},	/* Olicard 600 */
+ 	{QMI_FIXED_INTF(0x2020, 0x2033, 4)},	/* BroadMobi BM806U */
+ 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+ 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
+index da7c72372ffc..9c397fa8704c 100644
+--- a/drivers/pci/hotplug/pciehp_ctrl.c
++++ b/drivers/pci/hotplug/pciehp_ctrl.c
+@@ -117,6 +117,10 @@ static void remove_board(struct slot *p_slot)
+ 		 * removed from the slot/adapter.
+ 		 */
+ 		msleep(1000);
++
++		/* Ignore link or presence changes caused by power off */
++		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
++			   &ctrl->pending_events);
+ 	}
+ 
+ 	/* turn off Green LED */
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index c0673a717239..37d897bc4cf1 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3852,6 +3852,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
+ 			 quirk_dma_func1_alias);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
++			 quirk_dma_func1_alias);
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
+ 			 quirk_dma_func1_alias);
+diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
+index 0840d27381ea..e0a04bfc873e 100644
+--- a/drivers/tty/Kconfig
++++ b/drivers/tty/Kconfig
+@@ -441,4 +441,28 @@ config VCC
+ 	depends on SUN_LDOMS
+ 	help
+ 	  Support for Sun logical domain consoles.
++
++config LDISC_AUTOLOAD
++	bool "Automatically load TTY Line Disciplines"
++	default y
++	help
++	  Historically the kernel has always automatically loaded any
++	  line discipline that is in a kernel module when a user asks
++	  for it to be loaded with the TIOCSETD ioctl, or through other
++	  means.  This is not always the best thing to do on systems
++	  where you know you will not be using some of the more
++	  "ancient" line disciplines, so prevent the kernel from doing
++	  this unless the request is coming from a process with the
++	  CAP_SYS_MODULE permissions.
++
++	  Say 'Y' here if you trust your userspace users to do the right
++	  thing, or if you have only provided the line disciplines that
++	  you know you will be using, or if you wish to continue to use
++	  the traditional method of on-demand loading of these modules
++	  by any user.
++
++	  This functionality can be changed at runtime with the
++	  dev.tty.ldisc_autoload sysctl; this configuration option only
++	  sets the default value of this functionality.
++
+ endif # TTY
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index e7d192ebecd7..ac8025cd4a1f 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -512,6 +512,8 @@ static const struct file_operations hung_up_tty_fops = {
+ static DEFINE_SPINLOCK(redirect_lock);
+ static struct file *redirect;
+ 
++extern void tty_sysctl_init(void);
++
+ /**
+  *	tty_wakeup	-	request more data
+  *	@tty: terminal
+@@ -3340,6 +3342,7 @@ void console_sysfs_notify(void)
+  */
+ int __init tty_init(void)
+ {
++	tty_sysctl_init();
+ 	cdev_init(&tty_cdev, &tty_fops);
+ 	if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+ 	    register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index fc4c97cae01e..53bb6d4e9e8d 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -156,6 +156,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
+  *		takes tty_ldiscs_lock to guard against ldisc races
+  */
+ 
++#if defined(CONFIG_LDISC_AUTOLOAD)
++	#define INITIAL_AUTOLOAD_STATE	1
++#else
++	#define INITIAL_AUTOLOAD_STATE	0
++#endif
++static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
++
+ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
+ {
+ 	struct tty_ldisc *ld;
+@@ -170,6 +177,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
+ 	 */
+ 	ldops = get_ldops(disc);
+ 	if (IS_ERR(ldops)) {
++		if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
++			return ERR_PTR(-EPERM);
+ 		request_module("tty-ldisc-%d", disc);
+ 		ldops = get_ldops(disc);
+ 		if (IS_ERR(ldops))
+@@ -829,3 +838,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
+ 		tty_ldisc_put(tty->ldisc);
+ 	tty->ldisc = NULL;
+ }
++
++static int zero;
++static int one = 1;
++static struct ctl_table tty_table[] = {
++	{
++		.procname	= "ldisc_autoload",
++		.data		= &tty_ldisc_autoload,
++		.maxlen		= sizeof(tty_ldisc_autoload),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec,
++		.extra1		= &zero,
++		.extra2		= &one,
++	},
++	{ }
++};
++
++static struct ctl_table tty_dir_table[] = {
++	{
++		.procname	= "tty",
++		.mode		= 0555,
++		.child		= tty_table,
++	},
++	{ }
++};
++
++static struct ctl_table tty_root_table[] = {
++	{
++		.procname	= "dev",
++		.mode		= 0555,
++		.child		= tty_dir_table,
++	},
++	{ }
++};
++
++void tty_sysctl_init(void)
++{
++	register_sysctl_table(tty_root_table);
++}
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 814b395007b2..9529e28e1822 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -1086,6 +1086,8 @@ struct virtqueue *vring_create_virtqueue(
+ 					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ 		if (queue)
+ 			break;
++		if (!may_reduce_num)
++			return NULL;
+ 	}
+ 
+ 	if (!num)
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index cdbb888a8d4a..1c25dae083a8 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -296,10 +296,10 @@ static void blkdev_bio_end_io(struct bio *bio)
+ 	struct blkdev_dio *dio = bio->bi_private;
+ 	bool should_dirty = dio->should_dirty;
+ 
+-	if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
+-		if (bio->bi_status && !dio->bio.bi_status)
+-			dio->bio.bi_status = bio->bi_status;
+-	} else {
++	if (bio->bi_status && !dio->bio.bi_status)
++		dio->bio.bi_status = bio->bi_status;
++
++	if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
+ 		if (!dio->is_sync) {
+ 			struct kiocb *iocb = dio->iocb;
+ 			ssize_t ret;
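
The blkdev_bio_end_io() hunk above copies a child bio's error into the parent before the multi-bio/refcount decision; previously the copy lived on the "not the last completion" branch, so an error carried by the final completing fragment was silently dropped. The latch-error-first idiom in a standalone C11 sketch (types illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct parent_io {
	atomic_int refs;     /* outstanding fragments */
	atomic_int status;   /* 0 = ok; the first non-zero error wins */
	bool multi;          /* was the request split at all? */
};

/* called once per completing fragment; true means "finish the request" */
static bool fragment_done(struct parent_io *p, int err)
{
	int zero = 0;

	if (err)  /* record the error before deciding who completes */
		atomic_compare_exchange_strong(&p->status, &zero, err);

	return !p->multi || atomic_fetch_sub(&p->refs, 1) == 1;
}

atomic_compare_exchange_strong() keeps only the first error, matching the "don't overwrite an already-recorded status" behavior in the hunk.
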
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 8bf9cce11213..0eb333c62fe4 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -496,6 +496,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
++	/*
++	 * If the fs is mounted with nologreplay, which requires it to be
++	 * mounted in RO mode as well, we can not allow discard on free space
++	 * inside block groups, because log trees refer to extents that are not
++	 * pinned in a block group's free space cache (pinning the extents is
++	 * precisely the first phase of replaying a log tree).
++	 */
++	if (btrfs_test_opt(fs_info, NOLOGREPLAY))
++		return -EROFS;
++
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
+ 				dev_list) {
+diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
+index dc6140013ae8..61d22a56c0ba 100644
+--- a/fs/btrfs/props.c
++++ b/fs/btrfs/props.c
+@@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
+ 
+ static int prop_compression_validate(const char *value, size_t len)
+ {
+-	if (!strncmp("lzo", value, len))
++	if (!strncmp("lzo", value, 3))
+ 		return 0;
+-	else if (!strncmp("zlib", value, len))
++	else if (!strncmp("zlib", value, 4))
+ 		return 0;
+-	else if (!strncmp("zstd", value, len))
++	else if (!strncmp("zstd", value, 4))
+ 		return 0;
+ 
+ 	return -EINVAL;
+@@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode,
+ 		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
+ 	} else if (!strncmp("zlib", value, 4)) {
+ 		type = BTRFS_COMPRESS_ZLIB;
+-	} else if (!strncmp("zstd", value, len)) {
++	} else if (!strncmp("zstd", value, 4)) {
+ 		type = BTRFS_COMPRESS_ZSTD;
+ 		btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
+ 	} else {
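
The props.c hunks above stop bounding the comparison by the caller-supplied length: with strncmp("lzo", value, len), a short input such as "lz" (len == 2) compared equal to "lzo", because strncmp() inspects at most len bytes. Bounding by the literal's own length closes that hole. A standalone demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *value = "lz";      /* truncated user input */
	size_t len = strlen(value);

	/* buggy: compares only the first 2 bytes, so "lz" == "lzo" */
	printf("buggy: %d\n", strncmp("lzo", value, len) == 0);

	/* fixed: always compare the full literal */
	printf("fixed: %d\n", strncmp("lzo", value, 3) == 0);
	return 0;
}
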
+diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
+index 50fb0dee23e8..d35b8ec1c485 100644
+--- a/include/linux/bitrev.h
++++ b/include/linux/bitrev.h
+@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
+ 
+ #define __constant_bitrev32(x)	\
+ ({					\
+-	u32 __x = x;			\
+-	__x = (__x >> 16) | (__x << 16);	\
+-	__x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8);	\
+-	__x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);	\
+-	__x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);	\
+-	__x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);	\
+-	__x;								\
++	u32 ___x = x;			\
++	___x = (___x >> 16) | (___x << 16);	\
++	___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8);	\
++	___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);	\
++	___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);	\
++	___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);	\
++	___x;								\
+ })
+ 
+ #define __constant_bitrev16(x)	\
+ ({					\
+-	u16 __x = x;			\
+-	__x = (__x >> 8) | (__x << 8);	\
+-	__x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4);	\
+-	__x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2);	\
+-	__x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1);	\
+-	__x;								\
++	u16 ___x = x;			\
++	___x = (___x >> 8) | (___x << 8);	\
++	___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4);	\
++	___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2);	\
++	___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1);	\
++	___x;								\
+ })
+ 
+ #define __constant_bitrev8x4(x) \
+ ({			\
+-	u32 __x = x;	\
+-	__x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);	\
+-	__x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);	\
+-	__x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);	\
+-	__x;								\
++	u32 ___x = x;	\
++	___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);	\
++	___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);	\
++	___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);	\
++	___x;								\
+ })
+ 
+ #define __constant_bitrev8(x)	\
+ ({					\
+-	u8 __x = x;			\
+-	__x = (__x >> 4) | (__x << 4);	\
+-	__x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2);	\
+-	__x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1);	\
+-	__x;								\
++	u8 ___x = x;			\
++	___x = (___x >> 4) | (___x << 4);	\
++	___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2);	\
++	___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1);	\
++	___x;								\
+ })
+ 
+ #define bitrev32(x) \
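
The bitrev.h hunks above rename each macro's temporary from __x to ___x. Because the macro argument is expanded textually, a caller whose own variable is named __x used to produce `u32 __x = __x;` — the temporary initialized from itself. A standalone reproduction (uses GNU C statement expressions, as the kernel header does; build with -Winit-self to see the warning):

#include <stdio.h>

/* buggy: the temporary shares a name a caller might use */
#define REV8_BAD(x) ({ unsigned char __x = (x); \
		       (unsigned char)(__x >> 4 | __x << 4); })

/* fixed: one more underscore avoids the capture */
#define REV8_OK(x)  ({ unsigned char ___x = (x); \
		       (unsigned char)(___x >> 4 | ___x << 4); })

int main(void)
{
	unsigned char __x = 0x12;

	printf("bad: %x\n", REV8_BAD(__x)); /* reads an uninitialized temp */
	printf("ok:  %x\n", REV8_OK(__x));  /* prints 21 */
	return 0;
}
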
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index 652f602167df..cc6b6532eb56 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -559,7 +559,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
+ void __unlock_page_memcg(struct mem_cgroup *memcg);
+ void unlock_page_memcg(struct page *page);
+ 
+-/* idx can be of type enum memcg_stat_item or node_stat_item */
++/*
++ * idx can be of type enum memcg_stat_item or node_stat_item.
++ * Keep in sync with memcg_exact_page_state().
++ */
+ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+ 					     int idx)
+ {
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index bbcfe2e5fd91..e8b92dee5a72 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -776,6 +776,8 @@ struct mlx5_pagefault {
+ };
+ 
+ struct mlx5_td {
++	/* protects tirs list changes while tirs refresh */
++	struct mutex     list_lock;
+ 	struct list_head tirs_list;
+ 	u32              tdn;
+ };
+diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
+index b8d95564bd53..14edb795ab43 100644
+--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
++++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
+@@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
+ 	struct nf_conntrack_tuple tuple;
+ };
+ 
++enum grep_conntrack {
++	GRE_CT_UNREPLIED,
++	GRE_CT_REPLIED,
++	GRE_CT_MAX
++};
++
++struct netns_proto_gre {
++	struct nf_proto_net	nf;
++	rwlock_t		keymap_lock;
++	struct list_head	keymap_list;
++	unsigned int		gre_timeouts[GRE_CT_MAX];
++};
++
+ /* add new tuple->key_reply pair to keymap */
+ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
+ 			 struct nf_conntrack_tuple *t);
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 4a5a0eb7df51..f58e1ef76572 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -143,6 +143,9 @@ extern void * memscan(void *,int,__kernel_size_t);
+ #ifndef __HAVE_ARCH_MEMCMP
+ extern int memcmp(const void *,const void *,__kernel_size_t);
+ #endif
++#ifndef __HAVE_ARCH_BCMP
++extern int bcmp(const void *,const void *,__kernel_size_t);
++#endif
+ #ifndef __HAVE_ARCH_MEMCHR
+ extern void * memchr(const void *,int,__kernel_size_t);
+ #endif
+diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
+index fab02133a919..3dc70adfe5f5 100644
+--- a/include/linux/virtio_ring.h
++++ b/include/linux/virtio_ring.h
+@@ -63,7 +63,7 @@ struct virtqueue;
+ /*
+  * Creates a virtqueue and allocates the descriptor ring.  If
+  * may_reduce_num is set, then this may allocate a smaller ring than
+- * expected.  The caller should query virtqueue_get_ring_size to learn
++ * expected.  The caller should query virtqueue_get_vring_size to learn
+  * the actual size of the ring.
+  */
+ struct virtqueue *vring_create_virtqueue(unsigned int index,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 71d31e4d4391..cfc3dd5ff085 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -648,7 +648,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
+ 			     unsigned char __user *data, int optlen);
+ void ip_options_undo(struct ip_options *opt);
+ void ip_forward_options(struct sk_buff *skb);
+-int ip_options_rcv_srr(struct sk_buff *skb);
++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
+ 
+ /*
+  *	Functions provided by ip_sockglue.c
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 9b5fdc50519a..3f7b166262d7 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -57,6 +57,7 @@ struct net {
+ 						 */
+ 	spinlock_t		rules_mod_lock;
+ 
++	u32			hash_mix;
+ 	atomic64_t		cookie_gen;
+ 
+ 	struct list_head	list;		/* list of network namespaces */
+diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
+index 16a842456189..d9b665151f3d 100644
+--- a/include/net/netns/hash.h
++++ b/include/net/netns/hash.h
+@@ -2,16 +2,10 @@
+ #ifndef __NET_NS_HASH_H__
+ #define __NET_NS_HASH_H__
+ 
+-#include <asm/cache.h>
+-
+-struct net;
++#include <net/net_namespace.h>
+ 
+ static inline u32 net_hash_mix(const struct net *net)
+ {
+-#ifdef CONFIG_NET_NS
+-	return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
+-#else
+-	return 0;
+-#endif
++	return net->hash_mix;
+ }
+ #endif
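
This header change, together with the setup_net() hunk further below, replaces a hash mix derived from the struct net pointer with a random per-namespace value drawn once at namespace creation, so the placement of sockets and similar objects in per-namespace hash tables can no longer be predicted from a leaked or guessed kernel pointer. The scheme reduced to a standalone sketch (rand() stands in for get_random_bytes(); the mixing constant is illustrative):

#include <stdint.h>
#include <stdlib.h>

struct ns {
	uint32_t hash_mix;   /* random, chosen once at namespace creation */
};

static void ns_init(struct ns *n)
{
	n->hash_mix = (uint32_t)rand();  /* stand-in for get_random_bytes() */
}

/* every per-namespace hash folds the secret mix in */
static uint32_t ns_hash(const struct ns *n, uint32_t key)
{
	uint32_t h = key ^ n->hash_mix;

	h *= 0x9e3779b1u;                /* cheap avalanche; illustrative */
	return h ^ (h >> 16);
}
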
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index 811009ebacd4..379e89c706c9 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -1384,6 +1384,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
+ int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
+ {
+ 	data = data->parent_data;
++
++	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
++		return 0;
++
+ 	if (data->chip->irq_set_wake)
+ 		return data->chip->irq_set_wake(data, on);
+ 
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index ba454cba4069..8e009cee6517 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -554,6 +554,7 @@ int __init early_irq_init(void)
+ 		alloc_masks(&desc[i], node);
+ 		raw_spin_lock_init(&desc[i].lock);
+ 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
++		mutex_init(&desc[i].request_mutex);
+ 		desc_set_defaults(i, &desc[i], node, NULL, NULL);
+ 	}
+ 	return arch_early_irq_init();
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index f7c375d1e601..640094391169 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7437,10 +7437,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
+ 	if (cfs_rq->last_h_load_update == now)
+ 		return;
+ 
+-	cfs_rq->h_load_next = NULL;
++	WRITE_ONCE(cfs_rq->h_load_next, NULL);
+ 	for_each_sched_entity(se) {
+ 		cfs_rq = cfs_rq_of(se);
+-		cfs_rq->h_load_next = se;
++		WRITE_ONCE(cfs_rq->h_load_next, se);
+ 		if (cfs_rq->last_h_load_update == now)
+ 			break;
+ 	}
+@@ -7450,7 +7450,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
+ 		cfs_rq->last_h_load_update = now;
+ 	}
+ 
+-	while ((se = cfs_rq->h_load_next) != NULL) {
++	while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
+ 		load = cfs_rq->h_load;
+ 		load = div64_ul(load * se->avg.load_avg,
+ 			cfs_rq_load_avg(cfs_rq) + 1);
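
The fair.c hunks above wrap the lockless publication and consumption of cfs_rq->h_load_next in WRITE_ONCE()/READ_ONCE(), which stops the compiler from tearing, caching or re-reading the pointer while another CPU walks the chain. A userspace analogue using C11 relaxed atomics, which provide the same single-copy-atomic access:

#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *_Atomic next;   /* published pointer */
	long load;
};

/* writer: publish each link exactly once */
static void publish(struct node *parent, struct node *child)
{
	atomic_store_explicit(&parent->next, child, memory_order_relaxed);
}

/* reader: take one stable snapshot of the pointer per iteration */
static long walk(struct node *n)
{
	long sum = 0;
	struct node *p;

	for (p = n; p != NULL;
	     p = atomic_load_explicit(&p->next, memory_order_relaxed))
		sum += p->load;
	return sum;
}
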
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index fa5de5e8de61..fdeb9bc6affb 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -597,7 +597,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
+ {
+ 	struct alarm *alarm = &timr->it.alarm.alarmtimer;
+ 
+-	return ktime_sub(now, alarm->node.expires);
++	return ktime_sub(alarm->node.expires, now);
+ }
+ 
+ /**
+diff --git a/lib/string.c b/lib/string.c
+index 2c0900a5d51a..72125fd5b4a6 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -865,6 +865,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
+ EXPORT_SYMBOL(memcmp);
+ #endif
+ 
++#ifndef __HAVE_ARCH_BCMP
++/**
++ * bcmp - returns 0 if and only if the buffers have identical contents.
++ * @a: pointer to first buffer.
++ * @b: pointer to second buffer.
++ * @len: size of buffers.
++ *
++ * The sign or magnitude of a non-zero return value has no particular
++ * meaning, and architectures may implement their own more efficient bcmp(). So
++ * while this particular implementation is a simple (tail) call to memcmp, do
++ * not rely on anything but whether the return value is zero or non-zero.
++ */
++#undef bcmp
++int bcmp(const void *a, const void *b, size_t len)
++{
++	return memcmp(a, b, len);
++}
++EXPORT_SYMBOL(bcmp);
++#endif
++
+ #ifndef __HAVE_ARCH_MEMSCAN
+ /**
+  * memscan - Find a character in an area of memory.
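
The string.h and string.c hunks above add a generic bcmp(): recent compilers (clang in particular) may lower an equality-only memcmp() call to a bcmp() call, so the kernel must provide the symbol, and as the kernel-doc above notes, only the zero/non-zero result is meaningful. A standalone illustration of that contract:

#include <stdio.h>
#include <string.h>

/* the kernel's fallback is exactly this tail call */
static int my_bcmp(const void *a, const void *b, size_t len)
{
	return memcmp(a, b, len);
}

int main(void)
{
	char x[] = "abc", y[] = "abd";

	/* test only zero vs non-zero; sign and magnitude are unspecified */
	printf("equal: %d\n", my_bcmp(x, y, 3) == 0);
	return 0;
}
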
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index d2cd70cfaa90..7d08e89361ee 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -734,6 +734,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+ 	spinlock_t *ptl;
+ 
+ 	ptl = pmd_lock(mm, pmd);
++	if (!pmd_none(*pmd)) {
++		if (write) {
++			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
++				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
++				goto out_unlock;
++			}
++			entry = pmd_mkyoung(*pmd);
++			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
++			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
++				update_mmu_cache_pmd(vma, addr, pmd);
++		}
++
++		goto out_unlock;
++	}
++
+ 	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
+ 	if (pfn_t_devmap(pfn))
+ 		entry = pmd_mkdevmap(entry);
+@@ -745,11 +760,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+ 	if (pgtable) {
+ 		pgtable_trans_huge_deposit(mm, pmd, pgtable);
+ 		mm_inc_nr_ptes(mm);
++		pgtable = NULL;
+ 	}
+ 
+ 	set_pmd_at(mm, addr, pmd, entry);
+ 	update_mmu_cache_pmd(vma, addr, pmd);
++
++out_unlock:
+ 	spin_unlock(ptl);
++	if (pgtable)
++		pte_free(mm, pgtable);
+ }
+ 
+ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+@@ -800,6 +820,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+ 	spinlock_t *ptl;
+ 
+ 	ptl = pud_lock(mm, pud);
++	if (!pud_none(*pud)) {
++		if (write) {
++			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
++				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
++				goto out_unlock;
++			}
++			entry = pud_mkyoung(*pud);
++			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
++			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
++				update_mmu_cache_pud(vma, addr, pud);
++		}
++		goto out_unlock;
++	}
++
+ 	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
+ 	if (pfn_t_devmap(pfn))
+ 		entry = pud_mkdevmap(entry);
+@@ -809,6 +843,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+ 	}
+ 	set_pud_at(mm, addr, pud, entry);
+ 	update_mmu_cache_pud(vma, addr, pud);
++
++out_unlock:
+ 	spin_unlock(ptl);
+ }
+ 
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 7c712c4565e6..7e7cc0cd89fe 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3897,6 +3897,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
+ 	return &memcg->cgwb_domain;
+ }
+ 
++/*
++ * idx can be of type enum memcg_stat_item or node_stat_item.
++ * Keep in sync with memcg_exact_page().
++ */
++static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
++{
++	long x = atomic_long_read(&memcg->stat[idx]);
++	int cpu;
++
++	for_each_online_cpu(cpu)
++		x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
++	if (x < 0)
++		x = 0;
++	return x;
++}
++
+ /**
+  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
+  * @wb: bdi_writeback in question
+@@ -3922,10 +3938,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
+ 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
+ 	struct mem_cgroup *parent;
+ 
+-	*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
++	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
+ 
+ 	/* this should eventually include NR_UNSTABLE_NFS */
+-	*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
++	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
+ 	*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
+ 						     (1 << LRU_ACTIVE_FILE));
+ 	*pheadroom = PAGE_COUNTER_MAX;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 5c8c0a572ee9..d47554307a6d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4959,8 +4959,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
+ 	if (pt_prev->list_func != NULL)
+ 		pt_prev->list_func(head, pt_prev, orig_dev);
+ 	else
+-		list_for_each_entry_safe(skb, next, head, list)
++		list_for_each_entry_safe(skb, next, head, list) {
++			skb_list_del_init(skb);
+ 			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
++		}
+ }
+ 
+ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index aeabc4831fca..7cc97f43f138 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1863,11 +1863,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
+ 	WARN_ON_ONCE(!ret);
+ 
+ 	gstrings.len = ret;
+-	data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
+-	if (gstrings.len && !data)
+-		return -ENOMEM;
+ 
+-	__ethtool_get_strings(dev, gstrings.string_set, data);
++	if (gstrings.len) {
++		data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
++		if (!data)
++			return -ENOMEM;
++
++		__ethtool_get_strings(dev, gstrings.string_set, data);
++	} else {
++		data = NULL;
++	}
+ 
+ 	ret = -EFAULT;
+ 	if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+@@ -1963,11 +1968,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
+ 		return -EFAULT;
+ 
+ 	stats.n_stats = n_stats;
+-	data = vzalloc(array_size(n_stats, sizeof(u64)));
+-	if (n_stats && !data)
+-		return -ENOMEM;
+ 
+-	ops->get_ethtool_stats(dev, &stats, data);
++	if (n_stats) {
++		data = vzalloc(array_size(n_stats, sizeof(u64)));
++		if (!data)
++			return -ENOMEM;
++		ops->get_ethtool_stats(dev, &stats, data);
++	} else {
++		data = NULL;
++	}
+ 
+ 	ret = -EFAULT;
+ 	if (copy_to_user(useraddr, &stats, sizeof(stats)))
+@@ -2007,16 +2016,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
+ 		return -EFAULT;
+ 
+ 	stats.n_stats = n_stats;
+-	data = vzalloc(array_size(n_stats, sizeof(u64)));
+-	if (n_stats && !data)
+-		return -ENOMEM;
+ 
+-	if (dev->phydev && !ops->get_ethtool_phy_stats) {
+-		ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
+-		if (ret < 0)
+-			return ret;
++	if (n_stats) {
++		data = vzalloc(array_size(n_stats, sizeof(u64)));
++		if (!data)
++			return -ENOMEM;
++
++		if (dev->phydev && !ops->get_ethtool_phy_stats) {
++			ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
++			if (ret < 0)
++				goto out;
++		} else {
++			ops->get_ethtool_phy_stats(dev, &stats, data);
++		}
+ 	} else {
+-		ops->get_ethtool_phy_stats(dev, &stats, data);
++		data = NULL;
+ 	}
+ 
+ 	ret = -EFAULT;
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 670c84b1bfc2..7320f0844a50 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
+ 
+ 	refcount_set(&net->count, 1);
+ 	refcount_set(&net->passive, 1);
++	get_random_bytes(&net->hash_mix, sizeof(u32));
+ 	net->dev_base_seq = 1;
+ 	net->user_ns = user_ns;
+ 	idr_init(&net->netns_ids);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 8656b1e20d35..ceee28e184af 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3832,7 +3832,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
+ 	unsigned int delta_truesize;
+ 	struct sk_buff *lp;
+ 
+-	if (unlikely(p->len + len >= 65536))
++	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
+ 		return -E2BIG;
+ 
+ 	lp = NAPI_GRO_CB(p)->last;
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index f199945f6e4a..3c734832bb7c 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -260,7 +260,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ 	struct net *net = dev_net(skb->dev);
+ 	struct metadata_dst *tun_dst = NULL;
+ 	struct erspan_base_hdr *ershdr;
+-	struct erspan_metadata *pkt_md;
+ 	struct ip_tunnel_net *itn;
+ 	struct ip_tunnel *tunnel;
+ 	const struct iphdr *iph;
+@@ -283,9 +282,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ 		if (unlikely(!pskb_may_pull(skb, len)))
+ 			return PACKET_REJECT;
+ 
+-		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
+-		pkt_md = (struct erspan_metadata *)(ershdr + 1);
+-
+ 		if (__iptunnel_pull_header(skb,
+ 					   len,
+ 					   htons(ETH_P_TEB),
+@@ -293,8 +289,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ 			goto drop;
+ 
+ 		if (tunnel->collect_md) {
++			struct erspan_metadata *pkt_md, *md;
+ 			struct ip_tunnel_info *info;
+-			struct erspan_metadata *md;
++			unsigned char *gh;
+ 			__be64 tun_id;
+ 			__be16 flags;
+ 
+@@ -307,6 +304,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+ 			if (!tun_dst)
+ 				return PACKET_REJECT;
+ 
++			/* skb can be uncloned in __iptunnel_pull_header, so
++			 * old pkt_md is no longer valid and we need to reset
++			 * it
++			 */
++			gh = skb_network_header(skb) +
++			     skb_network_header_len(skb);
++			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
++							    sizeof(*ershdr));
+ 			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
+ 			md->version = ver;
+ 			md2 = &md->u.md2;
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index bd8ef4f87c79..c3a0683e83df 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -258,11 +258,10 @@ int ip_local_deliver(struct sk_buff *skb)
+ 		       ip_local_deliver_finish);
+ }
+ 
+-static inline bool ip_rcv_options(struct sk_buff *skb)
++static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct ip_options *opt;
+ 	const struct iphdr *iph;
+-	struct net_device *dev = skb->dev;
+ 
+ 	/* It looks as overkill, because not all
+ 	   IP options require packet mangling.
+@@ -298,7 +297,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
+ 			}
+ 		}
+ 
+-		if (ip_options_rcv_srr(skb))
++		if (ip_options_rcv_srr(skb, dev))
+ 			goto drop;
+ 	}
+ 
+@@ -354,7 +353,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
+ 	}
+ #endif
+ 
+-	if (iph->ihl > 5 && ip_rcv_options(skb))
++	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
+ 		goto drop;
+ 
+ 	rt = skb_rtable(skb);
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index 32a35043c9f5..3db31bb9df50 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
+ 	}
+ }
+ 
+-int ip_options_rcv_srr(struct sk_buff *skb)
++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct ip_options *opt = &(IPCB(skb)->opt);
+ 	int srrspace, srrptr;
+@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
+ 
+ 		orefdst = skb->_skb_refdst;
+ 		skb_dst_set(skb, NULL);
+-		err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
++		err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
+ 		rt2 = skb_rtable(skb);
+ 		if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
+ 			skb_dst_drop(skb);
+diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
+index ca61e2a659e7..5205c5a5d8d5 100644
+--- a/net/ipv4/tcp_dctcp.c
++++ b/net/ipv4/tcp_dctcp.c
+@@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
+ module_param(dctcp_alpha_on_init, uint, 0644);
+ MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
+ 
+-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
+-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
+-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
+-		 "parameter for clamping alpha on loss");
+-
+ static struct tcp_congestion_ops dctcp_reno;
+ 
+ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
+@@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
+ 	}
+ }
+ 
+-static void dctcp_state(struct sock *sk, u8 new_state)
++static void dctcp_react_to_loss(struct sock *sk)
+ {
+-	if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
+-		struct dctcp *ca = inet_csk_ca(sk);
++	struct dctcp *ca = inet_csk_ca(sk);
++	struct tcp_sock *tp = tcp_sk(sk);
+ 
+-		/* If this extension is enabled, we clamp dctcp_alpha to
+-		 * max on packet loss; the motivation is that dctcp_alpha
+-		 * is an indicator to the extend of congestion and packet
+-		 * loss is an indicator of extreme congestion; setting
+-		 * this in practice turned out to be beneficial, and
+-		 * effectively assumes total congestion which reduces the
+-		 * window by half.
+-		 */
+-		ca->dctcp_alpha = DCTCP_MAX_ALPHA;
+-	}
++	ca->loss_cwnd = tp->snd_cwnd;
++	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
++}
++
++static void dctcp_state(struct sock *sk, u8 new_state)
++{
++	if (new_state == TCP_CA_Recovery &&
++	    new_state != inet_csk(sk)->icsk_ca_state)
++		dctcp_react_to_loss(sk);
++	/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
++	 * one loss-adjustment per RTT.
++	 */
+ }
+ 
+ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+@@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+ 	case CA_EVENT_ECN_NO_CE:
+ 		dctcp_ce_state_1_to_0(sk);
+ 		break;
++	case CA_EVENT_LOSS:
++		dctcp_react_to_loss(sk);
++		break;
+ 	default:
+ 		/* Don't care for the rest. */
+ 		break;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 30fdf891940b..11101cf8693b 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2490,7 +2490,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
+ {
+ 	int cpu;
+ 
+-	module_put(net->ipv4.tcp_congestion_control->owner);
++	if (net->ipv4.tcp_congestion_control)
++		module_put(net->ipv4.tcp_congestion_control->owner);
+ 
+ 	for_each_possible_cpu(cpu)
+ 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index faed98dab913..c4a7db62658e 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -540,11 +540,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
+ 	return PACKET_REJECT;
+ }
+ 
+-static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
+-			 struct tnl_ptk_info *tpi)
++static int ip6erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
++			 int gre_hdr_len)
+ {
+ 	struct erspan_base_hdr *ershdr;
+-	struct erspan_metadata *pkt_md;
+ 	const struct ipv6hdr *ipv6h;
+ 	struct erspan_md2 *md2;
+ 	struct ip6_tnl *tunnel;
+@@ -563,18 +562,16 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
+ 		if (unlikely(!pskb_may_pull(skb, len)))
+ 			return PACKET_REJECT;
+ 
+-		ershdr = (struct erspan_base_hdr *)skb->data;
+-		pkt_md = (struct erspan_metadata *)(ershdr + 1);
+-
+ 		if (__iptunnel_pull_header(skb, len,
+ 					   htons(ETH_P_TEB),
+ 					   false, false) < 0)
+ 			return PACKET_REJECT;
+ 
+ 		if (tunnel->parms.collect_md) {
++			struct erspan_metadata *pkt_md, *md;
+ 			struct metadata_dst *tun_dst;
+ 			struct ip_tunnel_info *info;
+-			struct erspan_metadata *md;
++			unsigned char *gh;
+ 			__be64 tun_id;
+ 			__be16 flags;
+ 
+@@ -587,6 +584,14 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
+ 			if (!tun_dst)
+ 				return PACKET_REJECT;
+ 
++			/* skb can be uncloned in __iptunnel_pull_header, so
++			 * old pkt_md is no longer valid and we need to reset
++			 * it
++			 */
++			gh = skb_network_header(skb) +
++			     skb_network_header_len(skb);
++			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
++							    sizeof(*ershdr));
+ 			info = &tun_dst->u.tun_info;
+ 			md = ip_tunnel_info_opts(info);
+ 			md->version = ver;
+@@ -623,7 +628,7 @@ static int gre_rcv(struct sk_buff *skb)
+ 
+ 	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
+ 		     tpi.proto == htons(ETH_P_ERSPAN2))) {
+-		if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
++		if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
+ 			return 0;
+ 		goto out;
+ 	}
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 0bb87f3a10c7..eed9231c90ad 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -587,7 +587,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ 				inet6_sk(skb->sk) : NULL;
+ 	struct ipv6hdr *tmp_hdr;
+ 	struct frag_hdr *fh;
+-	unsigned int mtu, hlen, left, len;
++	unsigned int mtu, hlen, left, len, nexthdr_offset;
+ 	int hroom, troom;
+ 	__be32 frag_id;
+ 	int ptr, offset = 0, err = 0;
+@@ -598,6 +598,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ 		goto fail;
+ 	hlen = err;
+ 	nexthdr = *prevhdr;
++	nexthdr_offset = prevhdr - skb_network_header(skb);
+ 
+ 	mtu = ip6_skb_dst_mtu(skb);
+ 
+@@ -632,6 +633,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ 	    (err = skb_checksum_help(skb)))
+ 		goto fail;
+ 
++	prevhdr = skb_network_header(skb) + nexthdr_offset;
+ 	hroom = LL_RESERVED_SPACE(rt->dst.dev);
+ 	if (skb_has_frag_list(skb)) {
+ 		unsigned int first_len = skb_pagelen(skb);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 0c6403cf8b52..ade1390c6348 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
+ 					   eiph->daddr, eiph->saddr, 0, 0,
+ 					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
+-		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
++		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
+ 			if (!IS_ERR(rt))
+ 				ip_rt_put(rt);
+ 			goto out;
+@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	} else {
+ 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
+ 				   skb2->dev) ||
+-		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
++		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
+ 			goto out;
+ 	}
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index de9aa5cb295c..8f6cf8e6b5c1 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
+ 		    !net_eq(tunnel->net, dev_net(tunnel->dev))))
+ 			goto out;
+ 
++		/* skb can be uncloned in iptunnel_pull_header, so
++		 * old iph is no longer valid
++		 */
++		iph = (const struct iphdr *)skb_mac_header(skb);
+ 		err = IP_ECN_decapsulate(iph, skb);
+ 		if (unlikely(err)) {
+ 			if (log_ecn_error)
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 571d824e4e24..b919db02c7f9 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
+ 	if (err)
+ 		goto fail;
+ 
+-	err = sock_register(&kcm_family_ops);
+-	if (err)
+-		goto sock_register_fail;
+-
+ 	err = register_pernet_device(&kcm_net_ops);
+ 	if (err)
+ 		goto net_ops_fail;
+ 
++	err = sock_register(&kcm_family_ops);
++	if (err)
++		goto sock_register_fail;
++
+ 	err = kcm_proc_init();
+ 	if (err)
+ 		goto proc_init_fail;
+@@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
+ 	return 0;
+ 
+ proc_init_fail:
+-	unregister_pernet_device(&kcm_net_ops);
+-
+-net_ops_fail:
+ 	sock_unregister(PF_KCM);
+ 
+ sock_register_fail:
++	unregister_pernet_device(&kcm_net_ops);
++
++net_ops_fail:
+ 	proto_unregister(&kcm_proto);
+ 
+ fail:
+@@ -2090,8 +2090,8 @@ fail:
+ static void __exit kcm_exit(void)
+ {
+ 	kcm_proc_exit();
+-	unregister_pernet_device(&kcm_net_ops);
+ 	sock_unregister(PF_KCM);
++	unregister_pernet_device(&kcm_net_ops);
+ 	proto_unregister(&kcm_proto);
+ 	destroy_workqueue(kcm_wq);
+ 
+diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
+index 650eb4fba2c5..841c472aae1c 100644
+--- a/net/netfilter/nf_conntrack_proto_gre.c
++++ b/net/netfilter/nf_conntrack_proto_gre.c
+@@ -43,24 +43,12 @@
+ #include <linux/netfilter/nf_conntrack_proto_gre.h>
+ #include <linux/netfilter/nf_conntrack_pptp.h>
+ 
+-enum grep_conntrack {
+-	GRE_CT_UNREPLIED,
+-	GRE_CT_REPLIED,
+-	GRE_CT_MAX
+-};
+-
+ static const unsigned int gre_timeouts[GRE_CT_MAX] = {
+ 	[GRE_CT_UNREPLIED]	= 30*HZ,
+ 	[GRE_CT_REPLIED]	= 180*HZ,
+ };
+ 
+ static unsigned int proto_gre_net_id __read_mostly;
+-struct netns_proto_gre {
+-	struct nf_proto_net	nf;
+-	rwlock_t		keymap_lock;
+-	struct list_head	keymap_list;
+-	unsigned int		gre_timeouts[GRE_CT_MAX];
+-};
+ 
+ static inline struct netns_proto_gre *gre_pernet(struct net *net)
+ {
+@@ -408,6 +396,8 @@ static int __init nf_ct_proto_gre_init(void)
+ {
+ 	int ret;
+ 
++	BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
++
+ 	ret = register_pernet_subsys(&proto_gre_net_ops);
+ 	if (ret < 0)
+ 		goto out_pernet;
+diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
+index a30f8ba4b89a..70a7382b9787 100644
+--- a/net/netfilter/nfnetlink_cttimeout.c
++++ b/net/netfilter/nfnetlink_cttimeout.c
+@@ -392,7 +392,8 @@ err:
+ static int
+ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
+ 			    u32 seq, u32 type, int event,
+-			    const struct nf_conntrack_l4proto *l4proto)
++			    const struct nf_conntrack_l4proto *l4proto,
++			    const unsigned int *timeouts)
+ {
+ 	struct nlmsghdr *nlh;
+ 	struct nfgenmsg *nfmsg;
+@@ -421,7 +422,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
+ 		if (!nest_parms)
+ 			goto nla_put_failure;
+ 
+-		ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
++		ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
+ 		if (ret < 0)
+ 			goto nla_put_failure;
+ 
+@@ -444,6 +445,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
+ 				 struct netlink_ext_ack *extack)
+ {
+ 	const struct nf_conntrack_l4proto *l4proto;
++	unsigned int *timeouts = NULL;
+ 	struct sk_buff *skb2;
+ 	int ret, err;
+ 	__u16 l3num;
+@@ -456,12 +458,55 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
+ 	l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+ 	l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+ 
+-	/* This protocol is not supported, skip. */
+-	if (l4proto->l4proto != l4num) {
+-		err = -EOPNOTSUPP;
++	err = -EOPNOTSUPP;
++	if (l4proto->l4proto != l4num)
+ 		goto err;
++
++	switch (l4proto->l4proto) {
++	case IPPROTO_ICMP:
++		timeouts = &net->ct.nf_ct_proto.icmp.timeout;
++		break;
++	case IPPROTO_TCP:
++		timeouts = net->ct.nf_ct_proto.tcp.timeouts;
++		break;
++	case IPPROTO_UDP: /* fallthrough */
++	case IPPROTO_UDPLITE:
++		timeouts = net->ct.nf_ct_proto.udp.timeouts;
++		break;
++	case IPPROTO_DCCP:
++#ifdef CONFIG_NF_CT_PROTO_DCCP
++		timeouts = net->ct.nf_ct_proto.dccp.dccp_timeout;
++#endif
++		break;
++	case IPPROTO_ICMPV6:
++		timeouts = &net->ct.nf_ct_proto.icmpv6.timeout;
++		break;
++	case IPPROTO_SCTP:
++#ifdef CONFIG_NF_CT_PROTO_SCTP
++		timeouts = net->ct.nf_ct_proto.sctp.timeouts;
++#endif
++		break;
++	case IPPROTO_GRE:
++#ifdef CONFIG_NF_CT_PROTO_GRE
++		if (l4proto->net_id) {
++			struct netns_proto_gre *net_gre;
++
++			net_gre = net_generic(net, *l4proto->net_id);
++			timeouts = net_gre->gre_timeouts;
++		}
++#endif
++		break;
++	case 255:
++		timeouts = &net->ct.nf_ct_proto.generic.timeout;
++		break;
++	default:
++		WARN_ONCE(1, "Missing timeouts for proto %d", l4proto->l4proto);
++		break;
+ 	}
+ 
++	if (!timeouts)
++		goto err;
++
+ 	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ 	if (skb2 == NULL) {
+ 		err = -ENOMEM;
+@@ -472,7 +517,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
+ 					  nlh->nlmsg_seq,
+ 					  NFNL_MSG_TYPE(nlh->nlmsg_type),
+ 					  IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+-					  l4proto);
++					  l4proto, timeouts);
+ 	if (ret <= 0) {
+ 		kfree_skb(skb2);
+ 		err = -ENOMEM;
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index c7b6010b2c09..eab5e8eaddaa 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
+ 
+ 	struct sw_flow_actions *acts;
+ 	int new_acts_size;
+-	int req_size = NLA_ALIGN(attr_len);
++	size_t req_size = NLA_ALIGN(attr_len);
+ 	int next_offset = offsetof(struct sw_flow_actions, actions) +
+ 					(*sfa)->actions_len;
+ 
+ 	if (req_size <= (ksize(*sfa) - next_offset))
+ 		goto out;
+ 
+-	new_acts_size = ksize(*sfa) * 2;
++	new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
+ 
+ 	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+ 		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index b9bbcf3d6c63..18bb522df282 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -600,7 +600,7 @@ static void rds_tcp_kill_sock(struct net *net)
+ 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+ 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ 
+-		if (net != c_net || !tc->t_sock)
++		if (net != c_net)
+ 			continue;
+ 		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
+ 			list_move_tail(&tc->t_tcp_node, &tmp_list);
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index 6b67aa13d2dd..c7f5d630d97c 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -43,8 +43,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ 	struct tc_action_net *tn = net_generic(net, sample_net_id);
+ 	struct nlattr *tb[TCA_SAMPLE_MAX + 1];
+ 	struct psample_group *psample_group;
++	u32 psample_group_num, rate;
+ 	struct tc_sample *parm;
+-	u32 psample_group_num;
+ 	struct tcf_sample *s;
+ 	bool exists = false;
+ 	int ret, err;
+@@ -80,6 +80,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ 		return -EEXIST;
+ 	}
+ 
++	rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
++	if (!rate) {
++		NL_SET_ERR_MSG(extack, "invalid sample rate");
++		tcf_idr_release(*a, bind);
++		return -EINVAL;
++	}
+ 	psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
+ 	psample_group = psample_group_get(net, psample_group_num);
+ 	if (!psample_group) {
+@@ -91,7 +97,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ 
+ 	spin_lock_bh(&s->tcf_lock);
+ 	s->tcf_action = parm->action;
+-	s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
++	s->rate = rate;
+ 	s->psample_group_num = psample_group_num;
+ 	RCU_INIT_POINTER(s->psample_group, psample_group);
+ 
+diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
+index 856fa79d4ffd..621bc1d5b057 100644
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -126,6 +126,11 @@ static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
+ 
+ static void *mall_get(struct tcf_proto *tp, u32 handle)
+ {
++	struct cls_mall_head *head = rtnl_dereference(tp->root);
++
++	if (head && head->handle == handle)
++		return head;
++
+ 	return NULL;
+ }
+ 
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 1c9f079e8a50..d97b2b4b7a8b 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -600,6 +600,7 @@ out:
+ static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
+ {
+ 	/* No address mapping for V4 sockets */
++	memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+ 	return sizeof(struct sockaddr_in);
+ }
+ 
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index 90c9a8ac7adb..0b31f4f1f92c 100755
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -81,7 +81,7 @@ else
+ 	cp System.map "$tmpdir/boot/System.map-$version"
+ 	cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
+ fi
+-cp "$($MAKE -s image_name)" "$tmpdir/$installed_image_path"
++cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
+ 
+ if grep -q "^CONFIG_OF=y" $KCONFIG_CONFIG ; then
+ 	# Only some architectures with OF support have this target
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 92e6524a3a9d..b55cb96d1fed 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
+ 
+ 	/* fill the info fields */
+ 	if (client_info->name[0])
+-		strlcpy(client->name, client_info->name, sizeof(client->name));
++		strscpy(client->name, client_info->name, sizeof(client->name));
+ 
+ 	client->filter = client_info->filter;
+ 	client->event_lost = client_info->event_lost;
+@@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+ 	/* set queue name */
+ 	if (!info->name[0])
+ 		snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
+-	strlcpy(q->name, info->name, sizeof(q->name));
++	strscpy(q->name, info->name, sizeof(q->name));
+ 	snd_use_lock_free(&q->use_lock);
+ 
+ 	return 0;
+@@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
+ 		queuefree(q);
+ 		return -EPERM;
+ 	}
+-	strlcpy(q->name, info->name, sizeof(q->name));
++	strscpy(q->name, info->name, sizeof(q->name));
+ 	queuefree(q);
+ 
+ 	return 0;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 7572b8cc7127..9bc8a7cb40ea 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2272,6 +2272,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
+ 	SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
+ 	/* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
+ 	SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
++	/* https://bugs.launchpad.net/bugs/1821663 */
++	SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0),
+ 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
+ 	SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
+ 	/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+@@ -2280,6 +2282,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
+ 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+ 	SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
++	/* https://bugs.launchpad.net/bugs/1821663 */
++	SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
+ 	{}
+ };
+ #endif /* CONFIG_PM */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 4c6321ec844d..b9d832bde23e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1864,8 +1864,8 @@ enum {
+ 	ALC887_FIXUP_BASS_CHMAP,
+ 	ALC1220_FIXUP_GB_DUAL_CODECS,
+ 	ALC1220_FIXUP_CLEVO_P950,
+-	ALC1220_FIXUP_SYSTEM76_ORYP5,
+-	ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
++	ALC1220_FIXUP_CLEVO_PB51ED,
++	ALC1220_FIXUP_CLEVO_PB51ED_PINS,
+ };
+ 
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2070,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
+ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
+ 				const struct hda_fixup *fix, int action);
+ 
+-static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
++static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
+ 				     const struct hda_fixup *fix,
+ 				     int action)
+ {
+@@ -2322,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc1220_fixup_clevo_p950,
+ 	},
+-	[ALC1220_FIXUP_SYSTEM76_ORYP5] = {
++	[ALC1220_FIXUP_CLEVO_PB51ED] = {
+ 		.type = HDA_FIXUP_FUNC,
+-		.v.func = alc1220_fixup_system76_oryp5,
++		.v.func = alc1220_fixup_clevo_pb51ed,
+ 	},
+-	[ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
++	[ALC1220_FIXUP_CLEVO_PB51ED_PINS] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+ 			{ 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ 			{}
+ 		},
+ 		.chained = true,
+-		.chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
++		.chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
+ 	},
+ };
+ 
+@@ -2411,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+-	SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
+-	SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
++	SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
+@@ -5594,6 +5595,7 @@ enum {
+ 	ALC233_FIXUP_ASUS_MIC_NO_PRESENCE,
+ 	ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
+ 	ALC233_FIXUP_LENOVO_MULTI_CODECS,
++	ALC233_FIXUP_ACER_HEADSET_MIC,
+ 	ALC294_FIXUP_LENOVO_MIC_LOCATION,
+ 	ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
+ 	ALC700_FIXUP_INTEL_REFERENCE,
+@@ -6401,6 +6403,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc233_alc662_fixup_lenovo_dual_codecs,
+ 	},
++	[ALC233_FIXUP_ACER_HEADSET_MIC] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE
++	},
+ 	[ALC294_FIXUP_LENOVO_MIC_LOCATION] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -6644,6 +6656,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+ 	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
+diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
+index 4daefa5b150a..38fd32ab443c 100644
+--- a/sound/soc/fsl/fsl_esai.c
++++ b/sound/soc/fsl/fsl_esai.c
+@@ -54,6 +54,8 @@ struct fsl_esai {
+ 	u32 fifo_depth;
+ 	u32 slot_width;
+ 	u32 slots;
++	u32 tx_mask;
++	u32 rx_mask;
+ 	u32 hck_rate[2];
+ 	u32 sck_rate[2];
+ 	bool hck_dir[2];
+@@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
+ 	regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
+ 			   ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
+ 
+-	regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
+-			   ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
+-	regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
+-			   ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
+-
+ 	regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
+ 			   ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
+ 
+-	regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
+-			   ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
+-	regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
+-			   ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
+-
+ 	esai_priv->slot_width = slot_width;
+ 	esai_priv->slots = slots;
++	esai_priv->tx_mask = tx_mask;
++	esai_priv->rx_mask = rx_mask;
+ 
+ 	return 0;
+ }
+@@ -596,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
+ 	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ 	u8 i, channels = substream->runtime->channels;
+ 	u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
++	u32 mask;
+ 
+ 	switch (cmd) {
+ 	case SNDRV_PCM_TRIGGER_START:
+@@ -608,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
+ 		for (i = 0; tx && i < channels; i++)
+ 			regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
+ 
++		/*
++		 * When TE/RE is set at the end of the enablement flow, a
++		 * channel swap issue occurs in the multi data line case.
++		 * To work around this issue, switch the bit enablement
++		 * sequence to the one below:
++		 * 1) clear xSMB & xSMA: done in probe and in the
++		 *                       stop state.
++		 * 2) set TE/RE
++		 * 3) set xSMB
++		 * 4) set xSMA: xSMA is the last one in this flow, and
++		 *              setting it triggers the ESAI to start.
++		 */
+ 		regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
+ 				   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
+ 				   tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
++		mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
++
++		regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
++				   ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
++		regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
++				   ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
++
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ 		regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
+ 				   tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
++		regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
++				   ESAI_xSMA_xS_MASK, 0);
++		regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
++				   ESAI_xSMB_xS_MASK, 0);
+ 
+ 		/* Disable and reset FIFO */
+ 		regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
+@@ -906,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	esai_priv->tx_mask = 0xFFFFFFFF;
++	esai_priv->rx_mask = 0xFFFFFFFF;
++
++	/* Clear the TSMA, TSMB, RSMA, RSMB */
++	regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
++	regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
++	regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
++	regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
++
+ 	ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
+ 					      &fsl_esai_dai, 1);
+ 	if (ret) {
+diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+index e662400873ec..6868e71e3a3f 100644
+--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+@@ -711,9 +711,17 @@ static int sst_soc_probe(struct snd_soc_component *component)
+ 	return sst_dsp_init_v2_dpcm(component);
+ }
+ 
++static void sst_soc_remove(struct snd_soc_component *component)
++{
++	struct sst_data *drv = dev_get_drvdata(component->dev);
++
++	drv->soc_card = NULL;
++}
++
+ static const struct snd_soc_component_driver sst_soc_platform_drv  = {
+ 	.name		= DRV_NAME,
+ 	.probe		= sst_soc_probe,
++	.remove		= sst_soc_remove,
+ 	.ops		= &sst_platform_ops,
+ 	.compr_ops	= &sst_platform_compr_ops,
+ 	.pcm_new	= sst_pcm_new,
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
+index 3aca33c00039..618def9bdf0e 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
+@@ -143,6 +143,30 @@
+             "$TC actions flush action sample"
+         ]
+     },
++    {
++        "id": "7571",
++        "name": "Add sample action with invalid rate",
++        "category": [
++            "actions",
++            "sample"
++        ],
++        "setup": [
++            [
++                "$TC actions flush action sample",
++                0,
++                1,
++                255
++            ]
++        ],
++        "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
++        "expExitCode": "255",
++        "verifyCmd": "$TC actions get action sample index 2",
++        "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
++        "matchCount": "0",
++        "teardown": [
++            "$TC actions flush action sample"
++        ]
++    },
+     {
+         "id": "b6d4",
+         "name": "Add sample action with mandatory arguments and invalid control action",

