public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
From: "Alice Ferrazzi" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Thu, 26 Jan 2017 08:51:36 +0000 (UTC)	[thread overview]
Message-ID: <1485420579.94c945baf75ef5a35c3c220ddda71d0060b72aa6.alicef@gentoo> (raw)

commit:     94c945baf75ef5a35c3c220ddda71d0060b72aa6
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 26 08:49:39 2017 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Jan 26 08:49:39 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=94c945ba

Linux patch 4.9.6

 0000_README            |    4 +
 1005_linux-4.9.6.patch | 4537 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4541 insertions(+)

diff --git a/0000_README b/0000_README
index a0a0324..970967a 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-4.9.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.9.5
 
+Patch:  1005_linux-4.9.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.9.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.9.6.patch b/1005_linux-4.9.6.patch
new file mode 100644
index 0000000..aaeaa34
--- /dev/null
+++ b/1005_linux-4.9.6.patch
@@ -0,0 +1,4537 @@
+diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt
+index 19df842c694f..8163d565f697 100644
+--- a/Documentation/devicetree/bindings/clock/imx31-clock.txt
++++ b/Documentation/devicetree/bindings/clock/imx31-clock.txt
+@@ -77,7 +77,7 @@ Examples:
+ clks: ccm@53f80000{
+ 	compatible = "fsl,imx31-ccm";
+ 	reg = <0x53f80000 0x4000>;
+-	interrupts = <0 31 0x04 0 53 0x04>;
++	interrupts = <31>, <53>;
+ 	#clock-cells = <1>;
+ };
+ 
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 37babf91f2cb..922dec8fa07e 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -3998,10 +3998,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 			it if 0 is given (See Documentation/cgroup-v1/memory.txt)
+ 
+ 	swiotlb=	[ARM,IA-64,PPC,MIPS,X86]
+-			Format: { <int> | force }
++			Format: { <int> | force | noforce }
+ 			<int> -- Number of I/O TLB slabs
+ 			force -- force using of bounce buffers even if they
+ 			         wouldn't be automatically used by the kernel
++			noforce -- Never use bounce buffers (for debugging)
+ 
+ 	switches=	[HW,M68k]
+ 
+diff --git a/Makefile b/Makefile
+index 2a8af8af7b27..ef95231d1625 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+ 
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index bd204bfa29ed..249e10190d20 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -28,7 +28,7 @@ config ARC
+ 	select HAVE_KPROBES
+ 	select HAVE_KRETPROBES
+ 	select HAVE_MEMBLOCK
+-	select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
++	select HAVE_MOD_ARCH_SPECIFIC
+ 	select HAVE_OPROFILE
+ 	select HAVE_PERF_EVENTS
+ 	select HANDLE_DOMAIN_IRQ
+diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
+index 6e91d8b339c3..567590ea8f6c 100644
+--- a/arch/arc/include/asm/module.h
++++ b/arch/arc/include/asm/module.h
+@@ -14,13 +14,13 @@
+ 
+ #include <asm-generic/module.h>
+ 
+-#ifdef CONFIG_ARC_DW2_UNWIND
+ struct mod_arch_specific {
++#ifdef CONFIG_ARC_DW2_UNWIND
+ 	void *unw_info;
+ 	int unw_sec_idx;
++#endif
+ 	const char *secstr;
+ };
+-#endif
+ 
+ #define MODULE_PROC_FAMILY "ARC700"
+ 
+diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
+index 42e964db2967..3d99a6091332 100644
+--- a/arch/arc/kernel/module.c
++++ b/arch/arc/kernel/module.c
+@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ #ifdef CONFIG_ARC_DW2_UNWIND
+ 	mod->arch.unw_sec_idx = 0;
+ 	mod->arch.unw_info = NULL;
+-	mod->arch.secstr = secstr;
+ #endif
++	mod->arch.secstr = secstr;
+ 	return 0;
+ }
+ 
+@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ 
+ 	}
+ 
++#ifdef CONFIG_ARC_DW2_UNWIND
+ 	if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
+ 		module->arch.unw_sec_idx = tgtsec;
++#endif
+ 
+ 	return 0;
+ 
+diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
+index c558ba75cbcc..7037201c5e3a 100644
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -485,6 +485,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
+ 	am3517-evm.dtb \
+ 	am3517_mt_ventoux.dtb \
+ 	logicpd-torpedo-37xx-devkit.dtb \
++	logicpd-som-lv-37xx-devkit.dtb \
+ 	omap3430-sdp.dtb \
+ 	omap3-beagle.dtb \
+ 	omap3-beagle-xm.dtb \
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index 194d884c9de1..795c1467fa50 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -16,6 +16,7 @@
+ 	interrupt-parent = <&intc>;
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
++	chosen { };
+ 
+ 	aliases {
+ 		i2c0 = &i2c0;
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index a275fa956813..a20a71d9d22e 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -16,6 +16,7 @@
+ 	interrupt-parent = <&wakeupgen>;
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
++	chosen { };
+ 
+ 	memory@0 {
+ 		device_type = "memory";
+diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
+index 46d46d894a44..74dd21b7373c 100644
+--- a/arch/arm/boot/dts/bcm283x.dtsi
++++ b/arch/arm/boot/dts/bcm283x.dtsi
+@@ -104,7 +104,7 @@
+ 			reg = <0x7e104000 0x10>;
+ 		};
+ 
+-		mailbox: mailbox@7e00b800 {
++		mailbox: mailbox@7e00b880 {
+ 			compatible = "brcm,bcm2835-mbox";
+ 			reg = <0x7e00b880 0x40>;
+ 			interrupts = <0 1>;
+diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
+index 41de15fe15a2..78492a0bbbab 100644
+--- a/arch/arm/boot/dts/da850-evm.dts
++++ b/arch/arm/boot/dts/da850-evm.dts
+@@ -99,6 +99,7 @@
+ 				#size-cells = <1>;
+ 				compatible = "m25p64";
+ 				spi-max-frequency = <30000000>;
++				m25p,fast-read;
+ 				reg = <0>;
+ 				partition@0 {
+ 					label = "U-Boot-SPL";
+diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
+index ff90a6ce6bdc..d87efab24fa2 100644
+--- a/arch/arm/boot/dts/dm814x.dtsi
++++ b/arch/arm/boot/dts/dm814x.dtsi
+@@ -12,6 +12,7 @@
+ 	interrupt-parent = <&intc>;
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
++	chosen { };
+ 
+ 	aliases {
+ 		i2c0 = &i2c1;
+diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
+index f1e0f771ff29..cbdfbc4e4a26 100644
+--- a/arch/arm/boot/dts/dm816x.dtsi
++++ b/arch/arm/boot/dts/dm816x.dtsi
+@@ -12,6 +12,7 @@
+ 	interrupt-parent = <&intc>;
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
++	chosen { };
+ 
+ 	aliases {
+ 		i2c0 = &i2c1;
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index d4fcd68f6349..064d84f87e45 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -18,6 +18,7 @@
+ 
+ 	compatible = "ti,dra7xx";
+ 	interrupt-parent = <&crossbar_mpu>;
++	chosen { };
+ 
+ 	aliases {
+ 		i2c0 = &i2c1;
+@@ -1376,6 +1377,7 @@
+ 			phy-names = "sata-phy";
+ 			clocks = <&sata_ref_clk>;
+ 			ti,hwmods = "sata";
++			ports-implemented = <0x1>;
+ 		};
+ 
+ 		rtc: rtc@48838000 {
+diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi
+index 1ce7ae94e7ad..11e9e6bd8abb 100644
+--- a/arch/arm/boot/dts/imx31.dtsi
++++ b/arch/arm/boot/dts/imx31.dtsi
+@@ -30,11 +30,11 @@
+ 		};
+ 	};
+ 
+-	avic: avic-interrupt-controller@60000000 {
++	avic: interrupt-controller@68000000 {
+ 		compatible = "fsl,imx31-avic", "fsl,avic";
+ 		interrupt-controller;
+ 		#interrupt-cells = <1>;
+-		reg = <0x60000000 0x100000>;
++		reg = <0x68000000 0x100000>;
+ 	};
+ 
+ 	soc {
+@@ -118,13 +118,6 @@
+ 				interrupts = <19>;
+ 				clocks = <&clks 25>;
+ 			};
+-
+-			clks: ccm@53f80000{
+-				compatible = "fsl,imx31-ccm";
+-				reg = <0x53f80000 0x4000>;
+-				interrupts = <0 31 0x04 0 53 0x04>;
+-				#clock-cells = <1>;
+-			};
+ 		};
+ 
+ 		aips@53f00000 { /* AIPS2 */
+@@ -134,6 +127,13 @@
+ 			reg = <0x53f00000 0x100000>;
+ 			ranges;
+ 
++			clks: ccm@53f80000{
++				compatible = "fsl,imx31-ccm";
++				reg = <0x53f80000 0x4000>;
++				interrupts = <31>, <53>;
++				#clock-cells = <1>;
++			};
++
+ 			gpt: timer@53f90000 {
+ 				compatible = "fsl,imx31-gpt";
+ 				reg = <0x53f90000 0x4000>;
+diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts
+index 59bc5a4dce17..a150bca84daa 100644
+--- a/arch/arm/boot/dts/imx6q-cm-fx6.dts
++++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts
+@@ -183,7 +183,6 @@
+ 			MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x1b0b0
+ 			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+ 			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+-			MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0x4001b0a8
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+index b0b3220a1fd9..01166ba36f27 100644
+--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+@@ -319,8 +319,6 @@
+ 		compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
+ 			     "fsl,imx-audio-sgtl5000";
+ 		model = "imx6q-nitrogen6_max-sgtl5000";
+-		pinctrl-names = "default";
+-		pinctrl-0 = <&pinctrl_sgtl5000>;
+ 		ssi-controller = <&ssi1>;
+ 		audio-codec = <&codec>;
+ 		audio-routing =
+@@ -402,6 +400,8 @@
+ 
+ 	codec: sgtl5000@0a {
+ 		compatible = "fsl,sgtl5000";
++		pinctrl-names = "default";
++		pinctrl-0 = <&pinctrl_sgtl5000>;
+ 		reg = <0x0a>;
+ 		clocks = <&clks IMX6QDL_CLK_CKO>;
+ 		VDDA-supply = <&reg_2p5v>;
+diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+index da8598402ab8..38faa90007d7 100644
+--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
++++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+@@ -158,7 +158,7 @@
+ &mmc1 {
+ 	interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
+ 	pinctrl-names = "default";
+-	pinctrl-0 = <&mmc1_pins &mmc1_cd>;
++	pinctrl-0 = <&mmc1_pins>;
+ 	wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>;		/* gpio_126 */
+ 	cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>;		/* gpio_110 */
+ 	vmmc-supply = <&vmmc1>;
+@@ -193,7 +193,8 @@
+ 			OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0)	/* sdmmc1_dat1.sdmmc1_dat1 */
+ 			OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0)	/* sdmmc1_dat2.sdmmc1_dat2 */
+ 			OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0)	/* sdmmc1_dat3.sdmmc1_dat3 */
+-			OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4)	/* cam_strobe.gpio_126 sdmmc1_wp*/
++			OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4)	/* cam_strobe.gpio_126 */
++			OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4)	/* cam_d11.gpio_110 */
+ 		>;
+ 	};
+ 
+@@ -242,12 +243,6 @@
+ 			OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4)       /* sys_boot6.gpio_8 */
+ 		>;
+ 	};
+-
+-	mmc1_cd: pinmux_mmc1_cd {
+-		pinctrl-single,pins = <
+-			OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4)	/* cam_d11.gpio_110 */
+-		>;
+-	};
+ };
+ 
+ 
+diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
+index 4f793a025a72..f1d6de8b3c19 100644
+--- a/arch/arm/boot/dts/omap2.dtsi
++++ b/arch/arm/boot/dts/omap2.dtsi
+@@ -17,6 +17,7 @@
+ 	interrupt-parent = <&intc>;
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
++	chosen { };
+ 
+ 	aliases {
+ 		serial0 = &uart1;
+diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
+index 353d818ce5a6..2008648b8c9f 100644
+--- a/arch/arm/boot/dts/omap3.dtsi
++++ b/arch/arm/boot/dts/omap3.dtsi
+@@ -17,6 +17,7 @@
+ 	interrupt-parent = <&intc>;
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
++	chosen { };
+ 
+ 	aliases {
+ 		i2c0 = &i2c1;
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index 0ced079b7ae3..9c289ddab3df 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -15,6 +15,7 @@
+ 	interrupt-parent = <&wakeupgen>;
+ 	#address-cells = <1>;
+ 	#size-cells = <1>;
++	chosen { };
+ 
+ 	aliases {
+ 		i2c0 = &i2c1;
+diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
+index 25262118ec3d..1d1d8e90cd80 100644
+--- a/arch/arm/boot/dts/omap5.dtsi
++++ b/arch/arm/boot/dts/omap5.dtsi
+@@ -17,6 +17,7 @@
+ 
+ 	compatible = "ti,omap5";
+ 	interrupt-parent = <&wakeupgen>;
++	chosen { };
+ 
+ 	aliases {
+ 		i2c0 = &i2c1;
+@@ -985,6 +986,7 @@
+ 			phy-names = "sata-phy";
+ 			clocks = <&sata_ref_clk>;
+ 			ti,hwmods = "sata";
++			ports-implemented = <0x1>;
+ 		};
+ 
+ 		dss: dss@58000000 {
+diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
+index 725ecb3c5fb4..7e860d3737ff 100644
+--- a/arch/arm/boot/dts/r8a7794.dtsi
++++ b/arch/arm/boot/dts/r8a7794.dtsi
+@@ -319,7 +319,7 @@
+ 				  "ch12";
+ 		clocks = <&mstp5_clks R8A7794_CLK_AUDIO_DMAC0>;
+ 		clock-names = "fck";
+-		power-domains = <&cpg_clocks>;
++		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
+ 		#dma-cells = <1>;
+ 		dma-channels = <13>;
+ 	};
+@@ -1025,8 +1025,7 @@
+ 			clocks = <&extal_clk &usb_extal_clk>;
+ 			#clock-cells = <1>;
+ 			clock-output-names = "main", "pll0", "pll1", "pll3",
+-					     "lb", "qspi", "sdh", "sd0", "z",
+-					     "rcan";
++					     "lb", "qspi", "sdh", "sd0", "rcan";
+ 			#power-domain-cells = <0>;
+ 		};
+ 		/* Variable factor clocks */
+@@ -1483,7 +1482,7 @@
+ 			      "mix.0", "mix.1",
+ 			      "dvc.0", "dvc.1",
+ 			      "clk_a", "clk_b", "clk_c", "clk_i";
+-		power-domains = <&cpg_clocks>;
++		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
+ 
+ 		status = "disabled";
+ 
+diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
+index 522b5feb4eaa..b62eaeb147aa 100644
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -94,6 +94,9 @@
+ #define ARM_CPU_XSCALE_ARCH_V2		0x4000
+ #define ARM_CPU_XSCALE_ARCH_V3		0x6000
+ 
++/* Qualcomm implemented cores */
++#define ARM_CPU_PART_SCORPION		0x510002d0
++
+ extern unsigned int processor_id;
+ 
+ #ifdef CONFIG_CPU_CP15
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index b8df45883cf7..25538a935874 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -1066,6 +1066,22 @@ static int __init arch_hw_breakpoint_init(void)
+ 		return 0;
+ 	}
+ 
++	/*
++	 * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
++	 * whenever a WFI is issued, even if the core is not powered down, in
++	 * violation of the architecture.  When DBGPRSR.SPD is set, accesses to
++	 * breakpoint and watchpoint registers are treated as undefined, so
++	 * this results in boot time and runtime failures when these are
++	 * accessed and we unexpectedly take a trap.
++	 *
++	 * It's not clear if/how this can be worked around, so we blacklist
++	 * Scorpion CPUs to avoid these issues.
++	*/
++	if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
++		pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
++		return 0;
++	}
++
+ 	has_ossr = core_has_os_save_restore();
+ 
+ 	/* Determine how many BRPs/WRPs are available. */
+diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
+index 22313cb53362..9af0701f7094 100644
+--- a/arch/arm/kernel/smp_tlb.c
++++ b/arch/arm/kernel/smp_tlb.c
+@@ -9,6 +9,7 @@
+  */
+ #include <linux/preempt.h>
+ #include <linux/smp.h>
++#include <linux/uaccess.h>
+ 
+ #include <asm/smp_plat.h>
+ #include <asm/tlbflush.h>
+@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
+ static inline void ipi_flush_tlb_page(void *arg)
+ {
+ 	struct tlb_args *ta = (struct tlb_args *)arg;
++	unsigned int __ua_flags = uaccess_save_and_enable();
+ 
+ 	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
++
++	uaccess_restore(__ua_flags);
+ }
+ 
+ static inline void ipi_flush_tlb_kernel_page(void *arg)
+@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
+ static inline void ipi_flush_tlb_range(void *arg)
+ {
+ 	struct tlb_args *ta = (struct tlb_args *)arg;
++	unsigned int __ua_flags = uaccess_save_and_enable();
+ 
+ 	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
++
++	uaccess_restore(__ua_flags);
+ }
+ 
+ static inline void ipi_flush_tlb_kernel_range(void *arg)
+diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
+index 8538910db202..a970e7fcba9e 100644
+--- a/arch/arm/mach-ux500/pm.c
++++ b/arch/arm/mach-ux500/pm.c
+@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
+  */
+ bool prcmu_is_cpu_in_wfi(int cpu)
+ {
+-	return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
+-		     PRCM_ARM_WFI_STANDBY_WFI0;
++	return readl(PRCM_ARM_WFI_STANDBY) &
++		(cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
+ }
+ 
+ /*
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index b71086d25195..53211a0acf0f 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
+ #define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+ #else
+ #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
+-#define __page_to_voff(page)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
++#define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+ 
+ #define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+ #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
+diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
+index b5c3933ed441..d1ff83dfe5de 100644
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -77,6 +77,7 @@ struct user_fpsimd_state {
+ 	__uint128_t	vregs[32];
+ 	__u32		fpsr;
+ 	__u32		fpcr;
++	__u32		__reserved[2];
+ };
+ 
+ struct user_hwdebug_state {
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 223d54a4d66b..79b0fe24d5b7 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -624,7 +624,7 @@ el0_inv:
+ 	mov	x0, sp
+ 	mov	x1, #BAD_SYNC
+ 	mov	x2, x25
+-	bl	bad_mode
++	bl	bad_el0_sync
+ 	b	ret_to_user
+ ENDPROC(el0_sync)
+ 
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index e0c81da60f76..8eedeef375d6 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -550,6 +550,8 @@ static int hw_break_set(struct task_struct *target,
+ 	/* (address, ctrl) registers */
+ 	limit = regset->n * regset->size;
+ 	while (count && offset < limit) {
++		if (count < PTRACE_HBP_ADDR_SZ)
++			return -EINVAL;
+ 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
+ 					 offset, offset + PTRACE_HBP_ADDR_SZ);
+ 		if (ret)
+@@ -559,6 +561,8 @@ static int hw_break_set(struct task_struct *target,
+ 			return ret;
+ 		offset += PTRACE_HBP_ADDR_SZ;
+ 
++		if (!count)
++			break;
+ 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
+ 					 offset, offset + PTRACE_HBP_CTRL_SZ);
+ 		if (ret)
+@@ -595,7 +599,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	struct user_pt_regs newregs;
++	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
+ 	if (ret)
+@@ -625,7 +629,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	struct user_fpsimd_state newstate;
++	struct user_fpsimd_state newstate =
++		target->thread.fpsimd_state.user_fpsimd;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
+ 	if (ret)
+@@ -649,7 +654,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	unsigned long tls;
++	unsigned long tls = target->thread.tp_value;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ 	if (ret)
+@@ -675,7 +680,8 @@ static int system_call_set(struct task_struct *target,
+ 			   unsigned int pos, unsigned int count,
+ 			   const void *kbuf, const void __user *ubuf)
+ {
+-	int syscallno, ret;
++	int syscallno = task_pt_regs(target)->syscallno;
++	int ret;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
+ 	if (ret)
+@@ -947,7 +953,7 @@ static int compat_tls_set(struct task_struct *target,
+ 			  const void __user *ubuf)
+ {
+ 	int ret;
+-	compat_ulong_t tls;
++	compat_ulong_t tls = target->thread.tp_value;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ 	if (ret)
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index c9986b3e0a96..11e5eae088ab 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -596,17 +596,34 @@ const char *esr_get_class_string(u32 esr)
+ }
+ 
+ /*
+- * bad_mode handles the impossible case in the exception vector.
++ * bad_mode handles the impossible case in the exception vector. This is always
++ * fatal.
+  */
+ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+ {
+-	siginfo_t info;
+-	void __user *pc = (void __user *)instruction_pointer(regs);
+ 	console_verbose();
+ 
+ 	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+ 		handler[reason], smp_processor_id(), esr,
+ 		esr_get_class_string(esr));
++
++	die("Oops - bad mode", regs, 0);
++	local_irq_disable();
++	panic("bad mode");
++}
++
++/*
++ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
++ * exceptions taken from EL0. Unlike bad_mode, this returns.
++ */
++asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
++{
++	siginfo_t info;
++	void __user *pc = (void __user *)instruction_pointer(regs);
++	console_verbose();
++
++	pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
++		smp_processor_id(), esr, esr_get_class_string(esr));
+ 	__show_regs(regs);
+ 
+ 	info.si_signo = SIGILL;
+@@ -614,7 +631,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+ 	info.si_code  = ILL_ILLOPC;
+ 	info.si_addr  = pc;
+ 
+-	arm64_notify_die("Oops - bad mode", regs, &info, 0);
++	current->thread.fault_address = 0;
++	current->thread.fault_code = 0;
++
++	force_sig_info(info.si_signo, &info, current);
+ }
+ 
+ void __pte_error(const char *file, int line, unsigned long val)
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index 3f74d0d98de6..02265a589ef5 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -524,7 +524,8 @@ EXPORT_SYMBOL(dummy_dma_ops);
+ 
+ static int __init arm64_dma_init(void)
+ {
+-	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
++	if (swiotlb_force == SWIOTLB_FORCE ||
++	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+ 		swiotlb = 1;
+ 
+ 	return atomic_pool_init();
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 212c4d1e2f26..380ebe705093 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -401,8 +401,11 @@ static void __init free_unused_memmap(void)
+  */
+ void __init mem_init(void)
+ {
+-	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
++	if (swiotlb_force == SWIOTLB_FORCE ||
++	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+ 		swiotlb_init(1);
++	else
++		swiotlb_force = SWIOTLB_NO_FORCE;
+ 
+ 	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
+ 
+diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
+index c56ea8c84abb..c4ced1d01d57 100644
+--- a/arch/powerpc/include/asm/ppc-opcode.h
++++ b/arch/powerpc/include/asm/ppc-opcode.h
+@@ -157,7 +157,7 @@
+ #define PPC_INST_MCRXR			0x7c000400
+ #define PPC_INST_MCRXR_MASK		0xfc0007fe
+ #define PPC_INST_MFSPR_PVR		0x7c1f42a6
+-#define PPC_INST_MFSPR_PVR_MASK		0xfc1fffff
++#define PPC_INST_MFSPR_PVR_MASK		0xfc1ffffe
+ #define PPC_INST_MFTMR			0x7c0002dc
+ #define PPC_INST_MSGSND			0x7c00019c
+ #define PPC_INST_MSGCLR			0x7c0001dc
+@@ -174,13 +174,13 @@
+ #define PPC_INST_RFDI			0x4c00004e
+ #define PPC_INST_RFMCI			0x4c00004c
+ #define PPC_INST_MFSPR_DSCR		0x7c1102a6
+-#define PPC_INST_MFSPR_DSCR_MASK	0xfc1fffff
++#define PPC_INST_MFSPR_DSCR_MASK	0xfc1ffffe
+ #define PPC_INST_MTSPR_DSCR		0x7c1103a6
+-#define PPC_INST_MTSPR_DSCR_MASK	0xfc1fffff
++#define PPC_INST_MTSPR_DSCR_MASK	0xfc1ffffe
+ #define PPC_INST_MFSPR_DSCR_USER	0x7c0302a6
+-#define PPC_INST_MFSPR_DSCR_USER_MASK	0xfc1fffff
++#define PPC_INST_MFSPR_DSCR_USER_MASK	0xfc1ffffe
+ #define PPC_INST_MTSPR_DSCR_USER	0x7c0303a6
+-#define PPC_INST_MTSPR_DSCR_USER_MASK	0xfc1fffff
++#define PPC_INST_MTSPR_DSCR_USER_MASK	0xfc1ffffe
+ #define PPC_INST_MFVSRD			0x7c000066
+ #define PPC_INST_MTVSRD			0x7c000166
+ #define PPC_INST_SLBFEE			0x7c0007a7
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index b1ec62f2cc31..5c8f12fe9721 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ 
+ 	flush_fp_to_thread(target);
+ 
++	for (i = 0; i < 32 ; i++)
++		buf[i] = target->thread.TS_FPR(i);
++	buf[32] = target->thread.fp_state.fpscr;
++
+ 	/* copy to local buffer then write that out */
+ 	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ 	if (i)
+@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
+ 	flush_altivec_to_thread(target);
+ 	flush_vsx_to_thread(target);
+ 
++	for (i = 0; i < 32 ; i++)
++		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
++
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				 buf, 0, 32 * sizeof(double));
+ 	if (!ret)
+@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
+ 	flush_fp_to_thread(target);
+ 	flush_altivec_to_thread(target);
+ 
++	for (i = 0; i < 32; i++)
++		buf[i] = target->thread.TS_CKFPR(i);
++	buf[32] = target->thread.ckfp_state.fpscr;
++
+ 	/* copy to local buffer then write that out */
+ 	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ 	if (i)
+@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
+ 	flush_altivec_to_thread(target);
+ 	flush_vsx_to_thread(target);
+ 
++	for (i = 0; i < 32 ; i++)
++		buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
++
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				 buf, 0, 32 * sizeof(double));
+ 	if (!ret)
+diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
+index 6447dc1c3d89..929b56d47ad9 100644
+--- a/arch/powerpc/perf/power9-events-list.h
++++ b/arch/powerpc/perf/power9-events-list.h
+@@ -16,7 +16,7 @@ EVENT(PM_CYC,					0x0001e)
+ EVENT(PM_ICT_NOSLOT_CYC,			0x100f8)
+ EVENT(PM_CMPLU_STALL,				0x1e054)
+ EVENT(PM_INST_CMPL,				0x00002)
+-EVENT(PM_BRU_CMPL,				0x40060)
++EVENT(PM_BRU_CMPL,				0x10012)
+ EVENT(PM_BR_MPRED_CMPL,				0x400f6)
+ 
+ /* All L1 D cache load references counted at finish, gated by reject */
+diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
+index d38e86fd5720..60c57657c772 100644
+--- a/arch/powerpc/sysdev/xics/icp-opal.c
++++ b/arch/powerpc/sysdev/xics/icp-opal.c
+@@ -20,6 +20,7 @@
+ #include <asm/xics.h>
+ #include <asm/io.h>
+ #include <asm/opal.h>
++#include <asm/kvm_ppc.h>
+ 
+ static void icp_opal_teardown_cpu(void)
+ {
+@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
+ 	 * Should we be flagging idle loop instead?
+ 	 * Or creating some task to be scheduled?
+ 	 */
+-	opal_int_eoi((0x00 << 24) | XICS_IPI);
++	if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
++		force_external_irq_replay();
++}
++
++static unsigned int icp_opal_get_xirr(void)
++{
++	unsigned int kvm_xirr;
++	__be32 hw_xirr;
++	int64_t rc;
++
++	/* Handle an interrupt latched by KVM first */
++	kvm_xirr = kvmppc_get_xics_latch();
++	if (kvm_xirr)
++		return kvm_xirr;
++
++	/* Then ask OPAL */
++	rc = opal_int_get_xirr(&hw_xirr, false);
++	if (rc < 0)
++		return 0;
++	return be32_to_cpu(hw_xirr);
+ }
+ 
+ static unsigned int icp_opal_get_irq(void)
+@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
+ 	unsigned int xirr;
+ 	unsigned int vec;
+ 	unsigned int irq;
+-	int64_t rc;
+ 
+-	rc = opal_int_get_xirr(&xirr, false);
+-	if (rc < 0)
+-		return 0;
+-	xirr = be32_to_cpu(xirr);
++	xirr = icp_opal_get_xirr();
+ 	vec = xirr & 0x00ffffff;
+ 	if (vec == XICS_IRQ_SPURIOUS)
+ 		return 0;
+@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
+ 	xics_mask_unknown_vec(vec);
+ 
+ 	/* We might learn about it later, so EOI it */
+-	opal_int_eoi(xirr);
++	if (opal_int_eoi(xirr) > 0)
++		force_external_irq_replay();
+ 
+ 	return 0;
+ }
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 9c7a1ecfe6bd..47a1de77b18d 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
+ 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
+ 	       S390_ARCH_FAC_LIST_SIZE_BYTE);
+ 	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
+-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
++	       sizeof(S390_lowcore.stfle_fac_list));
+ 	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
+ 		ret = -EFAULT;
+ 	kfree(mach);
+@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 
+ 	/* Populate the facility mask initially. */
+ 	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
+-	       S390_ARCH_FAC_LIST_SIZE_BYTE);
++	       sizeof(S390_lowcore.stfle_fac_list));
+ 	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
+ 		if (i < kvm_s390_fac_list_mask_size())
+ 			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 48e6d84f173e..3d8ff40ecc6f 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1876,6 +1876,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
+ 	.irq_ack		= irq_chip_ack_parent,
+ 	.irq_eoi		= ioapic_ack_level,
+ 	.irq_set_affinity	= ioapic_set_affinity,
++	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+ 	.flags			= IRQCHIP_SKIP_SET_WAKE,
+ };
+ 
+@@ -1887,6 +1888,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
+ 	.irq_ack		= irq_chip_ack_parent,
+ 	.irq_eoi		= ioapic_ir_ack_level,
+ 	.irq_set_affinity	= ioapic_set_affinity,
++	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+ 	.flags			= IRQCHIP_SKIP_SET_WAKE,
+ };
+ 
+diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
+index b47edb8f5256..8da13d4e77cc 100644
+--- a/arch/x86/kernel/pci-swiotlb.c
++++ b/arch/x86/kernel/pci-swiotlb.c
+@@ -70,7 +70,7 @@ int __init pci_swiotlb_detect_override(void)
+ {
+ 	int use_swiotlb = swiotlb | swiotlb_force;
+ 
+-	if (swiotlb_force)
++	if (swiotlb_force == SWIOTLB_FORCE)
+ 		swiotlb = 1;
+ 
+ 	return use_swiotlb;
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index 3cd69832d7f4..3961103e9176 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
+ 		},
+ 	},
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
++	{
++		.callback = set_nouse_crs,
++		.ident = "Supermicro X8DTH",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
++			DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
++		},
++	},
+ 
+ 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
+ 	{
+diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
+index 0e98e5d241d0..5f8b4b0302b6 100644
+--- a/arch/x86/xen/pci-swiotlb-xen.c
++++ b/arch/x86/xen/pci-swiotlb-xen.c
+@@ -49,7 +49,7 @@ int __init pci_xen_swiotlb_detect(void)
+ 	 * activate this IOMMU. If running as PV privileged, activate it
+ 	 * irregardless.
+ 	 */
+-	if ((xen_initial_domain() || swiotlb || swiotlb_force))
++	if (xen_initial_domain() || swiotlb || swiotlb_force == SWIOTLB_FORCE)
+ 		xen_swiotlb = 1;
+ 
+ 	/* If we are running under Xen, we MUST disable the native SWIOTLB.
+diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
+index 8f3488b80896..7f6fed9f0703 100644
+--- a/drivers/clocksource/exynos_mct.c
++++ b/drivers/clocksource/exynos_mct.c
+@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
+ 	if (mct_int_type == MCT_INT_SPI) {
+ 		if (evt->irq != -1)
+ 			disable_irq_nosync(evt->irq);
++		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
+ 	} else {
+ 		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
+ 	}
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index bf3ea7603a58..712592cef1a2 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -593,11 +593,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 	list_add(&devfreq->node, &devfreq_list);
+ 
+ 	governor = find_devfreq_governor(devfreq->governor_name);
+-	if (!IS_ERR(governor))
+-		devfreq->governor = governor;
+-	if (devfreq->governor)
+-		err = devfreq->governor->event_handler(devfreq,
+-					DEVFREQ_GOV_START, NULL);
++	if (IS_ERR(governor)) {
++		dev_err(dev, "%s: Unable to find governor for the device\n",
++			__func__);
++		err = PTR_ERR(governor);
++		goto err_init;
++	}
++
++	devfreq->governor = governor;
++	err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
++						NULL);
+ 	if (err) {
+ 		dev_err(dev, "%s: Unable to start governor for the device\n",
+ 			__func__);
+diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
+index 29866f7e6d7e..1b21bb60e797 100644
+--- a/drivers/devfreq/exynos-bus.c
++++ b/drivers/devfreq/exynos-bus.c
+@@ -498,7 +498,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
+ 	if (IS_ERR(bus->devfreq)) {
+ 		dev_err(dev,
+ 			"failed to add devfreq dev with passive governor\n");
+-		ret = -EPROBE_DEFER;
++		ret = PTR_ERR(bus->devfreq);
+ 		goto err;
+ 	}
+ 
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 030fe05ed43b..9f3dbc8c63d2 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -448,6 +448,9 @@ struct dma_pl330_chan {
+ 
+ 	/* for cyclic capability */
+ 	bool cyclic;
++
++	/* for runtime pm tracking */
++	bool active;
+ };
+ 
+ struct pl330_dmac {
+@@ -2031,6 +2034,7 @@ static void pl330_tasklet(unsigned long data)
+ 		_stop(pch->thread);
+ 		spin_unlock(&pch->thread->dmac->lock);
+ 		power_down = true;
++		pch->active = false;
+ 	} else {
+ 		/* Make sure the PL330 Channel thread is active */
+ 		spin_lock(&pch->thread->dmac->lock);
+@@ -2050,6 +2054,7 @@ static void pl330_tasklet(unsigned long data)
+ 			desc->status = PREP;
+ 			list_move_tail(&desc->node, &pch->work_list);
+ 			if (power_down) {
++				pch->active = true;
+ 				spin_lock(&pch->thread->dmac->lock);
+ 				_start(pch->thread);
+ 				spin_unlock(&pch->thread->dmac->lock);
+@@ -2164,6 +2169,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
+ 	unsigned long flags;
+ 	struct pl330_dmac *pl330 = pch->dmac;
+ 	LIST_HEAD(list);
++	bool power_down = false;
+ 
+ 	pm_runtime_get_sync(pl330->ddma.dev);
+ 	spin_lock_irqsave(&pch->lock, flags);
+@@ -2174,6 +2180,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
+ 	pch->thread->req[0].desc = NULL;
+ 	pch->thread->req[1].desc = NULL;
+ 	pch->thread->req_running = -1;
++	power_down = pch->active;
++	pch->active = false;
+ 
+ 	/* Mark all desc done */
+ 	list_for_each_entry(desc, &pch->submitted_list, node) {
+@@ -2191,6 +2199,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
+ 	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
+ 	spin_unlock_irqrestore(&pch->lock, flags);
+ 	pm_runtime_mark_last_busy(pl330->ddma.dev);
++	if (power_down)
++		pm_runtime_put_autosuspend(pl330->ddma.dev);
+ 	pm_runtime_put_autosuspend(pl330->ddma.dev);
+ 
+ 	return 0;
+@@ -2350,6 +2360,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
+ 		 * updated on work_list emptiness status.
+ 		 */
+ 		WARN_ON(list_empty(&pch->submitted_list));
++		pch->active = true;
+ 		pm_runtime_get_sync(pch->dmac->ddma.dev);
+ 	}
+ 	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index 2e441d0ccd79..4c357d475465 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
+ {
+ 	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ 	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
++	struct rcar_dmac_chan_map *map = &rchan->map;
+ 	struct rcar_dmac_desc_page *page, *_page;
+ 	struct rcar_dmac_desc *desc;
+ 	LIST_HEAD(list);
+@@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
+ 		free_page((unsigned long)page);
+ 	}
+ 
++	/* Remove slave mapping if present. */
++	if (map->slave.xfer_size) {
++		dma_unmap_resource(chan->device->dev, map->addr,
++				   map->slave.xfer_size, map->dir, 0);
++		map->slave.xfer_size = 0;
++	}
++
+ 	pm_runtime_put(chan->device->dev);
+ }
+ 
+diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
+index 717704e9ae07..c0303f61c26a 100644
+--- a/drivers/hid/hid-corsair.c
++++ b/drivers/hid/hid-corsair.c
+@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
+ 	struct usb_interface *usbif = to_usb_interface(dev->parent);
+ 	struct usb_device *usbdev = interface_to_usbdev(usbif);
+ 	int brightness;
+-	char data[8];
++	char *data;
++
++	data = kmalloc(8, GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
+ 
+ 	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
+ 			      K90_REQUEST_STATUS,
+ 			      USB_DIR_IN | USB_TYPE_VENDOR |
+ 			      USB_RECIP_DEVICE, 0, 0, data, 8,
+ 			      USB_CTRL_SET_TIMEOUT);
+-	if (ret < 0) {
++	if (ret < 5) {
+ 		dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
+ 			 ret);
+-		return -EIO;
++		ret = -EIO;
++		goto out;
+ 	}
+ 	brightness = data[4];
+ 	if (brightness < 0 || brightness > 3) {
+ 		dev_warn(dev,
+ 			 "Read invalid backlight brightness: %02hhx.\n",
+ 			 data[4]);
+-		return -EIO;
++		ret = -EIO;
++		goto out;
+ 	}
+-	return brightness;
++	ret = brightness;
++out:
++	kfree(data);
++
++	return ret;
+ }
+ 
+ static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
+@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
+ 	struct usb_interface *usbif = to_usb_interface(dev->parent);
+ 	struct usb_device *usbdev = interface_to_usbdev(usbif);
+ 	const char *macro_mode;
+-	char data[8];
++	char *data;
++
++	data = kmalloc(2, GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
+ 
+ 	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
+ 			      K90_REQUEST_GET_MODE,
+ 			      USB_DIR_IN | USB_TYPE_VENDOR |
+ 			      USB_RECIP_DEVICE, 0, 0, data, 2,
+ 			      USB_CTRL_SET_TIMEOUT);
+-	if (ret < 0) {
++	if (ret < 1) {
+ 		dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
+ 			 ret);
+-		return -EIO;
++		ret = -EIO;
++		goto out;
+ 	}
+ 
+ 	switch (data[0]) {
+@@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
+ 	default:
+ 		dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
+ 			 data[0]);
+-		return -EIO;
++		ret = -EIO;
++		goto out;
+ 	}
+ 
+-	return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
++	ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
++out:
++	kfree(data);
++
++	return ret;
+ }
+ 
+ static ssize_t k90_store_macro_mode(struct device *dev,
+@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
+ 	struct usb_interface *usbif = to_usb_interface(dev->parent);
+ 	struct usb_device *usbdev = interface_to_usbdev(usbif);
+ 	int current_profile;
+-	char data[8];
++	char *data;
++
++	data = kmalloc(8, GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
+ 
+ 	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
+ 			      K90_REQUEST_STATUS,
+ 			      USB_DIR_IN | USB_TYPE_VENDOR |
+ 			      USB_RECIP_DEVICE, 0, 0, data, 8,
+ 			      USB_CTRL_SET_TIMEOUT);
+-	if (ret < 0) {
++	if (ret < 8) {
+ 		dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
+ 			 ret);
+-		return -EIO;
++		ret = -EIO;
++		goto out;
+ 	}
+ 	current_profile = data[7];
+ 	if (current_profile < 1 || current_profile > 3) {
+ 		dev_warn(dev, "Read invalid current profile: %02hhx.\n",
+ 			 data[7]);
+-		return -EIO;
++		ret = -EIO;
++		goto out;
+ 	}
+ 
+-	return snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
++	ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
++out:
++	kfree(data);
++
++	return ret;
+ }
+ 
+ static ssize_t k90_store_current_profile(struct device *dev,
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index 1a2984c28b95..ae04826e82fc 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -770,12 +770,8 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
+ 	int err = 0;
+ 
+ 	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
+-
+-	if (!table) {
+-		pr_warn("failed to allocate ib gid cache for %s\n",
+-			ib_dev->name);
++	if (!table)
+ 		return -ENOMEM;
+-	}
+ 
+ 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+ 		u8 rdma_port = port + rdma_start_port(ib_dev);
+@@ -1170,14 +1166,13 @@ int ib_cache_setup_one(struct ib_device *device)
+ 					  GFP_KERNEL);
+ 	if (!device->cache.pkey_cache ||
+ 	    !device->cache.lmc_cache) {
+-		pr_warn("Couldn't allocate cache for %s\n", device->name);
+-		return -ENOMEM;
++		err = -ENOMEM;
++		goto free;
+ 	}
+ 
+ 	err = gid_table_setup_one(device);
+ 	if (err)
+-		/* Allocated memory will be cleaned in the release function */
+-		return err;
++		goto free;
+ 
+ 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
+ 		ib_cache_update(device, p + rdma_start_port(device));
+@@ -1192,6 +1187,9 @@ int ib_cache_setup_one(struct ib_device *device)
+ 
+ err:
+ 	gid_table_cleanup_one(device);
++free:
++	kfree(device->cache.pkey_cache);
++	kfree(device->cache.lmc_cache);
+ 	return err;
+ }
+ 
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index b9bf0759f10a..8dfc76f8cbb4 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -114,7 +114,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
+ 		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
+ 			--ah->av.eth.stat_rate;
+ 	}
+-
++	ah->av.eth.sl_tclass_flowlabel |=
++			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
++				    ah_attr->grh.flow_label);
+ 	/*
+ 	 * HW requires multicast LID so we just choose one.
+ 	 */
+@@ -122,7 +124,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
+ 		ah->av.ib.dlid = cpu_to_be16(0xc000);
+ 
+ 	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
+-	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
++	ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
+ 
+ 	return &ah->ibah;
+ }
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index 1672907ff219..18d309e40f1b 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -702,10 +702,18 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
+ 
+ 	/* If a grh is present, we demux according to it */
+ 	if (wc->wc_flags & IB_WC_GRH) {
+-		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
+-		if (slave < 0) {
+-			mlx4_ib_warn(ibdev, "failed matching grh\n");
+-			return -ENOENT;
++		if (grh->dgid.global.interface_id ==
++			cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
++		    grh->dgid.global.subnet_prefix == cpu_to_be64(
++			atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
++			slave = 0;
++		} else {
++			slave = mlx4_ib_find_real_gid(ibdev, port,
++						      grh->dgid.global.interface_id);
++			if (slave < 0) {
++				mlx4_ib_warn(ibdev, "failed matching grh\n");
++				return -ENOENT;
++			}
+ 		}
+ 	}
+ 	/* Class-specific handling */
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index b597e8227591..46ad99595fd2 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -697,9 +697,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
+ 	if (err)
+ 		goto out;
+ 
+-	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
+-						IB_WIDTH_4X : IB_WIDTH_1X;
+-	props->active_speed	= IB_SPEED_QDR;
++	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
++				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
++					   IB_WIDTH_4X : IB_WIDTH_1X;
++	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
++					   IB_SPEED_FDR : IB_SPEED_QDR;
+ 	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
+ 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
+ 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
+@@ -2820,14 +2822,19 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
+ 			goto err_steer_qp_release;
+ 		}
+ 
+-		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
+-
+-		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+-				dev, ibdev->steer_qpn_base,
+-				ibdev->steer_qpn_base +
+-				ibdev->steer_qpn_count - 1);
+-		if (err)
+-			goto err_steer_free_bitmap;
++		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
++			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
++				    ibdev->steer_qpn_count);
++			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
++					dev, ibdev->steer_qpn_base,
++					ibdev->steer_qpn_base +
++					ibdev->steer_qpn_count - 1);
++			if (err)
++				goto err_steer_free_bitmap;
++		} else {
++			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
++				    ibdev->steer_qpn_count);
++		}
+ 	}
+ 
+ 	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 570bc866b1d6..c22454383976 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -1280,7 +1280,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
+ 	if (is_qp0(dev, mqp))
+ 		mlx4_CLOSE_PORT(dev->dev, mqp->port);
+ 
+-	if (dev->qp1_proxy[mqp->port - 1] == mqp) {
++	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
++	    dev->qp1_proxy[mqp->port - 1] == mqp) {
+ 		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+ 		dev->qp1_proxy[mqp->port - 1] = NULL;
+ 		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+@@ -1764,14 +1765,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
+ 		u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
+ 			attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ 		union ib_gid gid;
+-		struct ib_gid_attr gid_attr;
++		struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
+ 		u16 vlan = 0xffff;
+ 		u8 smac[ETH_ALEN];
+ 		int status = 0;
+ 		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
+ 			attr->ah_attr.ah_flags & IB_AH_GRH;
+ 
+-		if (is_eth) {
++		if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) {
+ 			int index = attr->ah_attr.grh.sgid_index;
+ 
+ 			status = ib_get_cached_gid(ibqp->device, port_num,
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 32b09f059c84..4cab29ea394c 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -496,6 +496,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ 	struct mlx5_core_dev *mdev = dev->mdev;
+ 	int err = -ENOMEM;
++	int max_sq_desc;
+ 	int max_rq_sg;
+ 	int max_sq_sg;
+ 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+@@ -618,9 +619,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
+ 		     sizeof(struct mlx5_wqe_data_seg);
+-	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
+-		     sizeof(struct mlx5_wqe_ctrl_seg)) /
+-		     sizeof(struct mlx5_wqe_data_seg);
++	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
++	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
++		     sizeof(struct mlx5_wqe_raddr_seg)) /
++		sizeof(struct mlx5_wqe_data_seg);
+ 	props->max_sge = min(max_rq_sg, max_sq_sg);
+ 	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
+ 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 4e9012463c37..be2d02b6a6aa 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -628,7 +628,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+ 		ent->order = i + 2;
+ 		ent->dev = dev;
+ 
+-		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
++		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
++		    (mlx5_core_is_pf(dev->mdev)))
+ 			limit = dev->mdev->profile->mr_cache[i].limit;
+ 		else
+ 			limit = 0;
+@@ -646,6 +647,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+ 	return 0;
+ }
+ 
++static void wait_for_async_commands(struct mlx5_ib_dev *dev)
++{
++	struct mlx5_mr_cache *cache = &dev->cache;
++	struct mlx5_cache_ent *ent;
++	int total = 0;
++	int i;
++	int j;
++
++	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
++		ent = &cache->ent[i];
++		for (j = 0 ; j < 1000; j++) {
++			if (!ent->pending)
++				break;
++			msleep(50);
++		}
++	}
++	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
++		ent = &cache->ent[i];
++		total += ent->pending;
++	}
++
++	if (total)
++		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
++	else
++		mlx5_ib_warn(dev, "done with all pending requests\n");
++}
++
+ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+ {
+ 	int i;
+@@ -659,6 +687,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+ 		clean_keys(dev, i);
+ 
+ 	destroy_workqueue(dev->cache.wq);
++	wait_for_async_commands(dev);
+ 	del_timer_sync(&dev->delay_timer);
+ 
+ 	return 0;
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index d1e921816bfe..aee3942ec68d 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -351,6 +351,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
+ 		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
+ }
+ 
++static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
++{
++	int max_sge;
++
++	if (attr->qp_type == IB_QPT_RC)
++		max_sge = (min_t(int, wqe_size, 512) -
++			   sizeof(struct mlx5_wqe_ctrl_seg) -
++			   sizeof(struct mlx5_wqe_raddr_seg)) /
++			sizeof(struct mlx5_wqe_data_seg);
++	else if (attr->qp_type == IB_QPT_XRC_INI)
++		max_sge = (min_t(int, wqe_size, 512) -
++			   sizeof(struct mlx5_wqe_ctrl_seg) -
++			   sizeof(struct mlx5_wqe_xrc_seg) -
++			   sizeof(struct mlx5_wqe_raddr_seg)) /
++			sizeof(struct mlx5_wqe_data_seg);
++	else
++		max_sge = (wqe_size - sq_overhead(attr)) /
++			sizeof(struct mlx5_wqe_data_seg);
++
++	return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
++		     sizeof(struct mlx5_wqe_data_seg));
++}
++
+ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ 			struct mlx5_ib_qp *qp)
+ {
+@@ -387,7 +410,11 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ 		return -ENOMEM;
+ 	}
+ 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
+-	qp->sq.max_gs = attr->cap.max_send_sge;
++	qp->sq.max_gs = get_send_sge(attr, wqe_size);
++	if (qp->sq.max_gs < attr->cap.max_send_sge)
++		return -ENOMEM;
++
++	attr->cap.max_send_sge = qp->sq.max_gs;
+ 	qp->sq.max_post = wq_size / wqe_size;
+ 	attr->cap.max_send_wr = qp->sq.max_post;
+ 
+diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
+index 3857dbd9c956..729b0696626e 100644
+--- a/drivers/infiniband/hw/mlx5/srq.c
++++ b/drivers/infiniband/hw/mlx5/srq.c
+@@ -282,6 +282,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
+ 	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
+ 		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
+ 		    srq->msrq.max_avail_gather);
++	in.type = init_attr->srq_type;
+ 
+ 	if (pd->uobject)
+ 		err = create_srq_user(pd, srq, &in, udata, buf_size);
+@@ -294,7 +295,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
+ 		goto err_srq;
+ 	}
+ 
+-	in.type = init_attr->srq_type;
+ 	in.log_size = ilog2(srq->msrq.max);
+ 	in.wqe_shift = srq->msrq.wqe_shift - 4;
+ 	if (srq->wq_sig)
+diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
+index f459c43a77c8..13ed2cc6eaa2 100644
+--- a/drivers/infiniband/sw/rxe/rxe_param.h
++++ b/drivers/infiniband/sw/rxe/rxe_param.h
+@@ -82,7 +82,7 @@ enum rxe_device_param {
+ 	RXE_MAX_SGE			= 32,
+ 	RXE_MAX_SGE_RD			= 32,
+ 	RXE_MAX_CQ			= 16384,
+-	RXE_MAX_LOG_CQE			= 13,
++	RXE_MAX_LOG_CQE			= 15,
+ 	RXE_MAX_MR			= 2 * 1024,
+ 	RXE_MAX_PD			= 0x7ffc,
+ 	RXE_MAX_QP_RD_ATOM		= 128,
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 22bd9630dcd9..9f46be52335e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp,
+ static void save_state(struct rxe_send_wqe *wqe,
+ 		       struct rxe_qp *qp,
+ 		       struct rxe_send_wqe *rollback_wqe,
+-		       struct rxe_qp *rollback_qp)
++		       u32 *rollback_psn)
+ {
+ 	rollback_wqe->state     = wqe->state;
+ 	rollback_wqe->first_psn = wqe->first_psn;
+ 	rollback_wqe->last_psn  = wqe->last_psn;
+-	rollback_qp->req.psn    = qp->req.psn;
++	*rollback_psn		= qp->req.psn;
+ }
+ 
+ static void rollback_state(struct rxe_send_wqe *wqe,
+ 			   struct rxe_qp *qp,
+ 			   struct rxe_send_wqe *rollback_wqe,
+-			   struct rxe_qp *rollback_qp)
++			   u32 rollback_psn)
+ {
+ 	wqe->state     = rollback_wqe->state;
+ 	wqe->first_psn = rollback_wqe->first_psn;
+ 	wqe->last_psn  = rollback_wqe->last_psn;
+-	qp->req.psn    = rollback_qp->req.psn;
++	qp->req.psn    = rollback_psn;
+ }
+ 
+ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+@@ -593,8 +593,8 @@ int rxe_requester(void *arg)
+ 	int mtu;
+ 	int opcode;
+ 	int ret;
+-	struct rxe_qp rollback_qp;
+ 	struct rxe_send_wqe rollback_wqe;
++	u32 rollback_psn;
+ 
+ next_wqe:
+ 	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
+@@ -719,7 +719,7 @@ int rxe_requester(void *arg)
+ 	 * rxe_xmit_packet().
+ 	 * Otherwise, completer might initiate an unjustified retry flow.
+ 	 */
+-	save_state(wqe, qp, &rollback_wqe, &rollback_qp);
++	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
+ 	update_wqe_state(qp, wqe, &pkt);
+ 	update_wqe_psn(qp, wqe, &pkt, payload);
+ 	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
+@@ -727,7 +727,7 @@ int rxe_requester(void *arg)
+ 		qp->need_req_skb = 1;
+ 		kfree_skb(skb);
+ 
+-		rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
++		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
+ 
+ 		if (ret == -EAGAIN) {
+ 			rxe_run_task(&qp->req.task, 1);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 339a1eecdfe3..81a8080c18b3 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -1054,8 +1054,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
+ 
+ 	tx_qp = ib_create_qp(priv->pd, &attr);
+ 	if (PTR_ERR(tx_qp) == -EINVAL) {
+-		ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
+-			   priv->ca->name);
+ 		attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
+ 		tx_qp = ib_create_qp(priv->pd, &attr);
+ 	}
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 6d7de9bfed9a..b93fe83a0b63 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -1346,6 +1346,18 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
+ 
+ 	priv->multi_packet = 0;
+ 
++	/* Report trackstick */
++	if (alps_get_pkt_id_ss4_v2(packet) == SS4_PACKET_ID_STICK) {
++		if (priv->flags & ALPS_DUALPOINT) {
++			input_report_key(dev2, BTN_LEFT, f->ts_left);
++			input_report_key(dev2, BTN_RIGHT, f->ts_right);
++			input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
++			input_sync(dev2);
++		}
++		return;
++	}
++
++	/* Report touchpad */
+ 	alps_report_mt_data(psmouse, (f->fingers <= 4) ? f->fingers : 4);
+ 
+ 	input_mt_report_finger_count(dev, f->fingers);
+@@ -1356,13 +1368,6 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
+ 
+ 	input_report_abs(dev, ABS_PRESSURE, f->pressure);
+ 	input_sync(dev);
+-
+-	if (priv->flags & ALPS_DUALPOINT) {
+-		input_report_key(dev2, BTN_LEFT, f->ts_left);
+-		input_report_key(dev2, BTN_RIGHT, f->ts_right);
+-		input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
+-		input_sync(dev2);
+-	}
+ }
+ 
+ static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
+diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
+index ce4a96fccc43..5ff803efdc03 100644
+--- a/drivers/media/platform/Kconfig
++++ b/drivers/media/platform/Kconfig
+@@ -93,7 +93,7 @@ config VIDEO_OMAP3_DEBUG
+ 
+ config VIDEO_PXA27x
+ 	tristate "PXA27x Quick Capture Interface driver"
+-	depends on VIDEO_DEV && HAS_DMA
++	depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ 	depends on PXA27x || COMPILE_TEST
+ 	select VIDEOBUF2_DMA_SG
+ 	select SG_SPLIT
+diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
+index cff63e511e6d..b8f3d9fa66e9 100644
+--- a/drivers/media/platform/blackfin/ppi.c
++++ b/drivers/media/platform/blackfin/ppi.c
+@@ -214,6 +214,8 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
+ 	if (params->dlen > 24 || params->dlen <= 0)
+ 		return -EINVAL;
+ 	pctrl = devm_pinctrl_get(ppi->dev);
++	if (IS_ERR(pctrl))
++		return PTR_ERR(pctrl);
+ 	pstate = pinctrl_lookup_state(pctrl,
+ 				      pin_state[(params->dlen + 7) / 8 - 1]);
+ 	if (pinctrl_select_state(pctrl, pstate))
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+index 3436eda58855..27e7cf65c2a7 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+@@ -926,10 +926,11 @@ static int s5p_mfc_release(struct file *file)
+ 	mfc_debug_enter();
+ 	if (dev)
+ 		mutex_lock(&dev->mfc_mutex);
+-	s5p_mfc_clock_on();
+ 	vb2_queue_release(&ctx->vq_src);
+ 	vb2_queue_release(&ctx->vq_dst);
+ 	if (dev) {
++		s5p_mfc_clock_on();
++
+ 		/* Mark context as idle */
+ 		clear_work_bit_irqsave(ctx);
+ 		/*
+@@ -951,9 +952,9 @@ static int s5p_mfc_release(struct file *file)
+ 			if (s5p_mfc_power_off() < 0)
+ 				mfc_err("Power off failed\n");
+ 		}
++		mfc_debug(2, "Shutting down clock\n");
++		s5p_mfc_clock_off();
+ 	}
+-	mfc_debug(2, "Shutting down clock\n");
+-	s5p_mfc_clock_off();
+ 	if (dev)
+ 		dev->ctx[ctx->num] = NULL;
+ 	s5p_mfc_dec_ctrls_delete(ctx);
+diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
+index d341d4994528..cf2a8d884536 100644
+--- a/drivers/media/platform/sti/hva/hva-hw.c
++++ b/drivers/media/platform/sti/hva/hva-hw.c
+@@ -305,16 +305,16 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
+ 	/* get memory for registers */
+ 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	hva->regs = devm_ioremap_resource(dev, regs);
+-	if (IS_ERR_OR_NULL(hva->regs)) {
++	if (IS_ERR(hva->regs)) {
+ 		dev_err(dev, "%s     failed to get regs\n", HVA_PREFIX);
+ 		return PTR_ERR(hva->regs);
+ 	}
+ 
+ 	/* get memory for esram */
+ 	esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+-	if (IS_ERR_OR_NULL(esram)) {
++	if (!esram) {
+ 		dev_err(dev, "%s     failed to get esram\n", HVA_PREFIX);
+-		return PTR_ERR(esram);
++		return -ENODEV;
+ 	}
+ 	hva->esram_addr = esram->start;
+ 	hva->esram_size = resource_size(esram);
+diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
+index 0f301903aa6f..63165d324fff 100644
+--- a/drivers/media/rc/ite-cir.c
++++ b/drivers/media/rc/ite-cir.c
+@@ -263,6 +263,8 @@ static void ite_set_carrier_params(struct ite_dev *dev)
+ 
+ 			if (allowance > ITE_RXDCR_MAX)
+ 				allowance = ITE_RXDCR_MAX;
++
++			use_demodulator = true;
+ 		}
+ 	}
+ 
+diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
+index d76f36233f43..5143a90219c0 100644
+--- a/drivers/media/spi/gs1662.c
++++ b/drivers/media/spi/gs1662.c
+@@ -453,10 +453,9 @@ static int gs_probe(struct spi_device *spi)
+ static int gs_remove(struct spi_device *spi)
+ {
+ 	struct v4l2_subdev *sd = spi_get_drvdata(spi);
+-	struct gs *gs = to_gs(sd);
+ 
+ 	v4l2_device_unregister_subdev(sd);
+-	kfree(gs);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index 44ecebd1ea8c..c8b8ac66ff7e 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
+ 	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+ 	cmd1 = cmd->arg;
+ 
++	if (cmd->opcode == MMC_STOP_TRANSMISSION)
++		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
++
+ 	if (host->sdio_irq_en) {
+ 		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ 		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+@@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
+ 		       ssp->base + HW_SSP_BLOCK_SIZE);
+ 	}
+ 
+-	if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
+-	    (cmd->opcode == SD_IO_RW_EXTENDED))
++	if (cmd->opcode == SD_IO_RW_EXTENDED)
+ 		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+ 
+ 	cmd1 = cmd->arg;
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index 81d4dc034793..fddd0be196f4 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -394,7 +394,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
+ 	/* Power on the SDHCI controller and its children */
+ 	acpi_device_fix_up_power(device);
+ 	list_for_each_entry(child, &device->children, node)
+-		acpi_device_fix_up_power(child);
++		if (child->status.present && child->status.enabled)
++			acpi_device_fix_up_power(child);
+ 
+ 	if (acpi_bus_get_status(device) || !device->status.present)
+ 		return -ENODEV;
+diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
+index 7b7a887b4709..b254090b8a1b 100644
+--- a/drivers/mtd/nand/Kconfig
++++ b/drivers/mtd/nand/Kconfig
+@@ -537,7 +537,7 @@ config MTD_NAND_FSMC
+ 	  Flexible Static Memory Controller (FSMC)
+ 
+ config MTD_NAND_XWAY
+-	tristate "Support for NAND on Lantiq XWAY SoC"
++	bool "Support for NAND on Lantiq XWAY SoC"
+ 	depends on LANTIQ && SOC_TYPE_XWAY
+ 	help
+ 	  Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
+diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
+index 852388171f20..bc6e49af063a 100644
+--- a/drivers/mtd/nand/lpc32xx_mlc.c
++++ b/drivers/mtd/nand/lpc32xx_mlc.c
+@@ -776,7 +776,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
+ 	init_completion(&host->comp_controller);
+ 
+ 	host->irq = platform_get_irq(pdev, 0);
+-	if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
++	if (host->irq < 0) {
+ 		dev_err(&pdev->dev, "failed to get platform irq\n");
+ 		res = -EINVAL;
+ 		goto err_exit3;
+diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
+index 1f2948c0c458..895101a5e686 100644
+--- a/drivers/mtd/nand/xway_nand.c
++++ b/drivers/mtd/nand/xway_nand.c
+@@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = {
+ 	{ .compatible = "lantiq,nand-xway" },
+ 	{},
+ };
+-MODULE_DEVICE_TABLE(of, xway_nand_match);
+ 
+ static struct platform_driver xway_nand_driver = {
+ 	.probe	= xway_nand_probe,
+@@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = {
+ 	},
+ };
+ 
+-module_platform_driver(xway_nand_driver);
+-
+-MODULE_LICENSE("GPL");
++builtin_platform_driver(xway_nand_driver);
+diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
+index d403ba7b8f43..d489fbd07c12 100644
+--- a/drivers/mtd/spi-nor/cadence-quadspi.c
++++ b/drivers/mtd/spi-nor/cadence-quadspi.c
+@@ -1077,12 +1077,14 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
+ 
+ 	/* Get flash device data */
+ 	for_each_available_child_of_node(dev->of_node, np) {
+-		if (of_property_read_u32(np, "reg", &cs)) {
++		ret = of_property_read_u32(np, "reg", &cs);
++		if (ret) {
+ 			dev_err(dev, "Couldn't determine chip select.\n");
+ 			goto err;
+ 		}
+ 
+-		if (cs > CQSPI_MAX_CHIPSELECT) {
++		if (cs >= CQSPI_MAX_CHIPSELECT) {
++			ret = -EINVAL;
+ 			dev_err(dev, "Chip select %d out of range.\n", cs);
+ 			goto err;
+ 		}
+diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
+index 1056ed142411..f186e0460cde 100644
+--- a/drivers/net/ieee802154/atusb.c
++++ b/drivers/net/ieee802154/atusb.c
+@@ -112,13 +112,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
+ {
+ 	struct usb_device *usb_dev = atusb->usb_dev;
+ 	int ret;
++	uint8_t *buffer;
+ 	uint8_t value;
+ 
++	buffer = kmalloc(1, GFP_KERNEL);
++	if (!buffer)
++		return -ENOMEM;
++
+ 	dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
+ 	ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ 				ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+-				0, reg, &value, 1, 1000);
+-	return ret >= 0 ? value : ret;
++				0, reg, buffer, 1, 1000);
++
++	if (ret >= 0) {
++		value = buffer[0];
++		kfree(buffer);
++		return value;
++	} else {
++		kfree(buffer);
++		return ret;
++	}
+ }
+ 
+ static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
+@@ -587,9 +600,13 @@ static struct ieee802154_ops atusb_ops = {
+ static int atusb_get_and_show_revision(struct atusb *atusb)
+ {
+ 	struct usb_device *usb_dev = atusb->usb_dev;
+-	unsigned char buffer[3];
++	unsigned char *buffer;
+ 	int ret;
+ 
++	buffer = kmalloc(3, GFP_KERNEL);
++	if (!buffer)
++		return -ENOMEM;
++
+ 	/* Get a couple of the ATMega Firmware values */
+ 	ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ 				ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
+@@ -605,15 +622,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
+ 		dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
+ 	}
+ 
++	kfree(buffer);
+ 	return ret;
+ }
+ 
+ static int atusb_get_and_show_build(struct atusb *atusb)
+ {
+ 	struct usb_device *usb_dev = atusb->usb_dev;
+-	char build[ATUSB_BUILD_SIZE + 1];
++	char *build;
+ 	int ret;
+ 
++	build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
++	if (!build)
++		return -ENOMEM;
++
+ 	ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ 				ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
+ 				build, ATUSB_BUILD_SIZE, 1000);
+@@ -622,6 +644,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
+ 		dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
+ 	}
+ 
++	kfree(build);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
+index abe5c6bc756c..1480734c2d6e 100644
+--- a/drivers/nvdimm/namespace_devs.c
++++ b/drivers/nvdimm/namespace_devs.c
+@@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
+ {
+ 	resource_size_t allocated = 0, available = 0;
+ 	struct nd_region *nd_region = to_nd_region(dev->parent);
++	struct nd_namespace_common *ndns = to_ndns(dev);
+ 	struct nd_mapping *nd_mapping;
+ 	struct nvdimm_drvdata *ndd;
+ 	struct nd_label_id label_id;
+@@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
+ 	u8 *uuid = NULL;
+ 	int rc, i;
+ 
+-	if (dev->driver || to_ndns(dev)->claim)
++	if (dev->driver || ndns->claim)
+ 		return -EBUSY;
+ 
+ 	if (is_namespace_pmem(dev)) {
+@@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
+ 
+ 		nd_namespace_pmem_set_resource(nd_region, nspm,
+ 				val * nd_region->ndr_mappings);
+-	} else if (is_namespace_blk(dev)) {
+-		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
+-
+-		/*
+-		 * Try to delete the namespace if we deleted all of its
+-		 * allocation, this is not the seed device for the
+-		 * region, and it is not actively claimed by a btt
+-		 * instance.
+-		 */
+-		if (val == 0 && nd_region->ns_seed != dev
+-				&& !nsblk->common.claim)
+-			nd_device_unregister(dev, ND_ASYNC);
+ 	}
+ 
++	/*
++	 * Try to delete the namespace if we deleted all of its
++	 * allocation, this is not the seed device for the region, and
++	 * it is not actively claimed by a btt instance.
++	 */
++	if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
++		nd_device_unregister(dev, ND_ASYNC);
++
+ 	return rc;
+ }
+ 
+diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
+index bed19994c1e9..af8f6e92e885 100644
+--- a/drivers/pci/host/pcie-designware.c
++++ b/drivers/pci/host/pcie-designware.c
+@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
+ {
+ 	u32 val;
+ 
+-	/* get iATU unroll support */
+-	pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+-	dev_dbg(pp->dev, "iATU unroll: %s\n",
+-		pp->iatu_unroll_enabled ? "enabled" : "disabled");
+-
+ 	/* set the number of lanes */
+ 	val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
+ 	val &= ~PORT_LINK_MODE_MASK;
+@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
+ 	 * we should not program the ATU here.
+ 	 */
+ 	if (!pp->ops->rd_other_conf) {
++		/* get iATU unroll support */
++		pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
++		dev_dbg(pp->dev, "iATU unroll: %s\n",
++			pp->iatu_unroll_enabled ? "enabled" : "disabled");
++
+ 		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
+ 					  PCIE_ATU_TYPE_MEM, pp->mem_base,
+ 					  pp->mem_bus_addr, pp->mem_size);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 104c46d53121..300770cdc084 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1050,6 +1050,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
+ 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ 	if (!pos)
+ 		return;
++
+ 	pdev->pcie_cap = pos;
+ 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ 	pdev->pcie_flags_reg = reg16;
+@@ -1057,13 +1058,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
+ 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+ 
+ 	/*
+-	 * A Root Port is always the upstream end of a Link.  No PCIe
+-	 * component has two Links.  Two Links are connected by a Switch
+-	 * that has a Port on each Link and internal logic to connect the
+-	 * two Ports.
++	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
++	 * of a Link.  No PCIe component has two Links.  Two Links are
++	 * connected by a Switch that has a Port on each Link and internal
++	 * logic to connect the two Ports.
+ 	 */
+ 	type = pci_pcie_type(pdev);
+-	if (type == PCI_EXP_TYPE_ROOT_PORT)
++	if (type == PCI_EXP_TYPE_ROOT_PORT ||
++	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
+ 		pdev->has_secondary_link = 1;
+ 	else if (type == PCI_EXP_TYPE_UPSTREAM ||
+ 		 type == PCI_EXP_TYPE_DOWNSTREAM) {
+diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
+index b6ea9ffa7381..e0a629eaceab 100644
+--- a/drivers/rpmsg/rpmsg_core.c
++++ b/drivers/rpmsg/rpmsg_core.c
+@@ -411,8 +411,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev)
+ 	struct device *dev = &rpdev->dev;
+ 	int ret;
+ 
+-	dev_set_name(&rpdev->dev, "%s:%s",
+-		     dev_name(dev->parent), rpdev->id.name);
++	dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
++		     rpdev->id.name, rpdev->src, rpdev->dst);
+ 
+ 	rpdev->dev.bus = &rpmsg_bus;
+ 	rpdev->dev.release = rpmsg_release_device;
+diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+index 608140f16d98..e3b911c895b4 100644
+--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+@@ -45,6 +45,7 @@
+ 
+ #define	INITIAL_SRP_LIMIT	800
+ #define	DEFAULT_MAX_SECTORS	256
++#define MAX_TXU			1024 * 1024
+ 
+ static uint max_vdma_size = MAX_H_COPY_RDMA;
+ 
+@@ -1239,7 +1240,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
+ 	}
+ 
+ 	info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
+-				  GFP_KERNEL);
++				  GFP_ATOMIC);
+ 	if (!info) {
+ 		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
+ 			iue->target);
+@@ -1291,7 +1292,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
+ 	info->mad_version = cpu_to_be32(MAD_VERSION_1);
+ 	info->os_type = cpu_to_be32(LINUX);
+ 	memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
+-	info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
++	info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
+ 
+ 	dma_wmb();
+ 	rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
+@@ -1357,7 +1358,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
+ 	}
+ 
+ 	cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
+-				 GFP_KERNEL);
++				 GFP_ATOMIC);
+ 	if (!cap) {
+ 		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
+ 			iue->target);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
+index 3e71bc1b4a80..7008061c4b5b 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
+@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
+  * @eedp_enable: eedp support enable bit
+  * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
+  * @eedp_block_length: block size
++ * @ata_command_pending: SATL passthrough outstanding for device
+  */
+ struct MPT3SAS_DEVICE {
+ 	struct MPT3SAS_TARGET *sas_target;
+@@ -402,6 +403,17 @@ struct MPT3SAS_DEVICE {
+ 	u8	block;
+ 	u8	tlr_snoop_check;
+ 	u8	ignore_delay_remove;
++	/*
++	 * Bug workaround for SATL handling: the mpt2/3sas firmware
++	 * doesn't return BUSY or TASK_SET_FULL for subsequent
++	 * commands while a SATL pass through is in operation as the
++	 * spec requires, it simply does nothing with them until the
++	 * pass through completes, causing them possibly to timeout if
++	 * the passthrough is a long executing command (like format or
++	 * secure erase).  This variable allows us to do the right
++	 * thing while a SATL command is pending.
++	 */
++	unsigned long ata_command_pending;
+ };
+ 
+ #define MPT3_CMD_NOT_USED	0x8000	/* free */
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+index 1c4744e78173..f84a6087cebd 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+@@ -3885,9 +3885,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
+ 	}
+ }
+ 
+-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
++static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
+ {
+-	return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
++	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
++
++	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
++		return 0;
++
++	if (pending)
++		return test_and_set_bit(0, &priv->ata_command_pending);
++
++	clear_bit(0, &priv->ata_command_pending);
++	return 0;
+ }
+ 
+ /**
+@@ -3911,9 +3920,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
+ 		if (!scmd)
+ 			continue;
+ 		count++;
+-		if (ata_12_16_cmd(scmd))
+-			scsi_internal_device_unblock(scmd->device,
+-							SDEV_RUNNING);
++		_scsih_set_satl_pending(scmd, false);
+ 		mpt3sas_base_free_smid(ioc, smid);
+ 		scsi_dma_unmap(scmd);
+ 		if (ioc->pci_error_recovery)
+@@ -4044,13 +4051,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ 	if (ioc->logging_level & MPT_DEBUG_SCSI)
+ 		scsi_print_command(scmd);
+ 
+-	/*
+-	 * Lock the device for any subsequent command until command is
+-	 * done.
+-	 */
+-	if (ata_12_16_cmd(scmd))
+-		scsi_internal_device_block(scmd->device);
+-
+ 	sas_device_priv_data = scmd->device->hostdata;
+ 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ 		scmd->result = DID_NO_CONNECT << 16;
+@@ -4064,6 +4064,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ 		return 0;
+ 	}
+ 
++	/*
++	 * Bug work around for firmware SATL handling.  The loop
++	 * is based on atomic operations and ensures consistency
++	 * since we're lockless at this point
++	 */
++	do {
++		if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
++			scmd->result = SAM_STAT_BUSY;
++			scmd->scsi_done(scmd);
++			return 0;
++		}
++	} while (_scsih_set_satl_pending(scmd, true));
++
+ 	sas_target_priv_data = sas_device_priv_data->sas_target;
+ 
+ 	/* invalid device handle */
+@@ -4626,8 +4639,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+ 	if (scmd == NULL)
+ 		return 1;
+ 
+-	if (ata_12_16_cmd(scmd))
+-		scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
++	_scsih_set_satl_pending(scmd, false);
+ 
+ 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 56d6142852a5..078d797cb492 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3489,7 +3489,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 				sizeof(struct ct6_dsd), 0,
+ 				SLAB_HWCACHE_ALIGN, NULL);
+ 			if (!ctx_cachep)
+-				goto fail_free_gid_list;
++				goto fail_free_srb_mempool;
+ 		}
+ 		ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
+ 			ctx_cachep);
+@@ -3642,7 +3642,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 	ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
+ 	    GFP_KERNEL);
+ 	if (!ha->loop_id_map)
+-		goto fail_async_pd;
++		goto fail_loop_id_map;
+ 	else {
+ 		qla2x00_set_reserved_loop_ids(ha);
+ 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
+@@ -3651,6 +3651,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 
+ 	return 0;
+ 
++fail_loop_id_map:
++	dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
+ fail_async_pd:
+ 	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
+ fail_ex_init_cb:
+@@ -3678,6 +3680,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 	dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+ 	ha->ms_iocb = NULL;
+ 	ha->ms_iocb_dma = 0;
++
++	if (ha->sns_cmd)
++		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
++		    ha->sns_cmd, ha->sns_cmd_dma);
+ fail_dma_pool:
+ 	if (IS_QLA82XX(ha) || ql2xenabledif) {
+ 		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+@@ -3695,10 +3701,12 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 	kfree(ha->nvram);
+ 	ha->nvram = NULL;
+ fail_free_ctx_mempool:
+-	mempool_destroy(ha->ctx_mempool);
++	if (ha->ctx_mempool)
++		mempool_destroy(ha->ctx_mempool);
+ 	ha->ctx_mempool = NULL;
+ fail_free_srb_mempool:
+-	mempool_destroy(ha->srb_mempool);
++	if (ha->srb_mempool)
++		mempool_destroy(ha->srb_mempool);
+ 	ha->srb_mempool = NULL;
+ fail_free_gid_list:
+ 	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 8c9a35c91705..50adabbb5808 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
+ 
+ 	ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+ 
+-	if (scsi_is_sas_rphy(&sdev->sdev_gendev))
++	if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
+ 		efd.addr = sas_get_address(sdev);
+ 
+ 	if (efd.addr) {
+diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
+index 8823cc81ae45..5bb376009d98 100644
+--- a/drivers/soc/ti/wkup_m3_ipc.c
++++ b/drivers/soc/ti/wkup_m3_ipc.c
+@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
+ 
+ 	if (IS_ERR(task)) {
+ 		dev_err(dev, "can't create rproc_boot thread\n");
++		ret = PTR_ERR(task);
+ 		goto err_put_rproc;
+ 	}
+ 
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index dd7b5b47291d..d6239fa718be 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
+ 		pxa2xx_spi_write(drv_data, SSCR1, tmp);
+ 		tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
+ 		pxa2xx_spi_write(drv_data, SSCR0, tmp);
++		break;
+ 	default:
+ 		tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
+ 		      SSCR1_TxTresh(TX_THRESH_DFLT);
+diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
+index 1780a08b73c9..58d756231136 100644
+--- a/drivers/staging/media/s5p-cec/s5p_cec.c
++++ b/drivers/staging/media/s5p-cec/s5p_cec.c
+@@ -231,7 +231,7 @@ static int s5p_cec_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int s5p_cec_runtime_suspend(struct device *dev)
++static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev)
+ {
+ 	struct s5p_cec_dev *cec = dev_get_drvdata(dev);
+ 
+@@ -239,7 +239,7 @@ static int s5p_cec_runtime_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
+-static int s5p_cec_runtime_resume(struct device *dev)
++static int __maybe_unused s5p_cec_runtime_resume(struct device *dev)
+ {
+ 	struct s5p_cec_dev *cec = dev_get_drvdata(dev);
+ 	int ret;
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 87e6035c9e81..8e7a3d646531 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -392,7 +392,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+ 	if (dma_capable(dev, dev_addr, size) &&
+ 	    !range_straddles_page_boundary(phys, size) &&
+ 		!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
+-		!swiotlb_force) {
++		(swiotlb_force != SWIOTLB_FORCE)) {
+ 		/* we are not interested in the dma_addr returned by
+ 		 * xen_dma_map_page, only in the potential cache flushes executed
+ 		 * by the function. */
+@@ -549,7 +549,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ 		phys_addr_t paddr = sg_phys(sg);
+ 		dma_addr_t dev_addr = xen_phys_to_bus(paddr);
+ 
+-		if (swiotlb_force ||
++		if (swiotlb_force == SWIOTLB_FORCE ||
+ 		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
+ 		    !dma_capable(hwdev, dev_addr, sg->length) ||
+ 		    range_straddles_page_boundary(paddr, sg->length)) {
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 16e6ded0b7f2..f3f21105b860 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2507,9 +2507,20 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
+ 			if (err < 0)
+ 				ret = err;
+ 		} else {
+-			ret = wait_event_interruptible(ci->i_cap_wq,
+-					try_get_cap_refs(ci, need, want, endoff,
+-							 true, &_got, &err));
++			DEFINE_WAIT_FUNC(wait, woken_wake_function);
++			add_wait_queue(&ci->i_cap_wq, &wait);
++
++			while (!try_get_cap_refs(ci, need, want, endoff,
++						 true, &_got, &err)) {
++				if (signal_pending(current)) {
++					ret = -ERESTARTSYS;
++					break;
++				}
++				wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
++			}
++
++			remove_wait_queue(&ci->i_cap_wq, &wait);
++
+ 			if (err == -EAGAIN)
+ 				continue;
+ 			if (err < 0)
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index a594c7879cc2..1afa11191000 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -1255,7 +1255,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 		struct ceph_mds_client *mdsc =
+ 			ceph_sb_to_client(dir->i_sb)->mdsc;
+ 		struct ceph_mds_request *req;
+-		int op, mask, err;
++		int op, err;
++		u32 mask;
+ 
+ 		if (flags & LOOKUP_RCU)
+ 			return -ECHILD;
+@@ -1270,7 +1271,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 			mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
+ 			if (ceph_security_xattr_wanted(dir))
+ 				mask |= CEPH_CAP_XATTR_SHARED;
+-			req->r_args.getattr.mask = mask;
++			req->r_args.getattr.mask = cpu_to_le32(mask);
+ 
+ 			err = ceph_mdsc_do_request(mdsc, NULL, req);
+ 			switch (err) {
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index ef4d04647325..12f2252f6c98 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r)
+ {
+ 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
+ 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
+-	return ceph_frag_compare(ls->frag, rs->frag);
++	return ceph_frag_compare(le32_to_cpu(ls->frag),
++				 le32_to_cpu(rs->frag));
+ }
+ 
+ static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 815acd1a56d4..6a26c7bd1286 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end,
+ 				  struct ceph_mds_reply_info_parsed *info,
+ 				  u64 features)
+ {
+-	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
++	u32 op = le32_to_cpu(info->head->op);
++
++	if (op == CEPH_MDS_OP_GETFILELOCK)
+ 		return parse_reply_info_filelock(p, end, info, features);
+-	else if (info->head->op == CEPH_MDS_OP_READDIR ||
+-		 info->head->op == CEPH_MDS_OP_LSSNAP)
++	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
+ 		return parse_reply_info_dir(p, end, info, features);
+-	else if (info->head->op == CEPH_MDS_OP_CREATE)
++	else if (op == CEPH_MDS_OP_CREATE)
+ 		return parse_reply_info_create(p, end, info, features);
+ 	else
+ 		return -EIO;
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 70ea57c7b6bb..4e06a27ed7f8 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -2025,7 +2025,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
+ 		struct fuse_req *req;
+ 		req = list_entry(head->next, struct fuse_req, list);
+ 		req->out.h.error = -ECONNABORTED;
+-		clear_bit(FR_PENDING, &req->flags);
+ 		clear_bit(FR_SENT, &req->flags);
+ 		list_del_init(&req->list);
+ 		request_end(fc, req);
+@@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
+ 		spin_lock(&fiq->waitq.lock);
+ 		fiq->connected = 0;
+ 		list_splice_init(&fiq->pending, &to_end2);
++		list_for_each_entry(req, &to_end2, list)
++			clear_bit(FR_PENDING, &req->flags);
+ 		while (forget_pending(fiq))
+ 			kfree(dequeue_forget(fiq, 1, NULL));
+ 		wake_up_all_locked(&fiq->waitq);
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 096f79997f75..642c57b8de7b 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
+ 	if (sec || nsec) {
+ 		struct timespec64 ts = {
+ 			sec,
+-			max_t(u32, nsec, NSEC_PER_SEC - 1)
++			min_t(u32, nsec, NSEC_PER_SEC - 1)
+ 		};
+ 
+ 		return get_jiffies_64() + timespec64_to_jiffies(&ts);
+diff --git a/fs/posix_acl.c b/fs/posix_acl.c
+index 595522022aca..c9d48dc78495 100644
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -922,11 +922,10 @@ int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	int error;
+ 
+ 	if (type == ACL_TYPE_ACCESS) {
+-		error = posix_acl_equiv_mode(acl, &inode->i_mode);
+-		if (error < 0)
+-			return 0;
+-		if (error == 0)
+-			acl = NULL;
++		error = posix_acl_update_mode(inode,
++				&inode->i_mode, &acl);
++		if (error)
++			return error;
+ 	}
+ 
+ 	inode->i_ctime = current_time(inode);
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index fa9a20cc60d6..fe5e8d4970ae 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -34,6 +34,11 @@
+ #include <linux/slab.h>
+ #include "ubifs.h"
+ 
++static int try_read_node(const struct ubifs_info *c, void *buf, int type,
++			 int len, int lnum, int offs);
++static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
++			      struct ubifs_zbranch *zbr, void *node);
++
+ /*
+  * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
+  * @NAME_LESS: name corresponding to the first argument is less than second
+@@ -402,7 +407,19 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+ 		return 0;
+ 	}
+ 
+-	err = ubifs_tnc_read_node(c, zbr, node);
++	if (c->replaying) {
++		err = fallible_read_node(c, &zbr->key, zbr, node);
++		/*
++		 * When the node was not found, return -ENOENT, 0 otherwise.
++		 * Negative return codes stay as-is.
++		 */
++		if (err == 0)
++			err = -ENOENT;
++		else if (err == 1)
++			err = 0;
++	} else {
++		err = ubifs_tnc_read_node(c, zbr, node);
++	}
+ 	if (err)
+ 		return err;
+ 
+@@ -2766,7 +2783,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
+ 	if (nm->name) {
+ 		if (err) {
+ 			/* Handle collisions */
+-			err = resolve_collision(c, key, &znode, &n, nm);
++			if (c->replaying)
++				err = fallible_resolve_collision(c, key, &znode, &n,
++							 nm, 0);
++			else
++				err = resolve_collision(c, key, &znode, &n, nm);
+ 			dbg_tnc("rc returned %d, znode %p, n %d",
+ 				err, znode, n);
+ 			if (unlikely(err < 0))
+diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
+index 9d02f5317c7c..88e64846cf37 100644
+--- a/include/dt-bindings/clock/r8a7794-clock.h
++++ b/include/dt-bindings/clock/r8a7794-clock.h
+@@ -20,8 +20,7 @@
+ #define R8A7794_CLK_QSPI		5
+ #define R8A7794_CLK_SDH			6
+ #define R8A7794_CLK_SD0			7
+-#define R8A7794_CLK_Z			8
+-#define R8A7794_CLK_RCAN		9
++#define R8A7794_CLK_RCAN		8
+ 
+ /* MSTP0 */
+ #define R8A7794_CLK_MSIOF0		0
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 321f9ed552a9..01f71e1d2e94 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
+ #error "Unknown RCU implementation specified to kernel configuration"
+ #endif
+ 
++#define RCU_SCHEDULER_INACTIVE	0
++#define RCU_SCHEDULER_INIT	1
++#define RCU_SCHEDULER_RUNNING	2
++
+ /*
+  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
+  * initialization and destruction of rcu_head on the stack. rcu_head structures
+diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
+index e5d193440374..7440290f64ac 100644
+--- a/include/linux/sunrpc/svc_xprt.h
++++ b/include/linux/sunrpc/svc_xprt.h
+@@ -66,6 +66,7 @@ struct svc_xprt {
+ #define XPT_LISTENER	10		/* listening endpoint */
+ #define XPT_CACHE_AUTH	11		/* cache auth info */
+ #define XPT_LOCAL	12		/* connection from loopback interface */
++#define XPT_KILL_TEMP   13		/* call xpo_kill_temp_xprt before closing */
+ 
+ 	struct svc_serv		*xpt_server;	/* service for transport */
+ 	atomic_t    	    	xpt_reserved;	/* space on outq that is rsvd */
+diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
+index 5f81f8a187f2..d2613536fd03 100644
+--- a/include/linux/swiotlb.h
++++ b/include/linux/swiotlb.h
+@@ -9,7 +9,13 @@ struct device;
+ struct page;
+ struct scatterlist;
+ 
+-extern int swiotlb_force;
++enum swiotlb_force {
++	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
++	SWIOTLB_FORCE,		/* swiotlb=force */
++	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
++};
++
++extern enum swiotlb_force swiotlb_force;
+ 
+ /*
+  * Maximum allowable number of contiguous slabs to map,
+diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
+index 7ea4c5e7c448..288c0c54a2b4 100644
+--- a/include/trace/events/swiotlb.h
++++ b/include/trace/events/swiotlb.h
+@@ -11,16 +11,16 @@ TRACE_EVENT(swiotlb_bounced,
+ 	TP_PROTO(struct device *dev,
+ 		 dma_addr_t dev_addr,
+ 		 size_t size,
+-		 int swiotlb_force),
++		 enum swiotlb_force swiotlb_force),
+ 
+ 	TP_ARGS(dev, dev_addr, size, swiotlb_force),
+ 
+ 	TP_STRUCT__entry(
+-		__string(	dev_name,	dev_name(dev)	)
+-		__field(	u64,	dma_mask		)
+-		__field(	dma_addr_t,	dev_addr	)
+-		__field(	size_t,	size			)
+-		__field(	int,	swiotlb_force		)
++		__string(	dev_name,	dev_name(dev)		)
++		__field(	u64,	dma_mask			)
++		__field(	dma_addr_t,	dev_addr		)
++		__field(	size_t,	size				)
++		__field(	enum swiotlb_force,	swiotlb_force	)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -37,7 +37,10 @@ TRACE_EVENT(swiotlb_bounced,
+ 		__entry->dma_mask,
+ 		(unsigned long long)__entry->dev_addr,
+ 		__entry->size,
+-		__entry->swiotlb_force ? "swiotlb_force" : "" )
++		__print_symbolic(__entry->swiotlb_force,
++			{ SWIOTLB_NORMAL,	"NORMAL" },
++			{ SWIOTLB_FORCE,	"FORCE" },
++			{ SWIOTLB_NO_FORCE,	"NO_FORCE" }))
+ );
+ 
+ #endif /*  _TRACE_SWIOTLB_H */
+diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
+index 80adef7d4c3d..0d6ff3e471be 100644
+--- a/kernel/rcu/rcu.h
++++ b/kernel/rcu/rcu.h
+@@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void);
+ #define TPS(x)  tracepoint_string(x)
+ 
+ void rcu_early_boot_tests(void);
++void rcu_test_sync_prims(void);
+ 
+ /*
+  * This function really isn't for public consumption, but RCU is special in
+diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
+index 1898559e6b60..b23a4d076f3d 100644
+--- a/kernel/rcu/tiny.c
++++ b/kernel/rcu/tiny.c
+@@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
+  * benefits of doing might_sleep() to reduce latency.)
+  *
+  * Cool, huh?  (Due to Josh Triplett.)
+- *
+- * But we want to make this a static inline later.  The cond_resched()
+- * currently makes this problematic.
+  */
+ void synchronize_sched(void)
+ {
+@@ -195,7 +192,6 @@ void synchronize_sched(void)
+ 			 lock_is_held(&rcu_lock_map) ||
+ 			 lock_is_held(&rcu_sched_lock_map),
+ 			 "Illegal synchronize_sched() in RCU read-side critical section");
+-	cond_resched();
+ }
+ EXPORT_SYMBOL_GPL(synchronize_sched);
+ 
+diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
+index 196f0302e2f4..c64b827ecbca 100644
+--- a/kernel/rcu/tiny_plugin.h
++++ b/kernel/rcu/tiny_plugin.h
+@@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+ 
+ /*
+  * During boot, we forgive RCU lockdep issues.  After this function is
+- * invoked, we start taking RCU lockdep issues seriously.
++ * invoked, we start taking RCU lockdep issues seriously.  Note that unlike
++ * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
++ * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
++ * The reason for this is that Tiny RCU does not need kthreads, so does
++ * not have to care about the fact that the scheduler is half-initialized
++ * at a certain phase of the boot process.
+  */
+ void __init rcu_scheduler_starting(void)
+ {
+ 	WARN_ON(nr_context_switches() > 0);
+-	rcu_scheduler_active = 1;
++	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
+ }
+ 
+ #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 69a5611a7e7c..10f62c6f48e7 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
+ int sysctl_panic_on_rcu_stall __read_mostly;
+ 
+ /*
+- * The rcu_scheduler_active variable transitions from zero to one just
+- * before the first task is spawned.  So when this variable is zero, RCU
+- * can assume that there is but one task, allowing RCU to (for example)
++ * The rcu_scheduler_active variable is initialized to the value
++ * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
++ * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
++ * RCU can assume that there is but one task, allowing RCU to (for example)
+  * optimize synchronize_rcu() to a simple barrier().  When this variable
+- * is one, RCU must actually do all the hard work required to detect real
+- * grace periods.  This variable is also used to suppress boot-time false
+- * positives from lockdep-RCU error checking.
++ * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
++ * to detect real grace periods.  This variable is also used to suppress
++ * boot-time false positives from lockdep-RCU error checking.  Finally, it
++ * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
++ * is fully initialized, including all of its kthreads having been spawned.
+  */
+ int rcu_scheduler_active __read_mostly;
+ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+@@ -3985,18 +3988,22 @@ static int __init rcu_spawn_gp_kthread(void)
+ early_initcall(rcu_spawn_gp_kthread);
+ 
+ /*
+- * This function is invoked towards the end of the scheduler's initialization
+- * process.  Before this is called, the idle task might contain
+- * RCU read-side critical sections (during which time, this idle
+- * task is booting the system).  After this function is called, the
+- * idle tasks are prohibited from containing RCU read-side critical
+- * sections.  This function also enables RCU lockdep checking.
++ * This function is invoked towards the end of the scheduler's
++ * initialization process.  Before this is called, the idle task might
++ * contain synchronous grace-period primitives (during which time, this idle
++ * task is booting the system, and such primitives are no-ops).  After this
++ * function is called, any synchronous grace-period primitives are run as
++ * expedited, with the requesting task driving the grace period forward.
++ * A later core_initcall() rcu_exp_runtime_mode() will switch to full
++ * runtime RCU functionality.
+  */
+ void rcu_scheduler_starting(void)
+ {
+ 	WARN_ON(num_online_cpus() != 1);
+ 	WARN_ON(nr_context_switches() > 0);
+-	rcu_scheduler_active = 1;
++	rcu_test_sync_prims();
++	rcu_scheduler_active = RCU_SCHEDULER_INIT;
++	rcu_test_sync_prims();
+ }
+ 
+ /*
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index 24343eb87b58..78eba4120d46 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -522,18 +522,28 @@ struct rcu_exp_work {
+ };
+ 
+ /*
++ * Common code to drive an expedited grace period forward, used by
++ * workqueues and mid-boot-time tasks.
++ */
++static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
++				  smp_call_func_t func, unsigned long s)
++{
++	/* Initialize the rcu_node tree in preparation for the wait. */
++	sync_rcu_exp_select_cpus(rsp, func);
++
++	/* Wait and clean up, including waking everyone. */
++	rcu_exp_wait_wake(rsp, s);
++}
++
++/*
+  * Work-queue handler to drive an expedited grace period forward.
+  */
+ static void wait_rcu_exp_gp(struct work_struct *wp)
+ {
+ 	struct rcu_exp_work *rewp;
+ 
+-	/* Initialize the rcu_node tree in preparation for the wait. */
+ 	rewp = container_of(wp, struct rcu_exp_work, rew_work);
+-	sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func);
+-
+-	/* Wait and clean up, including waking everyone. */
+-	rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
++	rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
+ }
+ 
+ /*
+@@ -559,12 +569,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
+ 	if (exp_funnel_lock(rsp, s))
+ 		return;  /* Someone else did our work for us. */
+ 
+-	/* Marshall arguments and schedule the expedited grace period. */
+-	rew.rew_func = func;
+-	rew.rew_rsp = rsp;
+-	rew.rew_s = s;
+-	INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
+-	schedule_work(&rew.rew_work);
++	/* Ensure that load happens before action based on it. */
++	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
++		/* Direct call during scheduler init and early_initcalls(). */
++		rcu_exp_sel_wait_wake(rsp, func, s);
++	} else {
++		/* Marshall arguments & schedule the expedited grace period. */
++		rew.rew_func = func;
++		rew.rew_rsp = rsp;
++		rew.rew_s = s;
++		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
++		schedule_work(&rew.rew_work);
++	}
+ 
+ 	/* Wait for expedited grace period to complete. */
+ 	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
+@@ -666,6 +682,8 @@ void synchronize_rcu_expedited(void)
+ {
+ 	struct rcu_state *rsp = rcu_state_p;
+ 
++	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
++		return;
+ 	_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+@@ -683,3 +701,15 @@ void synchronize_rcu_expedited(void)
+ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+ 
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
++
++/*
++ * Switch to run-time mode once Tree RCU has fully initialized.
++ */
++static int __init rcu_exp_runtime_mode(void)
++{
++	rcu_test_sync_prims();
++	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
++	rcu_test_sync_prims();
++	return 0;
++}
++core_initcall(rcu_exp_runtime_mode);
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 85c5a883c6e3..56583e764ebf 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -670,7 +670,7 @@ void synchronize_rcu(void)
+ 			 lock_is_held(&rcu_lock_map) ||
+ 			 lock_is_held(&rcu_sched_lock_map),
+ 			 "Illegal synchronize_rcu() in RCU read-side critical section");
+-	if (!rcu_scheduler_active)
++	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+ 		return;
+ 	if (rcu_gp_is_expedited())
+ 		synchronize_rcu_expedited();
+diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
+index f19271dce0a9..4f6db7e6a117 100644
+--- a/kernel/rcu/update.c
++++ b/kernel/rcu/update.c
+@@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
+  * Should expedited grace-period primitives always fall back to their
+  * non-expedited counterparts?  Intended for use within RCU.  Note
+  * that if the user specifies both rcu_expedited and rcu_normal, then
+- * rcu_normal wins.
++ * rcu_normal wins.  (Except during the time period during boot from
++ * when the first task is spawned until the rcu_exp_runtime_mode()
++ * core_initcall() is invoked, at which point everything is expedited.)
+  */
+ bool rcu_gp_is_normal(void)
+ {
+-	return READ_ONCE(rcu_normal);
++	return READ_ONCE(rcu_normal) &&
++	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
+ }
+ EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
+ 
+@@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting =
+ /*
+  * Should normal grace-period primitives be expedited?  Intended for
+  * use within RCU.  Note that this function takes the rcu_expedited
+- * sysfs/boot variable into account as well as the rcu_expedite_gp()
+- * nesting.  So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
+- * returns false is a -really- bad idea.
++ * sysfs/boot variable and rcu_scheduler_active into account as well
++ * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
++ * until rcu_gp_is_expedited() returns false is a -really- bad idea.
+  */
+ bool rcu_gp_is_expedited(void)
+ {
+-	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
++	return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
++	       rcu_scheduler_active == RCU_SCHEDULER_INIT;
+ }
+ EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
+ 
+@@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
+ 
+ int notrace debug_lockdep_rcu_enabled(void)
+ {
+-	return rcu_scheduler_active && debug_locks &&
++	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
+ 	       current->lockdep_recursion == 0;
+ }
+ EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+@@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
+ void synchronize_rcu_tasks(void)
+ {
+ 	/* Complain if the scheduler has not started.  */
+-	RCU_LOCKDEP_WARN(!rcu_scheduler_active,
++	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
+ 			 "synchronize_rcu_tasks called too soon");
+ 
+ 	/* Wait for the grace period. */
+@@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void)
+ 
+ #endif /* #ifdef CONFIG_TASKS_RCU */
+ 
++/*
++ * Test each non-SRCU synchronous grace-period wait API.  This is
++ * useful just after a change in mode for these primitives, and
++ * during early boot.
++ */
++void rcu_test_sync_prims(void)
++{
++	if (!IS_ENABLED(CONFIG_PROVE_RCU))
++		return;
++	synchronize_rcu();
++	synchronize_rcu_bh();
++	synchronize_sched();
++	synchronize_rcu_expedited();
++	synchronize_rcu_bh_expedited();
++	synchronize_sched_expedited();
++}
++
+ #ifdef CONFIG_PROVE_RCU
+ 
+ /*
+@@ -865,6 +886,7 @@ void rcu_early_boot_tests(void)
+ 		early_boot_test_call_rcu_bh();
+ 	if (rcu_self_test_sched)
+ 		early_boot_test_call_rcu_sched();
++	rcu_test_sync_prims();
+ }
+ 
+ static int rcu_verify_early_boot_tests(void)
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index 22e13a0e19d7..ad1d2962d129 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -53,7 +53,7 @@
+  */
+ #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+ 
+-int swiotlb_force;
++enum swiotlb_force swiotlb_force;
+ 
+ /*
+  * Used to do a quick range check in swiotlb_tbl_unmap_single and
+@@ -106,8 +106,12 @@ setup_io_tlb_npages(char *str)
+ 	}
+ 	if (*str == ',')
+ 		++str;
+-	if (!strcmp(str, "force"))
+-		swiotlb_force = 1;
++	if (!strcmp(str, "force")) {
++		swiotlb_force = SWIOTLB_FORCE;
++	} else if (!strcmp(str, "noforce")) {
++		swiotlb_force = SWIOTLB_NO_FORCE;
++		io_tlb_nslabs = 1;
++	}
+ 
+ 	return 0;
+ }
+@@ -541,8 +545,15 @@ static phys_addr_t
+ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+ 	   enum dma_data_direction dir)
+ {
+-	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
++	dma_addr_t start_dma_addr;
++
++	if (swiotlb_force == SWIOTLB_NO_FORCE) {
++		dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
++				     &phys);
++		return SWIOTLB_MAP_ERROR;
++	}
+ 
++	start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
+ 	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+ }
+ 
+@@ -707,6 +718,9 @@ static void
+ swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+ 	     int do_panic)
+ {
++	if (swiotlb_force == SWIOTLB_NO_FORCE)
++		return;
++
+ 	/*
+ 	 * Ran out of IOMMU space for this operation. This is very bad.
+ 	 * Unfortunately the drivers cannot handle this operation properly.
+@@ -749,7 +763,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+ 	 * we can safely return the device addr and not worry about bounce
+ 	 * buffering it.
+ 	 */
+-	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
++	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
+ 		return dev_addr;
+ 
+ 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+@@ -888,7 +902,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ 		phys_addr_t paddr = sg_phys(sg);
+ 		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
+ 
+-		if (swiotlb_force ||
++		if (swiotlb_force == SWIOTLB_FORCE ||
+ 		    !dma_capable(hwdev, dev_addr, sg->length)) {
+ 			phys_addr_t map = map_single(hwdev, sg_phys(sg),
+ 						     sg->length, dir);
+diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
+index a0905f04bd13..b216131915e7 100644
+--- a/net/ceph/auth_x.c
++++ b/net/ceph/auth_x.c
+@@ -39,56 +39,58 @@ static int ceph_x_should_authenticate(struct ceph_auth_client *ac)
+ 	return need != 0;
+ }
+ 
++static int ceph_x_encrypt_offset(void)
++{
++	return sizeof(u32) + sizeof(struct ceph_x_encrypt_header);
++}
++
+ static int ceph_x_encrypt_buflen(int ilen)
+ {
+-	return sizeof(struct ceph_x_encrypt_header) + ilen + 16 +
+-		sizeof(u32);
++	return ceph_x_encrypt_offset() + ilen + 16;
+ }
+ 
+-static int ceph_x_encrypt(struct ceph_crypto_key *secret,
+-			  void *ibuf, int ilen, void *obuf, size_t olen)
++static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *buf,
++			  int buf_len, int plaintext_len)
+ {
+-	struct ceph_x_encrypt_header head = {
+-		.struct_v = 1,
+-		.magic = cpu_to_le64(CEPHX_ENC_MAGIC)
+-	};
+-	size_t len = olen - sizeof(u32);
++	struct ceph_x_encrypt_header *hdr = buf + sizeof(u32);
++	int ciphertext_len;
+ 	int ret;
+ 
+-	ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len,
+-			    &head, sizeof(head), ibuf, ilen);
++	hdr->struct_v = 1;
++	hdr->magic = cpu_to_le64(CEPHX_ENC_MAGIC);
++
++	ret = ceph_crypt(secret, true, buf + sizeof(u32), buf_len - sizeof(u32),
++			 plaintext_len + sizeof(struct ceph_x_encrypt_header),
++			 &ciphertext_len);
+ 	if (ret)
+ 		return ret;
+-	ceph_encode_32(&obuf, len);
+-	return len + sizeof(u32);
++
++	ceph_encode_32(&buf, ciphertext_len);
++	return sizeof(u32) + ciphertext_len;
+ }
+ 
+-static int ceph_x_decrypt(struct ceph_crypto_key *secret,
+-			  void **p, void *end, void **obuf, size_t olen)
++static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end)
+ {
+-	struct ceph_x_encrypt_header head;
+-	size_t head_len = sizeof(head);
+-	int len, ret;
+-
+-	len = ceph_decode_32(p);
+-	if (*p + len > end)
+-		return -EINVAL;
++	struct ceph_x_encrypt_header *hdr = *p + sizeof(u32);
++	int ciphertext_len, plaintext_len;
++	int ret;
+ 
+-	dout("ceph_x_decrypt len %d\n", len);
+-	if (*obuf == NULL) {
+-		*obuf = kmalloc(len, GFP_NOFS);
+-		if (!*obuf)
+-			return -ENOMEM;
+-		olen = len;
+-	}
++	ceph_decode_32_safe(p, end, ciphertext_len, e_inval);
++	ceph_decode_need(p, end, ciphertext_len, e_inval);
+ 
+-	ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
++	ret = ceph_crypt(secret, false, *p, end - *p, ciphertext_len,
++			 &plaintext_len);
+ 	if (ret)
+ 		return ret;
+-	if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
++
++	if (hdr->struct_v != 1 || le64_to_cpu(hdr->magic) != CEPHX_ENC_MAGIC)
+ 		return -EPERM;
+-	*p += len;
+-	return olen;
++
++	*p += ciphertext_len;
++	return plaintext_len - sizeof(struct ceph_x_encrypt_header);
++
++e_inval:
++	return -EINVAL;
+ }
+ 
+ /*
+@@ -143,13 +145,10 @@ static int process_one_ticket(struct ceph_auth_client *ac,
+ 	int type;
+ 	u8 tkt_struct_v, blob_struct_v;
+ 	struct ceph_x_ticket_handler *th;
+-	void *dbuf = NULL;
+ 	void *dp, *dend;
+ 	int dlen;
+ 	char is_enc;
+ 	struct timespec validity;
+-	struct ceph_crypto_key old_key;
+-	void *ticket_buf = NULL;
+ 	void *tp, *tpend;
+ 	void **ptp;
+ 	struct ceph_crypto_key new_session_key;
+@@ -174,20 +173,17 @@ static int process_one_ticket(struct ceph_auth_client *ac,
+ 	}
+ 
+ 	/* blob for me */
+-	dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
+-	if (dlen <= 0) {
+-		ret = dlen;
++	dp = *p + ceph_x_encrypt_offset();
++	ret = ceph_x_decrypt(secret, p, end);
++	if (ret < 0)
+ 		goto out;
+-	}
+-	dout(" decrypted %d bytes\n", dlen);
+-	dp = dbuf;
+-	dend = dp + dlen;
++	dout(" decrypted %d bytes\n", ret);
++	dend = dp + ret;
+ 
+ 	tkt_struct_v = ceph_decode_8(&dp);
+ 	if (tkt_struct_v != 1)
+ 		goto bad;
+ 
+-	memcpy(&old_key, &th->session_key, sizeof(old_key));
+ 	ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
+ 	if (ret)
+ 		goto out;
+@@ -203,15 +199,13 @@ static int process_one_ticket(struct ceph_auth_client *ac,
+ 	ceph_decode_8_safe(p, end, is_enc, bad);
+ 	if (is_enc) {
+ 		/* encrypted */
+-		dout(" encrypted ticket\n");
+-		dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
+-		if (dlen < 0) {
+-			ret = dlen;
++		tp = *p + ceph_x_encrypt_offset();
++		ret = ceph_x_decrypt(&th->session_key, p, end);
++		if (ret < 0)
+ 			goto out;
+-		}
+-		tp = ticket_buf;
++		dout(" encrypted ticket, decrypted %d bytes\n", ret);
+ 		ptp = &tp;
+-		tpend = *ptp + dlen;
++		tpend = tp + ret;
+ 	} else {
+ 		/* unencrypted */
+ 		ptp = p;
+@@ -242,8 +236,6 @@ static int process_one_ticket(struct ceph_auth_client *ac,
+ 	xi->have_keys |= th->service;
+ 
+ out:
+-	kfree(ticket_buf);
+-	kfree(dbuf);
+ 	return ret;
+ 
+ bad:
+@@ -294,7 +286,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
+ {
+ 	int maxlen;
+ 	struct ceph_x_authorize_a *msg_a;
+-	struct ceph_x_authorize_b msg_b;
++	struct ceph_x_authorize_b *msg_b;
+ 	void *p, *end;
+ 	int ret;
+ 	int ticket_blob_len =
+@@ -308,8 +300,8 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
+ 	if (ret)
+ 		goto out_au;
+ 
+-	maxlen = sizeof(*msg_a) + sizeof(msg_b) +
+-		ceph_x_encrypt_buflen(ticket_blob_len);
++	maxlen = sizeof(*msg_a) + ticket_blob_len +
++		ceph_x_encrypt_buflen(sizeof(*msg_b));
+ 	dout("  need len %d\n", maxlen);
+ 	if (au->buf && au->buf->alloc_len < maxlen) {
+ 		ceph_buffer_put(au->buf);
+@@ -343,18 +335,19 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
+ 	p += ticket_blob_len;
+ 	end = au->buf->vec.iov_base + au->buf->vec.iov_len;
+ 
++	msg_b = p + ceph_x_encrypt_offset();
++	msg_b->struct_v = 1;
+ 	get_random_bytes(&au->nonce, sizeof(au->nonce));
+-	msg_b.struct_v = 1;
+-	msg_b.nonce = cpu_to_le64(au->nonce);
+-	ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b),
+-			     p, end - p);
++	msg_b->nonce = cpu_to_le64(au->nonce);
++	ret = ceph_x_encrypt(&au->session_key, p, end - p, sizeof(*msg_b));
+ 	if (ret < 0)
+ 		goto out_au;
++
+ 	p += ret;
++	WARN_ON(p > end);
+ 	au->buf->vec.iov_len = p - au->buf->vec.iov_base;
+ 	dout(" built authorizer nonce %llx len %d\n", au->nonce,
+ 	     (int)au->buf->vec.iov_len);
+-	BUG_ON(au->buf->vec.iov_len > maxlen);
+ 	return 0;
+ 
+ out_au:
+@@ -452,8 +445,9 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
+ 	if (need & CEPH_ENTITY_TYPE_AUTH) {
+ 		struct ceph_x_authenticate *auth = (void *)(head + 1);
+ 		void *p = auth + 1;
+-		struct ceph_x_challenge_blob tmp;
+-		char tmp_enc[40];
++		void *enc_buf = xi->auth_authorizer.enc_buf;
++		struct ceph_x_challenge_blob *blob = enc_buf +
++							ceph_x_encrypt_offset();
+ 		u64 *u;
+ 
+ 		if (p > end)
+@@ -464,16 +458,16 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
+ 
+ 		/* encrypt and hash */
+ 		get_random_bytes(&auth->client_challenge, sizeof(u64));
+-		tmp.client_challenge = auth->client_challenge;
+-		tmp.server_challenge = cpu_to_le64(xi->server_challenge);
+-		ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp),
+-				     tmp_enc, sizeof(tmp_enc));
++		blob->client_challenge = auth->client_challenge;
++		blob->server_challenge = cpu_to_le64(xi->server_challenge);
++		ret = ceph_x_encrypt(&xi->secret, enc_buf, CEPHX_AU_ENC_BUF_LEN,
++				     sizeof(*blob));
+ 		if (ret < 0)
+ 			return ret;
+ 
+ 		auth->struct_v = 1;
+ 		auth->key = 0;
+-		for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++)
++		for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
+ 			auth->key ^= *(__le64 *)u;
+ 		dout(" server_challenge %llx client_challenge %llx key %llx\n",
+ 		     xi->server_challenge, le64_to_cpu(auth->client_challenge),
+@@ -600,8 +594,8 @@ static int ceph_x_create_authorizer(
+ 	auth->authorizer = (struct ceph_authorizer *) au;
+ 	auth->authorizer_buf = au->buf->vec.iov_base;
+ 	auth->authorizer_buf_len = au->buf->vec.iov_len;
+-	auth->authorizer_reply_buf = au->reply_buf;
+-	auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
++	auth->authorizer_reply_buf = au->enc_buf;
++	auth->authorizer_reply_buf_len = CEPHX_AU_ENC_BUF_LEN;
+ 	auth->sign_message = ac->ops->sign_message;
+ 	auth->check_message_signature = ac->ops->check_message_signature;
+ 
+@@ -632,24 +626,22 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
+ 					  struct ceph_authorizer *a, size_t len)
+ {
+ 	struct ceph_x_authorizer *au = (void *)a;
+-	int ret = 0;
+-	struct ceph_x_authorize_reply reply;
+-	void *preply = &reply;
+-	void *p = au->reply_buf;
+-	void *end = p + sizeof(au->reply_buf);
++	void *p = au->enc_buf;
++	struct ceph_x_authorize_reply *reply = p + ceph_x_encrypt_offset();
++	int ret;
+ 
+-	ret = ceph_x_decrypt(&au->session_key, &p, end, &preply, sizeof(reply));
++	ret = ceph_x_decrypt(&au->session_key, &p, p + CEPHX_AU_ENC_BUF_LEN);
+ 	if (ret < 0)
+ 		return ret;
+-	if (ret != sizeof(reply))
++	if (ret != sizeof(*reply))
+ 		return -EPERM;
+ 
+-	if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one))
++	if (au->nonce + 1 != le64_to_cpu(reply->nonce_plus_one))
+ 		ret = -EPERM;
+ 	else
+ 		ret = 0;
+ 	dout("verify_authorizer_reply nonce %llx got %llx ret %d\n",
+-	     au->nonce, le64_to_cpu(reply.nonce_plus_one), ret);
++	     au->nonce, le64_to_cpu(reply->nonce_plus_one), ret);
+ 	return ret;
+ }
+ 
+@@ -704,35 +696,48 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
+ 	invalidate_ticket(ac, CEPH_ENTITY_TYPE_AUTH);
+ }
+ 
+-static int calcu_signature(struct ceph_x_authorizer *au,
+-			   struct ceph_msg *msg, __le64 *sig)
++static int calc_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg,
++			  __le64 *psig)
+ {
++	void *enc_buf = au->enc_buf;
++	struct {
++		__le32 len;
++		__le32 header_crc;
++		__le32 front_crc;
++		__le32 middle_crc;
++		__le32 data_crc;
++	} __packed *sigblock = enc_buf + ceph_x_encrypt_offset();
+ 	int ret;
+-	char tmp_enc[40];
+-	__le32 tmp[5] = {
+-		cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc,
+-		msg->footer.middle_crc, msg->footer.data_crc,
+-	};
+-	ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
+-			     tmp_enc, sizeof(tmp_enc));
++
++	sigblock->len = cpu_to_le32(4*sizeof(u32));
++	sigblock->header_crc = msg->hdr.crc;
++	sigblock->front_crc = msg->footer.front_crc;
++	sigblock->middle_crc = msg->footer.middle_crc;
++	sigblock->data_crc =  msg->footer.data_crc;
++	ret = ceph_x_encrypt(&au->session_key, enc_buf, CEPHX_AU_ENC_BUF_LEN,
++			     sizeof(*sigblock));
+ 	if (ret < 0)
+ 		return ret;
+-	*sig = *(__le64*)(tmp_enc + 4);
++
++	*psig = *(__le64 *)(enc_buf + sizeof(u32));
+ 	return 0;
+ }
+ 
+ static int ceph_x_sign_message(struct ceph_auth_handshake *auth,
+ 			       struct ceph_msg *msg)
+ {
++	__le64 sig;
+ 	int ret;
+ 
+ 	if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN))
+ 		return 0;
+ 
+-	ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
+-			      msg, &msg->footer.sig);
+-	if (ret < 0)
++	ret = calc_signature((struct ceph_x_authorizer *)auth->authorizer,
++			     msg, &sig);
++	if (ret)
+ 		return ret;
++
++	msg->footer.sig = sig;
+ 	msg->footer.flags |= CEPH_MSG_FOOTER_SIGNED;
+ 	return 0;
+ }
+@@ -746,9 +751,9 @@ static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth,
+ 	if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN))
+ 		return 0;
+ 
+-	ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
+-			      msg, &sig_check);
+-	if (ret < 0)
++	ret = calc_signature((struct ceph_x_authorizer *)auth->authorizer,
++			     msg, &sig_check);
++	if (ret)
+ 		return ret;
+ 	if (sig_check == msg->footer.sig)
+ 		return 0;
+diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
+index 21a5af904bae..48e9ad41bd2a 100644
+--- a/net/ceph/auth_x.h
++++ b/net/ceph/auth_x.h
+@@ -24,6 +24,7 @@ struct ceph_x_ticket_handler {
+ 	unsigned long renew_after, expires;
+ };
+ 
++#define CEPHX_AU_ENC_BUF_LEN	128  /* big enough for encrypted blob */
+ 
+ struct ceph_x_authorizer {
+ 	struct ceph_authorizer base;
+@@ -32,7 +33,7 @@ struct ceph_x_authorizer {
+ 	unsigned int service;
+ 	u64 nonce;
+ 	u64 secret_id;
+-	char reply_buf[128];  /* big enough for encrypted blob */
++	char enc_buf[CEPHX_AU_ENC_BUF_LEN] __aligned(8);
+ };
+ 
+ struct ceph_x_info {
+diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
+index db2847ac5f12..292e33bd916e 100644
+--- a/net/ceph/crypto.c
++++ b/net/ceph/crypto.c
+@@ -13,14 +13,60 @@
+ #include <linux/ceph/decode.h>
+ #include "crypto.h"
+ 
++/*
++ * Set ->key and ->tfm.  The rest of the key should be filled in before
++ * this function is called.
++ */
++static int set_secret(struct ceph_crypto_key *key, void *buf)
++{
++	unsigned int noio_flag;
++	int ret;
++
++	key->key = NULL;
++	key->tfm = NULL;
++
++	switch (key->type) {
++	case CEPH_CRYPTO_NONE:
++		return 0; /* nothing to do */
++	case CEPH_CRYPTO_AES:
++		break;
++	default:
++		return -ENOTSUPP;
++	}
++
++	WARN_ON(!key->len);
++	key->key = kmemdup(buf, key->len, GFP_NOIO);
++	if (!key->key) {
++		ret = -ENOMEM;
++		goto fail;
++	}
++
++	/* crypto_alloc_skcipher() allocates with GFP_KERNEL */
++	noio_flag = memalloc_noio_save();
++	key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
++	memalloc_noio_restore(noio_flag);
++	if (IS_ERR(key->tfm)) {
++		ret = PTR_ERR(key->tfm);
++		key->tfm = NULL;
++		goto fail;
++	}
++
++	ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
++	if (ret)
++		goto fail;
++
++	return 0;
++
++fail:
++	ceph_crypto_key_destroy(key);
++	return ret;
++}
++
+ int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
+ 			  const struct ceph_crypto_key *src)
+ {
+ 	memcpy(dst, src, sizeof(struct ceph_crypto_key));
+-	dst->key = kmemdup(src->key, src->len, GFP_NOFS);
+-	if (!dst->key)
+-		return -ENOMEM;
+-	return 0;
++	return set_secret(dst, src->key);
+ }
+ 
+ int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
+@@ -37,16 +83,16 @@ int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
+ 
+ int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
+ {
++	int ret;
++
+ 	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
+ 	key->type = ceph_decode_16(p);
+ 	ceph_decode_copy(p, &key->created, sizeof(key->created));
+ 	key->len = ceph_decode_16(p);
+ 	ceph_decode_need(p, end, key->len, bad);
+-	key->key = kmalloc(key->len, GFP_NOFS);
+-	if (!key->key)
+-		return -ENOMEM;
+-	ceph_decode_copy(p, key->key, key->len);
+-	return 0;
++	ret = set_secret(key, *p);
++	*p += key->len;
++	return ret;
+ 
+ bad:
+ 	dout("failed to decode crypto key\n");
+@@ -80,9 +126,14 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
+ 	return 0;
+ }
+ 
+-static struct crypto_skcipher *ceph_crypto_alloc_cipher(void)
++void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
+ {
+-	return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
++	if (key) {
++		kfree(key->key);
++		key->key = NULL;
++		crypto_free_skcipher(key->tfm);
++		key->tfm = NULL;
++	}
+ }
+ 
+ static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
+@@ -157,372 +208,82 @@ static void teardown_sgtable(struct sg_table *sgt)
+ 		sg_free_table(sgt);
+ }
+ 
+-static int ceph_aes_encrypt(const void *key, int key_len,
+-			    void *dst, size_t *dst_len,
+-			    const void *src, size_t src_len)
+-{
+-	struct scatterlist sg_in[2], prealloc_sg;
+-	struct sg_table sg_out;
+-	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+-	int ret;
+-	char iv[AES_BLOCK_SIZE];
+-	size_t zero_padding = (0x10 - (src_len & 0x0f));
+-	char pad[16];
+-
+-	if (IS_ERR(tfm))
+-		return PTR_ERR(tfm);
+-
+-	memset(pad, zero_padding, zero_padding);
+-
+-	*dst_len = src_len + zero_padding;
+-
+-	sg_init_table(sg_in, 2);
+-	sg_set_buf(&sg_in[0], src, src_len);
+-	sg_set_buf(&sg_in[1], pad, zero_padding);
+-	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+-	if (ret)
+-		goto out_tfm;
+-
+-	crypto_skcipher_setkey((void *)tfm, key, key_len);
+-	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+-
+-	skcipher_request_set_tfm(req, tfm);
+-	skcipher_request_set_callback(req, 0, NULL, NULL);
+-	skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
+-				   src_len + zero_padding, iv);
+-
+-	/*
+-	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
+-		       key, key_len, 1);
+-	print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
+-			src, src_len, 1);
+-	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
+-			pad, zero_padding, 1);
+-	*/
+-	ret = crypto_skcipher_encrypt(req);
+-	skcipher_request_zero(req);
+-	if (ret < 0) {
+-		pr_err("ceph_aes_crypt failed %d\n", ret);
+-		goto out_sg;
+-	}
+-	/*
+-	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
+-		       dst, *dst_len, 1);
+-	*/
+-
+-out_sg:
+-	teardown_sgtable(&sg_out);
+-out_tfm:
+-	crypto_free_skcipher(tfm);
+-	return ret;
+-}
+-
+-static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+-			     size_t *dst_len,
+-			     const void *src1, size_t src1_len,
+-			     const void *src2, size_t src2_len)
+-{
+-	struct scatterlist sg_in[3], prealloc_sg;
+-	struct sg_table sg_out;
+-	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+-	int ret;
+-	char iv[AES_BLOCK_SIZE];
+-	size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
+-	char pad[16];
+-
+-	if (IS_ERR(tfm))
+-		return PTR_ERR(tfm);
+-
+-	memset(pad, zero_padding, zero_padding);
+-
+-	*dst_len = src1_len + src2_len + zero_padding;
+-
+-	sg_init_table(sg_in, 3);
+-	sg_set_buf(&sg_in[0], src1, src1_len);
+-	sg_set_buf(&sg_in[1], src2, src2_len);
+-	sg_set_buf(&sg_in[2], pad, zero_padding);
+-	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+-	if (ret)
+-		goto out_tfm;
+-
+-	crypto_skcipher_setkey((void *)tfm, key, key_len);
+-	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+-
+-	skcipher_request_set_tfm(req, tfm);
+-	skcipher_request_set_callback(req, 0, NULL, NULL);
+-	skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
+-				   src1_len + src2_len + zero_padding, iv);
+-
+-	/*
+-	print_hex_dump(KERN_ERR, "enc  key: ", DUMP_PREFIX_NONE, 16, 1,
+-		       key, key_len, 1);
+-	print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
+-			src1, src1_len, 1);
+-	print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
+-			src2, src2_len, 1);
+-	print_hex_dump(KERN_ERR, "enc  pad: ", DUMP_PREFIX_NONE, 16, 1,
+-			pad, zero_padding, 1);
+-	*/
+-	ret = crypto_skcipher_encrypt(req);
+-	skcipher_request_zero(req);
+-	if (ret < 0) {
+-		pr_err("ceph_aes_crypt2 failed %d\n", ret);
+-		goto out_sg;
+-	}
+-	/*
+-	print_hex_dump(KERN_ERR, "enc  out: ", DUMP_PREFIX_NONE, 16, 1,
+-		       dst, *dst_len, 1);
+-	*/
+-
+-out_sg:
+-	teardown_sgtable(&sg_out);
+-out_tfm:
+-	crypto_free_skcipher(tfm);
+-	return ret;
+-}
+-
+-static int ceph_aes_decrypt(const void *key, int key_len,
+-			    void *dst, size_t *dst_len,
+-			    const void *src, size_t src_len)
++static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
++			  void *buf, int buf_len, int in_len, int *pout_len)
+ {
+-	struct sg_table sg_in;
+-	struct scatterlist sg_out[2], prealloc_sg;
+-	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+-	char pad[16];
+-	char iv[AES_BLOCK_SIZE];
++	SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
++	struct sg_table sgt;
++	struct scatterlist prealloc_sg;
++	char iv[AES_BLOCK_SIZE] __aligned(8);
++	int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
++	int crypt_len = encrypt ? in_len + pad_byte : in_len;
+ 	int ret;
+-	int last_byte;
+-
+-	if (IS_ERR(tfm))
+-		return PTR_ERR(tfm);
+ 
+-	sg_init_table(sg_out, 2);
+-	sg_set_buf(&sg_out[0], dst, *dst_len);
+-	sg_set_buf(&sg_out[1], pad, sizeof(pad));
+-	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
++	WARN_ON(crypt_len > buf_len);
++	if (encrypt)
++		memset(buf + in_len, pad_byte, pad_byte);
++	ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
+ 	if (ret)
+-		goto out_tfm;
++		return ret;
+ 
+-	crypto_skcipher_setkey((void *)tfm, key, key_len);
+ 	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+-
+-	skcipher_request_set_tfm(req, tfm);
++	skcipher_request_set_tfm(req, key->tfm);
+ 	skcipher_request_set_callback(req, 0, NULL, NULL);
+-	skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
+-				   src_len, iv);
++	skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
+ 
+ 	/*
+-	print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
+-		       key, key_len, 1);
+-	print_hex_dump(KERN_ERR, "dec  in: ", DUMP_PREFIX_NONE, 16, 1,
+-		       src, src_len, 1);
++	print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
++		       key->key, key->len, 1);
++	print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
++		       buf, crypt_len, 1);
+ 	*/
+-	ret = crypto_skcipher_decrypt(req);
+-	skcipher_request_zero(req);
+-	if (ret < 0) {
+-		pr_err("ceph_aes_decrypt failed %d\n", ret);
+-		goto out_sg;
+-	}
+-
+-	if (src_len <= *dst_len)
+-		last_byte = ((char *)dst)[src_len - 1];
++	if (encrypt)
++		ret = crypto_skcipher_encrypt(req);
+ 	else
+-		last_byte = pad[src_len - *dst_len - 1];
+-	if (last_byte <= 16 && src_len >= last_byte) {
+-		*dst_len = src_len - last_byte;
+-	} else {
+-		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
+-		       last_byte, (int)src_len);
+-		return -EPERM;  /* bad padding */
+-	}
+-	/*
+-	print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
+-		       dst, *dst_len, 1);
+-	*/
+-
+-out_sg:
+-	teardown_sgtable(&sg_in);
+-out_tfm:
+-	crypto_free_skcipher(tfm);
+-	return ret;
+-}
+-
+-static int ceph_aes_decrypt2(const void *key, int key_len,
+-			     void *dst1, size_t *dst1_len,
+-			     void *dst2, size_t *dst2_len,
+-			     const void *src, size_t src_len)
+-{
+-	struct sg_table sg_in;
+-	struct scatterlist sg_out[3], prealloc_sg;
+-	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+-	char pad[16];
+-	char iv[AES_BLOCK_SIZE];
+-	int ret;
+-	int last_byte;
+-
+-	if (IS_ERR(tfm))
+-		return PTR_ERR(tfm);
+-
+-	sg_init_table(sg_out, 3);
+-	sg_set_buf(&sg_out[0], dst1, *dst1_len);
+-	sg_set_buf(&sg_out[1], dst2, *dst2_len);
+-	sg_set_buf(&sg_out[2], pad, sizeof(pad));
+-	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+-	if (ret)
+-		goto out_tfm;
+-
+-	crypto_skcipher_setkey((void *)tfm, key, key_len);
+-	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+-
+-	skcipher_request_set_tfm(req, tfm);
+-	skcipher_request_set_callback(req, 0, NULL, NULL);
+-	skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
+-				   src_len, iv);
+-
+-	/*
+-	print_hex_dump(KERN_ERR, "dec  key: ", DUMP_PREFIX_NONE, 16, 1,
+-		       key, key_len, 1);
+-	print_hex_dump(KERN_ERR, "dec   in: ", DUMP_PREFIX_NONE, 16, 1,
+-		       src, src_len, 1);
+-	*/
+-	ret = crypto_skcipher_decrypt(req);
++		ret = crypto_skcipher_decrypt(req);
+ 	skcipher_request_zero(req);
+-	if (ret < 0) {
+-		pr_err("ceph_aes_decrypt failed %d\n", ret);
+-		goto out_sg;
+-	}
+-
+-	if (src_len <= *dst1_len)
+-		last_byte = ((char *)dst1)[src_len - 1];
+-	else if (src_len <= *dst1_len + *dst2_len)
+-		last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
+-	else
+-		last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
+-	if (last_byte <= 16 && src_len >= last_byte) {
+-		src_len -= last_byte;
+-	} else {
+-		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
+-		       last_byte, (int)src_len);
+-		return -EPERM;  /* bad padding */
+-	}
+-
+-	if (src_len < *dst1_len) {
+-		*dst1_len = src_len;
+-		*dst2_len = 0;
+-	} else {
+-		*dst2_len = src_len - *dst1_len;
++	if (ret) {
++		pr_err("%s %scrypt failed: %d\n", __func__,
++		       encrypt ? "en" : "de", ret);
++		goto out_sgt;
+ 	}
+ 	/*
+-	print_hex_dump(KERN_ERR, "dec  out1: ", DUMP_PREFIX_NONE, 16, 1,
+-		       dst1, *dst1_len, 1);
+-	print_hex_dump(KERN_ERR, "dec  out2: ", DUMP_PREFIX_NONE, 16, 1,
+-		       dst2, *dst2_len, 1);
++	print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
++		       buf, crypt_len, 1);
+ 	*/
+ 
+-out_sg:
+-	teardown_sgtable(&sg_in);
+-out_tfm:
+-	crypto_free_skcipher(tfm);
+-	return ret;
+-}
+-
+-
+-int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
+-		 const void *src, size_t src_len)
+-{
+-	switch (secret->type) {
+-	case CEPH_CRYPTO_NONE:
+-		if (*dst_len < src_len)
+-			return -ERANGE;
+-		memcpy(dst, src, src_len);
+-		*dst_len = src_len;
+-		return 0;
+-
+-	case CEPH_CRYPTO_AES:
+-		return ceph_aes_decrypt(secret->key, secret->len, dst,
+-					dst_len, src, src_len);
+-
+-	default:
+-		return -EINVAL;
+-	}
+-}
+-
+-int ceph_decrypt2(struct ceph_crypto_key *secret,
+-			void *dst1, size_t *dst1_len,
+-			void *dst2, size_t *dst2_len,
+-			const void *src, size_t src_len)
+-{
+-	size_t t;
+-
+-	switch (secret->type) {
+-	case CEPH_CRYPTO_NONE:
+-		if (*dst1_len + *dst2_len < src_len)
+-			return -ERANGE;
+-		t = min(*dst1_len, src_len);
+-		memcpy(dst1, src, t);
+-		*dst1_len = t;
+-		src += t;
+-		src_len -= t;
+-		if (src_len) {
+-			t = min(*dst2_len, src_len);
+-			memcpy(dst2, src, t);
+-			*dst2_len = t;
++	if (encrypt) {
++		*pout_len = crypt_len;
++	} else {
++		pad_byte = *(char *)(buf + in_len - 1);
++		if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
++		    in_len >= pad_byte) {
++			*pout_len = in_len - pad_byte;
++		} else {
++			pr_err("%s got bad padding %d on in_len %d\n",
++			       __func__, pad_byte, in_len);
++			ret = -EPERM;
++			goto out_sgt;
+ 		}
+-		return 0;
+-
+-	case CEPH_CRYPTO_AES:
+-		return ceph_aes_decrypt2(secret->key, secret->len,
+-					 dst1, dst1_len, dst2, dst2_len,
+-					 src, src_len);
+-
+-	default:
+-		return -EINVAL;
+ 	}
+-}
+-
+-int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
+-		 const void *src, size_t src_len)
+-{
+-	switch (secret->type) {
+-	case CEPH_CRYPTO_NONE:
+-		if (*dst_len < src_len)
+-			return -ERANGE;
+-		memcpy(dst, src, src_len);
+-		*dst_len = src_len;
+-		return 0;
+ 
+-	case CEPH_CRYPTO_AES:
+-		return ceph_aes_encrypt(secret->key, secret->len, dst,
+-					dst_len, src, src_len);
+-
+-	default:
+-		return -EINVAL;
+-	}
++out_sgt:
++	teardown_sgtable(&sgt);
++	return ret;
+ }
+ 
+-int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
+-		  const void *src1, size_t src1_len,
+-		  const void *src2, size_t src2_len)
++int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
++	       void *buf, int buf_len, int in_len, int *pout_len)
+ {
+-	switch (secret->type) {
++	switch (key->type) {
+ 	case CEPH_CRYPTO_NONE:
+-		if (*dst_len < src1_len + src2_len)
+-			return -ERANGE;
+-		memcpy(dst, src1, src1_len);
+-		memcpy(dst + src1_len, src2, src2_len);
+-		*dst_len = src1_len + src2_len;
++		*pout_len = in_len;
+ 		return 0;
+-
+ 	case CEPH_CRYPTO_AES:
+-		return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
+-					 src1, src1_len, src2, src2_len);
+-
++		return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
++				      pout_len);
+ 	default:
+-		return -EINVAL;
++		return -ENOTSUPP;
+ 	}
+ }
+ 
+diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
+index 2e9cab09f37b..58d83aa7740f 100644
+--- a/net/ceph/crypto.h
++++ b/net/ceph/crypto.h
+@@ -12,37 +12,19 @@ struct ceph_crypto_key {
+ 	struct ceph_timespec created;
+ 	int len;
+ 	void *key;
++	struct crypto_skcipher *tfm;
+ };
+ 
+-static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
+-{
+-	if (key) {
+-		kfree(key->key);
+-		key->key = NULL;
+-	}
+-}
+-
+ int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
+ 			  const struct ceph_crypto_key *src);
+ int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
+ int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
+ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
++void ceph_crypto_key_destroy(struct ceph_crypto_key *key);
+ 
+ /* crypto.c */
+-int ceph_decrypt(struct ceph_crypto_key *secret,
+-		 void *dst, size_t *dst_len,
+-		 const void *src, size_t src_len);
+-int ceph_encrypt(struct ceph_crypto_key *secret,
+-		 void *dst, size_t *dst_len,
+-		 const void *src, size_t src_len);
+-int ceph_decrypt2(struct ceph_crypto_key *secret,
+-		  void *dst1, size_t *dst1_len,
+-		  void *dst2, size_t *dst2_len,
+-		  const void *src, size_t src_len);
+-int ceph_encrypt2(struct ceph_crypto_key *secret,
+-		  void *dst, size_t *dst_len,
+-		  const void *src1, size_t src1_len,
+-		  const void *src2, size_t src2_len);
++int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
++	       void *buf, int buf_len, int in_len, int *pout_len);
+ int ceph_crypto_init(void);
+ void ceph_crypto_shutdown(void);
+ 
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index a47bbc973f2d..2384b4aae064 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3939,21 +3939,31 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
+ 	u64_stats_update_end(&stats->syncp);
+ 
+ 	if (fast_rx->internal_forward) {
+-		struct sta_info *dsta = sta_info_get(rx->sdata, skb->data);
++		struct sk_buff *xmit_skb = NULL;
++		bool multicast = is_multicast_ether_addr(skb->data);
+ 
+-		if (dsta) {
++		if (multicast) {
++			xmit_skb = skb_copy(skb, GFP_ATOMIC);
++		} else if (sta_info_get(rx->sdata, skb->data)) {
++			xmit_skb = skb;
++			skb = NULL;
++		}
++
++		if (xmit_skb) {
+ 			/*
+ 			 * Send to wireless media and increase priority by 256
+ 			 * to keep the received priority instead of
+ 			 * reclassifying the frame (see cfg80211_classify8021d).
+ 			 */
+-			skb->priority += 256;
+-			skb->protocol = htons(ETH_P_802_3);
+-			skb_reset_network_header(skb);
+-			skb_reset_mac_header(skb);
+-			dev_queue_xmit(skb);
+-			return true;
++			xmit_skb->priority += 256;
++			xmit_skb->protocol = htons(ETH_P_802_3);
++			skb_reset_network_header(xmit_skb);
++			skb_reset_mac_header(xmit_skb);
++			dev_queue_xmit(xmit_skb);
+ 		}
++
++		if (!skb)
++			return true;
+ 	}
+ 
+ 	/* deliver to local stack */
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 45662d7f0943..6fdffde28733 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1489,7 +1489,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
+ 	case RPC_GSS_PROC_DESTROY:
+ 		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
+ 			goto auth_err;
+-		rsci->h.expiry_time = get_seconds();
++		rsci->h.expiry_time = seconds_since_boot();
+ 		set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ 		if (resv->iov_len + 4 > PAGE_SIZE)
+ 			goto drop;
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 3bc1d61694cb..9c9db55a0c1e 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -799,6 +799,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
+ 
+ 	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
+ 		dprintk("svc_recv: found XPT_CLOSE\n");
++		if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
++			xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
+ 		svc_delete_xprt(xprt);
+ 		/* Leave XPT_BUSY set on the dead xprt: */
+ 		goto out;
+@@ -1020,9 +1022,11 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
+ 		le = to_be_closed.next;
+ 		list_del_init(le);
+ 		xprt = list_entry(le, struct svc_xprt, xpt_list);
+-		dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
+-		xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
+-		svc_close_xprt(xprt);
++		set_bit(XPT_CLOSE, &xprt->xpt_flags);
++		set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
++		dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
++				xprt);
++		svc_xprt_enqueue(xprt);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index 26b26beef2d4..adbf52c6df83 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -421,7 +421,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
+ 			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
+ 			 IB_ACCESS_REMOTE_READ;
+ 
+-	DECR_CQCOUNT(&r_xprt->rx_ep);
++	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
+ 	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
+ 	if (rc)
+ 		goto out_senderr;
+@@ -486,7 +486,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ 	struct rpcrdma_mw *mw, *tmp;
+ 	struct rpcrdma_frmr *f;
+-	int rc;
++	int count, rc;
+ 
+ 	dprintk("RPC:       %s: req %p\n", __func__, req);
+ 
+@@ -496,6 +496,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ 	 * a single ib_post_send() call.
+ 	 */
+ 	f = NULL;
++	count = 0;
+ 	invalidate_wrs = pos = prev = NULL;
+ 	list_for_each_entry(mw, &req->rl_registered, mw_list) {
+ 		if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) &&
+@@ -505,6 +506,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ 		}
+ 
+ 		pos = __frwr_prepare_linv_wr(mw);
++		count++;
+ 
+ 		if (!invalidate_wrs)
+ 			invalidate_wrs = pos;
+@@ -523,7 +525,12 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ 	f->fr_invwr.send_flags = IB_SEND_SIGNALED;
+ 	f->fr_cqe.done = frwr_wc_localinv_wake;
+ 	reinit_completion(&f->fr_linv_done);
+-	INIT_CQCOUNT(&r_xprt->rx_ep);
++
++	/* Initialize CQ count, since there is always a signaled
++	 * WR being posted here.  The new cqcount depends on how
++	 * many SQEs are about to be consumed.
++	 */
++	rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
+ 
+ 	/* Transport disconnect drains the receive CQ before it
+ 	 * replaces the QP. The RPC reply handler won't call us
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index ad1df979b3f0..a47c9bdef5fa 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -348,8 +348,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
+ 	atomic_inc(&rdma_stat_read);
+ 	return ret;
+  err:
+-	ib_dma_unmap_sg(xprt->sc_cm_id->device,
+-			frmr->sg, frmr->sg_nents, frmr->direction);
+ 	svc_rdma_put_context(ctxt, 0);
+ 	svc_rdma_put_frmr(xprt, frmr);
+ 	return ret;
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index ec74289af7ec..8da7f6a4dfc3 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -223,8 +223,8 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
+ 		cdata->inline_rsize = rsize;
+ 	if (wsize < cdata->inline_wsize)
+ 		cdata->inline_wsize = wsize;
+-	pr_info("rpcrdma: max send %u, max recv %u\n",
+-		cdata->inline_wsize, cdata->inline_rsize);
++	dprintk("RPC:       %s: max send %u, max recv %u\n",
++		__func__, cdata->inline_wsize, cdata->inline_rsize);
+ 	rpcrdma_set_max_header_sizes(r_xprt);
+ }
+ 
+@@ -532,7 +532,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
+ 	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
+ 	if (ep->rep_cqinit <= 2)
+ 		ep->rep_cqinit = 0;	/* always signal? */
+-	INIT_CQCOUNT(ep);
++	rpcrdma_init_cqcount(ep, 0);
+ 	init_waitqueue_head(&ep->rep_connect_wait);
+ 	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
+ 
+@@ -1311,13 +1311,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
+ 	dprintk("RPC:       %s: posting %d s/g entries\n",
+ 		__func__, send_wr->num_sge);
+ 
+-	if (DECR_CQCOUNT(ep) > 0)
+-		send_wr->send_flags = 0;
+-	else { /* Provider must take a send completion every now and then */
+-		INIT_CQCOUNT(ep);
+-		send_wr->send_flags = IB_SEND_SIGNALED;
+-	}
+-
++	rpcrdma_set_signaled(ep, send_wr);
+ 	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
+ 	if (rc)
+ 		goto out_postsend_err;
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index 6e1bba358203..f6ae1b22da47 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -95,8 +95,24 @@ struct rpcrdma_ep {
+ 	struct delayed_work	rep_connect_worker;
+ };
+ 
+-#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
+-#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
++static inline void
++rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
++{
++	atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
++}
++
++/* To update send queue accounting, provider must take a
++ * send completion every now and then.
++ */
++static inline void
++rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
++{
++	send_wr->send_flags = 0;
++	if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
++		rpcrdma_init_cqcount(ep, 0);
++		send_wr->send_flags = IB_SEND_SIGNALED;
++	}
++}
+ 
+ /* Pre-allocate extra Work Requests for handling backward receives
+  * and sends. This is a fixed value because the Work Queues are
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index 72edf83d76b7..cffdd9cf3ebf 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -366,7 +366,7 @@ ifndef NO_SDT
+ endif
+ 
+ ifdef PERF_HAVE_JITDUMP
+-  ifndef NO_DWARF
++  ifndef NO_LIBELF
+     $(call detected,CONFIG_JITDUMP)
+     CFLAGS += -DHAVE_JITDUMP
+   endif
+diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
+index d1ce29be560e..cd7bc4d104e2 100644
+--- a/tools/perf/builtin-mem.c
++++ b/tools/perf/builtin-mem.c
+@@ -70,8 +70,8 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
+ 	OPT_UINTEGER(0, "ldlat", &perf_mem_events__loads_ldlat, "mem-loads latency"),
+ 	OPT_INCR('v', "verbose", &verbose,
+ 		 "be more verbose (show counter open errors, etc)"),
+-	OPT_BOOLEAN('U', "--all-user", &all_user, "collect only user level data"),
+-	OPT_BOOLEAN('K', "--all-kernel", &all_kernel, "collect only kernel level data"),
++	OPT_BOOLEAN('U', "all-user", &all_user, "collect only user level data"),
++	OPT_BOOLEAN('K', "all-kernel", &all_kernel, "collect only kernel level data"),
+ 	OPT_END()
+ 	};
+ 
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index c298bd3e1d90..21f8a81797a0 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -1452,7 +1452,7 @@ static int trace__printf_interrupted_entry(struct trace *trace, struct perf_samp
+ 
+ 	duration = sample->time - ttrace->entry_time;
+ 
+-	printed  = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output);
++	printed  = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output);
+ 	printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
+ 	ttrace->entry_pending = false;
+ 
+@@ -1499,7 +1499,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
+ 
+ 	if (sc->is_exit) {
+ 		if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
+-			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
++			trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output);
+ 			fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
+ 		}
+ 	} else {
+@@ -1592,7 +1592,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
+ 	if (trace->summary_only)
+ 		goto out;
+ 
+-	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);
++	trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output);
+ 
+ 	if (ttrace->entry_pending) {
+ 		fprintf(trace->output, "%-70s", ttrace->entry_str);
+diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
+index fd710ab33684..af1cfde6b97b 100644
+--- a/tools/perf/trace/beauty/mmap.c
++++ b/tools/perf/trace/beauty/mmap.c
+@@ -42,7 +42,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
+ 
+ 	P_MMAP_FLAG(SHARED);
+ 	P_MMAP_FLAG(PRIVATE);
++#ifdef MAP_32BIT
+ 	P_MMAP_FLAG(32BIT);
++#endif
+ 	P_MMAP_FLAG(ANONYMOUS);
+ 	P_MMAP_FLAG(DENYWRITE);
+ 	P_MMAP_FLAG(EXECUTABLE);
+diff --git a/tools/perf/util/Build b/tools/perf/util/Build
+index eb60e613d795..1dc67efad634 100644
+--- a/tools/perf/util/Build
++++ b/tools/perf/util/Build
+@@ -120,7 +120,7 @@ libperf-y += demangle-rust.o
+ ifdef CONFIG_JITDUMP
+ libperf-$(CONFIG_LIBELF) += jitdump.o
+ libperf-$(CONFIG_LIBELF) += genelf.o
+-libperf-$(CONFIG_LIBELF) += genelf_debug.o
++libperf-$(CONFIG_DWARF) += genelf_debug.o
+ endif
+ 
+ CFLAGS_config.o   += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
+index 07fd30bc2f81..ae58b493af45 100644
+--- a/tools/perf/util/callchain.c
++++ b/tools/perf/util/callchain.c
+@@ -193,7 +193,6 @@ int perf_callchain_config(const char *var, const char *value)
+ 
+ 	if (!strcmp(var, "record-mode"))
+ 		return parse_callchain_record_opt(value, &callchain_param);
+-#ifdef HAVE_DWARF_UNWIND_SUPPORT
+ 	if (!strcmp(var, "dump-size")) {
+ 		unsigned long size = 0;
+ 		int ret;
+@@ -203,7 +202,6 @@ int perf_callchain_config(const char *var, const char *value)
+ 
+ 		return ret;
+ 	}
+-#endif
+ 	if (!strcmp(var, "print-type"))
+ 		return parse_callchain_mode(value);
+ 	if (!strcmp(var, "order"))
+diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
+index 13e75549c440..47cfd1080975 100644
+--- a/tools/perf/util/callchain.h
++++ b/tools/perf/util/callchain.h
+@@ -11,11 +11,7 @@
+ 
+ #define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace):\n\n"
+ 
+-#ifdef HAVE_DWARF_UNWIND_SUPPORT
+ # define RECORD_MODE_HELP  HELP_PAD "record_mode:\tcall graph recording mode (fp|dwarf|lbr)\n"
+-#else
+-# define RECORD_MODE_HELP  HELP_PAD "record_mode:\tcall graph recording mode (fp|lbr)\n"
+-#endif
+ 
+ #define RECORD_SIZE_HELP						\
+ 	HELP_PAD "record_size:\tif record_mode is 'dwarf', max size of stack recording (<bytes>)\n" \
+diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
+index c1ef805c6a8f..14a73acc549c 100644
+--- a/tools/perf/util/genelf.c
++++ b/tools/perf/util/genelf.c
+@@ -19,7 +19,9 @@
+ #include <limits.h>
+ #include <fcntl.h>
+ #include <err.h>
++#ifdef HAVE_DWARF_SUPPORT
+ #include <dwarf.h>
++#endif
+ 
+ #include "perf.h"
+ #include "genelf.h"
+@@ -157,7 +159,7 @@ gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *cod
+ int
+ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ 	      const void *code, int csize,
+-	      void *debug, int nr_debug_entries)
++	      void *debug __maybe_unused, int nr_debug_entries __maybe_unused)
+ {
+ 	Elf *e;
+ 	Elf_Data *d;
+@@ -386,11 +388,14 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+ 	shdr->sh_size = sizeof(bnote);
+ 	shdr->sh_entsize = 0;
+ 
++#ifdef HAVE_DWARF_SUPPORT
+ 	if (debug && nr_debug_entries) {
+ 		retval = jit_add_debug_info(e, load_addr, debug, nr_debug_entries);
+ 		if (retval)
+ 			goto error;
+-	} else {
++	} else
++#endif
++	{
+ 		if (elf_update(e, ELF_C_WRITE) < 0) {
+ 			warnx("elf_update 4 failed");
+ 			goto error;
+diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
+index 2fbeb59c4bdd..5c933ac71451 100644
+--- a/tools/perf/util/genelf.h
++++ b/tools/perf/util/genelf.h
+@@ -4,8 +4,10 @@
+ /* genelf.c */
+ int jit_write_elf(int fd, uint64_t code_addr, const char *sym,
+ 		  const void *code, int csize, void *debug, int nr_debug_entries);
++#ifdef HAVE_DWARF_SUPPORT
+ /* genelf_debug.c */
+ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_entries);
++#endif
+ 
+ #if   defined(__arm__)
+ #define GEN_ELF_ARCH	EM_ARM
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index aecff69a510d..f7b35e178582 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -1459,7 +1459,8 @@ int dso__load(struct dso *dso, struct map *map)
+ 	 * Read the build id if possible. This is required for
+ 	 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
+ 	 */
+-	if (is_regular_file(dso->long_name) &&
++	if (!dso->has_build_id &&
++	    is_regular_file(dso->long_name) &&
+ 	    filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
+ 		dso__set_build_id(dso, build_id);
+ 
+diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
+index 9df61059a85d..a2fd6e79d5a5 100644
+--- a/tools/perf/util/trace-event-scripting.c
++++ b/tools/perf/util/trace-event-scripting.c
+@@ -95,7 +95,8 @@ static void register_python_scripting(struct scripting_ops *scripting_ops)
+ 	if (err)
+ 		die("error registering py script extension");
+ 
+-	scripting_context = malloc(sizeof(struct scripting_context));
++	if (scripting_context == NULL)
++		scripting_context = malloc(sizeof(*scripting_context));
+ }
+ 
+ #ifdef NO_LIBPYTHON
+@@ -159,7 +160,8 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops)
+ 	if (err)
+ 		die("error registering pl script extension");
+ 
+-	scripting_context = malloc(sizeof(struct scripting_context));
++	if (scripting_context == NULL)
++		scripting_context = malloc(sizeof(*scripting_context));
+ }
+ 
+ #ifdef NO_LIBPERL
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+index c22860ab9733..30e1ac62e8cb 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+@@ -66,7 +66,7 @@ int pmc56_overflow(void)
+ 
+ 	FAIL_IF(ebb_event_enable(&event));
+ 
+-	mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
++	mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
+ 	mtspr(SPRN_PMC5, 0);
+ 	mtspr(SPRN_PMC6, 0);
+ 
+diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
+index 2e69ca812b4c..29b0d3920bfc 100755
+--- a/tools/virtio/ringtest/run-on-all.sh
++++ b/tools/virtio/ringtest/run-on-all.sh
+@@ -1,12 +1,13 @@
+ #!/bin/sh
+ 
++CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
+ #use last CPU for host. Why not the first?
+ #many devices tend to use cpu0 by default so
+ #it tends to be busier
+-HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
++HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
+ 
+ #run command on all cpus
+-for cpu in $(seq 0 $HOST_AFFINITY)
++for cpu in $CPUS_ONLINE
+ do
+ 	#Don't run guest and host on same CPU
+ 	#It actually works ok if using signalling
+diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
+index 8cebfbc19e90..539d3f5cb619 100644
+--- a/virt/kvm/arm/vgic/vgic-init.c
++++ b/virt/kvm/arm/vgic/vgic-init.c
+@@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
+ {
+ 	struct vgic_dist *dist = &kvm->arch.vgic;
+ 
+-	mutex_lock(&kvm->lock);
+-
+ 	dist->ready = false;
+ 	dist->initialized = false;
+ 
+ 	kfree(dist->spis);
+ 	dist->nr_spis = 0;
+-
+-	mutex_unlock(&kvm->lock);
+ }
+ 
+ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+@@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+ }
+ 
+-void kvm_vgic_destroy(struct kvm *kvm)
++/* To be called with kvm->lock held */
++static void __kvm_vgic_destroy(struct kvm *kvm)
+ {
+ 	struct kvm_vcpu *vcpu;
+ 	int i;
+@@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
+ 		kvm_vgic_vcpu_destroy(vcpu);
+ }
+ 
++void kvm_vgic_destroy(struct kvm *kvm)
++{
++	mutex_lock(&kvm->lock);
++	__kvm_vgic_destroy(kvm);
++	mutex_unlock(&kvm->lock);
++}
++
+ /**
+  * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
+  * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
+@@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ 		ret = vgic_v2_map_resources(kvm);
+ 	else
+ 		ret = vgic_v3_map_resources(kvm);
++
++	if (ret)
++		__kvm_vgic_destroy(kvm);
++
+ out:
+ 	mutex_unlock(&kvm->lock);
+ 	return ret;
+diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
+index 9bab86757fa4..834137e7b83f 100644
+--- a/virt/kvm/arm/vgic/vgic-v2.c
++++ b/virt/kvm/arm/vgic/vgic-v2.c
+@@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
+ 	dist->ready = true;
+ 
+ out:
+-	if (ret)
+-		kvm_vgic_destroy(kvm);
+ 	return ret;
+ }
+ 
+diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
+index 5c9f9745e6ca..e6b03fd8c374 100644
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
+ 	dist->ready = true;
+ 
+ out:
+-	if (ret)
+-		kvm_vgic_destroy(kvm);
+ 	return ret;
+ }
+ 


             reply	other threads:[~2017-01-26  8:51 UTC|newest]

Thread overview: 393+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-01-26  8:51 Alice Ferrazzi [this message]
  -- strict thread matches above, loose matches on Subject: below --
2023-01-07 11:37 [gentoo-commits] proj/linux-patches:4.9 commit in: / Mike Pagano
2022-12-14 12:24 Mike Pagano
2022-12-08 13:09 Alice Ferrazzi
2022-11-25 17:02 Mike Pagano
2022-11-10 15:14 Mike Pagano
2022-11-03 15:09 Mike Pagano
2022-10-26 11:43 Mike Pagano
2022-09-28  9:19 Mike Pagano
2022-09-20 12:04 Mike Pagano
2022-09-15 11:10 Mike Pagano
2022-09-05 12:08 Mike Pagano
2022-08-25 10:37 Mike Pagano
2022-07-29 15:25 Mike Pagano
2022-07-21 20:14 Mike Pagano
2022-07-12 16:03 Mike Pagano
2022-07-07 16:20 Mike Pagano
2022-07-02 16:04 Mike Pagano
2022-06-25 10:24 Mike Pagano
2022-06-16 11:42 Mike Pagano
2022-06-14 15:49 Mike Pagano
2022-06-06 11:07 Mike Pagano
2022-05-27 12:41 Mike Pagano
2022-05-25 11:57 Mike Pagano
2022-05-18  9:52 Mike Pagano
2022-05-15 22:14 Mike Pagano
2022-05-12 11:32 Mike Pagano
2022-04-27 11:38 Mike Pagano
2022-04-20 12:12 Mike Pagano
2022-03-28 11:01 Mike Pagano
2022-03-23 11:59 Mike Pagano
2022-03-16 13:22 Mike Pagano
2022-03-11 10:57 Mike Pagano
2022-03-08 18:28 Mike Pagano
2022-03-02 13:09 Mike Pagano
2022-02-26 23:38 Mike Pagano
2022-02-23 12:40 Mike Pagano
2022-02-16 12:49 Mike Pagano
2022-02-11 12:38 Mike Pagano
2022-02-08 18:03 Mike Pagano
2022-01-29 17:46 Mike Pagano
2022-01-27 11:41 Mike Pagano
2022-01-11 12:59 Mike Pagano
2022-01-05 12:57 Mike Pagano
2021-12-29 13:13 Mike Pagano
2021-12-22 14:08 Mike Pagano
2021-12-14 10:37 Mike Pagano
2021-12-08 12:57 Mike Pagano
2021-11-26 12:01 Mike Pagano
2021-11-12 13:38 Mike Pagano
2021-11-02 17:06 Mike Pagano
2021-10-27 12:00 Mike Pagano
2021-10-17 13:14 Mike Pagano
2021-10-09 21:35 Mike Pagano
2021-10-06 11:32 Mike Pagano
2021-09-26 14:15 Mike Pagano
2021-09-22 11:42 Mike Pagano
2021-09-20 22:06 Mike Pagano
2021-09-03 11:24 Mike Pagano
2021-08-26 14:03 Mike Pagano
2021-08-25 23:14 Mike Pagano
2021-08-25 23:13 Mike Pagano
2021-08-15 20:10 Mike Pagano
2021-08-08 13:41 Mike Pagano
2021-08-04 11:55 Mike Pagano
2021-08-03 12:49 Mike Pagano
2021-07-28 12:39 Mike Pagano
2021-07-20 15:29 Alice Ferrazzi
2021-07-11 14:47 Mike Pagano
2021-06-30 14:28 Mike Pagano
2021-06-17 14:23 Alice Ferrazzi
2021-06-17 11:08 Alice Ferrazzi
2021-06-10 11:10 Mike Pagano
2021-06-03 10:41 Alice Ferrazzi
2021-05-26 12:03 Mike Pagano
2021-05-22 10:01 Mike Pagano
2021-04-28 11:03 Alice Ferrazzi
2021-04-16 11:19 Alice Ferrazzi
2021-04-10 13:22 Mike Pagano
2021-04-07 12:14 Mike Pagano
2021-03-30 14:14 Mike Pagano
2021-03-24 12:07 Mike Pagano
2021-03-17 15:58 Mike Pagano
2021-03-11 14:04 Mike Pagano
2021-03-07 15:13 Mike Pagano
2021-03-03 17:24 Alice Ferrazzi
2021-02-23 13:38 Alice Ferrazzi
2021-02-10 10:15 Alice Ferrazzi
2021-02-05 14:53 Alice Ferrazzi
2021-02-03 23:25 Mike Pagano
2021-01-30 13:18 Alice Ferrazzi
2021-01-23 16:34 Mike Pagano
2021-01-17 16:22 Mike Pagano
2021-01-12 20:08 Mike Pagano
2021-01-09 12:54 Mike Pagano
2020-12-29 14:18 Mike Pagano
2020-12-11 12:54 Mike Pagano
2020-12-02 12:48 Mike Pagano
2020-11-24 13:39 Mike Pagano
2020-11-22 19:12 Mike Pagano
2020-11-18 19:23 Mike Pagano
2020-11-11 15:32 Mike Pagano
2020-11-10 13:54 Mike Pagano
2020-10-29 11:17 Mike Pagano
2020-10-17 10:14 Mike Pagano
2020-10-14 20:34 Mike Pagano
2020-10-01 19:03 Mike Pagano
2020-10-01 18:59 Mike Pagano
2020-09-24 16:02 Mike Pagano
2020-09-23 11:59 Mike Pagano
2020-09-23 11:57 Mike Pagano
2020-09-12 17:31 Mike Pagano
2020-09-03 11:34 Mike Pagano
2020-08-26 11:13 Mike Pagano
2020-08-21 11:23 Alice Ferrazzi
2020-08-21 11:02 Alice Ferrazzi
2020-07-31 16:13 Mike Pagano
2020-07-22 12:30 Mike Pagano
2020-07-09 12:07 Mike Pagano
2020-07-01 12:10 Mike Pagano
2020-06-22 14:44 Mike Pagano
2020-06-11 11:28 Mike Pagano
2020-06-03 11:37 Mike Pagano
2020-05-27 15:26 Mike Pagano
2020-05-20 11:24 Mike Pagano
2020-05-13 12:50 Mike Pagano
2020-05-11 22:52 Mike Pagano
2020-05-05 17:39 Mike Pagano
2020-05-02 19:22 Mike Pagano
2020-04-24 12:01 Mike Pagano
2020-04-15 17:55 Mike Pagano
2020-04-13 11:15 Mike Pagano
2020-04-02 18:55 Mike Pagano
2020-03-20 11:54 Mike Pagano
2020-03-11 10:15 Mike Pagano
2020-02-28 15:29 Mike Pagano
2020-02-14 23:36 Mike Pagano
2020-02-05 14:48 Mike Pagano
2020-01-29 12:36 Mike Pagano
2020-01-23 11:02 Mike Pagano
2020-01-14 22:26 Mike Pagano
2020-01-12 14:52 Mike Pagano
2020-01-04 16:48 Mike Pagano
2019-12-21 14:54 Mike Pagano
2019-12-05 15:17 Alice Ferrazzi
2019-11-29 21:39 Thomas Deutschmann
2019-11-28 23:51 Mike Pagano
2019-11-25 12:08 Mike Pagano
2019-11-16 10:54 Mike Pagano
2019-11-12 20:58 Mike Pagano
2019-11-10 16:15 Mike Pagano
2019-11-06 14:24 Mike Pagano
2019-10-29 11:16 Mike Pagano
2019-10-17 22:21 Mike Pagano
2019-10-07 17:37 Mike Pagano
2019-10-05 11:39 Mike Pagano
2019-09-21 15:57 Mike Pagano
2019-09-19 23:16 Mike Pagano
2019-09-16 12:22 Mike Pagano
2019-09-10 11:10 Mike Pagano
2019-09-06 17:18 Mike Pagano
2019-08-25 17:34 Mike Pagano
2019-08-11 10:59 Mike Pagano
2019-08-06 19:16 Mike Pagano
2019-08-04 16:05 Mike Pagano
2019-07-21 14:38 Mike Pagano
2019-07-10 11:03 Mike Pagano
2019-06-27 11:10 Mike Pagano
2019-06-22 19:04 Mike Pagano
2019-06-17 19:19 Mike Pagano
2019-06-11 17:40 Mike Pagano
2019-06-11 12:39 Mike Pagano
2019-05-31 16:42 Mike Pagano
2019-05-26 17:12 Mike Pagano
2019-05-21 17:14 Mike Pagano
2019-05-16 22:59 Mike Pagano
2019-05-14 20:08 Mike Pagano
2019-05-10 19:38 Mike Pagano
2019-05-08 10:03 Mike Pagano
2019-05-04 18:26 Mike Pagano
2019-05-02 10:16 Mike Pagano
2019-04-27 17:29 Mike Pagano
2019-04-20 11:06 Mike Pagano
2019-04-19 19:54 Mike Pagano
2019-04-05 21:42 Mike Pagano
2019-04-03 10:48 Mike Pagano
2019-03-27 10:20 Mike Pagano
2019-03-23 14:57 Mike Pagano
2019-03-23 14:18 Mike Pagano
2019-03-19 16:56 Mike Pagano
2019-03-13 22:05 Mike Pagano
2019-03-06 19:12 Mike Pagano
2019-03-05 17:59 Mike Pagano
2019-02-27 11:20 Mike Pagano
2019-02-23 14:42 Mike Pagano
2019-02-20 11:16 Mike Pagano
2019-02-15 12:46 Mike Pagano
2019-02-12 20:51 Mike Pagano
2019-02-06 20:14 Mike Pagano
2019-01-31 11:22 Mike Pagano
2019-01-26 15:03 Mike Pagano
2019-01-23 11:29 Mike Pagano
2019-01-16 23:29 Mike Pagano
2019-01-13 19:26 Mike Pagano
2019-01-09 18:09 Mike Pagano
2019-01-09 17:52 Mike Pagano
2018-12-29 22:53 Mike Pagano
2018-12-29 18:51 Mike Pagano
2018-12-21 14:44 Mike Pagano
2018-12-17 11:39 Mike Pagano
2018-12-13 11:36 Mike Pagano
2018-12-08 13:25 Mike Pagano
2018-12-05 19:44 Mike Pagano
2018-12-01 18:00 Mike Pagano
2018-12-01 15:04 Mike Pagano
2018-11-27 16:22 Mike Pagano
2018-11-23 12:48 Mike Pagano
2018-11-23 12:45 Mike Pagano
2018-11-21 12:20 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-14 14:37 Mike Pagano
2018-11-13 21:20 Mike Pagano
2018-11-11  1:44 Mike Pagano
2018-11-11  1:31 Mike Pagano
2018-11-10 21:30 Mike Pagano
2018-10-20 12:43 Mike Pagano
2018-10-18 10:25 Mike Pagano
2018-10-13 16:34 Mike Pagano
2018-10-10 11:19 Mike Pagano
2018-10-04 10:40 Mike Pagano
2018-09-29 13:33 Mike Pagano
2018-09-26 10:42 Mike Pagano
2018-09-19 22:38 Mike Pagano
2018-09-15 10:10 Mike Pagano
2018-09-09 23:27 Mike Pagano
2018-09-05 15:27 Mike Pagano
2018-08-24 11:43 Mike Pagano
2018-08-22 10:06 Alice Ferrazzi
2018-08-18 18:07 Mike Pagano
2018-08-17 19:32 Mike Pagano
2018-08-17 19:25 Mike Pagano
2018-08-16 11:51 Mike Pagano
2018-08-15 16:46 Mike Pagano
2018-08-09 10:52 Mike Pagano
2018-08-07 18:12 Mike Pagano
2018-08-03 12:25 Mike Pagano
2018-07-28 10:38 Mike Pagano
2018-07-25 10:26 Mike Pagano
2018-07-22 15:14 Mike Pagano
2018-07-17 10:25 Mike Pagano
2018-07-12 15:42 Alice Ferrazzi
2018-07-03 13:16 Mike Pagano
2018-06-26 16:34 Alice Ferrazzi
2018-06-16 15:42 Mike Pagano
2018-06-13 15:03 Mike Pagano
2018-06-06 18:04 Mike Pagano
2018-06-05 11:21 Mike Pagano
2018-05-30 22:34 Mike Pagano
2018-05-30 11:39 Mike Pagano
2018-05-25 14:54 Mike Pagano
2018-05-22 17:28 Mike Pagano
2018-05-20 22:20 Mike Pagano
2018-05-16 10:23 Mike Pagano
2018-05-09 10:54 Mike Pagano
2018-05-02 16:13 Mike Pagano
2018-04-30 10:29 Mike Pagano
2018-04-24 11:30 Mike Pagano
2018-04-20 11:12 Mike Pagano
2018-04-13 22:21 Mike Pagano
2018-04-08 14:26 Mike Pagano
2018-03-31 22:17 Mike Pagano
2018-03-28 17:42 Mike Pagano
2018-03-25 14:31 Mike Pagano
2018-03-25 13:39 Mike Pagano
2018-03-22 12:58 Mike Pagano
2018-03-18 22:15 Mike Pagano
2018-03-11 18:26 Mike Pagano
2018-03-05  2:38 Alice Ferrazzi
2018-02-28 18:46 Alice Ferrazzi
2018-02-28 15:02 Alice Ferrazzi
2018-02-25 15:47 Mike Pagano
2018-02-22 23:22 Mike Pagano
2018-02-17 15:02 Alice Ferrazzi
2018-02-13 13:25 Alice Ferrazzi
2018-02-03 21:22 Mike Pagano
2018-01-31 13:31 Alice Ferrazzi
2018-01-23 21:17 Mike Pagano
2018-01-17 10:18 Alice Ferrazzi
2018-01-17 10:18 Alice Ferrazzi
2018-01-17  9:16 Alice Ferrazzi
2018-01-15 14:57 Alice Ferrazzi
2018-01-10 12:21 Alice Ferrazzi
2018-01-10 11:47 Mike Pagano
2018-01-05 15:54 Alice Ferrazzi
2018-01-05 15:04 Alice Ferrazzi
2018-01-02 20:13 Mike Pagano
2017-12-29 17:20 Alice Ferrazzi
2017-12-25 14:36 Alice Ferrazzi
2017-12-20 12:44 Mike Pagano
2017-12-16 17:42 Alice Ferrazzi
2017-12-14  8:58 Alice Ferrazzi
2017-12-09 23:29 Mike Pagano
2017-12-05 11:38 Mike Pagano
2017-11-30 12:19 Alice Ferrazzi
2017-11-24  9:44 Alice Ferrazzi
2017-11-21  9:18 Alice Ferrazzi
2017-11-18 18:24 Mike Pagano
2017-11-15 15:44 Mike Pagano
2017-11-08 13:49 Mike Pagano
2017-11-02 10:03 Mike Pagano
2017-10-27 10:29 Mike Pagano
2017-10-21 20:15 Mike Pagano
2017-10-18 13:46 Mike Pagano
2017-10-12 22:26 Mike Pagano
2017-10-12 12:37 Mike Pagano
2017-10-08 14:23 Mike Pagano
2017-10-08 14:21 Mike Pagano
2017-10-08 14:13 Mike Pagano
2017-10-05 11:38 Mike Pagano
2017-09-27 16:38 Mike Pagano
2017-09-20 10:11 Mike Pagano
2017-09-14 11:39 Mike Pagano
2017-09-13 22:28 Mike Pagano
2017-09-13 16:25 Mike Pagano
2017-09-10 14:38 Mike Pagano
2017-09-07 22:43 Mike Pagano
2017-09-02 17:45 Mike Pagano
2017-08-30 10:06 Mike Pagano
2017-08-25 10:59 Mike Pagano
2017-08-16 22:29 Mike Pagano
2017-08-13 16:51 Mike Pagano
2017-08-11 17:41 Mike Pagano
2017-08-07 10:26 Mike Pagano
2017-05-14 13:31 Mike Pagano
2017-05-08 10:43 Mike Pagano
2017-05-03 17:45 Mike Pagano
2017-04-27  9:05 Alice Ferrazzi
2017-04-22 17:01 Mike Pagano
2017-04-18 10:23 Mike Pagano
2017-04-12 18:01 Mike Pagano
2017-04-08 13:53 Mike Pagano
2017-03-31 10:44 Mike Pagano
2017-03-30 18:15 Mike Pagano
2017-03-26 11:54 Mike Pagano
2017-03-23 18:38 Mike Pagano
2017-03-22 12:42 Mike Pagano
2017-03-18 14:34 Mike Pagano
2017-03-15 19:21 Mike Pagano
2017-03-12 12:22 Mike Pagano
2017-03-02 16:23 Mike Pagano
2017-02-26 20:38 Mike Pagano
2017-02-26 20:36 Mike Pagano
2017-02-23 20:34 Mike Pagano
2017-02-23 20:11 Mike Pagano
2017-02-18 20:37 Mike Pagano
2017-02-18 16:13 Alice Ferrazzi
2017-02-15 16:02 Alice Ferrazzi
2017-02-14 23:08 Mike Pagano
2017-02-09 11:11 Alice Ferrazzi
2017-02-04 11:34 Alice Ferrazzi
2017-02-01 13:07 Alice Ferrazzi
2017-01-29 23:08 Alice Ferrazzi
2017-01-20 11:33 Alice Ferrazzi
2017-01-15 22:59 Mike Pagano
2017-01-12 22:53 Mike Pagano
2017-01-09 12:41 Mike Pagano
2017-01-07  0:55 Mike Pagano
2017-01-06 23:09 Mike Pagano
2016-12-31 19:39 Mike Pagano
2016-12-11 23:20 Mike Pagano

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1485420579.94c945baf75ef5a35c3c220ddda71d0060b72aa6.alicef@gentoo \
    --to=alicef@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox.