From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 20 Feb 2019 11:19:15 +0000 (UTC)
Message-ID: <1550661521.9357e267eea9e76458572750473116becb53c079.mpagano@gentoo>

commit:     9357e267eea9e76458572750473116becb53c079
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 20 11:18:41 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Feb 20 11:18:41 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9357e267

proj/linux-patches: Linux patch 4.19.24

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1023_linux-4.19.24.patch | 3179 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3183 insertions(+)

diff --git a/0000_README b/0000_README
index bec430b..62f98e4 100644
--- a/0000_README
+++ b/0000_README
@@ -135,6 +135,10 @@ Patch:  1022_linux-4.19.23.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.19.23
 
+Patch:  1023_linux-4.19.24.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.19.24
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1023_linux-4.19.24.patch b/1023_linux-4.19.24.patch
new file mode 100644
index 0000000..9580fbc
--- /dev/null
+++ b/1023_linux-4.19.24.patch
@@ -0,0 +1,3179 @@
+diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt
+index aededdbc262b..f9a7c984274c 100644
+--- a/Documentation/devicetree/bindings/eeprom/at24.txt
++++ b/Documentation/devicetree/bindings/eeprom/at24.txt
+@@ -27,6 +27,7 @@ Required properties:
+                 "atmel,24c256",
+                 "atmel,24c512",
+                 "atmel,24c1024",
++                "atmel,24c2048",
+ 
+                 If <manufacturer> is not "atmel", then a fallback must be used
+                 with the same <model> and "atmel" as manufacturer.
+diff --git a/Makefile b/Makefile
+index 3dcf3f2363c1..370ad0d34076 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 23
++SUBLEVEL = 24
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
+index 4d17cacd1462..432402c8e47f 100644
+--- a/arch/alpha/include/asm/irq.h
++++ b/arch/alpha/include/asm/irq.h
+@@ -56,15 +56,15 @@
+ 
+ #elif defined(CONFIG_ALPHA_DP264) || \
+       defined(CONFIG_ALPHA_LYNX)  || \
+-      defined(CONFIG_ALPHA_SHARK) || \
+-      defined(CONFIG_ALPHA_EIGER)
++      defined(CONFIG_ALPHA_SHARK)
+ # define NR_IRQS	64
+ 
+ #elif defined(CONFIG_ALPHA_TITAN)
+ #define NR_IRQS		80
+ 
+ #elif defined(CONFIG_ALPHA_RAWHIDE) || \
+-	defined(CONFIG_ALPHA_TAKARA)
++      defined(CONFIG_ALPHA_TAKARA) || \
++      defined(CONFIG_ALPHA_EIGER)
+ # define NR_IRQS	128
+ 
+ #elif defined(CONFIG_ALPHA_WILDFIRE)
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index d73dc473fbb9..188fc9256baf 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ /* Macro for exception fixup code to access integer registers.  */
+ #define dpf_reg(r)							\
+ 	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
+-				 (r) <= 18 ? (r)+8 : (r)-10])
++				 (r) <= 18 ? (r)+10 : (r)-10])
+ 
+ asmlinkage void
+ do_page_fault(unsigned long address, unsigned long mmcsr,
+diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
+index f9b757905845..016616cc036c 100644
+--- a/arch/arm/boot/dts/da850-evm.dts
++++ b/arch/arm/boot/dts/da850-evm.dts
+@@ -94,6 +94,28 @@
+ 		regulator-boot-on;
+ 	};
+ 
++	baseboard_3v3: fixedregulator-3v3 {
++		/* TPS73701DCQ */
++		compatible = "regulator-fixed";
++		regulator-name = "baseboard_3v3";
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++		vin-supply = <&vbat>;
++		regulator-always-on;
++		regulator-boot-on;
++	};
++
++	baseboard_1v8: fixedregulator-1v8 {
++		/* TPS73701DCQ */
++		compatible = "regulator-fixed";
++		regulator-name = "baseboard_1v8";
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <1800000>;
++		vin-supply = <&vbat>;
++		regulator-always-on;
++		regulator-boot-on;
++	};
++
+ 	backlight_lcd: backlight-regulator {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "lcd_backlight_pwr";
+@@ -105,7 +127,7 @@
+ 
+ 	sound {
+ 		compatible = "simple-audio-card";
+-		simple-audio-card,name = "DA850/OMAP-L138 EVM";
++		simple-audio-card,name = "DA850-OMAPL138 EVM";
+ 		simple-audio-card,widgets =
+ 			"Line", "Line In",
+ 			"Line", "Line Out";
+@@ -210,10 +232,9 @@
+ 
+ 		/* Regulators */
+ 		IOVDD-supply = <&vdcdc2_reg>;
+-		/* Derived from VBAT: Baseboard 3.3V / 1.8V */
+-		AVDD-supply = <&vbat>;
+-		DRVDD-supply = <&vbat>;
+-		DVDD-supply = <&vbat>;
++		AVDD-supply = <&baseboard_3v3>;
++		DRVDD-supply = <&baseboard_3v3>;
++		DVDD-supply = <&baseboard_1v8>;
+ 	};
+ 	tca6416: gpio@20 {
+ 		compatible = "ti,tca6416";
+diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
+index 0177e3ed20fe..3a2fa6e035a3 100644
+--- a/arch/arm/boot/dts/da850-lcdk.dts
++++ b/arch/arm/boot/dts/da850-lcdk.dts
+@@ -39,9 +39,39 @@
+ 		};
+ 	};
+ 
++	vcc_5vd: fixedregulator-vcc_5vd {
++		compatible = "regulator-fixed";
++		regulator-name = "vcc_5vd";
++		regulator-min-microvolt = <5000000>;
++		regulator-max-microvolt = <5000000>;
++		regulator-boot-on;
++	};
++
++	vcc_3v3d: fixedregulator-vcc_3v3d {
++		/* TPS650250 - VDCDC1 */
++		compatible = "regulator-fixed";
++		regulator-name = "vcc_3v3d";
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++		vin-supply = <&vcc_5vd>;
++		regulator-always-on;
++		regulator-boot-on;
++	};
++
++	vcc_1v8d: fixedregulator-vcc_1v8d {
++		/* TPS650250 - VDCDC2 */
++		compatible = "regulator-fixed";
++		regulator-name = "vcc_1v8d";
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <1800000>;
++		vin-supply = <&vcc_5vd>;
++		regulator-always-on;
++		regulator-boot-on;
++	};
++
+ 	sound {
+ 		compatible = "simple-audio-card";
+-		simple-audio-card,name = "DA850/OMAP-L138 LCDK";
++		simple-audio-card,name = "DA850-OMAPL138 LCDK";
+ 		simple-audio-card,widgets =
+ 			"Line", "Line In",
+ 			"Line", "Line Out";
+@@ -221,6 +251,12 @@
+ 		compatible = "ti,tlv320aic3106";
+ 		reg = <0x18>;
+ 		status = "okay";
++
++		/* Regulators */
++		IOVDD-supply = <&vcc_3v3d>;
++		AVDD-supply = <&vcc_3v3d>;
++		DRVDD-supply = <&vcc_3v3d>;
++		DVDD-supply = <&vcc_1v8d>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+index cbaf06f2f78e..eb917462b219 100644
+--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
++++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+@@ -36,8 +36,8 @@
+ 		compatible = "gpio-fan";
+ 		pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
+ 		pinctrl-names = "default";
+-		gpios = <&gpio1 14 GPIO_ACTIVE_LOW
+-			 &gpio1 13 GPIO_ACTIVE_LOW>;
++		gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
++			 &gpio1 13 GPIO_ACTIVE_HIGH>;
+ 		gpio-fan,speed-map = <0    0
+ 				      3000 1
+ 				      6000 2>;
+diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
+index ab6f640b282b..8b8db9d8e912 100644
+--- a/arch/arm/boot/dts/omap5-board-common.dtsi
++++ b/arch/arm/boot/dts/omap5-board-common.dtsi
+@@ -317,7 +317,8 @@
+ 
+ 	palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+ 		pinctrl-single,pins = <
+-			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
++			/* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
++			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
+ 		>;
+ 	};
+ 
+@@ -385,7 +386,8 @@
+ 
+ 	palmas: palmas@48 {
+ 		compatible = "ti,palmas";
+-		interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
++		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
++		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
+ 		reg = <0x48>;
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+@@ -651,7 +653,8 @@
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&twl6040_pins>;
+ 
+-		interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
++		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
++		interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
+ 
+ 		/* audpwron gpio defined in the board specific dts */
+ 
+diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
+index 5e21fb430a65..e78d3718f145 100644
+--- a/arch/arm/boot/dts/omap5-cm-t54.dts
++++ b/arch/arm/boot/dts/omap5-cm-t54.dts
+@@ -181,6 +181,13 @@
+ 			OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6)  /* llib_wakereqin.gpio1_wk15 */
+ 		>;
+ 	};
++
++	palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
++		pinctrl-single,pins = <
++			/* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
++			OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
++		>;
++	};
+ };
+ 
+ &omap5_pmx_core {
+@@ -414,8 +421,11 @@
+ 
+ 	palmas: palmas@48 {
+ 		compatible = "ti,palmas";
+-		interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
+ 		reg = <0x48>;
++		pinctrl-0 = <&palmas_sys_nirq_pins>;
++		pinctrl-names = "default";
++		/* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
++		interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+ 		ti,system-power-controller;
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index b17ee03d280b..88286dd483ff 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -467,6 +467,17 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
+ #endif
+ 	.endm
+ 
++	.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
++#ifdef CONFIG_CPU_SPECTRE
++	sub	\tmp, \limit, #1
++	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
++	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
++	subhss	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
++	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
++	csdb
++#endif
++	.endm
++
+ 	.macro	uaccess_disable, tmp, isb=1
+ #ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ 	/*
+diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
+index 0d289240b6ca..775cac3c02bb 100644
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -111,6 +111,7 @@
+ #include <linux/kernel.h>
+ 
+ extern unsigned int processor_id;
++struct proc_info_list *lookup_processor(u32 midr);
+ 
+ #ifdef CONFIG_CPU_CP15
+ #define read_cpuid(reg)							\
+diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
+index e25f4392e1b2..e1b6f280ab08 100644
+--- a/arch/arm/include/asm/proc-fns.h
++++ b/arch/arm/include/asm/proc-fns.h
+@@ -23,7 +23,7 @@ struct mm_struct;
+ /*
+  * Don't change this structure - ASM code relies on it.
+  */
+-extern struct processor {
++struct processor {
+ 	/* MISC
+ 	 * get data abort address/flags
+ 	 */
+@@ -79,9 +79,13 @@ extern struct processor {
+ 	unsigned int suspend_size;
+ 	void (*do_suspend)(void *);
+ 	void (*do_resume)(void *);
+-} processor;
++};
+ 
+ #ifndef MULTI_CPU
++static inline void init_proc_vtable(const struct processor *p)
++{
++}
++
+ extern void cpu_proc_init(void);
+ extern void cpu_proc_fin(void);
+ extern int cpu_do_idle(void);
+@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
+ extern void cpu_do_suspend(void *);
+ extern void cpu_do_resume(void *);
+ #else
+-#define cpu_proc_init			processor._proc_init
+-#define cpu_proc_fin			processor._proc_fin
+-#define cpu_reset			processor.reset
+-#define cpu_do_idle			processor._do_idle
+-#define cpu_dcache_clean_area		processor.dcache_clean_area
+-#define cpu_set_pte_ext			processor.set_pte_ext
+-#define cpu_do_switch_mm		processor.switch_mm
+ 
+-/* These three are private to arch/arm/kernel/suspend.c */
+-#define cpu_do_suspend			processor.do_suspend
+-#define cpu_do_resume			processor.do_resume
++extern struct processor processor;
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++#include <linux/smp.h>
++/*
++ * This can't be a per-cpu variable because we need to access it before
++ * per-cpu has been initialised.  We have a couple of functions that are
++ * called in a pre-emptible context, and so can't use smp_processor_id()
++ * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
++ * function pointers for these are identical across all CPUs.
++ */
++extern struct processor *cpu_vtable[];
++#define PROC_VTABLE(f)			cpu_vtable[smp_processor_id()]->f
++#define PROC_TABLE(f)			cpu_vtable[0]->f
++static inline void init_proc_vtable(const struct processor *p)
++{
++	unsigned int cpu = smp_processor_id();
++	*cpu_vtable[cpu] = *p;
++	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
++		     cpu_vtable[0]->dcache_clean_area);
++	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
++		     cpu_vtable[0]->set_pte_ext);
++}
++#else
++#define PROC_VTABLE(f)			processor.f
++#define PROC_TABLE(f)			processor.f
++static inline void init_proc_vtable(const struct processor *p)
++{
++	processor = *p;
++}
++#endif
++
++#define cpu_proc_init			PROC_VTABLE(_proc_init)
++#define cpu_check_bugs			PROC_VTABLE(check_bugs)
++#define cpu_proc_fin			PROC_VTABLE(_proc_fin)
++#define cpu_reset			PROC_VTABLE(reset)
++#define cpu_do_idle			PROC_VTABLE(_do_idle)
++#define cpu_dcache_clean_area		PROC_TABLE(dcache_clean_area)
++#define cpu_set_pte_ext			PROC_TABLE(set_pte_ext)
++#define cpu_do_switch_mm		PROC_VTABLE(switch_mm)
++
++/* These two are private to arch/arm/kernel/suspend.c */
++#define cpu_do_suspend			PROC_VTABLE(do_suspend)
++#define cpu_do_resume			PROC_VTABLE(do_resume)
+ #endif
+ 
+ extern void cpu_resume(void);
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 9b37b6ab27fe..8f55dc520a3e 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
+ struct user_vfp;
+ struct user_vfp_exc;
+ 
+-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+-					   struct user_vfp_exc __user *);
++extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
++					   struct user_vfp_exc *);
+ extern int vfp_restore_user_hwstate(struct user_vfp *,
+ 				    struct user_vfp_exc *);
+ #endif
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 5451e1f05a19..c136eef8f690 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -69,6 +69,14 @@ extern int __put_user_bad(void);
+ static inline void set_fs(mm_segment_t fs)
+ {
+ 	current_thread_info()->addr_limit = fs;
++
++	/*
++	 * Prevent a mispredicted conditional call to set_fs from forwarding
++	 * the wrong address limit to access_ok under speculation.
++	 */
++	dsb(nsh);
++	isb();
++
+ 	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+ }
+ 
+@@ -91,6 +99,32 @@ static inline void set_fs(mm_segment_t fs)
+ #define __inttype(x) \
+ 	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+ 
++/*
++ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
++ * is above the current addr_limit.
++ */
++#define uaccess_mask_range_ptr(ptr, size)			\
++	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
++static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
++						    size_t size)
++{
++	void __user *safe_ptr = (void __user *)ptr;
++	unsigned long tmp;
++
++	asm volatile(
++	"	sub	%1, %3, #1\n"
++	"	subs	%1, %1, %0\n"
++	"	addhs	%1, %1, #1\n"
++	"	subhss	%1, %1, %2\n"
++	"	movlo	%0, #0\n"
++	: "+r" (safe_ptr), "=&r" (tmp)
++	: "r" (size), "r" (current_thread_info()->addr_limit)
++	: "cc");
++
++	csdb();
++	return safe_ptr;
++}
++
+ /*
+  * Single-value transfer routines.  They automatically use the right
+  * size if we just have the right pointer type.  Note that the functions
+@@ -362,6 +396,14 @@ do {									\
+ 	__pu_err;							\
+ })
+ 
++#ifdef CONFIG_CPU_SPECTRE
++/*
++ * When mitigating Spectre variant 1.1, all accessors need to include
++ * verification of the address space.
++ */
++#define __put_user(x, ptr) put_user(x, ptr)
++
++#else
+ #define __put_user(x, ptr)						\
+ ({									\
+ 	long __pu_err = 0;						\
+@@ -369,12 +411,6 @@ do {									\
+ 	__pu_err;							\
+ })
+ 
+-#define __put_user_error(x, ptr, err)					\
+-({									\
+-	__put_user_switch((x), (ptr), (err), __put_user_nocheck);	\
+-	(void) 0;							\
+-})
+-
+ #define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
+ 	do {								\
+ 		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
+@@ -454,6 +490,7 @@ do {									\
+ 	: "r" (x), "i" (-EFAULT)				\
+ 	: "cc")
+ 
++#endif /* !CONFIG_CPU_SPECTRE */
+ 
+ #ifdef CONFIG_MMU
+ extern unsigned long __must_check
+diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
+index 7be511310191..d41d3598e5e5 100644
+--- a/arch/arm/kernel/bugs.c
++++ b/arch/arm/kernel/bugs.c
+@@ -6,8 +6,8 @@
+ void check_other_bugs(void)
+ {
+ #ifdef MULTI_CPU
+-	if (processor.check_bugs)
+-		processor.check_bugs();
++	if (cpu_check_bugs)
++		cpu_check_bugs();
+ #endif
+ }
+ 
+diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
+index 6e0375e7db05..997b02302c31 100644
+--- a/arch/arm/kernel/head-common.S
++++ b/arch/arm/kernel/head-common.S
+@@ -145,6 +145,9 @@ __mmap_switched_data:
+ #endif
+ 	.size	__mmap_switched_data, . - __mmap_switched_data
+ 
++	__FINIT
++	.text
++
+ /*
+  * This provides a C-API version of __lookup_processor_type
+  */
+@@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
+ 	ldmfd	sp!, {r4 - r6, r9, pc}
+ ENDPROC(lookup_processor_type)
+ 
+-	__FINIT
+-	.text
+-
+ /*
+  * Read processor ID register (CP#15, CR0), and look up in the linker-built
+  * supported processor list.  Note that we can't use the absolute addresses
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 4c249cb261f3..7bbaa293a38c 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2);
+ 
+ #ifdef MULTI_CPU
+ struct processor processor __ro_after_init;
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++struct processor *cpu_vtable[NR_CPUS] = {
++	[0] = &processor,
++};
++#endif
+ #endif
+ #ifdef MULTI_TLB
+ struct cpu_tlb_fns cpu_tlb __ro_after_init;
+@@ -667,28 +672,33 @@ static void __init smp_build_mpidr_hash(void)
+ }
+ #endif
+ 
+-static void __init setup_processor(void)
++/*
++ * locate processor in the list of supported processor types.  The linker
++ * builds this table for us from the entries in arch/arm/mm/proc-*.S
++ */
++struct proc_info_list *lookup_processor(u32 midr)
+ {
+-	struct proc_info_list *list;
++	struct proc_info_list *list = lookup_processor_type(midr);
+ 
+-	/*
+-	 * locate processor in the list of supported processor
+-	 * types.  The linker builds this table for us from the
+-	 * entries in arch/arm/mm/proc-*.S
+-	 */
+-	list = lookup_processor_type(read_cpuid_id());
+ 	if (!list) {
+-		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
+-		       read_cpuid_id());
+-		while (1);
++		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
++		       smp_processor_id(), midr);
++		while (1)
++		/* can't use cpu_relax() here as it may require MMU setup */;
+ 	}
+ 
++	return list;
++}
++
++static void __init setup_processor(void)
++{
++	unsigned int midr = read_cpuid_id();
++	struct proc_info_list *list = lookup_processor(midr);
++
+ 	cpu_name = list->cpu_name;
+ 	__cpu_architecture = __get_cpu_architecture();
+ 
+-#ifdef MULTI_CPU
+-	processor = *list->proc;
+-#endif
++	init_proc_vtable(list->proc);
+ #ifdef MULTI_TLB
+ 	cpu_tlb = *list->tlb;
+ #endif
+@@ -700,7 +710,7 @@ static void __init setup_processor(void)
+ #endif
+ 
+ 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+-		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
++		list->cpu_name, midr, midr & 15,
+ 		proc_arch[cpu_architecture()], get_cr());
+ 
+ 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index b8f766cf3a90..b908382b69ff 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -77,8 +77,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
+ 		kframe->magic = IWMMXT_MAGIC;
+ 		kframe->size = IWMMXT_STORAGE_SIZE;
+ 		iwmmxt_task_copy(current_thread_info(), &kframe->storage);
+-
+-		err = __copy_to_user(frame, kframe, sizeof(*frame));
+ 	} else {
+ 		/*
+ 		 * For bug-compatibility with older kernels, some space
+@@ -86,10 +84,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
+ 		 * Set the magic and size appropriately so that properly
+ 		 * written userspace can skip it reliably:
+ 		 */
+-		__put_user_error(DUMMY_MAGIC, &frame->magic, err);
+-		__put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
++		*kframe = (struct iwmmxt_sigframe) {
++			.magic = DUMMY_MAGIC,
++			.size  = IWMMXT_STORAGE_SIZE,
++		};
+ 	}
+ 
++	err = __copy_to_user(frame, kframe, sizeof(*kframe));
++
+ 	return err;
+ }
+ 
+@@ -135,17 +137,18 @@ static int restore_iwmmxt_context(char __user **auxp)
+ 
+ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+ {
+-	const unsigned long magic = VFP_MAGIC;
+-	const unsigned long size = VFP_STORAGE_SIZE;
++	struct vfp_sigframe kframe;
+ 	int err = 0;
+ 
+-	__put_user_error(magic, &frame->magic, err);
+-	__put_user_error(size, &frame->size, err);
++	memset(&kframe, 0, sizeof(kframe));
++	kframe.magic = VFP_MAGIC;
++	kframe.size = VFP_STORAGE_SIZE;
+ 
++	err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
+ 	if (err)
+-		return -EFAULT;
++		return err;
+ 
+-	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
++	return __copy_to_user(frame, &kframe, sizeof(kframe));
+ }
+ 
+ static int restore_vfp_context(char __user **auxp)
+@@ -288,30 +291,35 @@ static int
+ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
+ {
+ 	struct aux_sigframe __user *aux;
++	struct sigcontext context;
+ 	int err = 0;
+ 
+-	__put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
+-	__put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
+-	__put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
+-	__put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
+-	__put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
+-	__put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
+-	__put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
+-	__put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
+-	__put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
+-	__put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
+-	__put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
+-	__put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
+-	__put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
+-	__put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
+-	__put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
+-	__put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
+-	__put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+-
+-	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
+-	__put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
+-	__put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
+-	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
++	context = (struct sigcontext) {
++		.arm_r0        = regs->ARM_r0,
++		.arm_r1        = regs->ARM_r1,
++		.arm_r2        = regs->ARM_r2,
++		.arm_r3        = regs->ARM_r3,
++		.arm_r4        = regs->ARM_r4,
++		.arm_r5        = regs->ARM_r5,
++		.arm_r6        = regs->ARM_r6,
++		.arm_r7        = regs->ARM_r7,
++		.arm_r8        = regs->ARM_r8,
++		.arm_r9        = regs->ARM_r9,
++		.arm_r10       = regs->ARM_r10,
++		.arm_fp        = regs->ARM_fp,
++		.arm_ip        = regs->ARM_ip,
++		.arm_sp        = regs->ARM_sp,
++		.arm_lr        = regs->ARM_lr,
++		.arm_pc        = regs->ARM_pc,
++		.arm_cpsr      = regs->ARM_cpsr,
++
++		.trap_no       = current->thread.trap_no,
++		.error_code    = current->thread.error_code,
++		.fault_address = current->thread.address,
++		.oldmask       = set->sig[0],
++	};
++
++	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
+ 
+ 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+ 
+@@ -328,7 +336,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
+ 	if (err == 0)
+ 		err |= preserve_vfp_context(&aux->vfp);
+ #endif
+-	__put_user_error(0, &aux->end_magic, err);
++	err |= __put_user(0, &aux->end_magic);
+ 
+ 	return err;
+ }
+@@ -491,7 +499,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+ 	/*
+ 	 * Set uc.uc_flags to a value which sc.trap_no would never have.
+ 	 */
+-	__put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
++	err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
+ 
+ 	err |= setup_sigframe(frame, regs, set);
+ 	if (err == 0)
+@@ -511,8 +519,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+ 
+ 	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+ 
+-	__put_user_error(0, &frame->sig.uc.uc_flags, err);
+-	__put_user_error(NULL, &frame->sig.uc.uc_link, err);
++	err |= __put_user(0, &frame->sig.uc.uc_flags);
++	err |= __put_user(NULL, &frame->sig.uc.uc_link);
+ 
+ 	err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
+ 	err |= setup_sigframe(&frame->sig, regs, set);
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index f574a5e0d589..3bf82232b1be 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -42,6 +42,7 @@
+ #include <asm/mmu_context.h>
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
++#include <asm/procinfo.h>
+ #include <asm/processor.h>
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
+ #endif
+ }
+ 
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++static int secondary_biglittle_prepare(unsigned int cpu)
++{
++	if (!cpu_vtable[cpu])
++		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
++
++	return cpu_vtable[cpu] ? 0 : -ENOMEM;
++}
++
++static void secondary_biglittle_init(void)
++{
++	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
++}
++#else
++static int secondary_biglittle_prepare(unsigned int cpu)
++{
++	return 0;
++}
++
++static void secondary_biglittle_init(void)
++{
++}
++#endif
++
+ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+ {
+ 	int ret;
+@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+ 	if (!smp_ops.smp_boot_secondary)
+ 		return -ENOSYS;
+ 
++	ret = secondary_biglittle_prepare(cpu);
++	if (ret)
++		return ret;
++
+ 	/*
+ 	 * We need to tell the secondary core where to find
+ 	 * its stack and the page tables.
+@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
+ 	struct mm_struct *mm = &init_mm;
+ 	unsigned int cpu;
+ 
++	secondary_biglittle_init();
++
+ 	/*
+ 	 * The identity mapping is uncached (strongly ordered), so
+ 	 * switch away from it before attempting any exclusive accesses.
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index f0dd4b6ebb63..40da0872170f 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+ 				    int maxevents, int timeout)
+ {
+ 	struct epoll_event *kbuf;
++	struct oabi_epoll_event e;
+ 	mm_segment_t fs;
+ 	long ret, err, i;
+ 
+@@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
+ 	set_fs(fs);
+ 	err = 0;
+ 	for (i = 0; i < ret; i++) {
+-		__put_user_error(kbuf[i].events, &events->events, err);
+-		__put_user_error(kbuf[i].data,   &events->data,   err);
++		e.events = kbuf[i].events;
++		e.data = kbuf[i].data;
++		err = __copy_to_user(events, &e, sizeof(e));
++		if (err)
++			break;
+ 		events++;
+ 	}
+ 	kfree(kbuf);
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index a826df3d3814..6709a8d33963 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -93,11 +93,7 @@ ENTRY(arm_copy_from_user)
+ #ifdef CONFIG_CPU_SPECTRE
+ 	get_thread_info r3
+ 	ldr	r3, [r3, #TI_ADDR_LIMIT]
+-	adds	ip, r1, r2	@ ip=addr+size
+-	sub	r3, r3, #1	@ addr_limit - 1
+-	cmpcc	ip, r3		@ if (addr+size > addr_limit - 1)
+-	movcs	r1, #0		@ addr = NULL
+-	csdb
++	uaccess_mask_range_ptr r1, r2, r3, ip
+ #endif
+ 
+ #include "copy_template.S"
+diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
+index caf5019d8161..970abe521197 100644
+--- a/arch/arm/lib/copy_to_user.S
++++ b/arch/arm/lib/copy_to_user.S
+@@ -94,6 +94,11 @@
+ 
+ ENTRY(__copy_to_user_std)
+ WEAK(arm_copy_to_user)
++#ifdef CONFIG_CPU_SPECTRE
++	get_thread_info r3
++	ldr	r3, [r3, #TI_ADDR_LIMIT]
++	uaccess_mask_range_ptr r0, r2, r3, ip
++#endif
+ 
+ #include "copy_template.S"
+ 
+@@ -108,4 +113,3 @@ ENDPROC(__copy_to_user_std)
+ 	rsb	r0, r0, r2
+ 	copy_abort_end
+ 	.popsection
+-
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 9b4ed1728616..73dc7360cbdd 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
+ 		n = __copy_to_user_std(to, from, n);
+ 		uaccess_restore(ua_flags);
+ 	} else {
+-		n = __copy_to_user_memcpy(to, from, n);
++		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
++					  from, n);
+ 	}
+ 	return n;
+ }
+diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
+index a109f6482413..0f916c245a2e 100644
+--- a/arch/arm/mach-integrator/impd1.c
++++ b/arch/arm/mach-integrator/impd1.c
+@@ -393,7 +393,11 @@ static int __ref impd1_probe(struct lm_device *dev)
+ 					      sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
+ 					      GFP_KERNEL);
+ 			chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
+-			mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
++			mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
++						  "lm%x:00700", dev->id);
++			if (!lookup || !chipname || !mmciname)
++				return -ENOMEM;
++
+ 			lookup->dev_id = mmciname;
+ 			/*
+ 			 * Offsets on GPIO block 1:
+diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
+index fc5fb776a710..17558be4bf0a 100644
+--- a/arch/arm/mach-omap2/omap-wakeupgen.c
++++ b/arch/arm/mach-omap2/omap-wakeupgen.c
+@@ -50,6 +50,9 @@
+ #define OMAP4_NR_BANKS		4
+ #define OMAP4_NR_IRQS		128
+ 
++#define SYS_NIRQ1_EXT_SYS_IRQ_1	7
++#define SYS_NIRQ2_EXT_SYS_IRQ_2	119
++
+ static void __iomem *wakeupgen_base;
+ static void __iomem *sar_base;
+ static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
+@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
+ 	irq_chip_unmask_parent(d);
+ }
+ 
++/*
++ * The sys_nirq pins bypass peripheral modules and are wired directly
++ * to MPUSS wakeupgen. They get automatically inverted for GIC.
++ */
++static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
++{
++	bool inverted = false;
++
++	switch (type) {
++	case IRQ_TYPE_LEVEL_LOW:
++		type &= ~IRQ_TYPE_LEVEL_MASK;
++		type |= IRQ_TYPE_LEVEL_HIGH;
++		inverted = true;
++		break;
++	case IRQ_TYPE_EDGE_FALLING:
++		type &= ~IRQ_TYPE_EDGE_BOTH;
++		type |= IRQ_TYPE_EDGE_RISING;
++		inverted = true;
++		break;
++	default:
++		break;
++	}
++
++	if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
++	    d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
++		pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
++			d->hwirq);
++
++	return irq_chip_set_type_parent(d, type);
++}
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
+ 
+@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
+ 	.irq_mask		= wakeupgen_mask,
+ 	.irq_unmask		= wakeupgen_unmask,
+ 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+-	.irq_set_type		= irq_chip_set_type_parent,
++	.irq_set_type		= wakeupgen_irq_set_type,
+ 	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+ #ifdef CONFIG_SMP
+ 	.irq_set_affinity	= irq_chip_set_affinity_parent,
+diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
+index 81d0efb055c6..5461d589a1e2 100644
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -274,6 +274,13 @@
+ 	.endm
+ 
+ .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
++/*
++ * If we are building for big.Little with branch predictor hardening,
++ * we need the processor function tables to remain available after boot.
++ */
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++	.section ".rodata"
++#endif
+ 	.type	\name\()_processor_functions, #object
+ 	.align 2
+ ENTRY(\name\()_processor_functions)
+@@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions)
+ 	.endif
+ 
+ 	.size	\name\()_processor_functions, . - \name\()_processor_functions
++#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
++	.previous
++#endif
+ .endm
+ 
+ .macro define_cache_functions name:req
+diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
+index 5544b82a2e7a..9a07916af8dd 100644
+--- a/arch/arm/mm/proc-v7-bugs.c
++++ b/arch/arm/mm/proc-v7-bugs.c
+@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
+ 	case ARM_CPU_PART_CORTEX_A17:
+ 	case ARM_CPU_PART_CORTEX_A73:
+ 	case ARM_CPU_PART_CORTEX_A75:
+-		if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
+-			goto bl_error;
+ 		per_cpu(harden_branch_predictor_fn, cpu) =
+ 			harden_branch_predictor_bpiall;
+ 		spectre_v2_method = "BPIALL";
+@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
+ 
+ 	case ARM_CPU_PART_CORTEX_A15:
+ 	case ARM_CPU_PART_BRAHMA_B15:
+-		if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
+-			goto bl_error;
+ 		per_cpu(harden_branch_predictor_fn, cpu) =
+ 			harden_branch_predictor_iciallu;
+ 		spectre_v2_method = "ICIALLU";
+@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
+ 					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ 			if ((int)res.a0 != 0)
+ 				break;
+-			if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
+-				goto bl_error;
+ 			per_cpu(harden_branch_predictor_fn, cpu) =
+ 				call_hvc_arch_workaround_1;
+-			processor.switch_mm = cpu_v7_hvc_switch_mm;
++			cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
+ 			spectre_v2_method = "hypervisor";
+ 			break;
+ 
+@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
+ 					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ 			if ((int)res.a0 != 0)
+ 				break;
+-			if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
+-				goto bl_error;
+ 			per_cpu(harden_branch_predictor_fn, cpu) =
+ 				call_smc_arch_workaround_1;
+-			processor.switch_mm = cpu_v7_smc_switch_mm;
++			cpu_do_switch_mm = cpu_v7_smc_switch_mm;
+ 			spectre_v2_method = "firmware";
+ 			break;
+ 
+@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
+ 	if (spectre_v2_method)
+ 		pr_info("CPU%u: Spectre v2: using %s workaround\n",
+ 			smp_processor_id(), spectre_v2_method);
+-	return;
+-
+-bl_error:
+-	pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
+-		cpu);
+ }
+ #else
+ static void cpu_v7_spectre_init(void)
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index dc7e6b50ef67..66c5e693428a 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -553,12 +553,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
+  * Save the current VFP state into the provided structures and prepare
+  * for entry into a new function (signal handler).
+  */
+-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+-				    struct user_vfp_exc __user *ufp_exc)
++int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
++				    struct user_vfp_exc *ufp_exc)
+ {
+ 	struct thread_info *thread = current_thread_info();
+ 	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+-	int err = 0;
+ 
+ 	/* Ensure that the saved hwstate is up-to-date. */
+ 	vfp_sync_hwstate(thread);
+@@ -567,22 +566,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+ 	 * Copy the floating point registers. There can be unused
+ 	 * registers see asm/hwcap.h for details.
+ 	 */
+-	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+-			      sizeof(hwstate->fpregs));
++	memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
++
+ 	/*
+ 	 * Copy the status and control register.
+ 	 */
+-	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
++	ufp->fpscr = hwstate->fpscr;
+ 
+ 	/*
+ 	 * Copy the exception registers.
+ 	 */
+-	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+-	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+-	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+-
+-	if (err)
+-		return -EFAULT;
++	ufp_exc->fpexc = hwstate->fpexc;
++	ufp_exc->fpinst = hwstate->fpinst;
++	ufp_exc->fpinst2 = hwstate->fpinst2;
+ 
+ 	/* Ensure that VFP is disabled. */
+ 	vfp_flush_hwstate(thread);
+diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
+index 2fa2942be221..470755cb7558 100644
+--- a/arch/riscv/include/asm/pgtable-bits.h
++++ b/arch/riscv/include/asm/pgtable-bits.h
+@@ -35,6 +35,12 @@
+ #define _PAGE_SPECIAL   _PAGE_SOFT
+ #define _PAGE_TABLE     _PAGE_PRESENT
+ 
++/*
++ * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
++ * distinguish them from swapped out pages
++ */
++#define _PAGE_PROT_NONE _PAGE_READ
++
+ #define _PAGE_PFN_SHIFT 10
+ 
+ /* Set of bits to preserve across pte_modify() */
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 16301966d65b..a8179a8c1491 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -44,7 +44,7 @@
+ /* Page protection bits */
+ #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
+ 
+-#define PAGE_NONE		__pgprot(0)
++#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
+ #define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
+ #define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
+ #define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
+@@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+ 
+ static inline int pmd_present(pmd_t pmd)
+ {
+-	return (pmd_val(pmd) & _PAGE_PRESENT);
++	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+ }
+ 
+ static inline int pmd_none(pmd_t pmd)
+@@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
+ 
+ static inline int pte_present(pte_t pte)
+ {
+-	return (pte_val(pte) & _PAGE_PRESENT);
++	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+ }
+ 
+ static inline int pte_none(pte_t pte)
+@@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+  *
+  * Format of swap PTE:
+  *	bit            0:	_PAGE_PRESENT (zero)
+- *	bit            1:	reserved for future use (zero)
++ *	bit            1:	_PAGE_PROT_NONE (zero)
+  *	bits      2 to 6:	swap type
+  *	bits 7 to XLEN-1:	swap offset
+  */
+diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
+index 9f82a7e34c64..9db7d0076375 100644
+--- a/arch/riscv/kernel/ptrace.c
++++ b/arch/riscv/kernel/ptrace.c
+@@ -120,6 +120,6 @@ void do_syscall_trace_exit(struct pt_regs *regs)
+ 
+ #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+ 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+-		trace_sys_exit(regs, regs->regs[0]);
++		trace_sys_exit(regs, regs_return_value(regs));
+ #endif
+ }
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index c8d08da5b308..c04a8813cff9 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2253,6 +2253,19 @@ void perf_check_microcode(void)
+ 		x86_pmu.check_microcode();
+ }
+ 
++static int x86_pmu_check_period(struct perf_event *event, u64 value)
++{
++	if (x86_pmu.check_period && x86_pmu.check_period(event, value))
++		return -EINVAL;
++
++	if (value && x86_pmu.limit_period) {
++		if (x86_pmu.limit_period(event, value) > value)
++			return -EINVAL;
++	}
++
++	return 0;
++}
++
+ static struct pmu pmu = {
+ 	.pmu_enable		= x86_pmu_enable,
+ 	.pmu_disable		= x86_pmu_disable,
+@@ -2277,6 +2290,7 @@ static struct pmu pmu = {
+ 	.event_idx		= x86_pmu_event_idx,
+ 	.sched_task		= x86_pmu_sched_task,
+ 	.task_ctx_size          = sizeof(struct x86_perf_task_context),
++	.check_period		= x86_pmu_check_period,
+ };
+ 
+ void arch_perf_update_userpage(struct perf_event *event,
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index d0b186264941..fbd7551a8d44 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3465,6 +3465,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
+ 	intel_pmu_lbr_sched_task(ctx, sched_in);
+ }
+ 
++static int intel_pmu_check_period(struct perf_event *event, u64 value)
++{
++	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
++}
++
+ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
+ 
+ PMU_FORMAT_ATTR(ldlat, "config1:0-15");
+@@ -3545,6 +3550,8 @@ static __initconst const struct x86_pmu core_pmu = {
+ 	.cpu_starting		= intel_pmu_cpu_starting,
+ 	.cpu_dying		= intel_pmu_cpu_dying,
+ 	.cpu_dead		= intel_pmu_cpu_dead,
++
++	.check_period		= intel_pmu_check_period,
+ };
+ 
+ static struct attribute *intel_pmu_attrs[];
+@@ -3589,6 +3596,8 @@ static __initconst const struct x86_pmu intel_pmu = {
+ 
+ 	.guest_get_msrs		= intel_guest_get_msrs,
+ 	.sched_task		= intel_pmu_sched_task,
++
++	.check_period		= intel_pmu_check_period,
+ };
+ 
+ static __init void intel_clovertown_quirk(void)
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index c5ad9cc61f4b..0ee3a441ad79 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -644,6 +644,11 @@ struct x86_pmu {
+ 	 * Intel host/guest support (KVM)
+ 	 */
+ 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
++
++	/*
++	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
++	 */
++	int (*check_period) (struct perf_event *event, u64 period);
+ };
+ 
+ struct x86_perf_task_context {
+@@ -855,7 +860,7 @@ static inline int amd_pmu_init(void)
+ 
+ #ifdef CONFIG_CPU_SUP_INTEL
+ 
+-static inline bool intel_pmu_has_bts(struct perf_event *event)
++static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
+ {
+ 	struct hw_perf_event *hwc = &event->hw;
+ 	unsigned int hw_event, bts_event;
+@@ -866,7 +871,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
+ 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+ 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+ 
+-	return hw_event == bts_event && hwc->sample_period == 1;
++	return hw_event == bts_event && period == 1;
++}
++
++static inline bool intel_pmu_has_bts(struct perf_event *event)
++{
++	struct hw_perf_event *hwc = &event->hw;
++
++	return intel_pmu_has_bts_period(event, hwc->sample_period);
+ }
+ 
+ int intel_pmu_save_and_restart(struct perf_event *event);
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index 8e02b30cf08e..3ebd77770f98 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
+ /*
+  * fill in the user structure for a core dump..
+  */
+-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
++static void fill_dump(struct pt_regs *regs, struct user32 *dump)
+ {
+ 	u32 fs, gs;
+ 	memset(dump, 0, sizeof(*dump));
+@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm)
+ 	fs = get_fs();
+ 	set_fs(KERNEL_DS);
+ 	has_dumped = 1;
++
++	fill_dump(cprm->regs, &dump);
++
+ 	strncpy(dump.u_comm, current->comm, sizeof(current->comm));
+ 	dump.u_ar0 = offsetof(struct user32, regs);
+ 	dump.signal = cprm->siginfo->si_signo;
+-	dump_thread32(cprm->regs, &dump);
+ 
+ 	/*
+ 	 * If the size of the dump file exceeds the rlimit, then see
+diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
+index e652a7cc6186..3f697a9e3f59 100644
+--- a/arch/x86/include/asm/uv/bios.h
++++ b/arch/x86/include/asm/uv/bios.h
+@@ -48,7 +48,8 @@ enum {
+ 	BIOS_STATUS_SUCCESS		=  0,
+ 	BIOS_STATUS_UNIMPLEMENTED	= -ENOSYS,
+ 	BIOS_STATUS_EINVAL		= -EINVAL,
+-	BIOS_STATUS_UNAVAIL		= -EBUSY
++	BIOS_STATUS_UNAVAIL		= -EBUSY,
++	BIOS_STATUS_ABORT		= -EINTR,
+ };
+ 
+ /* Address map parameters */
+@@ -167,4 +168,9 @@ extern long system_serial_number;
+ 
+ extern struct kobject *sgi_uv_kobj;	/* /sys/firmware/sgi_uv */
+ 
++/*
++ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
++ */
++extern struct semaphore __efi_uv_runtime_lock;
++
+ #endif /* _ASM_X86_UV_BIOS_H */
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 02ac8fa0cd6d..ee8f8d70b98a 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6256,6 +6256,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 	int asid, ret;
+ 
+ 	ret = -EBUSY;
++	if (unlikely(sev->active))
++		return ret;
++
+ 	asid = sev_asid_new();
+ 	if (asid < 0)
+ 		return ret;
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 0b2e13dd517b..f6da5c37d2e8 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2757,7 +2757,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ 	if (!entry_only)
+ 		j = find_msr(&m->host, msr);
+ 
+-	if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
++	if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
++		(j < 0 &&  m->host.nr == NR_AUTOLOAD_MSRS)) {
+ 		printk_once(KERN_WARNING "Not enough msr switch entries. "
+ 				"Can't add msr %x\n", msr);
+ 		return;
+@@ -3601,9 +3602,11 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
+ 	 * secondary cpu-based controls.  Do not include those that
+ 	 * depend on CPUID bits, they are added later by vmx_cpuid_update.
+ 	 */
+-	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+-		msrs->secondary_ctls_low,
+-		msrs->secondary_ctls_high);
++	if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
++		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
++		      msrs->secondary_ctls_low,
++		      msrs->secondary_ctls_high);
++
+ 	msrs->secondary_ctls_low = 0;
+ 	msrs->secondary_ctls_high &=
+ 		SECONDARY_EXEC_DESC |
+diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
+index 4a6a5a26c582..eb33432f2f24 100644
+--- a/arch/x86/platform/uv/bios_uv.c
++++ b/arch/x86/platform/uv/bios_uv.c
+@@ -29,7 +29,8 @@
+ 
+ struct uv_systab *uv_systab;
+ 
+-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
++static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
++			u64 a4, u64 a5)
+ {
+ 	struct uv_systab *tab = uv_systab;
+ 	s64 ret;
+@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+ 
+ 	return ret;
+ }
++
++s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
++{
++	s64 ret;
++
++	if (down_interruptible(&__efi_uv_runtime_lock))
++		return BIOS_STATUS_ABORT;
++
++	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
++	up(&__efi_uv_runtime_lock);
++
++	return ret;
++}
+ EXPORT_SYMBOL_GPL(uv_bios_call);
+ 
+ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+ 	unsigned long bios_flags;
+ 	s64 ret;
+ 
++	if (down_interruptible(&__efi_uv_runtime_lock))
++		return BIOS_STATUS_ABORT;
++
+ 	local_irq_save(bios_flags);
+-	ret = uv_bios_call(which, a1, a2, a3, a4, a5);
++	ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+ 	local_irq_restore(bios_flags);
+ 
++	up(&__efi_uv_runtime_lock);
++
+ 	return ret;
+ }
+ 
+diff --git a/block/blk-flush.c b/block/blk-flush.c
+index ce41f666de3e..76487948a27f 100644
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -424,7 +424,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
+ 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
+ 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+ 
+-	blk_mq_run_hw_queue(hctx, true);
++	blk_mq_sched_restart(hctx);
+ }
+ 
+ /**
+diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
+index 85167603b9c9..0da58f0bf7e5 100644
+--- a/drivers/acpi/numa.c
++++ b/drivers/acpi/numa.c
+@@ -147,9 +147,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
+ 		{
+ 			struct acpi_srat_mem_affinity *p =
+ 			    (struct acpi_srat_mem_affinity *)header;
+-			pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
+-				 (unsigned long)p->base_address,
+-				 (unsigned long)p->length,
++			pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
++				 (unsigned long long)p->base_address,
++				 (unsigned long long)p->length,
+ 				 p->proximity_domain,
+ 				 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
+ 				 "enabled" : "disabled",
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index f53fb41efb7b..b100260b6ed2 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1530,17 +1530,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
+ {
+ 	unsigned int ret_freq = 0;
+ 
+-	if (!cpufreq_driver->get)
++	if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
+ 		return ret_freq;
+ 
+ 	ret_freq = cpufreq_driver->get(policy->cpu);
+ 
+ 	/*
+-	 * Updating inactive policies is invalid, so avoid doing that.  Also
+-	 * if fast frequency switching is used with the given policy, the check
++	 * If fast frequency switching is used with the given policy, the check
+ 	 * against policy->cur is pointless, so skip it in that case too.
+ 	 */
+-	if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
++	if (policy->fast_switch_enabled)
+ 		return ret_freq;
+ 
+ 	if (ret_freq && policy->cur &&
+@@ -1569,10 +1568,7 @@ unsigned int cpufreq_get(unsigned int cpu)
+ 
+ 	if (policy) {
+ 		down_read(&policy->rwsem);
+-
+-		if (!policy_is_inactive(policy))
+-			ret_freq = __cpufreq_get(policy);
+-
++		ret_freq = __cpufreq_get(policy);
+ 		up_read(&policy->rwsem);
+ 
+ 		cpufreq_cpu_put(policy);
+diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
+index aa66cbf23512..b0aeffd4e269 100644
+--- a/drivers/firmware/efi/runtime-wrappers.c
++++ b/drivers/firmware/efi/runtime-wrappers.c
+@@ -172,6 +172,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
+  */
+ static DEFINE_SEMAPHORE(efi_runtime_lock);
+ 
++/*
++ * Expose the EFI runtime lock to the UV platform
++ */
++#ifdef CONFIG_X86_UV
++extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
++#endif
++
+ /*
+  * Calls the appropriate efi_runtime_service() with the appropriate
+  * arguments.
+diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
+index 995cf0b9e0b1..2d1dfa1e0745 100644
+--- a/drivers/gpio/gpio-mxc.c
++++ b/drivers/gpio/gpio-mxc.c
+@@ -17,6 +17,7 @@
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
++#include <linux/syscore_ops.h>
+ #include <linux/gpio/driver.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+@@ -550,33 +551,38 @@ static void mxc_gpio_restore_regs(struct mxc_gpio_port *port)
+ 	writel(port->gpio_saved_reg.dr, port->base + GPIO_DR);
+ }
+ 
+-static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
++static int mxc_gpio_syscore_suspend(void)
+ {
+-	struct platform_device *pdev = to_platform_device(dev);
+-	struct mxc_gpio_port *port = platform_get_drvdata(pdev);
++	struct mxc_gpio_port *port;
+ 
+-	mxc_gpio_save_regs(port);
+-	clk_disable_unprepare(port->clk);
++	/* walk through all ports */
++	list_for_each_entry(port, &mxc_gpio_ports, node) {
++		mxc_gpio_save_regs(port);
++		clk_disable_unprepare(port->clk);
++	}
+ 
+ 	return 0;
+ }
+ 
+-static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
++static void mxc_gpio_syscore_resume(void)
+ {
+-	struct platform_device *pdev = to_platform_device(dev);
+-	struct mxc_gpio_port *port = platform_get_drvdata(pdev);
++	struct mxc_gpio_port *port;
+ 	int ret;
+ 
+-	ret = clk_prepare_enable(port->clk);
+-	if (ret)
+-		return ret;
+-	mxc_gpio_restore_regs(port);
+-
+-	return 0;
++	/* walk through all ports */
++	list_for_each_entry(port, &mxc_gpio_ports, node) {
++		ret = clk_prepare_enable(port->clk);
++		if (ret) {
++			pr_err("mxc: failed to enable gpio clock %d\n", ret);
++			return;
++		}
++		mxc_gpio_restore_regs(port);
++	}
+ }
+ 
+-static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
+-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
++static struct syscore_ops mxc_gpio_syscore_ops = {
++	.suspend = mxc_gpio_syscore_suspend,
++	.resume = mxc_gpio_syscore_resume,
+ };
+ 
+ static struct platform_driver mxc_gpio_driver = {
+@@ -584,7 +590,6 @@ static struct platform_driver mxc_gpio_driver = {
+ 		.name	= "gpio-mxc",
+ 		.of_match_table = mxc_gpio_dt_ids,
+ 		.suppress_bind_attrs = true,
+-		.pm = &mxc_gpio_dev_pm_ops,
+ 	},
+ 	.probe		= mxc_gpio_probe,
+ 	.id_table	= mxc_gpio_devtype,
+@@ -592,6 +597,8 @@ static struct platform_driver mxc_gpio_driver = {
+ 
+ static int __init gpio_mxc_init(void)
+ {
++	register_syscore_ops(&mxc_gpio_syscore_ops);
++
+ 	return platform_driver_register(&mxc_gpio_driver);
+ }
+ subsys_initcall(gpio_mxc_init);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 39bf2ce548c6..7f6af421d3e9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1653,8 +1653,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
+ 
+ 	amdgpu_amdkfd_device_init(adev);
+ 
+-	if (amdgpu_sriov_vf(adev))
++	if (amdgpu_sriov_vf(adev)) {
++		amdgpu_virt_init_data_exchange(adev);
+ 		amdgpu_virt_release_full_gpu(adev, true);
++	}
+ 
+ 	return 0;
+ }
+@@ -2555,9 +2557,6 @@ fence_driver_init:
+ 		goto failed;
+ 	}
+ 
+-	if (amdgpu_sriov_vf(adev))
+-		amdgpu_virt_init_data_exchange(adev);
+-
+ 	amdgpu_fbdev_init(adev);
+ 
+ 	r = amdgpu_pm_sysfs_init(adev);
+@@ -3269,6 +3268,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ 	r = amdgpu_ib_ring_tests(adev);
+ 
+ error:
++	amdgpu_virt_init_data_exchange(adev);
+ 	amdgpu_virt_release_full_gpu(adev, true);
+ 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+ 		atomic_inc(&adev->vram_lost_counter);
+diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+index 078f70faedcb..d06332be59d3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
++++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
+ 			return r;
+ 		}
+ 		/* Retrieve checksum from mailbox2 */
+-		if (req == IDH_REQ_GPU_INIT_ACCESS) {
++		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
+ 			adev->virt.fw_reserve.checksum_key =
+ 				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ 					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 7c3b634d8d5f..de5a689e1925 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -71,7 +71,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
+-	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+@@ -89,6 +88,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
+ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
++	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
+ };
+@@ -96,6 +96,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
+ static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
++	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
+ 	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
+ };
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 8e28e738cb52..391547358756 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -98,6 +98,8 @@
+ #define DP0_STARTVAL		0x064c
+ #define DP0_ACTIVEVAL		0x0650
+ #define DP0_SYNCVAL		0x0654
++#define SYNCVAL_HS_POL_ACTIVE_LOW	(1 << 15)
++#define SYNCVAL_VS_POL_ACTIVE_LOW	(1 << 31)
+ #define DP0_MISC		0x0658
+ #define TU_SIZE_RECOMMENDED		(63) /* LSCLK cycles per TU */
+ #define BPC_6				(0 << 5)
+@@ -142,6 +144,8 @@
+ #define DP0_LTLOOPCTRL		0x06d8
+ #define DP0_SNKLTCTRL		0x06e4
+ 
++#define DP1_SRCCTRL		0x07a0
++
+ /* PHY */
+ #define DP_PHY_CTRL		0x0800
+ #define DP_PHY_RST			BIT(28)  /* DP PHY Global Soft Reset */
+@@ -150,6 +154,7 @@
+ #define PHY_M1_RST			BIT(12)  /* Reset PHY1 Main Channel */
+ #define PHY_RDY				BIT(16)  /* PHY Main Channels Ready */
+ #define PHY_M0_RST			BIT(8)   /* Reset PHY0 Main Channel */
++#define PHY_2LANE			BIT(2)   /* PHY Enable 2 lanes */
+ #define PHY_A0_EN			BIT(1)   /* PHY Aux Channel0 Enable */
+ #define PHY_M0_EN			BIT(0)   /* PHY Main Channel0 Enable */
+ 
+@@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
+ 	unsigned long rate;
+ 	u32 value;
+ 	int ret;
++	u32 dp_phy_ctrl;
+ 
+ 	rate = clk_get_rate(tc->refclk);
+ 	switch (rate) {
+@@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
+ 	value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ 	tc_write(SYS_PLLPARAM, value);
+ 
+-	tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
++	dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
++	if (tc->link.base.num_lanes == 2)
++		dp_phy_ctrl |= PHY_2LANE;
++	tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ 
+ 	/*
+ 	 * Initially PLLs are in bypass. Force PLL parameter update,
+@@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+ 
+ 	tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
+ 
+-	tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
++	tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
++		 ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
++		 ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
+ 
+ 	tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+ 		 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+@@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
+ 	if (!tc->mode)
+ 		return -EINVAL;
+ 
+-	/* from excel file - DP0_SrcCtrl */
+-	tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
+-		 DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
+-		 DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
+-	/* from excel file - DP1_SrcCtrl */
+-	tc_write(0x07a0, 0x00003083);
++	tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
++	/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
++	tc_write(DP1_SRCCTRL,
++		 (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
++		 ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ 
+ 	rate = clk_get_rate(tc->refclk);
+ 	switch (rate) {
+@@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
+ 	}
+ 	value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ 	tc_write(SYS_PLLPARAM, value);
++
+ 	/* Setup Main Link */
+-	dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN |  PHY_M0_EN;
++	dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
++	if (tc->link.base.num_lanes == 2)
++		dp_phy_ctrl |= PHY_2LANE;
+ 	tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ 	msleep(100);
+ 
+@@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
+ static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
+ 				   struct drm_display_mode *mode)
+ {
++	struct tc_data *tc = connector_to_tc(connector);
++	u32 req, avail;
++	u32 bits_per_pixel = 24;
++
+ 	/* DPI interface clock limitation: upto 154 MHz */
+ 	if (mode->clock > 154000)
+ 		return MODE_CLOCK_HIGH;
+ 
++	req = mode->clock * bits_per_pixel / 8;
++	avail = tc->link.base.num_lanes * tc->link.base.rate;
++
++	if (req > avail)
++		return MODE_BAD;
++
+ 	return MODE_OK;
+ }
+ 
+@@ -1195,6 +1218,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
+ 
+ 	drm_display_info_set_bus_formats(&tc->connector.display_info,
+ 					 &bus_format, 1);
++	tc->connector.display_info.bus_flags =
++		DRM_BUS_FLAG_DE_HIGH |
++		DRM_BUS_FLAG_PIXDATA_NEGEDGE |
++		DRM_BUS_FLAG_SYNC_NEGEDGE;
+ 	drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
+index fe6bfaf8b53f..086f2adc541b 100644
+--- a/drivers/gpu/drm/drm_lease.c
++++ b/drivers/gpu/drm/drm_lease.c
+@@ -521,7 +521,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
+ 
+ 	object_count = cl->object_count;
+ 
+-	object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
++	object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
++			array_size(object_count, sizeof(__u32)));
+ 	if (IS_ERR(object_ids))
+ 		return PTR_ERR(object_ids);
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 47cc932e23a7..280c851714e6 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1821,6 +1821,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ 	return 0;
+ }
+ 
++static inline bool
++__vma_matches(struct vm_area_struct *vma, struct file *filp,
++	      unsigned long addr, unsigned long size)
++{
++	if (vma->vm_file != filp)
++		return false;
++
++	return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
++}
++
+ /**
+  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+  *			 it is mapped to.
+@@ -1879,7 +1889,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ 			return -EINTR;
+ 		}
+ 		vma = find_vma(mm, addr);
+-		if (vma)
++		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
+ 			vma->vm_page_prot =
+ 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ 		else
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 8fc61e96754f..50d56498de77 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -209,6 +209,16 @@ struct intel_fbdev {
+ 	unsigned long vma_flags;
+ 	async_cookie_t cookie;
+ 	int preferred_bpp;
++
++	/* Whether or not fbdev hpd processing is temporarily suspended */
++	bool hpd_suspended : 1;
++	/* Set when a hotplug was received while HPD processing was
++	 * suspended
++	 */
++	bool hpd_waiting : 1;
++
++	/* Protects hpd_suspended */
++	struct mutex hpd_lock;
+ };
+ 
+ struct intel_encoder {
+diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
+index fb2f9fce34cd..2d6506c08bf7 100644
+--- a/drivers/gpu/drm/i915/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/intel_fbdev.c
+@@ -677,6 +677,7 @@ int intel_fbdev_init(struct drm_device *dev)
+ 	if (ifbdev == NULL)
+ 		return -ENOMEM;
+ 
++	mutex_init(&ifbdev->hpd_lock);
+ 	drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+ 
+ 	if (!intel_fbdev_init_bios(dev, ifbdev))
+@@ -750,6 +751,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+ 	intel_fbdev_destroy(ifbdev);
+ }
+ 
++/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
++ * processing, fbdev will perform a full connector reprobe if a hotplug event
++ * was received while HPD was suspended.
++ */
++static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
++{
++	bool send_hpd = false;
++
++	mutex_lock(&ifbdev->hpd_lock);
++	ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
++	send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
++	ifbdev->hpd_waiting = false;
++	mutex_unlock(&ifbdev->hpd_lock);
++
++	if (send_hpd) {
++		DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
++		drm_fb_helper_hotplug_event(&ifbdev->helper);
++	}
++}
++
+ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+ {
+ 	struct drm_i915_private *dev_priv = to_i915(dev);
+@@ -771,6 +792,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
+ 		 */
+ 		if (state != FBINFO_STATE_RUNNING)
+ 			flush_work(&dev_priv->fbdev_suspend_work);
++
+ 		console_lock();
+ 	} else {
+ 		/*
+@@ -798,17 +820,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
+ 
+ 	drm_fb_helper_set_suspend(&ifbdev->helper, state);
+ 	console_unlock();
++
++	intel_fbdev_hpd_set_suspend(ifbdev, state);
+ }
+ 
+ void intel_fbdev_output_poll_changed(struct drm_device *dev)
+ {
+ 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
++	bool send_hpd;
+ 
+ 	if (!ifbdev)
+ 		return;
+ 
+ 	intel_fbdev_sync(ifbdev);
+-	if (ifbdev->vma || ifbdev->helper.deferred_setup)
++
++	mutex_lock(&ifbdev->hpd_lock);
++	send_hpd = !ifbdev->hpd_suspended;
++	ifbdev->hpd_waiting = true;
++	mutex_unlock(&ifbdev->hpd_lock);
++
++	if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
+ 		drm_fb_helper_hotplug_event(&ifbdev->helper);
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+index 816ccaedfc73..8675613e142b 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+@@ -22,6 +22,7 @@
+ #include <engine/falcon.h>
+ 
+ #include <core/gpuobj.h>
++#include <subdev/mc.h>
+ #include <subdev/timer.h>
+ #include <engine/fifo.h>
+ 
+@@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
+ 		}
+ 	}
+ 
+-	nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+-	nvkm_wr32(device, base + 0x014, 0xffffffff);
++	if (nvkm_mc_enabled(device, engine->subdev.index)) {
++		nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
++		nvkm_wr32(device, base + 0x014, 0xffffffff);
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+index 3695cde669f8..07914e36939e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+@@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
+ 			duty = nvkm_therm_update_linear(therm);
+ 			break;
+ 		case NVBIOS_THERM_FAN_OTHER:
+-			if (therm->cstate)
++			if (therm->cstate) {
+ 				duty = therm->cstate;
+-			else
++				poll = false;
++			} else {
+ 				duty = nvkm_therm_update_linear_fallback(therm);
+-			poll = false;
++			}
+ 			break;
+ 		}
+ 		immd = false;
+diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
+index 875fca662ac0..1ea2dd35bca9 100644
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -1,10 +1,4 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
++// SPDX-License-Identifier: GPL-2.0+
+ 
+ #include "vkms_drv.h"
+ #include <drm/drm_atomic_helper.h>
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
+index 6e728b825259..b1201c18d3eb 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.c
++++ b/drivers/gpu/drm/vkms/vkms_drv.c
+@@ -1,9 +1,4 @@
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
++// SPDX-License-Identifier: GPL-2.0+
+ 
+ #include <linux/module.h>
+ #include <drm/drm_gem.h>
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
+index 07be29f2dc44..e018752d57bb 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.h
++++ b/drivers/gpu/drm/vkms/vkms_drv.h
+@@ -1,3 +1,5 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++
+ #ifndef _VKMS_DRV_H_
+ #define _VKMS_DRV_H_
+ 
+diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
+index c7e38368602b..ca4a74e04977 100644
+--- a/drivers/gpu/drm/vkms/vkms_gem.c
++++ b/drivers/gpu/drm/vkms/vkms_gem.c
+@@ -1,10 +1,4 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
++// SPDX-License-Identifier: GPL-2.0+
+ 
+ #include <linux/shmem_fs.h>
+ 
+diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
+index 901012cb1af1..5697148e0b73 100644
+--- a/drivers/gpu/drm/vkms/vkms_output.c
++++ b/drivers/gpu/drm/vkms/vkms_output.c
+@@ -1,10 +1,4 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
++// SPDX-License-Identifier: GPL-2.0+
+ 
+ #include "vkms_drv.h"
+ #include <drm/drm_crtc_helper.h>
+diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
+index 9f75b1e2c1c4..ce043b721e0c 100644
+--- a/drivers/gpu/drm/vkms/vkms_plane.c
++++ b/drivers/gpu/drm/vkms/vkms_plane.c
+@@ -1,10 +1,4 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
++// SPDX-License-Identifier: GPL-2.0+
+ 
+ #include "vkms_drv.h"
+ #include <drm/drm_plane_helper.h>
+diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
+index 1efcfdf9f8a8..dd9dd4e40827 100644
+--- a/drivers/input/misc/bma150.c
++++ b/drivers/input/misc/bma150.c
+@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
+ 	idev->close = bma150_irq_close;
+ 	input_set_drvdata(idev, bma150);
+ 
++	bma150->input = idev;
++
+ 	error = input_register_device(idev);
+ 	if (error) {
+ 		input_free_device(idev);
+ 		return error;
+ 	}
+ 
+-	bma150->input = idev;
+ 	return 0;
+ }
+ 
+@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
+ 
+ 	bma150_init_input_device(bma150, ipoll_dev->input);
+ 
++	bma150->input_polled = ipoll_dev;
++	bma150->input = ipoll_dev->input;
++
+ 	error = input_register_polled_device(ipoll_dev);
+ 	if (error) {
+ 		input_free_polled_device(ipoll_dev);
+ 		return error;
+ 	}
+ 
+-	bma150->input_polled = ipoll_dev;
+-	bma150->input = ipoll_dev->input;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index f322a1768fbb..225ae6980182 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
+ static const struct acpi_device_id elan_acpi_id[] = {
+ 	{ "ELAN0000", 0 },
+ 	{ "ELAN0100", 0 },
+-	{ "ELAN0501", 0 },
+ 	{ "ELAN0600", 0 },
+ 	{ "ELAN0602", 0 },
+ 	{ "ELAN0605", 0 },
+@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ 	{ "ELAN060C", 0 },
+ 	{ "ELAN0611", 0 },
+ 	{ "ELAN0612", 0 },
++	{ "ELAN0617", 0 },
+ 	{ "ELAN0618", 0 },
+ 	{ "ELAN061C", 0 },
+ 	{ "ELAN061D", 0 },
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 9fe075c137dc..a7f8b1614559 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
+  * Asus UX31               0x361f00        20, 15, 0e      clickpad
+  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
+  * Avatar AVIU-145A2       0x361f00        ?               clickpad
++ * Fujitsu CELSIUS H760    0x570f02        40, 14, 0c      3 hw buttons (**)
++ * Fujitsu CELSIUS H780    0x5d0f02        41, 16, 0d      3 hw buttons (**)
+  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+  * Fujitsu LIFEBOOK E546   0x470f00        50, 12, 09      2 hw buttons
+  * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
+@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
+ 		},
+ 	},
++	{
++		/* Fujitsu H780 also has a middle button */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
++		},
++	},
+ #endif
+ 	{ }
+ };
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 5921ecc670c1..f3dcc7640319 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
+ 	if (IS_ERR(bip))
+ 		return PTR_ERR(bip);
+ 
+-	tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
++	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
+ 
+ 	bip->bip_iter.bi_size = tag_len;
+ 	bip->bip_iter.bi_sector = io->cc->start + io->sector;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index c30a7850b2da..cd4220ee7004 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -257,6 +257,7 @@ struct pool {
+ 
+ 	spinlock_t lock;
+ 	struct bio_list deferred_flush_bios;
++	struct bio_list deferred_flush_completions;
+ 	struct list_head prepared_mappings;
+ 	struct list_head prepared_discards;
+ 	struct list_head prepared_discards_pt2;
+@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
+ 	mempool_free(m, &m->tc->pool->mapping_pool);
+ }
+ 
++static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
++{
++	struct pool *pool = tc->pool;
++	unsigned long flags;
++
++	/*
++	 * If the bio has the REQ_FUA flag set we must commit the metadata
++	 * before signaling its completion.
++	 */
++	if (!bio_triggers_commit(tc, bio)) {
++		bio_endio(bio);
++		return;
++	}
++
++	/*
++	 * Complete bio with an error if earlier I/O caused changes to the
++	 * metadata that can't be committed, e.g, due to I/O errors on the
++	 * metadata device.
++	 */
++	if (dm_thin_aborted_changes(tc->td)) {
++		bio_io_error(bio);
++		return;
++	}
++
++	/*
++	 * Batch together any bios that trigger commits and then issue a
++	 * single commit for them in process_deferred_bios().
++	 */
++	spin_lock_irqsave(&pool->lock, flags);
++	bio_list_add(&pool->deferred_flush_completions, bio);
++	spin_unlock_irqrestore(&pool->lock, flags);
++}
++
+ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
+ {
+ 	struct thin_c *tc = m->tc;
+@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
+ 	 */
+ 	if (bio) {
+ 		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
+-		bio_endio(bio);
++		complete_overwrite_bio(tc, bio);
+ 	} else {
+ 		inc_all_io_entry(tc->pool, m->cell->holder);
+ 		remap_and_issue(tc, m->cell->holder, m->data_block);
+@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
+ {
+ 	unsigned long flags;
+ 	struct bio *bio;
+-	struct bio_list bios;
++	struct bio_list bios, bio_completions;
+ 	struct thin_c *tc;
+ 
+ 	tc = get_first_thin(pool);
+@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
+ 	}
+ 
+ 	/*
+-	 * If there are any deferred flush bios, we must commit
+-	 * the metadata before issuing them.
++	 * If there are any deferred flush bios, we must commit the metadata
++	 * before issuing them or signaling their completion.
+ 	 */
+ 	bio_list_init(&bios);
++	bio_list_init(&bio_completions);
++
+ 	spin_lock_irqsave(&pool->lock, flags);
+ 	bio_list_merge(&bios, &pool->deferred_flush_bios);
+ 	bio_list_init(&pool->deferred_flush_bios);
++
++	bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
++	bio_list_init(&pool->deferred_flush_completions);
+ 	spin_unlock_irqrestore(&pool->lock, flags);
+ 
+-	if (bio_list_empty(&bios) &&
++	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
+ 	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
+ 		return;
+ 
+ 	if (commit(pool)) {
++		bio_list_merge(&bios, &bio_completions);
++
+ 		while ((bio = bio_list_pop(&bios)))
+ 			bio_io_error(bio);
+ 		return;
+ 	}
+ 	pool->last_commit_jiffies = jiffies;
+ 
++	while ((bio = bio_list_pop(&bio_completions)))
++		bio_endio(bio);
++
+ 	while ((bio = bio_list_pop(&bios)))
+ 		generic_make_request(bio);
+ }
+@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
+ 	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
+ 	spin_lock_init(&pool->lock);
+ 	bio_list_init(&pool->deferred_flush_bios);
++	bio_list_init(&pool->deferred_flush_completions);
+ 	INIT_LIST_HEAD(&pool->prepared_mappings);
+ 	INIT_LIST_HEAD(&pool->prepared_discards);
+ 	INIT_LIST_HEAD(&pool->prepared_discards_pt2);
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 1d54109071cc..fa47249fa3e4 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
+ 		reschedule_retry(r1_bio);
+ }
+ 
++static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
++{
++	sector_t sync_blocks = 0;
++	sector_t s = r1_bio->sector;
++	long sectors_to_go = r1_bio->sectors;
++
++	/* make sure these bits don't get cleared. */
++	do {
++		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
++		s += sync_blocks;
++		sectors_to_go -= sync_blocks;
++	} while (sectors_to_go > 0);
++}
++
+ static void end_sync_write(struct bio *bio)
+ {
+ 	int uptodate = !bio->bi_status;
+@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
+ 	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
+ 
+ 	if (!uptodate) {
+-		sector_t sync_blocks = 0;
+-		sector_t s = r1_bio->sector;
+-		long sectors_to_go = r1_bio->sectors;
+-		/* make sure these bits doesn't get cleared. */
+-		do {
+-			md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
+-			s += sync_blocks;
+-			sectors_to_go -= sync_blocks;
+-		} while (sectors_to_go > 0);
++		abort_sync_write(mddev, r1_bio);
+ 		set_bit(WriteErrorSeen, &rdev->flags);
+ 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ 			set_bit(MD_RECOVERY_NEEDED, &
+@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
+ 		     (i == r1_bio->read_disk ||
+ 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
+ 			continue;
+-		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
++		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
++			abort_sync_write(mddev, r1_bio);
+ 			continue;
++		}
+ 
+ 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+ 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
+index 68a1ac929917..d382b13c27dd 100644
+--- a/drivers/misc/eeprom/Kconfig
++++ b/drivers/misc/eeprom/Kconfig
+@@ -13,7 +13,7 @@ config EEPROM_AT24
+ 	  ones like at24c64, 24lc02 or fm24c04:
+ 
+ 	     24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
+-	     24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
++	     24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
+ 
+ 	  Unless you like data loss puzzles, always be sure that any chip
+ 	  you configure as a 24c32 (32 kbit) or larger is NOT really a
+diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
+index 7e50e1d6f58c..94836fcbe721 100644
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -173,6 +173,7 @@ AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16);
+ AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16);
+ AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16);
+ AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16);
++AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16);
+ /* identical to 24c08 ? */
+ AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0);
+ 
+@@ -199,6 +200,7 @@ static const struct i2c_device_id at24_ids[] = {
+ 	{ "24c256",	(kernel_ulong_t)&at24_data_24c256 },
+ 	{ "24c512",	(kernel_ulong_t)&at24_data_24c512 },
+ 	{ "24c1024",	(kernel_ulong_t)&at24_data_24c1024 },
++	{ "24c2048",    (kernel_ulong_t)&at24_data_24c2048 },
+ 	{ "at24",	0 },
+ 	{ /* END OF LIST */ }
+ };
+@@ -227,6 +229,7 @@ static const struct of_device_id at24_of_match[] = {
+ 	{ .compatible = "atmel,24c256",		.data = &at24_data_24c256 },
+ 	{ .compatible = "atmel,24c512",		.data = &at24_data_24c512 },
+ 	{ .compatible = "atmel,24c1024",	.data = &at24_data_24c1024 },
++	{ .compatible = "atmel,24c2048",	.data = &at24_data_24c2048 },
+ 	{ /* END OF LIST */ },
+ };
+ MODULE_DEVICE_TABLE(of, at24_of_match);
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index f6755b86eba2..eee004fb3c3e 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2114,7 +2114,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
+ 		if (waiting)
+ 			wake_up(&mq->wait);
+ 		else
+-			kblockd_schedule_work(&mq->complete_work);
++			queue_work(mq->card->complete_wq, &mq->complete_work);
+ 
+ 		return;
+ 	}
+@@ -2928,6 +2928,13 @@ static int mmc_blk_probe(struct mmc_card *card)
+ 
+ 	mmc_fixup_device(card, mmc_blk_fixups);
+ 
++	card->complete_wq = alloc_workqueue("mmc_complete",
++					WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
++	if (unlikely(!card->complete_wq)) {
++		pr_err("Failed to create mmc completion workqueue");
++		return -ENOMEM;
++	}
++
+ 	md = mmc_blk_alloc(card);
+ 	if (IS_ERR(md))
+ 		return PTR_ERR(md);
+@@ -2991,6 +2998,7 @@ static void mmc_blk_remove(struct mmc_card *card)
+ 	pm_runtime_put_noidle(&card->dev);
+ 	mmc_blk_remove_req(md);
+ 	dev_set_drvdata(&card->dev, NULL);
++	destroy_workqueue(card->complete_wq);
+ }
+ 
+ static int _mmc_blk_suspend(struct mmc_card *card)
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index 568349e1fbc2..c4584184525f 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -1394,6 +1394,21 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto error_free_dma;
+ 
++	/*
++	 * If we don't support delay chains in the SoC, we can't use any
++	 * of the higher speed modes. Mask them out in case the device
++	 * tree specifies the properties for them, which gets added to
++	 * the caps by mmc_of_parse() above.
++	 */
++	if (!(host->cfg->clk_delays || host->use_new_timings)) {
++		mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
++			       MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
++		mmc->caps2 &= ~MMC_CAP2_HS200;
++	}
++
++	/* TODO: This driver doesn't support HS400 mode yet */
++	mmc->caps2 &= ~MMC_CAP2_HS400;
++
+ 	ret = sunxi_mmc_init_host(host);
+ 	if (ret)
+ 		goto error_free_dma;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index e5bddae16ed4..e0d2b7473901 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2095,7 +2095,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
+ 
+ 	/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
+ 	off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
+-			"nqn.2014.08.org.nvmexpress:%4x%4x",
++			"nqn.2014.08.org.nvmexpress:%04x%04x",
+ 			le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
+ 	memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
+ 	off += sizeof(id->sn);
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index c27af277e14e..815509dbed84 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -556,6 +556,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ 	return 0;
+ out_free_ana_log_buf:
+ 	kfree(ctrl->ana_log_buf);
++	ctrl->ana_log_buf = NULL;
+ out:
+ 	return error;
+ }
+@@ -563,5 +564,6 @@ out:
+ void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
+ {
+ 	kfree(ctrl->ana_log_buf);
++	ctrl->ana_log_buf = NULL;
+ }
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index d668682f91df..f46313f441ec 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -908,9 +908,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
+ 
+ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
+ {
+-	if (++nvmeq->cq_head == nvmeq->q_depth) {
++	if (nvmeq->cq_head == nvmeq->q_depth - 1) {
+ 		nvmeq->cq_head = 0;
+ 		nvmeq->cq_phase = !nvmeq->cq_phase;
++	} else {
++		nvmeq->cq_head++;
+ 	}
+ }
+ 
+@@ -1727,8 +1729,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
+ 		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
+ 		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
+ 
+-		dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
+-				le64_to_cpu(desc->addr));
++		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
++			       le64_to_cpu(desc->addr),
++			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
+ 	}
+ 
+ 	kfree(dev->host_mem_desc_bufs);
+@@ -1794,8 +1797,9 @@ out_free_bufs:
+ 	while (--i >= 0) {
+ 		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
+ 
+-		dma_free_coherent(dev->dev, size, bufs[i],
+-				le64_to_cpu(descs[i].addr));
++		dma_free_attrs(dev->dev, size, bufs[i],
++			       le64_to_cpu(descs[i].addr),
++			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
+ 	}
+ 
+ 	kfree(bufs);
+diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
+index f039266b275d..a57b969b8973 100644
+--- a/drivers/s390/crypto/ap_bus.c
++++ b/drivers/s390/crypto/ap_bus.c
+@@ -249,7 +249,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
+ static inline int ap_test_config_card_id(unsigned int id)
+ {
+ 	if (!ap_configuration)	/* QCI not supported */
+-		return 1;
++		/* only ids 0...3F may be probed */
++		return id < 0x40 ? 1 : 0;
+ 	return ap_test_config(ap_configuration->apm, id);
+ }
+ 
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 0a27917263aa..58b78702c6c9 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2970,9 +2970,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
+ 	if (rot == 1) {
+ 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+-	} else {
+-		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+-		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+ 	}
+ 
+ 	if (sdkp->device->type == TYPE_ZBC) {
+@@ -3109,6 +3106,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	if (sdkp->media_present) {
+ 		sd_read_capacity(sdkp, buffer);
+ 
++		/*
++		 * set the default to rotational.  All non-rotational devices
++		 * support the block characteristics VPD page, which will
++		 * cause this to be updated correctly and any device which
++		 * doesn't support it should be treated as rotational.
++		 */
++		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
++		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
++
+ 		if (scsi_device_supports_vpd(sdp)) {
+ 			sd_read_block_provisioning(sdkp);
+ 			sd_read_block_limits(sdkp);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 7b637fc27990..23db881daab5 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1128,6 +1128,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
+ 		return -EINVAL;
+ 	}
+ 
++	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
++		     PAGE_SIZE);
++	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
++			PAGE_SIZE);
+ 	max_num = (max_buf - sizeof(struct smb_hdr)) /
+ 						sizeof(LOCKING_ANDX_RANGE);
+ 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
+@@ -1466,6 +1470,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
+ 	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
+ 		return -EINVAL;
+ 
++	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
++		     PAGE_SIZE);
++	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
++			PAGE_SIZE);
+ 	max_num = (max_buf - sizeof(struct smb_hdr)) /
+ 						sizeof(LOCKING_ANDX_RANGE);
+ 	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 2fc3d31967ee..b204e84b87fb 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -128,6 +128,8 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
+ 	if (max_buf < sizeof(struct smb2_lock_element))
+ 		return -EINVAL;
+ 
++	BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
++	max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
+ 	max_num = max_buf / sizeof(struct smb2_lock_element);
+ 	buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
+ 	if (!buf)
+@@ -264,6 +266,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
+ 		return -EINVAL;
+ 	}
+ 
++	BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
++	max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
+ 	max_num = max_buf / sizeof(struct smb2_lock_element);
+ 	buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
+ 	if (!buf) {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 8a01e89ff827..1e5a1171212f 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2814,9 +2814,10 @@ smb2_echo_callback(struct mid_q_entry *mid)
+ {
+ 	struct TCP_Server_Info *server = mid->callback_data;
+ 	struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
+-	unsigned int credits_received = 1;
++	unsigned int credits_received = 0;
+ 
+-	if (mid->mid_state == MID_RESPONSE_RECEIVED)
++	if (mid->mid_state == MID_RESPONSE_RECEIVED
++	    || mid->mid_state == MID_RESPONSE_MALFORMED)
+ 		credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+ 
+ 	DeleteMidQEntry(mid);
+@@ -3073,7 +3074,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 	struct smb2_sync_hdr *shdr =
+ 				(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
+-	unsigned int credits_received = 1;
++	unsigned int credits_received = 0;
+ 	struct smb_rqst rqst = { .rq_iov = rdata->iov,
+ 				 .rq_nvec = 2,
+ 				 .rq_pages = rdata->pages,
+@@ -3112,6 +3113,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
+ 		task_io_account_read(rdata->got_bytes);
+ 		cifs_stats_bytes_read(tcon, rdata->got_bytes);
+ 		break;
++	case MID_RESPONSE_MALFORMED:
++		credits_received = le16_to_cpu(shdr->CreditRequest);
++		/* fall through */
+ 	default:
+ 		if (rdata->result != -ENODATA)
+ 			rdata->result = -EIO;
+@@ -3305,7 +3309,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
+ 	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+ 	unsigned int written;
+ 	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
+-	unsigned int credits_received = 1;
++	unsigned int credits_received = 0;
+ 
+ 	switch (mid->mid_state) {
+ 	case MID_RESPONSE_RECEIVED:
+@@ -3333,6 +3337,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
+ 	case MID_RETRY_NEEDED:
+ 		wdata->result = -EAGAIN;
+ 		break;
++	case MID_RESPONSE_MALFORMED:
++		credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
++		/* fall through */
+ 	default:
+ 		wdata->result = -EIO;
+ 		break;
+diff --git a/fs/inode.c b/fs/inode.c
+index 65ae154df760..42f6d25f32a5 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
+ 		return LRU_REMOVED;
+ 	}
+ 
+-	/*
+-	 * Recently referenced inodes and inodes with many attached pages
+-	 * get one more pass.
+-	 */
+-	if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
++	/* recently referenced inodes get one more pass */
++	if (inode->i_state & I_REFERENCED) {
+ 		inode->i_state &= ~I_REFERENCED;
+ 		spin_unlock(&inode->i_lock);
+ 		return LRU_ROTATE;
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index 899174c7a8ae..39b835d7c445 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net)
+ 	retval = nfsd_idmap_init(net);
+ 	if (retval)
+ 		goto out_idmap_error;
+-	nn->nfsd4_lease = 45;	/* default lease time */
+-	nn->nfsd4_grace = 45;
++	nn->nfsd4_lease = 90;	/* default lease time */
++	nn->nfsd4_grace = 90;
+ 	nn->somebody_reclaimed = false;
+ 	nn->clverifier_counter = prandom_u32();
+ 	nn->clientid_counter = prandom_u32();
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index a027473561c6..d76fe166f6ce 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -423,7 +423,7 @@ struct mem_size_stats {
+ };
+ 
+ static void smaps_account(struct mem_size_stats *mss, struct page *page,
+-		bool compound, bool young, bool dirty)
++		bool compound, bool young, bool dirty, bool locked)
+ {
+ 	int i, nr = compound ? 1 << compound_order(page) : 1;
+ 	unsigned long size = nr * PAGE_SIZE;
+@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
+ 		else
+ 			mss->private_clean += size;
+ 		mss->pss += (u64)size << PSS_SHIFT;
++		if (locked)
++			mss->pss_locked += (u64)size << PSS_SHIFT;
+ 		return;
+ 	}
+ 
+ 	for (i = 0; i < nr; i++, page++) {
+ 		int mapcount = page_mapcount(page);
++		unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
+ 
+ 		if (mapcount >= 2) {
+ 			if (dirty || PageDirty(page))
+ 				mss->shared_dirty += PAGE_SIZE;
+ 			else
+ 				mss->shared_clean += PAGE_SIZE;
+-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
++			mss->pss += pss / mapcount;
++			if (locked)
++				mss->pss_locked += pss / mapcount;
+ 		} else {
+ 			if (dirty || PageDirty(page))
+ 				mss->private_dirty += PAGE_SIZE;
+ 			else
+ 				mss->private_clean += PAGE_SIZE;
+-			mss->pss += PAGE_SIZE << PSS_SHIFT;
++			mss->pss += pss;
++			if (locked)
++				mss->pss_locked += pss;
+ 		}
+ 	}
+ }
+@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ {
+ 	struct mem_size_stats *mss = walk->private;
+ 	struct vm_area_struct *vma = walk->vma;
++	bool locked = !!(vma->vm_flags & VM_LOCKED);
+ 	struct page *page = NULL;
+ 
+ 	if (pte_present(*pte)) {
+@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ 	if (!page)
+ 		return;
+ 
+-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
++	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
+ }
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+ {
+ 	struct mem_size_stats *mss = walk->private;
+ 	struct vm_area_struct *vma = walk->vma;
++	bool locked = !!(vma->vm_flags & VM_LOCKED);
+ 	struct page *page;
+ 
+ 	/* FOLL_DUMP will return -EFAULT on huge zero page */
+@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+ 		/* pass */;
+ 	else
+ 		VM_BUG_ON_PAGE(1, page);
+-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
++	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
+ }
+ #else
+ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
+ 		}
+ 	}
+ #endif
+-
+ 	/* mmap_sem is held in m_start */
+ 	walk_page_vma(vma, &smaps_walk);
+-	if (vma->vm_flags & VM_LOCKED)
+-		mss->pss_locked += mss->pss;
+ }
+ 
+ #define SEQ_PUT_DEC(str, val) \
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index de7377815b6b..8ef330027b13 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -308,6 +308,7 @@ struct mmc_card {
+ 	unsigned int    nr_parts;
+ 
+ 	unsigned int		bouncesz;	/* Bounce buffer size */
++	struct workqueue_struct *complete_wq;	/* Private workqueue */
+ };
+ 
+ static inline bool mmc_large_sector(struct mmc_card *card)
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 53c500f0ca79..c2876e740514 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -447,6 +447,11 @@ struct pmu {
+ 	 * Filter events for PMU-specific reasons.
+ 	 */
+ 	int (*filter_match)		(struct perf_event *event); /* optional */
++
++	/*
++	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
++	 */
++	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
+ };
+ 
+ enum perf_addr_filter_action_t {
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 5a97f34bc14c..4fb9d5054618 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
+ 	}
+ }
+ 
++static int perf_event_check_period(struct perf_event *event, u64 value)
++{
++	return event->pmu->check_period(event, value);
++}
++
+ static int perf_event_period(struct perf_event *event, u64 __user *arg)
+ {
+ 	u64 value;
+@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
+ 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+ 		return -EINVAL;
+ 
++	if (perf_event_check_period(event, value))
++		return -EINVAL;
++
+ 	event_function_call(event, __perf_event_period, &value);
+ 
+ 	return 0;
+@@ -9362,6 +9370,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
+ 	return 0;
+ }
+ 
++static int perf_event_nop_int(struct perf_event *event, u64 value)
++{
++	return 0;
++}
++
+ static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
+ 
+ static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
+@@ -9662,6 +9675,9 @@ got_cpu_context:
+ 		pmu->pmu_disable = perf_pmu_nop_void;
+ 	}
+ 
++	if (!pmu->check_period)
++		pmu->check_period = perf_event_nop_int;
++
+ 	if (!pmu->event_idx)
+ 		pmu->event_idx = perf_event_idx_default;
+ 
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 51386d9105fa..5631af940316 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -724,7 +724,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
+ 	size = sizeof(struct ring_buffer);
+ 	size += nr_pages * sizeof(void *);
+ 
+-	if (order_base_2(size) >= MAX_ORDER)
++	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
+ 		goto fail;
+ 
+ 	rb = kzalloc(size, GFP_KERNEL);
+diff --git a/kernel/signal.c b/kernel/signal.c
+index c187def3dba6..9102d60fc5c6 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2433,9 +2433,12 @@ relock:
+ 	}
+ 
+ 	/* Has this task already been marked for death? */
+-	ksig->info.si_signo = signr = SIGKILL;
+-	if (signal_group_exit(signal))
++	if (signal_group_exit(signal)) {
++		ksig->info.si_signo = signr = SIGKILL;
++		sigdelset(&current->pending.signal, SIGKILL);
++		recalc_sigpending();
+ 		goto fatal;
++	}
+ 
+ 	for (;;) {
+ 		struct k_sigaction *ka;
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index a6aebbc848fe..0da379b90249 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -141,7 +141,14 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
+ 
+ 	ret = strncpy_from_user(dst, src, maxlen);
+ 	if (ret == maxlen)
+-		dst[--ret] = '\0';
++		dst[ret - 1] = '\0';
++	else if (ret >= 0)
++		/*
++		 * Include the terminating null byte. In this case it
++		 * was copied by strncpy_from_user but not accounted
++		 * for in ret.
++		 */
++		ret++;
+ 
+ 	if (ret < 0) {	/* Failed to fetch string */
+ 		((u8 *)get_rloc_data(dest))[0] = '\0';
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 961401c46334..3830066018c1 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -477,16 +477,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
+ 	delta *= 4;
+ 	do_div(delta, shrinker->seeks);
+ 
+-	/*
+-	 * Make sure we apply some minimal pressure on default priority
+-	 * even on small cgroups. Stale objects are not only consuming memory
+-	 * by themselves, but can also hold a reference to a dying cgroup,
+-	 * preventing it from being reclaimed. A dying cgroup with all
+-	 * corresponding structures like per-cpu stats and kmem caches
+-	 * can be really big, so it may lead to a significant waste of memory.
+-	 */
+-	delta = max_t(unsigned long long, delta, min(freeable, batch_size));
+-
+ 	total_scan += delta;
+ 	if (total_scan < 0) {
+ 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 31a84a5a1338..fead0acb29f7 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -924,6 +924,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
++	SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index d00734d31e04..e5b6769b9797 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
+ 	if (hcd->spdif)
+ 		hcp->daidrv[i] = hdmi_spdif_dai;
+ 
++	dev_set_drvdata(dev, hcp);
++
+ 	ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv,
+ 				     dai_count);
+ 	if (ret) {
+@@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
+ 			__func__, ret);
+ 		return ret;
+ 	}
+-
+-	dev_set_drvdata(dev, hcp);
+ 	return 0;
+ }
+ 
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 382847154227..db114f3977e0 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -314,6 +314,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
+ 	return 0;
+ }
+ 
++/* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk
++ * applies. Returns 1 if a quirk was found.
++ */
+ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ 					 struct usb_device *dev,
+ 					 struct usb_interface_descriptor *altsd,
+@@ -384,7 +387,7 @@ add_sync_ep:
+ 
+ 	subs->data_endpoint->sync_master = subs->sync_endpoint;
+ 
+-	return 0;
++	return 1;
+ }
+ 
+ static int set_sync_endpoint(struct snd_usb_substream *subs,
+@@ -423,6 +426,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
+ 	if (err < 0)
+ 		return err;
+ 
++	/* endpoint set by quirk */
++	if (err > 0)
++		return 0;
++
+ 	if (altsd->bNumEndpoints < 2)
+ 		return 0;
+ 
+diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
+new file mode 100644
+index 000000000000..0b3cb52fd29d
+--- /dev/null
++++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
+@@ -0,0 +1,25 @@
++/*
++ * Copyright (C) 2012 ARM Ltd.
++ * Copyright (C) 2015 Regents of the University of California
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
++#define _UAPI_ASM_RISCV_BITSPERLONG_H
++
++#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
++
++#include <asm-generic/bitsperlong.h>
++
++#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
+diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
+index 8dd6aefdafa4..57aaeaf8e192 100644
+--- a/tools/include/uapi/asm/bitsperlong.h
++++ b/tools/include/uapi/asm/bitsperlong.h
+@@ -13,6 +13,10 @@
+ #include "../../arch/mips/include/uapi/asm/bitsperlong.h"
+ #elif defined(__ia64__)
+ #include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
++#elif defined(__riscv)
++#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
++#elif defined(__alpha__)
++#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
+ #else
+ #include <asm-generic/bitsperlong.h>
+ #endif
+diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+index 1c16e56cd93e..7cb99b433888 100644
+--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
++++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+@@ -13,7 +13,8 @@ add_probe_vfs_getname() {
+ 	local verbose=$1
+ 	if [ $had_vfs_getname -eq 1 ] ; then
+ 		line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+-		perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
++		perf probe -q       "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
++		perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
+ 	fi
+ }
+ 
+diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
+index 32ef7bdca1cf..dc2212e12184 100644
+--- a/tools/perf/util/callchain.c
++++ b/tools/perf/util/callchain.c
+@@ -766,6 +766,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
+ 			cnode->cycles_count += node->branch_flags.cycles;
+ 			cnode->iter_count += node->nr_loop_iter;
+ 			cnode->iter_cycles += node->iter_cycles;
++			cnode->from_count++;
+ 		}
+ 	}
+ 
+@@ -1345,10 +1346,10 @@ static int branch_to_str(char *bf, int bfsize,
+ static int branch_from_str(char *bf, int bfsize,
+ 			   u64 branch_count,
+ 			   u64 cycles_count, u64 iter_count,
+-			   u64 iter_cycles)
++			   u64 iter_cycles, u64 from_count)
+ {
+ 	int printed = 0, i = 0;
+-	u64 cycles;
++	u64 cycles, v = 0;
+ 
+ 	cycles = cycles_count / branch_count;
+ 	if (cycles) {
+@@ -1357,14 +1358,16 @@ static int branch_from_str(char *bf, int bfsize,
+ 				bf + printed, bfsize - printed);
+ 	}
+ 
+-	if (iter_count) {
+-		printed += count_pri64_printf(i++, "iter",
+-				iter_count,
+-				bf + printed, bfsize - printed);
++	if (iter_count && from_count) {
++		v = iter_count / from_count;
++		if (v) {
++			printed += count_pri64_printf(i++, "iter",
++					v, bf + printed, bfsize - printed);
+ 
+-		printed += count_pri64_printf(i++, "avg_cycles",
+-				iter_cycles / iter_count,
+-				bf + printed, bfsize - printed);
++			printed += count_pri64_printf(i++, "avg_cycles",
++					iter_cycles / iter_count,
++					bf + printed, bfsize - printed);
++		}
+ 	}
+ 
+ 	if (i)
+@@ -1377,6 +1380,7 @@ static int counts_str_build(char *bf, int bfsize,
+ 			     u64 branch_count, u64 predicted_count,
+ 			     u64 abort_count, u64 cycles_count,
+ 			     u64 iter_count, u64 iter_cycles,
++			     u64 from_count,
+ 			     struct branch_type_stat *brtype_stat)
+ {
+ 	int printed;
+@@ -1389,7 +1393,8 @@ static int counts_str_build(char *bf, int bfsize,
+ 				predicted_count, abort_count, brtype_stat);
+ 	} else {
+ 		printed = branch_from_str(bf, bfsize, branch_count,
+-				cycles_count, iter_count, iter_cycles);
++				cycles_count, iter_count, iter_cycles,
++				from_count);
+ 	}
+ 
+ 	if (!printed)
+@@ -1402,13 +1407,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
+ 				   u64 branch_count, u64 predicted_count,
+ 				   u64 abort_count, u64 cycles_count,
+ 				   u64 iter_count, u64 iter_cycles,
++				   u64 from_count,
+ 				   struct branch_type_stat *brtype_stat)
+ {
+ 	char str[256];
+ 
+ 	counts_str_build(str, sizeof(str), branch_count,
+ 			 predicted_count, abort_count, cycles_count,
+-			 iter_count, iter_cycles, brtype_stat);
++			 iter_count, iter_cycles, from_count, brtype_stat);
+ 
+ 	if (fp)
+ 		return fprintf(fp, "%s", str);
+@@ -1422,6 +1428,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
+ 	u64 branch_count, predicted_count;
+ 	u64 abort_count, cycles_count;
+ 	u64 iter_count, iter_cycles;
++	u64 from_count;
+ 
+ 	branch_count = clist->branch_count;
+ 	predicted_count = clist->predicted_count;
+@@ -1429,11 +1436,12 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
+ 	cycles_count = clist->cycles_count;
+ 	iter_count = clist->iter_count;
+ 	iter_cycles = clist->iter_cycles;
++	from_count = clist->from_count;
+ 
+ 	return callchain_counts_printf(fp, bf, bfsize, branch_count,
+ 				       predicted_count, abort_count,
+ 				       cycles_count, iter_count, iter_cycles,
+-				       &clist->brtype_stat);
++				       from_count, &clist->brtype_stat);
+ }
+ 
+ static void free_callchain_node(struct callchain_node *node)
+diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
+index 154560b1eb65..99d38ac019b8 100644
+--- a/tools/perf/util/callchain.h
++++ b/tools/perf/util/callchain.h
+@@ -118,6 +118,7 @@ struct callchain_list {
+ 		bool		has_children;
+ 	};
+ 	u64			branch_count;
++	u64			from_count;
+ 	u64			predicted_count;
+ 	u64			abort_count;
+ 	u64			cycles_count;
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index d7403d1207d7..b1508ce3e412 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -1988,7 +1988,7 @@ static void save_iterations(struct iterations *iter,
+ {
+ 	int i;
+ 
+-	iter->nr_loop_iter = nr;
++	iter->nr_loop_iter++;
+ 	iter->cycles = 0;
+ 
+ 	for (i = 0; i < nr; i++)


2019-02-20 11:19 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --