From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 16 Mar 2016 19:43:22 +0000 (UTC)
Message-ID: <1458157394.9a367b9a66de5e7d07306be8a36a47d44068689b.mpagano@gentoo>

commit:     9a367b9a66de5e7d07306be8a36a47d44068689b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 16 19:43:14 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 16 19:43:14 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9a367b9a

Linux patch 4.4.6

 0000_README            |    4 +
 1005_linux-4.4.6.patch | 1740 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1744 insertions(+)

diff --git a/0000_README b/0000_README
index 5c23bd6..9dc0b5b 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-4.4.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.5
 
+Patch:  1005_linux-4.4.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.4.6.patch b/1005_linux-4.4.6.patch
new file mode 100644
index 0000000..ea95a92
--- /dev/null
+++ b/1005_linux-4.4.6.patch
@@ -0,0 +1,1740 @@
+diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
+index 9f4e5136e568..12af302bca6a 100644
+--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
++++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
+@@ -23,6 +23,7 @@ Optional properties:
+   during suspend.
+ - ti,no-reset-on-init: When present, the module should not be reset at init
+ - ti,no-idle-on-init: When present, the module should not be idled at init
++- ti,no-idle: When present, the module is never allowed to idle.
+ 
+ Example:
+ 
+diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
+index 3a4d681c3e98..b653641d4261 100644
+--- a/Documentation/virtual/kvm/mmu.txt
++++ b/Documentation/virtual/kvm/mmu.txt
+@@ -358,7 +358,8 @@ In the first case there are two additional complications:
+ - if CR4.SMEP is enabled: since we've turned the page into a kernel page,
+   the kernel may now execute it.  We handle this by also setting spte.nx.
+   If we get a user fetch or read fault, we'll change spte.u=1 and
+-  spte.nx=gpte.nx back.
++  spte.nx=gpte.nx back.  For this to work, KVM forces EFER.NX to 1 when
++  shadow paging is in use.
+ - if CR4.SMAP is disabled: since the page has been changed to a kernel
+   page, it can not be reused when CR4.SMAP is enabled. We set
+   CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
+diff --git a/Makefile b/Makefile
+index d13322ade3a0..87d12b44ab66 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm/boot/dts/armada-xp-axpwifiap.dts b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
+index 23fc670c0427..5c21b236721f 100644
+--- a/arch/arm/boot/dts/armada-xp-axpwifiap.dts
++++ b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
+@@ -70,8 +70,8 @@
+ 	soc {
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
+ 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
+-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
++			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
++			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
+ 
+ 		pcie-controller {
+ 			status = "okay";
+diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
+index f774101416a5..ebe1d267406d 100644
+--- a/arch/arm/boot/dts/armada-xp-db.dts
++++ b/arch/arm/boot/dts/armada-xp-db.dts
+@@ -76,8 +76,8 @@
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
+ 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+ 			  MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
+-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
+-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
++			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
++			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
+ 
+ 		devbus-bootcs {
+ 			status = "okay";
+diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
+index 4878d7353069..5730b875c4f5 100644
+--- a/arch/arm/boot/dts/armada-xp-gp.dts
++++ b/arch/arm/boot/dts/armada-xp-gp.dts
+@@ -95,8 +95,8 @@
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
+ 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+ 			  MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
+-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
+-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
++			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
++			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
+ 
+ 		devbus-bootcs {
+ 			status = "okay";
+diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+index 58b500873bfd..d960fef77ca1 100644
+--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
++++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+@@ -65,8 +65,8 @@
+ 	soc {
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
+ 			MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+-			MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
+-			MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
++			MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
++			MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
+ 
+ 		pcie-controller {
+ 			status = "okay";
+diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+index 6e9820e141f8..b89e6cf1271a 100644
+--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
++++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+@@ -70,8 +70,8 @@
+ 	soc {
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
+ 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
+-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
++			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
++			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
+ 
+ 		pcie-controller {
+ 			status = "okay";
+diff --git a/arch/arm/boot/dts/armada-xp-matrix.dts b/arch/arm/boot/dts/armada-xp-matrix.dts
+index 6ab33837a2b6..6522b04f4a8e 100644
+--- a/arch/arm/boot/dts/armada-xp-matrix.dts
++++ b/arch/arm/boot/dts/armada-xp-matrix.dts
+@@ -68,8 +68,8 @@
+ 	soc {
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
+ 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
+-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
++			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
++			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
+ 
+ 		internal-regs {
+ 			serial@12000 {
+diff --git a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+index 6fe8972de0a2..db54c7158a36 100644
+--- a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
++++ b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+@@ -64,8 +64,8 @@
+ 	soc {
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
+ 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
+-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
++			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
++			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
+ 
+ 		pcie-controller {
+ 			status = "okay";
+diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+index a5db17782e08..853bd392a4fe 100644
+--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
++++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+@@ -65,9 +65,9 @@
+ 	soc {
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
+ 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+-			  MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x8000000
+-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
+-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
++			  MBUS_ID(0x01, 0x2f) 0 0 0xe8000000 0x8000000
++			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
++			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
+ 
+ 		devbus-bootcs {
+ 			status = "okay";
+diff --git a/arch/arm/boot/dts/armada-xp-synology-ds414.dts b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
+index 2391b11dc546..d17dab0a6f51 100644
+--- a/arch/arm/boot/dts/armada-xp-synology-ds414.dts
++++ b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
+@@ -78,8 +78,8 @@
+ 	soc {
+ 		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
+ 			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+-			  MBUS_ID(0x09, 0x09) 0 0 0xf8100000 0x10000
+-			  MBUS_ID(0x09, 0x05) 0 0 0xf8110000 0x10000>;
++			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
++			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
+ 
+ 		pcie-controller {
+ 			status = "okay";
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index fe99231cbde5..c2a03c740e79 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -1497,6 +1497,16 @@
+ 			       0x48485200 0x2E00>;
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
++
++			/*
++			 * Do not allow gating of cpsw clock as workaround
++			 * for errata i877. Keeping internal clock disabled
++			 * causes the device switching characteristics
++			 * to degrade over time and eventually fail to meet
++			 * the data manual delay time/skew specs.
++			 */
++			ti,no-idle;
++
+ 			/*
+ 			 * rx_thresh_pend
+ 			 * rx_pend
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 48495ad82aba..8e0bd5939e5a 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -2200,6 +2200,11 @@ static int _enable(struct omap_hwmod *oh)
+  */
+ static int _idle(struct omap_hwmod *oh)
+ {
++	if (oh->flags & HWMOD_NO_IDLE) {
++		oh->_int_flags |= _HWMOD_SKIP_ENABLE;
++		return 0;
++	}
++
+ 	pr_debug("omap_hwmod: %s: idling\n", oh->name);
+ 
+ 	if (oh->_state != _HWMOD_STATE_ENABLED) {
+@@ -2504,6 +2509,8 @@ static int __init _init(struct omap_hwmod *oh, void *data)
+ 			oh->flags |= HWMOD_INIT_NO_RESET;
+ 		if (of_find_property(np, "ti,no-idle-on-init", NULL))
+ 			oh->flags |= HWMOD_INIT_NO_IDLE;
++		if (of_find_property(np, "ti,no-idle", NULL))
++			oh->flags |= HWMOD_NO_IDLE;
+ 	}
+ 
+ 	oh->_state = _HWMOD_STATE_INITIALIZED;
+@@ -2630,7 +2637,7 @@ static void __init _setup_postsetup(struct omap_hwmod *oh)
+ 	 * XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data -
+ 	 * it should be set by the core code as a runtime flag during startup
+ 	 */
+-	if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
++	if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) &&
+ 	    (postsetup_state == _HWMOD_STATE_IDLE)) {
+ 		oh->_int_flags |= _HWMOD_SKIP_ENABLE;
+ 		postsetup_state = _HWMOD_STATE_ENABLED;
+diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
+index 76bce11c85a4..7c7a31169475 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.h
++++ b/arch/arm/mach-omap2/omap_hwmod.h
+@@ -525,6 +525,8 @@ struct omap_hwmod_omap4_prcm {
+  *     or idled.
+  * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
+  *     operate and they need to be handled at the same time as the main_clk.
++ * HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain
++ *     IPs like CPSW on DRA7, where clocks to this module cannot be disabled.
+  */
+ #define HWMOD_SWSUP_SIDLE			(1 << 0)
+ #define HWMOD_SWSUP_MSTANDBY			(1 << 1)
+@@ -541,6 +543,7 @@ struct omap_hwmod_omap4_prcm {
+ #define HWMOD_SWSUP_SIDLE_ACT			(1 << 12)
+ #define HWMOD_RECONFIG_IO_CHAIN			(1 << 13)
+ #define HWMOD_OPT_CLKS_NEEDED			(1 << 14)
++#define HWMOD_NO_IDLE				(1 << 15)
+ 
+ /*
+  * omap_hwmod._int_flags definitions
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index fc9f7ef2f4ab..eaa9cabf4066 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -40,7 +40,7 @@
+  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
+  *	fixed mappings and modules
+  */
+-#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
++#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
+ 
+ #ifndef CONFIG_KASAN
+ #define VMALLOC_START		(VA_START)
+@@ -52,7 +52,8 @@
+ #define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
+ 
+ #define VMEMMAP_START		(VMALLOC_END + SZ_64K)
+-#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
++#define vmemmap			((struct page *)VMEMMAP_START - \
++				 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
+ 
+ #define FIRST_USER_ADDRESS	0UL
+ 
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 71683a853372..db459612de44 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2155,7 +2155,7 @@ config MIPS_MT_SMP
+ 	select CPU_MIPSR2_IRQ_VI
+ 	select CPU_MIPSR2_IRQ_EI
+ 	select SYNC_R4K
+-	select MIPS_GIC_IPI
++	select MIPS_GIC_IPI if MIPS_GIC
+ 	select MIPS_MT
+ 	select SMP
+ 	select SMP_UP
+@@ -2253,7 +2253,7 @@ config MIPS_VPE_APSP_API_MT
+ config MIPS_CMP
+ 	bool "MIPS CMP framework support (DEPRECATED)"
+ 	depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
+-	select MIPS_GIC_IPI
++	select MIPS_GIC_IPI if MIPS_GIC
+ 	select SMP
+ 	select SYNC_R4K
+ 	select SYS_SUPPORTS_SMP
+@@ -2273,7 +2273,7 @@ config MIPS_CPS
+ 	select MIPS_CM
+ 	select MIPS_CPC
+ 	select MIPS_CPS_PM if HOTPLUG_CPU
+-	select MIPS_GIC_IPI
++	select MIPS_GIC_IPI if MIPS_GIC
+ 	select SMP
+ 	select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
+ 	select SYS_SUPPORTS_HOTPLUG_CPU
+@@ -2292,6 +2292,7 @@ config MIPS_CPS_PM
+ 	bool
+ 
+ config MIPS_GIC_IPI
++	depends on MIPS_GIC
+ 	bool
+ 
+ config MIPS_CM
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index bd4385a8e6e8..2b521e07b860 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -121,6 +121,7 @@ static inline void calculate_cpu_foreign_map(void)
+ 	cpumask_t temp_foreign_map;
+ 
+ 	/* Re-calculate the mask */
++	cpumask_clear(&temp_foreign_map);
+ 	for_each_online_cpu(i) {
+ 		core_present = 0;
+ 		for_each_cpu(k, &temp_foreign_map)
+diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
+index 8374afed9d0a..f8faaaeeca1e 100644
+--- a/arch/powerpc/include/asm/opal-api.h
++++ b/arch/powerpc/include/asm/opal-api.h
+@@ -157,7 +157,8 @@
+ #define OPAL_LEDS_GET_INDICATOR			114
+ #define OPAL_LEDS_SET_INDICATOR			115
+ #define OPAL_CEC_REBOOT2			116
+-#define OPAL_LAST				116
++#define OPAL_CONSOLE_FLUSH			117
++#define OPAL_LAST				117
+ 
+ /* Device tree flags */
+ 
+diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
+index 800115910e43..07a99e638449 100644
+--- a/arch/powerpc/include/asm/opal.h
++++ b/arch/powerpc/include/asm/opal.h
+@@ -35,6 +35,7 @@ int64_t opal_console_read(int64_t term_number, __be64 *length,
+ 			  uint8_t *buffer);
+ int64_t opal_console_write_buffer_space(int64_t term_number,
+ 					__be64 *length);
++int64_t opal_console_flush(int64_t term_number);
+ int64_t opal_rtc_read(__be32 *year_month_day,
+ 		      __be64 *hour_minute_second_millisecond);
+ int64_t opal_rtc_write(uint32_t year_month_day,
+@@ -262,6 +263,8 @@ extern int opal_resync_timebase(void);
+ 
+ extern void opal_lpc_init(void);
+ 
++extern void opal_kmsg_init(void);
++
+ extern int opal_event_request(unsigned int opal_event_nr);
+ 
+ struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
+index 59663af9315f..e4f7d4eed20c 100644
+--- a/arch/powerpc/kernel/module_64.c
++++ b/arch/powerpc/kernel/module_64.c
+@@ -335,7 +335,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
+ 		if (syms[i].st_shndx == SHN_UNDEF) {
+ 			char *name = strtab + syms[i].st_name;
+ 			if (name[0] == '.')
+-				memmove(name, name+1, strlen(name));
++				syms[i].st_name++;
+ 		}
+ 	}
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index e57cc383e5da..463af88c95a2 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1370,6 +1370,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ 	std	r6, VCPU_ACOP(r9)
+ 	stw	r7, VCPU_GUEST_PID(r9)
+ 	std	r8, VCPU_WORT(r9)
++	/*
++	 * Restore various registers to 0, where non-zero values
++	 * set by the guest could disrupt the host.
++	 */
++	li	r0, 0
++	mtspr	SPRN_IAMR, r0
++	mtspr	SPRN_CIABR, r0
++	mtspr	SPRN_DAWRX, r0
++	mtspr	SPRN_TCSCR, r0
++	mtspr	SPRN_WORT, r0
++	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
++	li	r0, 1
++	sldi	r0, r0, 31
++	mtspr	SPRN_MMCRS, r0
+ 8:
+ 
+ 	/* Save and reset AMR and UAMOR before turning on the MMU */
+diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
+index 1c8cdb6250e7..b9de7ef48849 100644
+--- a/arch/powerpc/platforms/powernv/Makefile
++++ b/arch/powerpc/platforms/powernv/Makefile
+@@ -2,6 +2,7 @@ obj-y			+= setup.o opal-wrappers.o opal.o opal-async.o idle.o
+ obj-y			+= opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
+ obj-y			+= rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
+ obj-y			+= opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o
++obj-y			+= opal-kmsg.o
+ 
+ obj-$(CONFIG_SMP)	+= smp.o subcore.o subcore-asm.o
+ obj-$(CONFIG_PCI)	+= pci.o pci-p5ioc2.o pci-ioda.o
+diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c
+new file mode 100644
+index 000000000000..6f1214d4de92
+--- /dev/null
++++ b/arch/powerpc/platforms/powernv/opal-kmsg.c
+@@ -0,0 +1,75 @@
++/*
++ * kmsg dumper that ensures the OPAL console fully flushes panic messages
++ *
++ * Author: Russell Currey <ruscur@russell.cc>
++ *
++ * Copyright 2015 IBM Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/kmsg_dump.h>
++
++#include <asm/opal.h>
++#include <asm/opal-api.h>
++
++/*
++ * Console output is controlled by OPAL firmware.  The kernel regularly calls
++ * OPAL_POLL_EVENTS, which flushes some console output.  In a panic state,
++ * however, the kernel no longer calls OPAL_POLL_EVENTS and the panic message
++ * may not be completely printed.  This function does not actually dump the
++ * message, it just ensures that OPAL completely flushes the console buffer.
++ */
++static void force_opal_console_flush(struct kmsg_dumper *dumper,
++				     enum kmsg_dump_reason reason)
++{
++	int i;
++	int64_t ret;
++
++	/*
++	 * Outside of a panic context the pollers will continue to run,
++	 * so we don't need to do any special flushing.
++	 */
++	if (reason != KMSG_DUMP_PANIC)
++		return;
++
++	if (opal_check_token(OPAL_CONSOLE_FLUSH)) {
++		ret = opal_console_flush(0);
++
++		if (ret == OPAL_UNSUPPORTED || ret == OPAL_PARAMETER)
++			return;
++
++		/* Incrementally flush until there's nothing left */
++		while (opal_console_flush(0) != OPAL_SUCCESS);
++	} else {
++		/*
++		 * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
++		 * the console can still be flushed by calling the polling
++		 * function enough times to flush the buffer.  We don't know
++		 * how much output still needs to be flushed, but we can be
++		 * generous since the kernel is in panic and doesn't need
++		 * to do much else.
++		 */
++		printk(KERN_NOTICE "opal: OPAL_CONSOLE_FLUSH missing.\n");
++		for (i = 0; i < 1024; i++) {
++			opal_poll_events(NULL);
++		}
++	}
++}
++
++static struct kmsg_dumper opal_kmsg_dumper = {
++	.dump = force_opal_console_flush
++};
++
++void __init opal_kmsg_init(void)
++{
++	int rc;
++
++	/* Add our dumper to the list */
++	rc = kmsg_dump_register(&opal_kmsg_dumper);
++	if (rc != 0)
++		pr_err("opal: kmsg_dump_register failed; returned %d\n", rc);
++}
+diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
+index b7a464fef7a7..e45b88a5d7e0 100644
+--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
++++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
+@@ -301,3 +301,4 @@ OPAL_CALL(opal_flash_erase,			OPAL_FLASH_ERASE);
+ OPAL_CALL(opal_prd_msg,				OPAL_PRD_MSG);
+ OPAL_CALL(opal_leds_get_ind,			OPAL_LEDS_GET_INDICATOR);
+ OPAL_CALL(opal_leds_set_ind,			OPAL_LEDS_SET_INDICATOR);
++OPAL_CALL(opal_console_flush,			OPAL_CONSOLE_FLUSH);
+diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
+index 57cffb80bc36..ae29eaf85e9e 100644
+--- a/arch/powerpc/platforms/powernv/opal.c
++++ b/arch/powerpc/platforms/powernv/opal.c
+@@ -758,6 +758,9 @@ static int __init opal_init(void)
+ 	opal_pdev_init(opal_node, "ibm,opal-flash");
+ 	opal_pdev_init(opal_node, "ibm,opal-prd");
+ 
++	/* Initialise OPAL kmsg dumper for flushing console on panic */
++	opal_kmsg_init();
++
+ 	return 0;
+ }
+ machine_subsys_initcall(powernv, opal_init);
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index fb1b93ea3e3f..e485817f7b1a 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -15,17 +15,25 @@
+ static inline int init_new_context(struct task_struct *tsk,
+ 				   struct mm_struct *mm)
+ {
++	spin_lock_init(&mm->context.list_lock);
++	INIT_LIST_HEAD(&mm->context.pgtable_list);
++	INIT_LIST_HEAD(&mm->context.gmap_list);
+ 	cpumask_clear(&mm->context.cpu_attach_mask);
+ 	atomic_set(&mm->context.attach_count, 0);
+ 	mm->context.flush_mm = 0;
+-	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+-	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
+ #ifdef CONFIG_PGSTE
+ 	mm->context.alloc_pgste = page_table_allocate_pgste;
+ 	mm->context.has_pgste = 0;
+ 	mm->context.use_skey = 0;
+ #endif
+-	mm->context.asce_limit = STACK_TOP_MAX;
++	if (mm->context.asce_limit == 0) {
++		/* context created by exec, set asce limit to 4TB */
++		mm->context.asce_bits = _ASCE_TABLE_LENGTH |
++			_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
++		mm->context.asce_limit = STACK_TOP_MAX;
++	} else if (mm->context.asce_limit == (1UL << 31)) {
++		mm_inc_nr_pmds(mm);
++	}
+ 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
+ 	return 0;
+ }
+@@ -111,8 +119,6 @@ static inline void activate_mm(struct mm_struct *prev,
+ static inline void arch_dup_mmap(struct mm_struct *oldmm,
+ 				 struct mm_struct *mm)
+ {
+-	if (oldmm->context.asce_limit < mm->context.asce_limit)
+-		crst_table_downgrade(mm, oldmm->context.asce_limit);
+ }
+ 
+ static inline void arch_exit_mmap(struct mm_struct *mm)
+diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
+index 7b7858f158b4..d7cc79fb6191 100644
+--- a/arch/s390/include/asm/pgalloc.h
++++ b/arch/s390/include/asm/pgalloc.h
+@@ -100,12 +100,26 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ 
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+-	spin_lock_init(&mm->context.list_lock);
+-	INIT_LIST_HEAD(&mm->context.pgtable_list);
+-	INIT_LIST_HEAD(&mm->context.gmap_list);
+-	return (pgd_t *) crst_table_alloc(mm);
++	unsigned long *table = crst_table_alloc(mm);
++
++	if (!table)
++		return NULL;
++	if (mm->context.asce_limit == (1UL << 31)) {
++		/* Forking a compat process with 2 page table levels */
++		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
++			crst_table_free(mm, table);
++			return NULL;
++		}
++	}
++	return (pgd_t *) table;
++}
++
++static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
++{
++	if (mm->context.asce_limit == (1UL << 31))
++		pgtable_pmd_page_dtor(virt_to_page(pgd));
++	crst_table_free(mm, (unsigned long *) pgd);
+ }
+-#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
+ 
+ static inline void pmd_populate(struct mm_struct *mm,
+ 				pmd_t *pmd, pgtable_t pte)
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index a08d0afd5ff6..575dc123bda2 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -2249,7 +2249,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
+ 
+ 	/* manually convert vector registers if necessary */
+ 	if (MACHINE_HAS_VX) {
+-		convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
++		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
+ 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+ 				     fprs, 128);
+ 	} else {
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index e7c2c1428a69..8eb8a934b531 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3754,13 +3754,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
+ void
+ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+ {
++	bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
++
+ 	/*
+ 	 * Passing "true" to the last argument is okay; it adds a check
+ 	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
+ 	 */
+ 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+ 				boot_cpu_data.x86_phys_bits,
+-				context->shadow_root_level, context->nx,
++				context->shadow_root_level, uses_nx,
+ 				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
+ 				true);
+ }
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 5fd846cd6e0e..0958fa2b7cb7 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1748,6 +1748,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ 			return;
+ 		}
+ 		break;
++	case MSR_IA32_PEBS_ENABLE:
++		/* PEBS needs a quiescent period after being disabled (to write
++		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
++		 * provide that period, so a CPU could write host's record into
++		 * guest's memory.
++		 */
++		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ 	}
+ 
+ 	for (i = 0; i < m->nr; ++i)
+@@ -1785,26 +1792,31 @@ static void reload_tss(void)
+ 
+ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+ {
+-	u64 guest_efer;
+-	u64 ignore_bits;
++	u64 guest_efer = vmx->vcpu.arch.efer;
++	u64 ignore_bits = 0;
+ 
+-	guest_efer = vmx->vcpu.arch.efer;
++	if (!enable_ept) {
++		/*
++		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
++		 * host CPUID is more efficient than testing guest CPUID
++		 * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
++		 */
++		if (boot_cpu_has(X86_FEATURE_SMEP))
++			guest_efer |= EFER_NX;
++		else if (!(guest_efer & EFER_NX))
++			ignore_bits |= EFER_NX;
++	}
+ 
+ 	/*
+-	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
+-	 * outside long mode
++	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
+ 	 */
+-	ignore_bits = EFER_NX | EFER_SCE;
++	ignore_bits |= EFER_SCE;
+ #ifdef CONFIG_X86_64
+ 	ignore_bits |= EFER_LMA | EFER_LME;
+ 	/* SCE is meaningful only in long mode on Intel */
+ 	if (guest_efer & EFER_LMA)
+ 		ignore_bits &= ~(u64)EFER_SCE;
+ #endif
+-	guest_efer &= ~ignore_bits;
+-	guest_efer |= host_efer & ignore_bits;
+-	vmx->guest_msrs[efer_offset].data = guest_efer;
+-	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+ 
+ 	clear_atomic_switch_msr(vmx, MSR_EFER);
+ 
+@@ -1815,16 +1827,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+ 	 */
+ 	if (cpu_has_load_ia32_efer ||
+ 	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
+-		guest_efer = vmx->vcpu.arch.efer;
+ 		if (!(guest_efer & EFER_LMA))
+ 			guest_efer &= ~EFER_LME;
+ 		if (guest_efer != host_efer)
+ 			add_atomic_switch_msr(vmx, MSR_EFER,
+ 					      guest_efer, host_efer);
+ 		return false;
+-	}
++	} else {
++		guest_efer &= ~ignore_bits;
++		guest_efer |= host_efer & ignore_bits;
+ 
+-	return true;
++		vmx->guest_msrs[efer_offset].data = guest_efer;
++		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
++
++		return true;
++	}
+ }
+ 
+ static unsigned long segment_base(u16 selector)
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index db20ee9a413a..b599a780a5a9 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -414,24 +414,30 @@ pmd_t *lookup_pmd_address(unsigned long address)
+ phys_addr_t slow_virt_to_phys(void *__virt_addr)
+ {
+ 	unsigned long virt_addr = (unsigned long)__virt_addr;
+-	unsigned long phys_addr, offset;
++	phys_addr_t phys_addr;
++	unsigned long offset;
+ 	enum pg_level level;
+ 	pte_t *pte;
+ 
+ 	pte = lookup_address(virt_addr, &level);
+ 	BUG_ON(!pte);
+ 
++	/*
++	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
++	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
++	 * make 32-PAE kernel work correctly.
++	 */
+ 	switch (level) {
+ 	case PG_LEVEL_1G:
+-		phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
++		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+ 		offset = virt_addr & ~PUD_PAGE_MASK;
+ 		break;
+ 	case PG_LEVEL_2M:
+-		phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
++		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+ 		offset = virt_addr & ~PMD_PAGE_MASK;
+ 		break;
+ 	default:
+-		phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
++		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
+ 		offset = virt_addr & ~PAGE_MASK;
+ 	}
+ 
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index fa00f3a186da..02f9aa4ebe05 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -176,6 +176,7 @@
+ #define AT_XDMAC_MAX_CHAN	0x20
+ #define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
+ #define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
++#define AT_XDMAC_RESIDUE_MAX_RETRIES	5
+ 
+ #define AT_XDMAC_DMA_BUSWIDTHS\
+ 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+@@ -1383,8 +1384,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	struct at_xdmac_desc	*desc, *_desc;
+ 	struct list_head	*descs_list;
+ 	enum dma_status		ret;
+-	int			residue;
+-	u32			cur_nda, mask, value;
++	int			residue, retry;
++	u32			cur_nda, check_nda, cur_ubc, mask, value;
+ 	u8			dwidth = 0;
+ 	unsigned long		flags;
+ 
+@@ -1421,7 +1422,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 			cpu_relax();
+ 	}
+ 
++	/*
++	 * When processing the residue, we need to read two registers but we
++	 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
++	 * we stand in the descriptor list and AT_XDMAC_CUBC is used
++	 * to know how many data are remaining for the current descriptor.
++	 * Since the DMA channel is not paused (so as not to lose data), the
++	 * descriptor may change between the AT_XDMAC_CNDA and AT_XDMAC_CUBC
++	 * reads.
++	 * For that reason, after reading AT_XDMAC_CUBC, we check if we are
++	 * still using the same descriptor by reading a second time
++	 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
++	 * read again AT_XDMAC_CUBC.
++	 * Memory barriers are used to ensure the read order of the registers.
++	 * A maximum number of retries is set because, when transferring a lot
++	 * of data with small buffers, the loop might otherwise never end.
++	 */
+ 	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
++	rmb();
++	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
++	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
++		rmb();
++		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
++
++		if (likely(cur_nda == check_nda))
++			break;
++
++		cur_nda = check_nda;
++		rmb();
++		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
++	}
++
++	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
++		ret = DMA_ERROR;
++		goto spin_unlock;
++	}
++
+ 	/*
+ 	 * Remove size of all microblocks already transferred and the current
+ 	 * one. Then add the remaining size to transfer of the current
+@@ -1434,7 +1470,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+ 			break;
+ 	}
+-	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
++	residue += cur_ubc << dwidth;
+ 
+ 	dma_set_residue(txstate, residue);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 0c713a908304..82903ca78529 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
+ 	 * In practice this won't execute very often unless on very fast
+ 	 * machines because the time window for this to happen is very small.
+ 	 */
+-	while (amdgpuCrtc->enabled && repcnt--) {
++	while (amdgpuCrtc->enabled && --repcnt) {
+ 		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+ 		 * start in hpos, and to the "fudged earlier" vblank start in
+ 		 * vpos.
+@@ -112,13 +112,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
+ 			break;
+ 
+ 		/* Sleep at least until estimated real start of hw vblank */
+-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ 		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ 		if (min_udelay > vblank->framedur_ns / 2000) {
+ 			/* Don't wait ridiculously long - something is wrong */
+ 			repcnt = 0;
+ 			break;
+ 		}
++		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ 		usleep_range(min_udelay, 2 * min_udelay);
+ 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ 	};
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index d690df545b4d..c566993a2ec3 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ 	}
+ 
+ 	drm_kms_helper_poll_enable(dev);
+-	drm_helper_hpd_irq_event(dev);
+ 
+ 	/* set the power state here in case we are a PX system or headless */
+ 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 13767d21835f..3645b223aa37 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -455,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
+ 	 * In practice this won't execute very often unless on very fast
+ 	 * machines because the time window for this to happen is very small.
+ 	 */
+-	while (radeon_crtc->enabled && repcnt--) {
++	while (radeon_crtc->enabled && --repcnt) {
+ 		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+ 		 * start in hpos, and to the "fudged earlier" vblank start in
+ 		 * vpos.
+@@ -471,13 +471,13 @@ static void radeon_flip_work_func(struct work_struct *__work)
+ 			break;
+ 
+ 		/* Sleep at least until estimated real start of hw vblank */
+-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ 		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ 		if (min_udelay > vblank->framedur_ns / 2000) {
+ 			/* Don't wait ridiculously long - something is wrong */
+ 			repcnt = 0;
+ 			break;
+ 		}
++		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ 		usleep_range(min_udelay, 2 * min_udelay);
+ 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ 	};
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 1fa81215cea1..60ab31517153 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1075,6 +1075,8 @@ force:
+ 
+ 	/* update display watermarks based on new power state */
+ 	radeon_bandwidth_update(rdev);
++	/* update displays */
++	radeon_dpm_display_configuration_changed(rdev);
+ 
+ 	/* wait for the rings to drain */
+ 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+@@ -1091,9 +1093,6 @@ force:
+ 
+ 	radeon_dpm_post_set_power_state(rdev);
+ 
+-	/* update displays */
+-	radeon_dpm_display_configuration_changed(rdev);
+-
+ 	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ 	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+ 	rdev->pm.dpm.single_display = single_display;
+diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
+index f2e13eb8339f..a0e28f3a278d 100644
+--- a/drivers/gpu/ipu-v3/ipu-common.c
++++ b/drivers/gpu/ipu-v3/ipu-common.c
+@@ -1050,6 +1050,17 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ 	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
+ 		const struct ipu_platform_reg *reg = &client_reg[i];
+ 		struct platform_device *pdev;
++		struct device_node *of_node;
++
++		/* Associate subdevice with the corresponding port node */
++		of_node = of_graph_get_port_by_id(dev->of_node, i);
++		if (!of_node) {
++			dev_info(dev,
++				 "no port@%d node in %s, not using %s%d\n",
++				 i, dev->of_node->full_name,
++				 (i / 2) ? "DI" : "CSI", i % 2);
++			continue;
++		}
+ 
+ 		pdev = platform_device_alloc(reg->name, id++);
+ 		if (!pdev) {
+@@ -1057,17 +1068,9 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ 			goto err_register;
+ 		}
+ 
++		pdev->dev.of_node = of_node;
+ 		pdev->dev.parent = dev;
+ 
+-		/* Associate subdevice with the corresponding port node */
+-		pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
+-		if (!pdev->dev.of_node) {
+-			dev_err(dev, "missing port@%d node in %s\n", i,
+-				dev->of_node->full_name);
+-			ret = -ENODEV;
+-			goto err_register;
+-		}
+-
+ 		ret = platform_device_add_data(pdev, &reg->pdata,
+ 					       sizeof(reg->pdata));
+ 		if (!ret)
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index 5eee62badf45..cbc99d5649af 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
+ static void gs_destroy_candev(struct gs_can *dev)
+ {
+ 	unregister_candev(dev->netdev);
+-	free_candev(dev->netdev);
+ 	usb_kill_anchored_urbs(&dev->tx_submitted);
+-	kfree(dev);
++	free_candev(dev->netdev);
+ }
+ 
+ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+@@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
+ 	for (i = 0; i < icount; i++) {
+ 		dev->canch[i] = gs_make_candev(i, intf);
+ 		if (IS_ERR_OR_NULL(dev->canch[i])) {
++			/* save error code to return later */
++			rc = PTR_ERR(dev->canch[i]);
++
+ 			/* on failure destroy previously created candevs */
+ 			icount = i;
+-			for (i = 0; i < icount; i++) {
++			for (i = 0; i < icount; i++)
+ 				gs_destroy_candev(dev->canch[i]);
+-				dev->canch[i] = NULL;
+-			}
++
++			usb_kill_anchored_urbs(&dev->rx_submitted);
+ 			kfree(dev);
+ 			return rc;
+ 		}
+@@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
+ 		return;
+ 	}
+ 
+-	for (i = 0; i < GS_MAX_INTF; i++) {
+-		struct gs_can *can = dev->canch[i];
+-
+-		if (!can)
+-			continue;
+-
+-		gs_destroy_candev(can);
+-	}
++	for (i = 0; i < GS_MAX_INTF; i++)
++		if (dev->canch[i])
++			gs_destroy_candev(dev->canch[i]);
+ 
+ 	usb_kill_anchored_urbs(&dev->rx_submitted);
++	kfree(dev);
+ }
+ 
+ static const struct usb_device_id gs_usb_table[] = {
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index c652a66be803..6743edf43aa8 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -421,6 +421,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
+ 		return -1;
+ 	}
+ 
++	/*
++	 * Increase the pending frames counter, so that later when a reply comes
++	 * in and the counter is decreased - we don't start getting negative
++	 * values.
++	 * Note that we don't need to make sure it isn't agg'd, since we're
++	 * TXing non-sta
++	 */
++	atomic_inc(&mvm->pending_frames[sta_id]);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 314db8c1047a..42d8617352ae 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4772,8 +4772,10 @@ int pci_get_new_domain_nr(void)
+ void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
+ {
+ 	static int use_dt_domains = -1;
+-	int domain = of_get_pci_domain_nr(parent->of_node);
++	int domain = -1;
+ 
++	if (parent)
++		domain = of_get_pci_domain_nr(parent->of_node);
+ 	/*
+ 	 * Check DT domain and use_dt_domains values.
+ 	 *
+diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
+index cb61f300f8b5..277b5c8c825c 100644
+--- a/drivers/s390/block/dasd_diag.c
++++ b/drivers/s390/block/dasd_diag.c
+@@ -67,7 +67,7 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
+  * and function code cmd.
+  * In case of an exception return 3. Otherwise return result of bitwise OR of
+  * resulting condition code and DIAG return code. */
+-static inline int dia250(void *iob, int cmd)
++static inline int __dia250(void *iob, int cmd)
+ {
+ 	register unsigned long reg2 asm ("2") = (unsigned long) iob;
+ 	typedef union {
+@@ -77,7 +77,6 @@ static inline int dia250(void *iob, int cmd)
+ 	int rc;
+ 
+ 	rc = 3;
+-	diag_stat_inc(DIAG_STAT_X250);
+ 	asm volatile(
+ 		"	diag	2,%2,0x250\n"
+ 		"0:	ipm	%0\n"
+@@ -91,6 +90,12 @@ static inline int dia250(void *iob, int cmd)
+ 	return rc;
+ }
+ 
++static inline int dia250(void *iob, int cmd)
++{
++	diag_stat_inc(DIAG_STAT_X250);
++	return __dia250(iob, cmd);
++}
++
+ /* Initialize block I/O to DIAG device using the specified blocksize and
+  * block offset. On success, return zero and set end_block to contain the
+  * number of blocks on the device minus the specified offset. Return non-zero
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index 88029cc6de5e..46b1991fbb50 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -177,7 +177,6 @@ void core_tmr_abort_task(
+ 
+ 		if (!__target_check_io_state(se_cmd, se_sess, 0)) {
+ 			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+-			target_put_sess_cmd(se_cmd);
+ 			goto out;
+ 		}
+ 		list_del_init(&se_cmd->se_cmd_list);
+diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
+index d211b8e18566..30c4c9ebb693 100644
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -843,9 +843,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
+ 
+ 		pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
+ 			  __func__, ret);
+-		/* Might as well let the VFS know */
+-		d_instantiate(new_dentry, d_inode(old_dentry));
+-		ihold(d_inode(old_dentry));
++		/*
++		 * We can't keep the target in dcache after that.
++		 * For one thing, we can't afford dentry aliases for directories.
++		 * For another, if there was a victim, we _can't_ set new inode
++		 * for that sucker and we have to trigger mount eviction - the
++		 * caller won't do it on its own since we are returning an error.
++		 */
++		d_invalidate(new_dentry);
+ 		new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
+ 		return ret;
+ 	}
+diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
+index f0e3e9e747dd..03446c5a3ec1 100644
+--- a/fs/ncpfs/dir.c
++++ b/fs/ncpfs/dir.c
+@@ -633,7 +633,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
+ 				d_rehash(newdent);
+ 		} else {
+ 			spin_lock(&dentry->d_lock);
+-			NCP_FINFO(inode)->flags &= ~NCPI_DIR_CACHE;
++			NCP_FINFO(dir)->flags &= ~NCPI_DIR_CACHE;
+ 			spin_unlock(&dentry->d_lock);
+ 		}
+ 	} else {
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 692ceda3bc21..a2b1d7ce3e1a 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -618,7 +618,8 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
+ 	 * sole user of this dentry.  Too tricky...  Just unhash for
+ 	 * now.
+ 	 */
+-	d_drop(dentry);
++	if (!err)
++		d_drop(dentry);
+ 	mutex_unlock(&dir->i_mutex);
+ 
+ 	return err;
+@@ -903,6 +904,13 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
+ 	if (!overwrite && new_is_dir && !old_opaque && new_opaque)
+ 		ovl_remove_opaque(newdentry);
+ 
++	/*
++	 * Old dentry now lives in different location. Dentries in
++	 * lowerstack are stale. We cannot drop them here because
++	 * access to them is lockless. This could be only pure upper
++	 * or opaque directory - numlower is zero. Or upper non-dir
++	 * entry - its pureness is tracked by flag opaque.
++	 */
+ 	if (old_opaque != new_opaque) {
+ 		ovl_dentry_set_opaque(old, new_opaque);
+ 		if (!overwrite)
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index b29036aa8d7c..05ac9a95e881 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -65,6 +65,8 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
+ 
+ 		mutex_lock(&upperdentry->d_inode->i_mutex);
+ 		err = notify_change(upperdentry, attr, NULL);
++		if (!err)
++			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
+ 		mutex_unlock(&upperdentry->d_inode->i_mutex);
+ 	}
+ 	ovl_drop_write(dentry);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index f42c9407fbad..000b2ed05c29 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -76,12 +76,14 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
+ 	if (oe->__upperdentry) {
+ 		type = __OVL_PATH_UPPER;
+ 
+-		if (oe->numlower) {
+-			if (S_ISDIR(dentry->d_inode->i_mode))
+-				type |= __OVL_PATH_MERGE;
+-		} else if (!oe->opaque) {
++		/*
++		 * Non-dir dentry can hold lower dentry from previous
++		 * location. Its purity depends only on opaque flag.
++		 */
++		if (oe->numlower && S_ISDIR(dentry->d_inode->i_mode))
++			type |= __OVL_PATH_MERGE;
++		else if (!oe->opaque)
+ 			type |= __OVL_PATH_PURE;
+-		}
+ 	} else {
+ 		if (oe->numlower > 1)
+ 			type |= __OVL_PATH_MERGE;
+@@ -322,6 +324,7 @@ static const struct dentry_operations ovl_dentry_operations = {
+ 
+ static const struct dentry_operations ovl_reval_dentry_operations = {
+ 	.d_release = ovl_dentry_release,
++	.d_select_inode = ovl_d_select_inode,
+ 	.d_revalidate = ovl_dentry_revalidate,
+ 	.d_weak_revalidate = ovl_dentry_weak_revalidate,
+ };
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index 50311703135b..66cdb44616d5 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -287,6 +287,12 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
+ 		goto out;
+ 
+ 	/*
++	 * We don't do userfault handling for the final child pid update.
++	 */
++	if (current->flags & PF_EXITING)
++		goto out;
++
++	/*
+ 	 * Check that we can return VM_FAULT_RETRY.
+ 	 *
+ 	 * NOTE: it should become possible to return VM_FAULT_RETRY
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index 79cfaeef1b0d..fbe47bc700bd 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -320,11 +320,6 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
+ 	struct bvec_iter iter = bio->bi_iter;
+ 	int idx;
+ 
+-	if (!bio_flagged(bio, BIO_CLONED)) {
+-		*bv = bio->bi_io_vec[bio->bi_vcnt - 1];
+-		return;
+-	}
+-
+ 	if (unlikely(!bio_multiple_segments(bio))) {
+ 		*bv = bio_iovec(bio);
+ 		return;
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index 03c7efb60c91..27e32b2b602f 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -148,9 +148,6 @@ extern void syscall_unregfunc(void);
+ 		void *it_func;						\
+ 		void *__data;						\
+ 									\
+-		if (!cpu_online(raw_smp_processor_id()))		\
+-			return;						\
+-									\
+ 		if (!(cond))						\
+ 			return;						\
+ 		prercu;							\
+@@ -357,15 +354,19 @@ extern void syscall_unregfunc(void);
+  * "void *__data, proto" as the callback prototype.
+  */
+ #define DECLARE_TRACE_NOARGS(name)					\
+-		__DECLARE_TRACE(name, void, , 1, void *__data, __data)
++	__DECLARE_TRACE(name, void, ,					\
++			cpu_online(raw_smp_processor_id()),		\
++			void *__data, __data)
+ 
+ #define DECLARE_TRACE(name, proto, args)				\
+-		__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1,	\
+-				PARAMS(void *__data, proto),		\
+-				PARAMS(__data, args))
++	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
++			cpu_online(raw_smp_processor_id()),		\
++			PARAMS(void *__data, proto),			\
++			PARAMS(__data, args))
+ 
+ #define DECLARE_TRACE_CONDITION(name, proto, args, cond)		\
+-	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
++	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
++			cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
+ 			PARAMS(void *__data, proto),			\
+ 			PARAMS(__data, args))
+ 
+diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
+index 8f81bbbc38fc..e0f4109e64c6 100644
+--- a/include/net/iw_handler.h
++++ b/include/net/iw_handler.h
+@@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
+ /* Send a single event to user space */
+ void wireless_send_event(struct net_device *dev, unsigned int cmd,
+ 			 union iwreq_data *wrqu, const char *extra);
++#ifdef CONFIG_WEXT_CORE
++/* flush all previous wext events - if work is done from netdev notifiers */
++void wireless_nlevent_flush(void);
++#else
++static inline void wireless_nlevent_flush(void) {}
++#endif
+ 
+ /* We may need a function to send a stream of events to user space.
+  * More on that later... */
+diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
+index 10ad4ac1fa0b..367784be5df2 100644
+--- a/net/mac80211/agg-rx.c
++++ b/net/mac80211/agg-rx.c
+@@ -291,7 +291,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+ 	}
+ 
+ 	/* prepare A-MPDU MLME for Rx aggregation */
+-	tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
++	tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
+ 	if (!tid_agg_rx)
+ 		goto end;
+ 
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 5322b4c71630..6837a46ca4a2 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -92,7 +92,7 @@ struct ieee80211_fragment_entry {
+ 	u16 extra_len;
+ 	u16 last_frag;
+ 	u8 rx_queue;
+-	bool ccmp; /* Whether fragments were encrypted with CCMP */
++	bool check_sequential_pn; /* needed for CCMP/GCMP */
+ 	u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
+ };
+ 
+diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
+index 3ece7d1034c8..b54f398cda5d 100644
+--- a/net/mac80211/rc80211_minstrel.c
++++ b/net/mac80211/rc80211_minstrel.c
+@@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
+ 	 * computing cur_tp
+ 	 */
+ 	tmp_mrs = &mi->r[idx].stats;
+-	tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma);
++	tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
+ 	tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
+ 
+ 	return tmp_cur_tp;
+diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
+index 3928dbd24e25..239ed6e92b89 100644
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -691,7 +691,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
+ 	if (likely(sta->ampdu_mlme.tid_tx[tid]))
+ 		return;
+ 
+-	ieee80211_start_tx_ba_session(pubsta, tid, 5000);
++	ieee80211_start_tx_ba_session(pubsta, tid, 0);
+ }
+ 
+ static void
+@@ -871,7 +871,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+ 	 *  - if station is in dynamic SMPS (and streams > 1)
+ 	 *  - for fallback rates, to increase chances of getting through
+ 	 */
+-	if (offset > 0 &&
++	if (offset > 0 ||
+ 	    (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
+ 	     group->streams > 1)) {
+ 		ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
+@@ -1334,7 +1334,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
+ 	prob = mi->groups[i].rates[j].prob_ewma;
+ 
+ 	/* convert tp_avg from pkt per second in kbps */
+-	tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024;
++	tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
++	tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
+ 
+ 	return tp_avg;
+ }
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 82af407fea7a..4cbf36cae806 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1754,7 +1754,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
+ 	entry->seq = seq;
+ 	entry->rx_queue = rx_queue;
+ 	entry->last_frag = frag;
+-	entry->ccmp = 0;
++	entry->check_sequential_pn = false;
+ 	entry->extra_len = 0;
+ 
+ 	return entry;
+@@ -1850,15 +1850,27 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 						 rx->seqno_idx, &(rx->skb));
+ 		if (rx->key &&
+ 		    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
+-		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
++		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
++		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
++		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
+ 		    ieee80211_has_protected(fc)) {
+ 			int queue = rx->security_idx;
+-			/* Store CCMP PN so that we can verify that the next
+-			 * fragment has a sequential PN value. */
+-			entry->ccmp = 1;
++
++			/* Store CCMP/GCMP PN so that we can verify that the
++			 * next fragment has a sequential PN value.
++			 */
++			entry->check_sequential_pn = true;
+ 			memcpy(entry->last_pn,
+ 			       rx->key->u.ccmp.rx_pn[queue],
+ 			       IEEE80211_CCMP_PN_LEN);
++			BUILD_BUG_ON(offsetof(struct ieee80211_key,
++					      u.ccmp.rx_pn) !=
++				     offsetof(struct ieee80211_key,
++					      u.gcmp.rx_pn));
++			BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
++				     sizeof(rx->key->u.gcmp.rx_pn[queue]));
++			BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
++				     IEEE80211_GCMP_PN_LEN);
+ 		}
+ 		return RX_QUEUED;
+ 	}
+@@ -1873,15 +1885,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 		return RX_DROP_MONITOR;
+ 	}
+ 
+-	/* Verify that MPDUs within one MSDU have sequential PN values.
+-	 * (IEEE 802.11i, 8.3.3.4.5) */
+-	if (entry->ccmp) {
++	/* "The receiver shall discard MSDUs and MMPDUs whose constituent
++	 *  MPDU PN values are not incrementing in steps of 1."
++	 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
++	 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
++	 */
++	if (entry->check_sequential_pn) {
+ 		int i;
+ 		u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
+ 		int queue;
++
+ 		if (!rx->key ||
+ 		    (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
+-		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
++		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
++		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
++		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
+ 			return RX_DROP_UNUSABLE;
+ 		memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
+ 		for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
+@@ -3367,6 +3385,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
+ 				return false;
+ 			/* ignore action frames to TDLS-peers */
+ 			if (ieee80211_is_action(hdr->frame_control) &&
++			    !is_broadcast_ether_addr(bssid) &&
+ 			    !ether_addr_equal(bssid, hdr->addr1))
+ 				return false;
+ 		}
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index b0915515640e..8f0bac7e03c4 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1147,6 +1147,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
+ 		return NOTIFY_DONE;
+ 	}
+ 
++	wireless_nlevent_flush();
++
+ 	return NOTIFY_OK;
+ }
+ 
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index c8717c1d082e..b50ee5d622e1 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -342,6 +342,40 @@ static const int compat_event_type_size[] = {
+ 
+ /* IW event code */
+ 
++void wireless_nlevent_flush(void)
++{
++	struct sk_buff *skb;
++	struct net *net;
++
++	ASSERT_RTNL();
++
++	for_each_net(net) {
++		while ((skb = skb_dequeue(&net->wext_nlevents)))
++			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
++				    GFP_KERNEL);
++	}
++}
++EXPORT_SYMBOL_GPL(wireless_nlevent_flush);
++
++static int wext_netdev_notifier_call(struct notifier_block *nb,
++				     unsigned long state, void *ptr)
++{
++	/*
++	 * When a netdev changes state in any way, flush all pending messages
++	 * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
++	 * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
++	 * or similar - all of which could otherwise happen due to delays from
++	 * schedule_work().
++	 */
++	wireless_nlevent_flush();
++
++	return NOTIFY_OK;
++}
++
++static struct notifier_block wext_netdev_notifier = {
++	.notifier_call = wext_netdev_notifier_call,
++};
++
+ static int __net_init wext_pernet_init(struct net *net)
+ {
+ 	skb_queue_head_init(&net->wext_nlevents);
+@@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = {
+ 
+ static int __init wireless_nlevent_init(void)
+ {
+-	return register_pernet_subsys(&wext_pernet_ops);
++	int err = register_pernet_subsys(&wext_pernet_ops);
++
++	if (err)
++		return err;
++
++	return register_netdevice_notifier(&wext_netdev_notifier);
+ }
+ 
+ subsys_initcall(wireless_nlevent_init);
+@@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init);
+ /* Process events generated by the wireless layer or the driver. */
+ static void wireless_nlevent_process(struct work_struct *work)
+ {
+-	struct sk_buff *skb;
+-	struct net *net;
+-
+ 	rtnl_lock();
+-
+-	for_each_net(net) {
+-		while ((skb = skb_dequeue(&net->wext_nlevents)))
+-			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+-				    GFP_KERNEL);
+-	}
+-
++	wireless_nlevent_flush();
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
+index 198580d245e0..1659b409ef10 100755
+--- a/scripts/ld-version.sh
++++ b/scripts/ld-version.sh
+@@ -1,7 +1,7 @@
+ #!/usr/bin/awk -f
+ # extract linker version number from stdin and turn into single number
+ 	{
+-	gsub(".*)", "");
++	gsub(".*\\)", "");
+ 	split($1,a, ".");
+ 	print a[1]*10000000 + a[2]*100000 + a[3]*10000 + a[4]*100 + a[5];
+ 	exit
+diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
+index c799cca5abeb..6b864c0fc2b6 100644
+--- a/sound/soc/codecs/wm8958-dsp2.c
++++ b/sound/soc/codecs/wm8958-dsp2.c
+@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 2ccbb322df77..a18aecb49935 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -362,7 +362,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol,
+ 	struct wm8994 *control = wm8994->wm8994;
+ 	struct wm8994_pdata *pdata = &control->pdata;
+ 	int drc = wm8994_get_drc(kcontrol->id.name);
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 
+ 	if (drc < 0)
+ 		return drc;
+@@ -469,7 +469,7 @@ static int wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
+ 	struct wm8994 *control = wm8994->wm8994;
+ 	struct wm8994_pdata *pdata = &control->pdata;
+ 	int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 
+ 	if (block < 0)
+ 		return block;
+diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
+index ea4ab374a223..7dbf899b2af2 100644
+--- a/sound/soc/samsung/i2s.c
++++ b/sound/soc/samsung/i2s.c
+@@ -480,10 +480,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
+ 	unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off;
+ 	unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
+ 	u32 mod, mask, val = 0;
++	unsigned long flags;
+ 
+-	spin_lock(i2s->lock);
++	spin_lock_irqsave(i2s->lock, flags);
+ 	mod = readl(i2s->addr + I2SMOD);
+-	spin_unlock(i2s->lock);
++	spin_unlock_irqrestore(i2s->lock, flags);
+ 
+ 	switch (clk_id) {
+ 	case SAMSUNG_I2S_OPCLK:
+@@ -574,11 +575,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
+ 		return -EINVAL;
+ 	}
+ 
+-	spin_lock(i2s->lock);
++	spin_lock_irqsave(i2s->lock, flags);
+ 	mod = readl(i2s->addr + I2SMOD);
+ 	mod = (mod & ~mask) | val;
+ 	writel(mod, i2s->addr + I2SMOD);
+-	spin_unlock(i2s->lock);
++	spin_unlock_irqrestore(i2s->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -589,6 +590,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
+ 	struct i2s_dai *i2s = to_info(dai);
+ 	int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
+ 	u32 mod, tmp = 0;
++	unsigned long flags;
+ 
+ 	lrp_shift = i2s->variant_regs->lrp_off;
+ 	sdf_shift = i2s->variant_regs->sdf_off;
+@@ -648,7 +650,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
+ 		return -EINVAL;
+ 	}
+ 
+-	spin_lock(i2s->lock);
++	spin_lock_irqsave(i2s->lock, flags);
+ 	mod = readl(i2s->addr + I2SMOD);
+ 	/*
+ 	 * Don't change the I2S mode if any controller is active on this
+@@ -656,7 +658,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
+ 	 */
+ 	if (any_active(i2s) &&
+ 		((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
+-		spin_unlock(i2s->lock);
++		spin_unlock_irqrestore(i2s->lock, flags);
+ 		dev_err(&i2s->pdev->dev,
+ 				"%s:%d Other DAI busy\n", __func__, __LINE__);
+ 		return -EAGAIN;
+@@ -665,7 +667,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
+ 	mod &= ~(sdf_mask | lrp_rlow | mod_slave);
+ 	mod |= tmp;
+ 	writel(mod, i2s->addr + I2SMOD);
+-	spin_unlock(i2s->lock);
++	spin_unlock_irqrestore(i2s->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -675,6 +677,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
+ {
+ 	struct i2s_dai *i2s = to_info(dai);
+ 	u32 mod, mask = 0, val = 0;
++	unsigned long flags;
+ 
+ 	if (!is_secondary(i2s))
+ 		mask |= (MOD_DC2_EN | MOD_DC1_EN);
+@@ -743,11 +746,11 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 
+-	spin_lock(i2s->lock);
++	spin_lock_irqsave(i2s->lock, flags);
+ 	mod = readl(i2s->addr + I2SMOD);
+ 	mod = (mod & ~mask) | val;
+ 	writel(mod, i2s->addr + I2SMOD);
+-	spin_unlock(i2s->lock);
++	spin_unlock_irqrestore(i2s->lock, flags);
+ 
+ 	samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
+ 
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 7d009428934a..416514fe9e63 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3568,7 +3568,7 @@ static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
+ 
+-	ucontrol->value.integer.value[0] = w->params_select;
++	ucontrol->value.enumerated.item[0] = w->params_select;
+ 
+ 	return 0;
+ }
+@@ -3582,13 +3582,13 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
+ 	if (w->power)
+ 		return -EBUSY;
+ 
+-	if (ucontrol->value.integer.value[0] == w->params_select)
++	if (ucontrol->value.enumerated.item[0] == w->params_select)
+ 		return 0;
+ 
+-	if (ucontrol->value.integer.value[0] >= w->num_params)
++	if (ucontrol->value.enumerated.item[0] >= w->num_params)
+ 		return -EINVAL;
+ 
+-	w->params_select = ucontrol->value.integer.value[0];
++	w->params_select = ucontrol->value.enumerated.item[0];
+ 
+ 	return 0;
+ }
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 484079efea5b..7338e30421d8 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1961,6 +1961,9 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
+ 	else
+ 		val *= halt_poll_ns_grow;
+ 
++	if (val > halt_poll_ns)
++		val = halt_poll_ns;
++
+ 	vcpu->halt_poll_ns = val;
+ 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
+ }

