public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] proj/hardened-patchset:master commit in: 4.7.1/, 4.7.2/
@ 2016-08-22 10:17 Anthony G. Basile
  0 siblings, 0 replies; only message in thread
From: Anthony G. Basile @ 2016-08-22 10:17 UTC (permalink / raw
  To: gentoo-commits

commit:     24d63fa83065992427eade5014909b9f40767798
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Mon Aug 22 10:16:29 2016 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Mon Aug 22 10:16:29 2016 +0000
URL:        https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=24d63fa8

grsecurity-3.1-4.7.2-201608211829

 {4.7.1 => 4.7.2}/0000_README                       |    6 +-
 {4.7.1 => 4.7.2}/1000_linux-4.7.1.patch            |    0
 4.7.2/1001_linux-4.7.2.patch                       | 7668 ++++++++++++++++++++
 .../4420_grsecurity-3.1-4.7.2-201608211829.patch   |  344 +-
 {4.7.1 => 4.7.2}/4425_grsec_remove_EI_PAX.patch    |    0
 {4.7.1 => 4.7.2}/4427_force_XATTR_PAX_tmpfs.patch  |    0
 .../4430_grsec-remove-localversion-grsec.patch     |    0
 {4.7.1 => 4.7.2}/4435_grsec-mute-warnings.patch    |    0
 .../4440_grsec-remove-protected-paths.patch        |    0
 .../4450_grsec-kconfig-default-gids.patch          |    0
 .../4465_selinux-avc_audit-log-curr_ip.patch       |    0
 {4.7.1 => 4.7.2}/4470_disable-compat_vdso.patch    |    0
 {4.7.1 => 4.7.2}/4475_emutramp_default_on.patch    |    0
 13 files changed, 7841 insertions(+), 177 deletions(-)

diff --git a/4.7.1/0000_README b/4.7.2/0000_README
similarity index 92%
rename from 4.7.1/0000_README
rename to 4.7.2/0000_README
index a9a1b4e..0fbc43d 100644
--- a/4.7.1/0000_README
+++ b/4.7.2/0000_README
@@ -6,7 +6,11 @@ Patch:	1000_linux-4.7.1.patch
 From:	http://www.kernel.org
 Desc:	Linux 4.7.1
 
-Patch:	4420_grsecurity-3.1-4.7.1-201608161813.patch
+Patch:	1001_linux-4.7.2.patch
+From:	http://www.kernel.org
+Desc:	Linux 4.7.2
+
+Patch:	4420_grsecurity-3.1-4.7.2-201608211829.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/4.7.1/1000_linux-4.7.1.patch b/4.7.2/1000_linux-4.7.1.patch
similarity index 100%
rename from 4.7.1/1000_linux-4.7.1.patch
rename to 4.7.2/1000_linux-4.7.1.patch

diff --git a/4.7.2/1001_linux-4.7.2.patch b/4.7.2/1001_linux-4.7.2.patch
new file mode 100644
index 0000000..d0ef798
--- /dev/null
+++ b/4.7.2/1001_linux-4.7.2.patch
@@ -0,0 +1,7668 @@
+diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
+index 696d5ca..f0e3361 100644
+--- a/Documentation/module-signing.txt
++++ b/Documentation/module-signing.txt
+@@ -271,3 +271,9 @@ Since the private key is used to sign modules, viruses and malware could use
+ the private key to sign modules and compromise the operating system.  The
+ private key must be either destroyed or moved to a secure location and not kept
+ in the root node of the kernel source tree.
++
++If you use the same private key to sign modules for multiple kernel
++configurations, you must ensure that the module version information is
++sufficient to prevent loading a module into a different kernel.  Either
++set CONFIG_MODVERSIONS=y or ensure that each configuration has a different
++kernel release string by changing EXTRAVERSION or CONFIG_LOCALVERSION.
+diff --git a/Makefile b/Makefile
+index 84335c0..bb98f1c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 7
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+ 
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 858f98e..0f92d97 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -110,7 +110,7 @@
+ #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
+ 
+ /* Set of bits not changed in pte_modify */
+-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
+ 
+ /* More Abbrevaited helpers */
+ #define PAGE_U_NONE     __pgprot(___DEF)
+diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
+index 73d7e4c..ab74b5d 100644
+--- a/arch/arc/mm/dma.c
++++ b/arch/arc/mm/dma.c
+@@ -92,7 +92,8 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
+ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
+ 		dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+-	struct page *page = virt_to_page(dma_handle);
++	phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
++	struct page *page = virt_to_page(paddr);
+ 	int is_non_coh = 1;
+ 
+ 	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
+diff --git a/arch/arm/boot/dts/arm-realview-pbx-a9.dts b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
+index db808f9..90d00b4 100644
+--- a/arch/arm/boot/dts/arm-realview-pbx-a9.dts
++++ b/arch/arm/boot/dts/arm-realview-pbx-a9.dts
+@@ -70,13 +70,12 @@
+ 		 * associativity as these may be erroneously set
+ 		 * up by boot loader(s).
+ 		 */
+-		cache-size = <1048576>; // 1MB
+-		cache-sets = <4096>;
++		cache-size = <131072>; // 128KB
++		cache-sets = <512>;
+ 		cache-line-size = <32>;
+ 		arm,parity-disable;
+-		arm,tag-latency = <1>;
+-		arm,data-latency = <1 1>;
+-		arm,dirty-latency = <1>;
++		arm,tag-latency = <1 1 1>;
++		arm,data-latency = <1 1 1>;
+ 	};
+ 
+ 	scu: scu@1f000000 {
+diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts
+index c92a1ae..fa70b8f 100644
+--- a/arch/arm/boot/dts/sun4i-a10-a1000.dts
++++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts
+@@ -84,6 +84,7 @@
+ 		regulator-name = "emac-3v3";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
++		startup-delay-us = <20000>;
+ 		enable-active-high;
+ 		gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>;
+ 	};
+diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
+index 2b17c51..6de83a6 100644
+--- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts
++++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
+@@ -66,6 +66,7 @@
+ 		regulator-name = "emac-3v3";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
++		startup-delay-us = <20000>;
+ 		enable-active-high;
+ 		gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>;
+ 	};
+diff --git a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
+index 7afc7a6..e28f080 100644
+--- a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
++++ b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts
+@@ -80,6 +80,7 @@
+ 		regulator-name = "emac-3v3";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
++		startup-delay-us = <20000>;
+ 		enable-active-high;
+ 		gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>;   /* PH19 */
+ 	};
+diff --git a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
+index 9fea918..39731a7 100644
+--- a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
++++ b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts
+@@ -79,6 +79,7 @@
+ 		regulator-name = "emac-3v3";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
++		startup-delay-us = <20000>;
+ 		enable-active-high;
+ 		gpio = <&pio 0 2 GPIO_ACTIVE_HIGH>;
+ 	};
+diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+index 941f362..f4d8125 100644
+--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
++++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+@@ -1386,7 +1386,7 @@
+ 	 *   Pin 41: BR_UART1_TXD
+ 	 *   Pin 44: BR_UART1_RXD
+ 	 */
+-	serial@70006000 {
++	serial@0,70006000 {
+ 		compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
+ 		status = "okay";
+ 	};
+@@ -1398,7 +1398,7 @@
+ 	 *   Pin 71: UART2_CTS_L
+ 	 *   Pin 74: UART2_RTS_L
+ 	 */
+-	serial@70006040 {
++	serial@0,70006040 {
+ 		compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
+ 		status = "okay";
+ 	};
+diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
+index b6e54ee..ca39c04 100644
+--- a/arch/arm/configs/aspeed_g4_defconfig
++++ b/arch/arm/configs/aspeed_g4_defconfig
+@@ -58,7 +58,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
+ # CONFIG_IOMMU_SUPPORT is not set
+ CONFIG_FIRMWARE_MEMMAP=y
+ CONFIG_FANOTIFY=y
+-CONFIG_PRINTK_TIME=1
++CONFIG_PRINTK_TIME=y
+ CONFIG_DYNAMIC_DEBUG=y
+ CONFIG_STRIP_ASM_SYMS=y
+ CONFIG_PAGE_POISONING=y
+diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
+index 8926051..4f366b0 100644
+--- a/arch/arm/configs/aspeed_g5_defconfig
++++ b/arch/arm/configs/aspeed_g5_defconfig
+@@ -59,7 +59,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
+ # CONFIG_IOMMU_SUPPORT is not set
+ CONFIG_FIRMWARE_MEMMAP=y
+ CONFIG_FANOTIFY=y
+-CONFIG_PRINTK_TIME=1
++CONFIG_PRINTK_TIME=y
+ CONFIG_DYNAMIC_DEBUG=y
+ CONFIG_STRIP_ASM_SYMS=y
+ CONFIG_PAGE_POISONING=y
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index ff7ed56..d2485c7 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -49,6 +49,7 @@ struct arm_dma_alloc_args {
+ 	pgprot_t prot;
+ 	const void *caller;
+ 	bool want_vaddr;
++	int coherent_flag;
+ };
+ 
+ struct arm_dma_free_args {
+@@ -59,6 +60,9 @@ struct arm_dma_free_args {
+ 	bool want_vaddr;
+ };
+ 
++#define NORMAL	    0
++#define COHERENT    1
++
+ struct arm_dma_allocator {
+ 	void *(*alloc)(struct arm_dma_alloc_args *args,
+ 		       struct page **ret_page);
+@@ -272,7 +276,7 @@ static u64 get_coherent_dma_mask(struct device *dev)
+ 	return mask;
+ }
+ 
+-static void __dma_clear_buffer(struct page *page, size_t size)
++static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
+ {
+ 	/*
+ 	 * Ensure that the allocated pages are zeroed, and that any data
+@@ -284,17 +288,21 @@ static void __dma_clear_buffer(struct page *page, size_t size)
+ 		while (size > 0) {
+ 			void *ptr = kmap_atomic(page);
+ 			memset(ptr, 0, PAGE_SIZE);
+-			dmac_flush_range(ptr, ptr + PAGE_SIZE);
++			if (coherent_flag != COHERENT)
++				dmac_flush_range(ptr, ptr + PAGE_SIZE);
+ 			kunmap_atomic(ptr);
+ 			page++;
+ 			size -= PAGE_SIZE;
+ 		}
+-		outer_flush_range(base, end);
++		if (coherent_flag != COHERENT)
++			outer_flush_range(base, end);
+ 	} else {
+ 		void *ptr = page_address(page);
+ 		memset(ptr, 0, size);
+-		dmac_flush_range(ptr, ptr + size);
+-		outer_flush_range(__pa(ptr), __pa(ptr) + size);
++		if (coherent_flag != COHERENT) {
++			dmac_flush_range(ptr, ptr + size);
++			outer_flush_range(__pa(ptr), __pa(ptr) + size);
++		}
+ 	}
+ }
+ 
+@@ -302,7 +310,8 @@ static void __dma_clear_buffer(struct page *page, size_t size)
+  * Allocate a DMA buffer for 'dev' of size 'size' using the
+  * specified gfp mask.  Note that 'size' must be page aligned.
+  */
+-static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
++static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
++				       gfp_t gfp, int coherent_flag)
+ {
+ 	unsigned long order = get_order(size);
+ 	struct page *page, *p, *e;
+@@ -318,7 +327,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
+ 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
+ 		__free_page(p);
+ 
+-	__dma_clear_buffer(page, size);
++	__dma_clear_buffer(page, size, coherent_flag);
+ 
+ 	return page;
+ }
+@@ -340,7 +349,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
+ 
+ static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ 				     pgprot_t prot, struct page **ret_page,
+-				     const void *caller, bool want_vaddr);
++				     const void *caller, bool want_vaddr,
++				     int coherent_flag);
+ 
+ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+ 				 pgprot_t prot, struct page **ret_page,
+@@ -405,10 +415,13 @@ static int __init atomic_pool_init(void)
+ 	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+ 	if (!atomic_pool)
+ 		goto out;
+-
++	/*
++	 * The atomic pool is only used for non-coherent allocations
++	 * so we must pass NORMAL for coherent_flag.
++	 */
+ 	if (dev_get_cma_area(NULL))
+ 		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
+-					      &page, atomic_pool_init, true);
++				      &page, atomic_pool_init, true, NORMAL);
+ 	else
+ 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
+ 					   &page, atomic_pool_init, true);
+@@ -522,7 +535,11 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+ {
+ 	struct page *page;
+ 	void *ptr = NULL;
+-	page = __dma_alloc_buffer(dev, size, gfp);
++	/*
++	 * __alloc_remap_buffer is only called when the device is
++	 * non-coherent
++	 */
++	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
+ 	if (!page)
+ 		return NULL;
+ 	if (!want_vaddr)
+@@ -577,7 +594,8 @@ static int __free_from_pool(void *start, size_t size)
+ 
+ static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ 				     pgprot_t prot, struct page **ret_page,
+-				     const void *caller, bool want_vaddr)
++				     const void *caller, bool want_vaddr,
++				     int coherent_flag)
+ {
+ 	unsigned long order = get_order(size);
+ 	size_t count = size >> PAGE_SHIFT;
+@@ -588,7 +606,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ 	if (!page)
+ 		return NULL;
+ 
+-	__dma_clear_buffer(page, size);
++	__dma_clear_buffer(page, size, coherent_flag);
+ 
+ 	if (!want_vaddr)
+ 		goto out;
+@@ -638,7 +656,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+ #define __get_dma_pgprot(attrs, prot)				__pgprot(0)
+ #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
+ #define __alloc_from_pool(size, ret_page)			NULL
+-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
++#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag)	NULL
+ #define __free_from_pool(cpu_addr, size)			do { } while (0)
+ #define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
+ #define __dma_free_remap(cpu_addr, size)			do { } while (0)
+@@ -649,7 +667,8 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+ 				   struct page **ret_page)
+ {
+ 	struct page *page;
+-	page = __dma_alloc_buffer(dev, size, gfp);
++	/* __alloc_simple_buffer is only called when the device is coherent */
++	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
+ 	if (!page)
+ 		return NULL;
+ 
+@@ -679,7 +698,7 @@ static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
+ {
+ 	return __alloc_from_contiguous(args->dev, args->size, args->prot,
+ 				       ret_page, args->caller,
+-				       args->want_vaddr);
++				       args->want_vaddr, args->coherent_flag);
+ }
+ 
+ static void cma_allocator_free(struct arm_dma_free_args *args)
+@@ -746,6 +765,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ 		.prot = prot,
+ 		.caller = caller,
+ 		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
++		.coherent_flag = is_coherent ? COHERENT : NORMAL,
+ 	};
+ 
+ #ifdef CONFIG_DMA_API_DEBUG
+@@ -1253,7 +1273,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
+ static const int iommu_order_array[] = { 9, 8, 4, 0 };
+ 
+ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+-					  gfp_t gfp, struct dma_attrs *attrs)
++					  gfp_t gfp, struct dma_attrs *attrs,
++					  int coherent_flag)
+ {
+ 	struct page **pages;
+ 	int count = size >> PAGE_SHIFT;
+@@ -1277,7 +1298,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+ 		if (!page)
+ 			goto error;
+ 
+-		__dma_clear_buffer(page, size);
++		__dma_clear_buffer(page, size, coherent_flag);
+ 
+ 		for (i = 0; i < count; i++)
+ 			pages[i] = page + i;
+@@ -1327,7 +1348,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+ 				pages[i + j] = pages[i] + j;
+ 		}
+ 
+-		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
++		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
+ 		i += 1 << order;
+ 		count -= 1 << order;
+ 	}
+@@ -1505,7 +1526,8 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ 	 */
+ 	gfp &= ~(__GFP_COMP);
+ 
+-	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
++	/* For now always consider we are in a non-coherent case */
++	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, NORMAL);
+ 	if (!pages)
+ 		return NULL;
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 5a0a691..2038492 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -872,7 +872,7 @@ config RELOCATABLE
+ 
+ config RANDOMIZE_BASE
+ 	bool "Randomize the address of the kernel image"
+-	select ARM64_MODULE_PLTS
++	select ARM64_MODULE_PLTS if MODULES
+ 	select RELOCATABLE
+ 	help
+ 	  Randomizes the virtual address at which the kernel image is
+diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+index 8b4a7c9..080203e 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+@@ -670,7 +670,7 @@
+ 		#address-cells = <0>;
+ 
+ 		reg = <0x0 0xffb71000 0x0 0x1000>,
+-		      <0x0 0xffb72000 0x0 0x1000>,
++		      <0x0 0xffb72000 0x0 0x2000>,
+ 		      <0x0 0xffb74000 0x0 0x2000>,
+ 		      <0x0 0xffb76000 0x0 0x2000>;
+ 		interrupts = <GIC_PPI 9
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index 4fbf3c5..0800d23 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -151,7 +151,6 @@ static int debug_monitors_init(void)
+ 	/* Clear the OS lock. */
+ 	on_each_cpu(clear_os_lock, NULL, 1);
+ 	isb();
+-	local_dbg_enable();
+ 
+ 	/* Register hotplug handler. */
+ 	__register_cpu_notifier(&os_lock_nb);
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index 21ab5df..65d81f9 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -35,6 +35,7 @@
+ #include <asm/sections.h>
+ #include <asm/smp.h>
+ #include <asm/suspend.h>
++#include <asm/sysreg.h>
+ #include <asm/virt.h>
+ 
+ /*
+@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
+ 	set_pte(pte, __pte(virt_to_phys((void *)dst) |
+ 			 pgprot_val(PAGE_KERNEL_EXEC)));
+ 
+-	/* Load our new page tables */
+-	asm volatile("msr	ttbr0_el1, %0;"
+-		     "isb;"
+-		     "tlbi	vmalle1is;"
+-		     "dsb	ish;"
+-		     "isb" : : "r"(virt_to_phys(pgd)));
++	/*
++	 * Load our new page tables. A strict BBM approach requires that we
++	 * ensure that TLBs are free of any entries that may overlap with the
++	 * global mappings we are about to install.
++	 *
++	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
++	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
++	 * runtime services), while for a userspace-driven test_resume cycle it
++	 * points to userspace page tables (and we must point it at a zero page
++	 * ourselves). Elsewhere we only (un)install the idmap with preemption
++	 * disabled, so T0SZ should be as required regardless.
++	 */
++	cpu_set_reserved_ttbr0();
++	local_flush_tlb_all();
++	write_sysreg(virt_to_phys(pgd), ttbr0_el1);
++	isb();
+ 
+ 	*phys_dst_addr = virt_to_phys((void *)dst);
+ 
+@@ -394,6 +405,38 @@ int swsusp_arch_resume(void)
+ 					  void *, phys_addr_t, phys_addr_t);
+ 
+ 	/*
++	 * Restoring the memory image will overwrite the ttbr1 page tables.
++	 * Create a second copy of just the linear map, and use this when
++	 * restoring.
++	 */
++	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
++	if (!tmp_pg_dir) {
++		pr_err("Failed to allocate memory for temporary page tables.");
++		rc = -ENOMEM;
++		goto out;
++	}
++	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
++	if (rc)
++		goto out;
++
++	/*
++	 * Since we only copied the linear map, we need to find restore_pblist's
++	 * linear map address.
++	 */
++	lm_restore_pblist = LMADDR(restore_pblist);
++
++	/*
++	 * We need a zero page that is zero before & after resume in order to
++	 * to break before make on the ttbr1 page tables.
++	 */
++	zero_page = (void *)get_safe_page(GFP_ATOMIC);
++	if (!zero_page) {
++		pr_err("Failed to allocate zero page.");
++		rc = -ENOMEM;
++		goto out;
++	}
++
++	/*
+ 	 * Locate the exit code in the bottom-but-one page, so that *NULL
+ 	 * still has disastrous affects.
+ 	 */
+@@ -419,27 +462,6 @@ int swsusp_arch_resume(void)
+ 	__flush_dcache_area(hibernate_exit, exit_size);
+ 
+ 	/*
+-	 * Restoring the memory image will overwrite the ttbr1 page tables.
+-	 * Create a second copy of just the linear map, and use this when
+-	 * restoring.
+-	 */
+-	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+-	if (!tmp_pg_dir) {
+-		pr_err("Failed to allocate memory for temporary page tables.");
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+-	if (rc)
+-		goto out;
+-
+-	/*
+-	 * Since we only copied the linear map, we need to find restore_pblist's
+-	 * linear map address.
+-	 */
+-	lm_restore_pblist = LMADDR(restore_pblist);
+-
+-	/*
+ 	 * KASLR will cause the el2 vectors to be in a different location in
+ 	 * the resumed kernel. Load hibernate's temporary copy into el2.
+ 	 *
+@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
+ 		__hyp_set_vectors(el2_vectors);
+ 	}
+ 
+-	/*
+-	 * We need a zero page that is zero before & after resume in order to
+-	 * to break before make on the ttbr1 page tables.
+-	 */
+-	zero_page = (void *)get_safe_page(GFP_ATOMIC);
+-
+ 	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
+ 		       resume_hdr.reenter_kernel, lm_restore_pblist,
+ 		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 62ff3c0..490db85 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -267,7 +267,6 @@ asmlinkage void secondary_start_kernel(void)
+ 	set_cpu_online(cpu, true);
+ 	complete(&cpu_running);
+ 
+-	local_dbg_enable();
+ 	local_irq_enable();
+ 	local_async_enable();
+ 
+@@ -437,9 +436,9 @@ void __init smp_cpus_done(unsigned int max_cpus)
+ 
+ void __init smp_prepare_boot_cpu(void)
+ {
++	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+ 	cpuinfo_store_boot_cpu();
+ 	save_boot_cpu_run_el();
+-	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+ }
+ 
+ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
+@@ -694,6 +693,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ 	smp_store_cpu_info(smp_processor_id());
+ 
+ 	/*
++	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
++	 * secondary CPUs present.
++	 */
++	if (max_cpus == 0)
++		return;
++
++	/*
+ 	 * Initialise the present map (which describes the set of CPUs
+ 	 * actually populated at the present time) and release the
+ 	 * secondaries from the bootloader.
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index 435e820..e564d45 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -181,9 +181,9 @@ SECTIONS
+ 		*(.hash)
+ 	}
+ 
+-	__rela_offset	= ADDR(.rela) - KIMAGE_VADDR;
++	__rela_offset	= ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
+ 	__rela_size	= SIZEOF(.rela);
+-	__dynsym_offset	= ADDR(.dynsym) - KIMAGE_VADDR;
++	__dynsym_offset	= ABSOLUTE(ADDR(.dynsym) - KIMAGE_VADDR);
+ 
+ 	. = ALIGN(SEGMENT_ALIGN);
+ 	__init_end = .;
+diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
+index 0f7c40e..9341376 100644
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
+ /*
+  * Non-VHE: Both host and guest must save everything.
+  *
+- * VHE: Host must save tpidr*_el[01], actlr_el1, sp0, pc, pstate, and
+- * guest must save everything.
++ * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
++ * pstate, and guest must save everything.
+  */
+ 
+ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
+@@ -37,6 +37,7 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
+ 	ctxt->sys_regs[TPIDR_EL0]	= read_sysreg(tpidr_el0);
+ 	ctxt->sys_regs[TPIDRRO_EL0]	= read_sysreg(tpidrro_el0);
+ 	ctxt->sys_regs[TPIDR_EL1]	= read_sysreg(tpidr_el1);
++	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
+ 	ctxt->gp_regs.regs.sp		= read_sysreg(sp_el0);
+ 	ctxt->gp_regs.regs.pc		= read_sysreg_el2(elr);
+ 	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(spsr);
+@@ -61,7 +62,6 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+ 	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg_el1(amair);
+ 	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg_el1(cntkctl);
+ 	ctxt->sys_regs[PAR_EL1]		= read_sysreg(par_el1);
+-	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
+ 
+ 	ctxt->gp_regs.sp_el1		= read_sysreg(sp_el1);
+ 	ctxt->gp_regs.elr_el1		= read_sysreg_el1(elr);
+@@ -90,6 +90,7 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
+ 	write_sysreg(ctxt->sys_regs[TPIDR_EL0],	  tpidr_el0);
+ 	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+ 	write_sysreg(ctxt->sys_regs[TPIDR_EL1],	  tpidr_el1);
++	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	  mdscr_el1);
+ 	write_sysreg(ctxt->gp_regs.regs.sp,	  sp_el0);
+ 	write_sysreg_el2(ctxt->gp_regs.regs.pc,	  elr);
+ 	write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+@@ -114,7 +115,6 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+ 	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],	amair);
+ 	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], 	cntkctl);
+ 	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
+-	write_sysreg(ctxt->sys_regs[MDSCR_EL1],		mdscr_el1);
+ 
+ 	write_sysreg(ctxt->gp_regs.sp_el1,		sp_el1);
+ 	write_sysreg_el1(ctxt->gp_regs.elr_el1,		elr);
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index e9e0e6d..898c0e6 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -132,16 +132,14 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
+ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
+ {
+ 	unsigned long cpsr = *vcpu_cpsr(vcpu);
+-	bool is_aarch32;
++	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
+ 	u32 esr = 0;
+ 
+-	is_aarch32 = vcpu_mode_is_32bit(vcpu);
+-
+-	*vcpu_spsr(vcpu) = cpsr;
+ 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
+-
+ 	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
++
+ 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
++	*vcpu_spsr(vcpu) = cpsr;
+ 
+ 	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
+ 
+@@ -172,11 +170,11 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
+ 	unsigned long cpsr = *vcpu_cpsr(vcpu);
+ 	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
+ 
+-	*vcpu_spsr(vcpu) = cpsr;
+ 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
+-
+ 	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
++
+ 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
++	*vcpu_spsr(vcpu) = cpsr;
+ 
+ 	/*
+ 	 * Build an unknown exception, depending on the instruction
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 0f85a46..3e90a2c 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -748,9 +748,9 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+ 	/*
+ 	 * Check whether the physical FDT address is set and meets the minimum
+ 	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+-	 * at least 8 bytes so that we can always access the size field of the
+-	 * FDT header after mapping the first chunk, double check here if that
+-	 * is indeed the case.
++	 * at least 8 bytes so that we can always access the magic and size
++	 * fields of the FDT header after mapping the first chunk, double check
++	 * here if that is indeed the case.
+ 	 */
+ 	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+ 	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+@@ -778,7 +778,7 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+ 	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+ 			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
+ 
+-	if (fdt_check_header(dt_virt) != 0)
++	if (fdt_magic(dt_virt) != FDT_MAGIC)
+ 		return NULL;
+ 
+ 	*size = fdt_totalsize(dt_virt);
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index c431787..5bb61de 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -180,6 +180,8 @@ ENTRY(__cpu_setup)
+ 	msr	cpacr_el1, x0			// Enable FP/ASIMD
+ 	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
+ 	msr	mdscr_el1, x0			// access to the DCC from EL0
++	isb					// Unmask debug exceptions now,
++	enable_dbg				// since this is per-cpu
+ 	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+ 	/*
+ 	 * Memory region attributes for LPAE:
+diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
+index 0154e28..2369ad3 100644
+--- a/arch/metag/include/asm/cmpxchg_lnkget.h
++++ b/arch/metag/include/asm/cmpxchg_lnkget.h
+@@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+ 		      "	DCACHE	[%2], %0\n"
+ #endif
+ 		      "2:\n"
+-		      : "=&d" (temp), "=&da" (retval)
++		      : "=&d" (temp), "=&d" (retval)
+ 		      : "da" (m), "bd" (old), "da" (new)
+ 		      : "cc"
+ 		      );
+diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
+index e4c21bb..804d2a2 100644
+--- a/arch/mips/kernel/cevt-r4k.c
++++ b/arch/mips/kernel/cevt-r4k.c
+@@ -276,12 +276,7 @@ int r4k_clockevent_init(void)
+ 				  CLOCK_EVT_FEAT_C3STOP |
+ 				  CLOCK_EVT_FEAT_PERCPU;
+ 
+-	clockevent_set_clock(cd, mips_hpt_frequency);
+-
+-	/* Calculate the min / max delta */
+-	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
+ 	min_delta		= calculate_min_delta();
+-	cd->min_delta_ns	= clockevent_delta2ns(min_delta, cd);
+ 
+ 	cd->rating		= 300;
+ 	cd->irq			= irq;
+@@ -289,7 +284,7 @@ int r4k_clockevent_init(void)
+ 	cd->set_next_event	= mips_next_event;
+ 	cd->event_handler	= mips_event_handler;
+ 
+-	clockevents_register_device(cd);
++	clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);
+ 
+ 	if (cp0_timer_irq_installed)
+ 		return 0;
+diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
+index 1f91056..d76275d 100644
+--- a/arch/mips/kernel/csrc-r4k.c
++++ b/arch/mips/kernel/csrc-r4k.c
+@@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = {
+ 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+ 
+-static u64 notrace r4k_read_sched_clock(void)
++static u64 __maybe_unused notrace r4k_read_sched_clock(void)
+ {
+ 	return read_c0_count();
+ }
+@@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void)
+ 
+ 	clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+ 
++#ifndef CONFIG_CPU_FREQ
+ 	sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
++#endif
+ 
+ 	return 0;
+ }
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index 645c8a1..2b42a74 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -1615,8 +1615,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+ 
+ 	preempt_disable();
+ 	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+-		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
+-			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
++		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
++		    kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
++			kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
++				__func__, va, vcpu, read_c0_entryhi());
++			er = EMULATE_FAIL;
++			preempt_enable();
++			goto done;
++		}
+ 	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+ 		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+ 		int index;
+@@ -1654,14 +1660,19 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+ 								run, vcpu);
+ 				preempt_enable();
+ 				goto dont_update_pc;
+-			} else {
+-				/*
+-				 * We fault an entry from the guest tlb to the
+-				 * shadow host TLB
+-				 */
+-				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+-								     NULL,
+-								     NULL);
++			}
++			/*
++			 * We fault an entry from the guest tlb to the
++			 * shadow host TLB
++			 */
++			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++								 NULL, NULL)) {
++				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++					__func__, va, index, vcpu,
++					read_c0_entryhi());
++				er = EMULATE_FAIL;
++				preempt_enable();
++				goto done;
+ 			}
+ 		}
+ 	} else {
+@@ -2625,8 +2636,13 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+ 			 * OK we have a Guest TLB entry, now inject it into the
+ 			 * shadow host TLB
+ 			 */
+-			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+-							     NULL);
++			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++								 NULL, NULL)) {
++				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++					__func__, va, index, vcpu,
++					read_c0_entryhi());
++				er = EMULATE_FAIL;
++			}
+ 		}
+ 	}
+ 
+diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
+index ed021ae..ad2270f 100644
+--- a/arch/mips/kvm/tlb.c
++++ b/arch/mips/kvm/tlb.c
+@@ -284,7 +284,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ 	}
+ 
+ 	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+-	if (gfn >= kvm->arch.guest_pmap_npages) {
++	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
+ 		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+ 			gfn, badvaddr);
+ 		kvm_mips_dump_host_tlbs();
+@@ -373,26 +373,40 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ 	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ 	struct kvm *kvm = vcpu->kvm;
+ 	kvm_pfn_t pfn0, pfn1;
++	gfn_t gfn0, gfn1;
++	long tlb_lo[2];
+ 	int ret;
+ 
+-	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+-		pfn0 = 0;
+-		pfn1 = 0;
+-	} else {
+-		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+-					   >> PAGE_SHIFT) < 0)
+-			return -1;
+-
+-		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+-					   >> PAGE_SHIFT) < 0)
+-			return -1;
+-
+-		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+-					    >> PAGE_SHIFT];
+-		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+-					    >> PAGE_SHIFT];
++	tlb_lo[0] = tlb->tlb_lo0;
++	tlb_lo[1] = tlb->tlb_lo1;
++
++	/*
++	 * The commpage address must not be mapped to anything else if the guest
++	 * TLB contains entries nearby, or commpage accesses will break.
++	 */
++	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
++			VPN2_MASK & (PAGE_MASK << 1)))
++		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
++
++	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
++	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
++	if (gfn0 >= kvm->arch.guest_pmap_npages ||
++	    gfn1 >= kvm->arch.guest_pmap_npages) {
++		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
++			__func__, gfn0, gfn1, tlb->tlb_hi);
++		kvm_mips_dump_guest_tlbs(vcpu);
++		return -1;
+ 	}
+ 
++	if (kvm_mips_map_page(kvm, gfn0) < 0)
++		return -1;
++
++	if (kvm_mips_map_page(kvm, gfn1) < 0)
++		return -1;
++
++	pfn0 = kvm->arch.guest_pmap[gfn0];
++	pfn1 = kvm->arch.guest_pmap[gfn1];
++
+ 	if (hpa0)
+ 		*hpa0 = pfn0 << PAGE_SHIFT;
+ 
+@@ -401,9 +415,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ 
+ 	/* Get attributes from the Guest TLB */
+ 	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+-		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
++		   (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
+ 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+-		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
++		   (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
+ 
+ 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+ 		  tlb->tlb_lo0, tlb->tlb_lo1);
+@@ -776,10 +790,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+ 				local_irq_restore(flags);
+ 				return KVM_INVALID_INST;
+ 			}
+-			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+-							     &vcpu->arch.
+-							     guest_tlb[index],
+-							     NULL, NULL);
++			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
++						&vcpu->arch.guest_tlb[index],
++						NULL, NULL)) {
++				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
++					__func__, opc, index, vcpu,
++					read_c0_entryhi());
++				kvm_mips_dump_guest_tlbs(vcpu);
++				local_irq_restore(flags);
++				return KVM_INVALID_INST;
++			}
+ 			inst = *(opc);
+ 		}
+ 		local_irq_restore(flags);
+diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
+index 249039a..4788bea 100644
+--- a/arch/mips/loongson64/loongson-3/hpet.c
++++ b/arch/mips/loongson64/loongson-3/hpet.c
+@@ -13,8 +13,8 @@
+ #define SMBUS_PCI_REG64		0x64
+ #define SMBUS_PCI_REGB4		0xb4
+ 
+-#define HPET_MIN_CYCLES		64
+-#define HPET_MIN_PROG_DELTA	(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
++#define HPET_MIN_CYCLES		16
++#define HPET_MIN_PROG_DELTA	(HPET_MIN_CYCLES * 12)
+ 
+ static DEFINE_SPINLOCK(hpet_lock);
+ DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
+@@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt)
+ static int hpet_next_event(unsigned long delta,
+ 		struct clock_event_device *evt)
+ {
+-	unsigned int cnt;
+-	int res;
++	u32 cnt;
++	s32 res;
+ 
+ 	cnt = hpet_read(HPET_COUNTER);
+-	cnt += delta;
++	cnt += (u32) delta;
+ 	hpet_write(HPET_T0_CMP, cnt);
+ 
+-	res = (int)(cnt - hpet_read(HPET_COUNTER));
++	res = (s32)(cnt - hpet_read(HPET_COUNTER));
+ 
+ 	return res < HPET_MIN_CYCLES ? -ETIME : 0;
+ }
+@@ -230,7 +230,7 @@ void __init setup_hpet_timer(void)
+ 
+ 	cd = &per_cpu(hpet_clockevent_device, cpu);
+ 	cd->name = "hpet";
+-	cd->rating = 320;
++	cd->rating = 100;
+ 	cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ 	cd->set_state_shutdown = hpet_set_state_shutdown;
+ 	cd->set_state_periodic = hpet_set_state_periodic;
+diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
+index 9c2220a..45e3b87 100644
+--- a/arch/mips/mm/uasm-mips.c
++++ b/arch/mips/mm/uasm-mips.c
+@@ -65,7 +65,7 @@ static struct insn insn_table[] = {
+ #ifndef CONFIG_CPU_MIPSR6
+ 	{ insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+ #else
+-	{ insn_cache,  M6(cache_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
++	{ insn_cache,  M6(spec3_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
+ #endif
+ 	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ 	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
+index b7019b5..298afcf 100644
+--- a/arch/powerpc/kernel/tm.S
++++ b/arch/powerpc/kernel/tm.S
+@@ -338,8 +338,6 @@ _GLOBAL(__tm_recheckpoint)
+ 	 */
+ 	subi	r7, r7, STACK_FRAME_OVERHEAD
+ 
+-	SET_SCRATCH0(r1)
+-
+ 	mfmsr	r6
+ 	/* R4 = original MSR to indicate whether thread used FP/Vector etc. */
+ 
+@@ -468,6 +466,7 @@ restore_gprs:
+ 	 * until we turn MSR RI back on.
+ 	 */
+ 
++	SET_SCRATCH0(r1)
+ 	ld	r5, -8(r1)
+ 	ld	r1, -16(r1)
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index e571ad2..38e108e 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -655,112 +655,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
+-	b	skip_tm
+-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+-
+-	/* Turn on TM/FP/VSX/VMX so we can restore them. */
+-	mfmsr	r5
+-	li	r6, MSR_TM >> 32
+-	sldi	r6, r6, 32
+-	or	r5, r5, r6
+-	ori	r5, r5, MSR_FP
+-	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
+-	mtmsrd	r5
+-
+-	/*
+-	 * The user may change these outside of a transaction, so they must
+-	 * always be context switched.
+-	 */
+-	ld	r5, VCPU_TFHAR(r4)
+-	ld	r6, VCPU_TFIAR(r4)
+-	ld	r7, VCPU_TEXASR(r4)
+-	mtspr	SPRN_TFHAR, r5
+-	mtspr	SPRN_TFIAR, r6
+-	mtspr	SPRN_TEXASR, r7
+-
+-	ld	r5, VCPU_MSR(r4)
+-	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+-	beq	skip_tm	/* TM not active in guest */
+-
+-	/* Make sure the failure summary is set, otherwise we'll program check
+-	 * when we trechkpt.  It's possible that this might have been not set
+-	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+-	 * host.
+-	 */
+-	oris	r7, r7, (TEXASR_FS)@h
+-	mtspr	SPRN_TEXASR, r7
+-
+-	/*
+-	 * We need to load up the checkpointed state for the guest.
+-	 * We need to do this early as it will blow away any GPRs, VSRs and
+-	 * some SPRs.
+-	 */
+-
+-	mr	r31, r4
+-	addi	r3, r31, VCPU_FPRS_TM
+-	bl	load_fp_state
+-	addi	r3, r31, VCPU_VRS_TM
+-	bl	load_vr_state
+-	mr	r4, r31
+-	lwz	r7, VCPU_VRSAVE_TM(r4)
+-	mtspr	SPRN_VRSAVE, r7
+-
+-	ld	r5, VCPU_LR_TM(r4)
+-	lwz	r6, VCPU_CR_TM(r4)
+-	ld	r7, VCPU_CTR_TM(r4)
+-	ld	r8, VCPU_AMR_TM(r4)
+-	ld	r9, VCPU_TAR_TM(r4)
+-	mtlr	r5
+-	mtcr	r6
+-	mtctr	r7
+-	mtspr	SPRN_AMR, r8
+-	mtspr	SPRN_TAR, r9
+-
+-	/*
+-	 * Load up PPR and DSCR values but don't put them in the actual SPRs
+-	 * till the last moment to avoid running with userspace PPR and DSCR for
+-	 * too long.
+-	 */
+-	ld	r29, VCPU_DSCR_TM(r4)
+-	ld	r30, VCPU_PPR_TM(r4)
+-
+-	std	r2, PACATMSCRATCH(r13) /* Save TOC */
+-
+-	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+-	li	r5, 0
+-	mtmsrd	r5, 1
+-
+-	/* Load GPRs r0-r28 */
+-	reg = 0
+-	.rept	29
+-	ld	reg, VCPU_GPRS_TM(reg)(r31)
+-	reg = reg + 1
+-	.endr
+-
+-	mtspr	SPRN_DSCR, r29
+-	mtspr	SPRN_PPR, r30
+-
+-	/* Load final GPRs */
+-	ld	29, VCPU_GPRS_TM(29)(r31)
+-	ld	30, VCPU_GPRS_TM(30)(r31)
+-	ld	31, VCPU_GPRS_TM(31)(r31)
+-
+-	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
+-	TRECHKPT
+-
+-	/* Now let's get back the state we need. */
+-	HMT_MEDIUM
+-	GET_PACA(r13)
+-	ld	r29, HSTATE_DSCR(r13)
+-	mtspr	SPRN_DSCR, r29
+-	ld	r4, HSTATE_KVM_VCPU(r13)
+-	ld	r1, HSTATE_HOST_R1(r13)
+-	ld	r2, PACATMSCRATCH(r13)
+-
+-	/* Set the MSR RI since we have our registers back. */
+-	li	r5, MSR_RI
+-	mtmsrd	r5, 1
+-skip_tm:
++	bl	kvmppc_restore_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+ 
+ 	/* Load guest PMU registers */
+@@ -841,12 +737,6 @@ BEGIN_FTR_SECTION
+ 	/* Skip next section on POWER7 */
+ 	b	8f
+ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+-	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+-	mfmsr	r8
+-	li	r0, 1
+-	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+-	mtmsrd	r8
+-
+ 	/* Load up POWER8-specific registers */
+ 	ld	r5, VCPU_IAMR(r4)
+ 	lwz	r6, VCPU_PSPB(r4)
+@@ -1436,106 +1326,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
+-	b	2f
+-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+-	/* Turn on TM. */
+-	mfmsr	r8
+-	li	r0, 1
+-	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+-	mtmsrd	r8
+-
+-	ld	r5, VCPU_MSR(r9)
+-	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+-	beq	1f	/* TM not active in guest. */
+-
+-	li	r3, TM_CAUSE_KVM_RESCHED
+-
+-	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+-	li	r5, 0
+-	mtmsrd	r5, 1
+-
+-	/* All GPRs are volatile at this point. */
+-	TRECLAIM(R3)
+-
+-	/* Temporarily store r13 and r9 so we have some regs to play with */
+-	SET_SCRATCH0(r13)
+-	GET_PACA(r13)
+-	std	r9, PACATMSCRATCH(r13)
+-	ld	r9, HSTATE_KVM_VCPU(r13)
+-
+-	/* Get a few more GPRs free. */
+-	std	r29, VCPU_GPRS_TM(29)(r9)
+-	std	r30, VCPU_GPRS_TM(30)(r9)
+-	std	r31, VCPU_GPRS_TM(31)(r9)
+-
+-	/* Save away PPR and DSCR soon so don't run with user values. */
+-	mfspr	r31, SPRN_PPR
+-	HMT_MEDIUM
+-	mfspr	r30, SPRN_DSCR
+-	ld	r29, HSTATE_DSCR(r13)
+-	mtspr	SPRN_DSCR, r29
+-
+-	/* Save all but r9, r13 & r29-r31 */
+-	reg = 0
+-	.rept	29
+-	.if (reg != 9) && (reg != 13)
+-	std	reg, VCPU_GPRS_TM(reg)(r9)
+-	.endif
+-	reg = reg + 1
+-	.endr
+-	/* ... now save r13 */
+-	GET_SCRATCH0(r4)
+-	std	r4, VCPU_GPRS_TM(13)(r9)
+-	/* ... and save r9 */
+-	ld	r4, PACATMSCRATCH(r13)
+-	std	r4, VCPU_GPRS_TM(9)(r9)
+-
+-	/* Reload stack pointer and TOC. */
+-	ld	r1, HSTATE_HOST_R1(r13)
+-	ld	r2, PACATOC(r13)
+-
+-	/* Set MSR RI now we have r1 and r13 back. */
+-	li	r5, MSR_RI
+-	mtmsrd	r5, 1
+-
+-	/* Save away checkpinted SPRs. */
+-	std	r31, VCPU_PPR_TM(r9)
+-	std	r30, VCPU_DSCR_TM(r9)
+-	mflr	r5
+-	mfcr	r6
+-	mfctr	r7
+-	mfspr	r8, SPRN_AMR
+-	mfspr	r10, SPRN_TAR
+-	std	r5, VCPU_LR_TM(r9)
+-	stw	r6, VCPU_CR_TM(r9)
+-	std	r7, VCPU_CTR_TM(r9)
+-	std	r8, VCPU_AMR_TM(r9)
+-	std	r10, VCPU_TAR_TM(r9)
+-
+-	/* Restore r12 as trap number. */
+-	lwz	r12, VCPU_TRAP(r9)
+-
+-	/* Save FP/VSX. */
+-	addi	r3, r9, VCPU_FPRS_TM
+-	bl	store_fp_state
+-	addi	r3, r9, VCPU_VRS_TM
+-	bl	store_vr_state
+-	mfspr	r6, SPRN_VRSAVE
+-	stw	r6, VCPU_VRSAVE_TM(r9)
+-1:
+-	/*
+-	 * We need to save these SPRs after the treclaim so that the software
+-	 * error code is recorded correctly in the TEXASR.  Also the user may
+-	 * change these outside of a transaction, so they must always be
+-	 * context switched.
+-	 */
+-	mfspr	r5, SPRN_TFHAR
+-	mfspr	r6, SPRN_TFIAR
+-	mfspr	r7, SPRN_TEXASR
+-	std	r5, VCPU_TFHAR(r9)
+-	std	r6, VCPU_TFIAR(r9)
+-	std	r7, VCPU_TEXASR(r9)
+-2:
++	bl	kvmppc_save_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+ 
+ 	/* Increment yield count if they have a VPA */
+@@ -2245,6 +2037,13 @@ _GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
+ 	/* save FP state */
+ 	bl	kvmppc_save_fp
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++BEGIN_FTR_SECTION
++	ld	r9, HSTATE_KVM_VCPU(r13)
++	bl	kvmppc_save_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
++#endif
++
+ 	/*
+ 	 * Set DEC to the smaller of DEC and HDEC, so that we wake
+ 	 * no later than the end of our timeslice (HDEC interrupts
+@@ -2321,6 +2120,12 @@ kvm_end_cede:
+ 	bl	kvmhv_accumulate_time
+ #endif
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++BEGIN_FTR_SECTION
++	bl	kvmppc_restore_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
++#endif
++
+ 	/* load up FP state */
+ 	bl	kvmppc_load_fp
+ 
+@@ -2631,6 +2436,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ 	mr	r4,r31
+ 	blr
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++/*
++ * Save transactional state and TM-related registers.
++ * Called with r9 pointing to the vcpu struct.
++ * This can modify all checkpointed registers, but
++ * restores r1, r2 and r9 (vcpu pointer) before exit.
++ */
++kvmppc_save_tm:
++	mflr	r0
++	std	r0, PPC_LR_STKOFF(r1)
++
++	/* Turn on TM. */
++	mfmsr	r8
++	li	r0, 1
++	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
++	mtmsrd	r8
++
++	ld	r5, VCPU_MSR(r9)
++	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++	beq	1f	/* TM not active in guest. */
++
++	std	r1, HSTATE_HOST_R1(r13)
++	li	r3, TM_CAUSE_KVM_RESCHED
++
++	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
++	li	r5, 0
++	mtmsrd	r5, 1
++
++	/* All GPRs are volatile at this point. */
++	TRECLAIM(R3)
++
++	/* Temporarily store r13 and r9 so we have some regs to play with */
++	SET_SCRATCH0(r13)
++	GET_PACA(r13)
++	std	r9, PACATMSCRATCH(r13)
++	ld	r9, HSTATE_KVM_VCPU(r13)
++
++	/* Get a few more GPRs free. */
++	std	r29, VCPU_GPRS_TM(29)(r9)
++	std	r30, VCPU_GPRS_TM(30)(r9)
++	std	r31, VCPU_GPRS_TM(31)(r9)
++
++	/* Save away PPR and DSCR soon so don't run with user values. */
++	mfspr	r31, SPRN_PPR
++	HMT_MEDIUM
++	mfspr	r30, SPRN_DSCR
++	ld	r29, HSTATE_DSCR(r13)
++	mtspr	SPRN_DSCR, r29
++
++	/* Save all but r9, r13 & r29-r31 */
++	reg = 0
++	.rept	29
++	.if (reg != 9) && (reg != 13)
++	std	reg, VCPU_GPRS_TM(reg)(r9)
++	.endif
++	reg = reg + 1
++	.endr
++	/* ... now save r13 */
++	GET_SCRATCH0(r4)
++	std	r4, VCPU_GPRS_TM(13)(r9)
++	/* ... and save r9 */
++	ld	r4, PACATMSCRATCH(r13)
++	std	r4, VCPU_GPRS_TM(9)(r9)
++
++	/* Reload stack pointer and TOC. */
++	ld	r1, HSTATE_HOST_R1(r13)
++	ld	r2, PACATOC(r13)
++
++	/* Set MSR RI now we have r1 and r13 back. */
++	li	r5, MSR_RI
++	mtmsrd	r5, 1
++
++	/* Save away checkpinted SPRs. */
++	std	r31, VCPU_PPR_TM(r9)
++	std	r30, VCPU_DSCR_TM(r9)
++	mflr	r5
++	mfcr	r6
++	mfctr	r7
++	mfspr	r8, SPRN_AMR
++	mfspr	r10, SPRN_TAR
++	std	r5, VCPU_LR_TM(r9)
++	stw	r6, VCPU_CR_TM(r9)
++	std	r7, VCPU_CTR_TM(r9)
++	std	r8, VCPU_AMR_TM(r9)
++	std	r10, VCPU_TAR_TM(r9)
++
++	/* Restore r12 as trap number. */
++	lwz	r12, VCPU_TRAP(r9)
++
++	/* Save FP/VSX. */
++	addi	r3, r9, VCPU_FPRS_TM
++	bl	store_fp_state
++	addi	r3, r9, VCPU_VRS_TM
++	bl	store_vr_state
++	mfspr	r6, SPRN_VRSAVE
++	stw	r6, VCPU_VRSAVE_TM(r9)
++1:
++	/*
++	 * We need to save these SPRs after the treclaim so that the software
++	 * error code is recorded correctly in the TEXASR.  Also the user may
++	 * change these outside of a transaction, so they must always be
++	 * context switched.
++	 */
++	mfspr	r5, SPRN_TFHAR
++	mfspr	r6, SPRN_TFIAR
++	mfspr	r7, SPRN_TEXASR
++	std	r5, VCPU_TFHAR(r9)
++	std	r6, VCPU_TFIAR(r9)
++	std	r7, VCPU_TEXASR(r9)
++
++	ld	r0, PPC_LR_STKOFF(r1)
++	mtlr	r0
++	blr
++
++/*
++ * Restore transactional state and TM-related registers.
++ * Called with r4 pointing to the vcpu struct.
++ * This potentially modifies all checkpointed registers.
++ * It restores r1, r2, r4 from the PACA.
++ */
++kvmppc_restore_tm:
++	mflr	r0
++	std	r0, PPC_LR_STKOFF(r1)
++
++	/* Turn on TM/FP/VSX/VMX so we can restore them. */
++	mfmsr	r5
++	li	r6, MSR_TM >> 32
++	sldi	r6, r6, 32
++	or	r5, r5, r6
++	ori	r5, r5, MSR_FP
++	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
++	mtmsrd	r5
++
++	/*
++	 * The user may change these outside of a transaction, so they must
++	 * always be context switched.
++	 */
++	ld	r5, VCPU_TFHAR(r4)
++	ld	r6, VCPU_TFIAR(r4)
++	ld	r7, VCPU_TEXASR(r4)
++	mtspr	SPRN_TFHAR, r5
++	mtspr	SPRN_TFIAR, r6
++	mtspr	SPRN_TEXASR, r7
++
++	ld	r5, VCPU_MSR(r4)
++	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++	beqlr		/* TM not active in guest */
++	std	r1, HSTATE_HOST_R1(r13)
++
++	/* Make sure the failure summary is set, otherwise we'll program check
++	 * when we trechkpt.  It's possible that this might have been not set
++	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
++	 * host.
++	 */
++	oris	r7, r7, (TEXASR_FS)@h
++	mtspr	SPRN_TEXASR, r7
++
++	/*
++	 * We need to load up the checkpointed state for the guest.
++	 * We need to do this early as it will blow away any GPRs, VSRs and
++	 * some SPRs.
++	 */
++
++	mr	r31, r4
++	addi	r3, r31, VCPU_FPRS_TM
++	bl	load_fp_state
++	addi	r3, r31, VCPU_VRS_TM
++	bl	load_vr_state
++	mr	r4, r31
++	lwz	r7, VCPU_VRSAVE_TM(r4)
++	mtspr	SPRN_VRSAVE, r7
++
++	ld	r5, VCPU_LR_TM(r4)
++	lwz	r6, VCPU_CR_TM(r4)
++	ld	r7, VCPU_CTR_TM(r4)
++	ld	r8, VCPU_AMR_TM(r4)
++	ld	r9, VCPU_TAR_TM(r4)
++	mtlr	r5
++	mtcr	r6
++	mtctr	r7
++	mtspr	SPRN_AMR, r8
++	mtspr	SPRN_TAR, r9
++
++	/*
++	 * Load up PPR and DSCR values but don't put them in the actual SPRs
++	 * till the last moment to avoid running with userspace PPR and DSCR for
++	 * too long.
++	 */
++	ld	r29, VCPU_DSCR_TM(r4)
++	ld	r30, VCPU_PPR_TM(r4)
++
++	std	r2, PACATMSCRATCH(r13) /* Save TOC */
++
++	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
++	li	r5, 0
++	mtmsrd	r5, 1
++
++	/* Load GPRs r0-r28 */
++	reg = 0
++	.rept	29
++	ld	reg, VCPU_GPRS_TM(reg)(r31)
++	reg = reg + 1
++	.endr
++
++	mtspr	SPRN_DSCR, r29
++	mtspr	SPRN_PPR, r30
++
++	/* Load final GPRs */
++	ld	29, VCPU_GPRS_TM(29)(r31)
++	ld	30, VCPU_GPRS_TM(30)(r31)
++	ld	31, VCPU_GPRS_TM(31)(r31)
++
++	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
++	TRECHKPT
++
++	/* Now let's get back the state we need. */
++	HMT_MEDIUM
++	GET_PACA(r13)
++	ld	r29, HSTATE_DSCR(r13)
++	mtspr	SPRN_DSCR, r29
++	ld	r4, HSTATE_KVM_VCPU(r13)
++	ld	r1, HSTATE_HOST_R1(r13)
++	ld	r2, PACATMSCRATCH(r13)
++
++	/* Set the MSR RI since we have our registers back. */
++	li	r5, MSR_RI
++	mtmsrd	r5, 1
++
++	ld	r0, PPC_LR_STKOFF(r1)
++	mtlr	r0
++	blr
++#endif
++
+ /*
+  * We come here if we get any exception or interrupt while we are
+  * executing host real mode code while in guest MMU context.
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 18d2beb..42b968a8 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -893,7 +893,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
+ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ 			  unsigned char key, bool nq);
+-unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
++unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
+ 
+ /*
+  * Certain architectures need to do special things when PTEs
+diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
+index a2e6ef3..0a20316 100644
+--- a/arch/s390/include/asm/tlbflush.h
++++ b/arch/s390/include/asm/tlbflush.h
+@@ -81,7 +81,8 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
+ }
+ 
+ /*
+- * Flush TLB entries for a specific ASCE on all CPUs.
++ * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
++ * when more than one asce (e.g. gmap) ran on this mm.
+  */
+ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+ {
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index cace818..313c3b8 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -85,7 +85,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
+ static void gmap_flush_tlb(struct gmap *gmap)
+ {
+ 	if (MACHINE_HAS_IDTE)
+-		__tlb_flush_asce(gmap->mm, gmap->asce);
++		__tlb_flush_idte(gmap->asce);
+ 	else
+ 		__tlb_flush_global();
+ }
+@@ -124,7 +124,7 @@ void gmap_free(struct gmap *gmap)
+ 
+ 	/* Flush tlb. */
+ 	if (MACHINE_HAS_IDTE)
+-		__tlb_flush_asce(gmap->mm, gmap->asce);
++		__tlb_flush_idte(gmap->asce);
+ 	else
+ 		__tlb_flush_global();
+ 
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 9f0ce0e..ebb4f87 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -543,7 +543,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ }
+ EXPORT_SYMBOL(set_guest_storage_key);
+ 
+-unsigned char get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
++unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
+ {
+ 	unsigned char key;
+ 	spinlock_t *ptl;
+diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
+index 8acaf4e..a86d7cc 100644
+--- a/arch/um/os-Linux/signal.c
++++ b/arch/um/os-Linux/signal.c
+@@ -15,6 +15,7 @@
+ #include <kern_util.h>
+ #include <os.h>
+ #include <sysdep/mcontext.h>
++#include <um_malloc.h>
+ 
+ void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
+ 	[SIGTRAP]	= relay_signal,
+@@ -32,7 +33,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
+ 	struct uml_pt_regs *r;
+ 	int save_errno = errno;
+ 
+-	r = malloc(sizeof(struct uml_pt_regs));
++	r = uml_kmalloc(sizeof(struct uml_pt_regs), UM_GFP_ATOMIC);
+ 	if (!r)
+ 		panic("out of memory");
+ 
+@@ -91,7 +92,7 @@ static void timer_real_alarm_handler(mcontext_t *mc)
+ {
+ 	struct uml_pt_regs *regs;
+ 
+-	regs = malloc(sizeof(struct uml_pt_regs));
++	regs = uml_kmalloc(sizeof(struct uml_pt_regs), UM_GFP_ATOMIC);
+ 	if (!regs)
+ 		panic("out of memory");
+ 
+diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
+index e35632e..62dfc64 100644
+--- a/arch/unicore32/include/asm/mmu_context.h
++++ b/arch/unicore32/include/asm/mmu_context.h
+@@ -98,7 +98,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
+ }
+ 
+ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+-		bool write, bool foreign)
++		bool write, bool execute, bool foreign)
+ {
+ 	/* by default, allow everything */
+ 	return true;
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index 874e8bd..bd136ac 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -2546,7 +2546,7 @@ void hswep_uncore_cpu_init(void)
+ 
+ static struct intel_uncore_type hswep_uncore_ha = {
+ 	.name		= "ha",
+-	.num_counters   = 5,
++	.num_counters   = 4,
+ 	.num_boxes	= 2,
+ 	.perf_ctr_bits	= 48,
+ 	SNBEP_UNCORE_PCI_COMMON_INIT(),
+@@ -2565,7 +2565,7 @@ static struct uncore_event_desc hswep_uncore_imc_events[] = {
+ 
+ static struct intel_uncore_type hswep_uncore_imc = {
+ 	.name		= "imc",
+-	.num_counters   = 5,
++	.num_counters   = 4,
+ 	.num_boxes	= 8,
+ 	.perf_ctr_bits	= 48,
+ 	.fixed_ctr_bits	= 48,
+@@ -2611,7 +2611,7 @@ static struct intel_uncore_type hswep_uncore_irp = {
+ 
+ static struct intel_uncore_type hswep_uncore_qpi = {
+ 	.name			= "qpi",
+-	.num_counters		= 5,
++	.num_counters		= 4,
+ 	.num_boxes		= 3,
+ 	.perf_ctr_bits		= 48,
+ 	.perf_ctr		= SNBEP_PCI_PMON_CTR0,
+@@ -2693,7 +2693,7 @@ static struct event_constraint hswep_uncore_r3qpi_constraints[] = {
+ 
+ static struct intel_uncore_type hswep_uncore_r3qpi = {
+ 	.name		= "r3qpi",
+-	.num_counters   = 4,
++	.num_counters   = 3,
+ 	.num_boxes	= 3,
+ 	.perf_ctr_bits	= 44,
+ 	.constraints	= hswep_uncore_r3qpi_constraints,
+@@ -2892,7 +2892,7 @@ static struct intel_uncore_type bdx_uncore_ha = {
+ 
+ static struct intel_uncore_type bdx_uncore_imc = {
+ 	.name		= "imc",
+-	.num_counters   = 5,
++	.num_counters   = 4,
+ 	.num_boxes	= 8,
+ 	.perf_ctr_bits	= 48,
+ 	.fixed_ctr_bits	= 48,
+diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
+index c146f3c..0149ac5 100644
+--- a/arch/x86/kvm/mtrr.c
++++ b/arch/x86/kvm/mtrr.c
+@@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct mtrr_iter *iter)
+ 
+ 	iter->fixed = false;
+ 	iter->start_max = iter->start;
++	iter->range = NULL;
+ 	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
+ 
+ 	__mtrr_lookup_var_next(iter);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 64a79f2..8326d68 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8224,6 +8224,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+ 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ 			(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
+ 			exit_reason != EXIT_REASON_EPT_VIOLATION &&
++			exit_reason != EXIT_REASON_PML_FULL &&
+ 			exit_reason != EXIT_REASON_TASK_SWITCH)) {
+ 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
+@@ -8854,6 +8855,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
+ 	put_cpu();
+ }
+ 
++/*
++ * Ensure that the current vmcs of the logical processor is the
++ * vmcs01 of the vcpu before calling free_nested().
++ */
++static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
++{
++       struct vcpu_vmx *vmx = to_vmx(vcpu);
++       int r;
++
++       r = vcpu_load(vcpu);
++       BUG_ON(r);
++       vmx_load_vmcs01(vcpu);
++       free_nested(vmx);
++       vcpu_put(vcpu);
++}
++
+ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -8862,8 +8879,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
+ 		vmx_destroy_pml_buffer(vmx);
+ 	free_vpid(vmx->vpid);
+ 	leave_guest_mode(vcpu);
+-	vmx_load_vmcs01(vcpu);
+-	free_nested(vmx);
++	vmx_free_vcpu_nested(vcpu);
+ 	free_loaded_vmcs(vmx->loaded_vmcs);
+ 	kfree(vmx->guest_msrs);
+ 	kvm_vcpu_uninit(vcpu);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 7da5dd2..fea2c57 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -91,6 +91,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
+ 
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
+ static void process_nmi(struct kvm_vcpu *vcpu);
++static void process_smi(struct kvm_vcpu *vcpu);
+ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+ 
+ struct kvm_x86_ops *kvm_x86_ops __read_mostly;
+@@ -5296,13 +5297,8 @@ static void kvm_smm_changed(struct kvm_vcpu *vcpu)
+ 		/* This is a good place to trace that we are exiting SMM.  */
+ 		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
+ 
+-		if (unlikely(vcpu->arch.smi_pending)) {
+-			kvm_make_request(KVM_REQ_SMI, vcpu);
+-			vcpu->arch.smi_pending = 0;
+-		} else {
+-			/* Process a latched INIT, if any.  */
+-			kvm_make_request(KVM_REQ_EVENT, vcpu);
+-		}
++		/* Process a latched INIT or SMI, if any.  */
++		kvm_make_request(KVM_REQ_EVENT, vcpu);
+ 	}
+ 
+ 	kvm_mmu_reset_context(vcpu);
+@@ -6102,7 +6098,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
+ 	}
+ 
+ 	/* try to inject new event if pending */
+-	if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
++	if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
++		vcpu->arch.smi_pending = false;
++		process_smi(vcpu);
++	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
+ 		--vcpu->arch.nmi_pending;
+ 		vcpu->arch.nmi_injected = true;
+ 		kvm_x86_ops->set_nmi(vcpu);
+@@ -6312,11 +6311,6 @@ static void process_smi(struct kvm_vcpu *vcpu)
+ 	char buf[512];
+ 	u32 cr0;
+ 
+-	if (is_smm(vcpu)) {
+-		vcpu->arch.smi_pending = true;
+-		return;
+-	}
+-
+ 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
+ 	vcpu->arch.hflags |= HF_SMM_MASK;
+ 	memset(buf, 0, 512);
+@@ -6379,6 +6373,12 @@ static void process_smi(struct kvm_vcpu *vcpu)
+ 	kvm_mmu_reset_context(vcpu);
+ }
+ 
++static void process_smi_request(struct kvm_vcpu *vcpu)
++{
++	vcpu->arch.smi_pending = true;
++	kvm_make_request(KVM_REQ_EVENT, vcpu);
++}
++
+ void kvm_make_scan_ioapic_request(struct kvm *kvm)
+ {
+ 	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
+@@ -6500,7 +6500,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+ 			record_steal_time(vcpu);
+ 		if (kvm_check_request(KVM_REQ_SMI, vcpu))
+-			process_smi(vcpu);
++			process_smi_request(vcpu);
+ 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
+ 			process_nmi(vcpu);
+ 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
+@@ -6573,8 +6573,18 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 
+ 		if (inject_pending_event(vcpu, req_int_win) != 0)
+ 			req_immediate_exit = true;
+-		/* enable NMI/IRQ window open exits if needed */
+ 		else {
++			/* Enable NMI/IRQ window open exits if needed.
++			 *
++			 * SMIs have two cases: 1) they can be nested, and
++			 * then there is nothing to do here because RSM will
++			 * cause a vmexit anyway; 2) or the SMI can be pending
++			 * because inject_pending_event has completed the
++			 * injection of an IRQ or NMI from the previous vmexit,
++			 * and then we request an immediate exit to inject the SMI.
++			 */
++			if (vcpu->arch.smi_pending && !is_smm(vcpu))
++				req_immediate_exit = true;
+ 			if (vcpu->arch.nmi_pending)
+ 				kvm_x86_ops->enable_nmi_window(vcpu);
+ 			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+@@ -6625,8 +6635,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 
+ 	kvm_load_guest_xcr0(vcpu);
+ 
+-	if (req_immediate_exit)
++	if (req_immediate_exit) {
++		kvm_make_request(KVM_REQ_EVENT, vcpu);
+ 		smp_send_reschedule(vcpu->cpu);
++	}
+ 
+ 	trace_kvm_entry(vcpu->vcpu_id);
+ 	wait_lapic_expire(vcpu);
+@@ -7427,6 +7439,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ {
+ 	vcpu->arch.hflags = 0;
+ 
++	vcpu->arch.smi_pending = 0;
+ 	atomic_set(&vcpu->arch.nmi_queued, 0);
+ 	vcpu->arch.nmi_pending = 0;
+ 	vcpu->arch.nmi_injected = false;
+diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
+index 8b93e63..ae97f24 100644
+--- a/arch/x86/pci/intel_mid_pci.c
++++ b/arch/x86/pci/intel_mid_pci.c
+@@ -37,6 +37,7 @@
+ 
+ /* Quirks for the listed devices */
+ #define PCI_DEVICE_ID_INTEL_MRFL_MMC	0x1190
++#define PCI_DEVICE_ID_INTEL_MRFL_HSU	0x1191
+ 
+ /* Fixed BAR fields */
+ #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00	/* Fixed BAR (TBD) */
+@@ -225,13 +226,20 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
+ 		/* Special treatment for IRQ0 */
+ 		if (dev->irq == 0) {
+ 			/*
++			 * Skip HS UART common registers device since it has
++			 * IRQ0 assigned and not used by the kernel.
++			 */
++			if (dev->device == PCI_DEVICE_ID_INTEL_MRFL_HSU)
++				return -EBUSY;
++			/*
+ 			 * TNG has IRQ0 assigned to eMMC controller. But there
+ 			 * are also other devices with bogus PCI configuration
+ 			 * that have IRQ0 assigned. This check ensures that
+-			 * eMMC gets it.
++			 * eMMC gets it. The rest of devices still could be
++			 * enabled without interrupt line being allocated.
+ 			 */
+ 			if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
+-				return -EBUSY;
++				return 0;
+ 		}
+ 		break;
+ 	default:
+diff --git a/block/bio.c b/block/bio.c
+index 0e4aa42..4623869 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -579,6 +579,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
+ 	bio->bi_rw = bio_src->bi_rw;
+ 	bio->bi_iter = bio_src->bi_iter;
+ 	bio->bi_io_vec = bio_src->bi_io_vec;
++
++	bio_clone_blkcg_association(bio, bio_src);
+ }
+ EXPORT_SYMBOL(__bio_clone_fast);
+ 
+@@ -684,6 +686,8 @@ integrity_clone:
+ 		}
+ 	}
+ 
++	bio_clone_blkcg_association(bio, bio_src);
++
+ 	return bio;
+ }
+ EXPORT_SYMBOL(bio_clone_bioset);
+@@ -2005,6 +2009,17 @@ void bio_disassociate_task(struct bio *bio)
+ 	}
+ }
+ 
++/**
++ * bio_clone_blkcg_association - clone blkcg association from src to dst bio
++ * @dst: destination bio
++ * @src: source bio
++ */
++void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
++{
++	if (src->bi_css)
++		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
++}
++
+ #endif /* CONFIG_BLK_CGROUP */
+ 
+ static void __init biovec_init_slabs(void)
+diff --git a/block/genhd.c b/block/genhd.c
+index 3eebd25..086f1a3 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -613,7 +613,7 @@ void add_disk(struct gendisk *disk)
+ 
+ 	/* Register BDI before referencing it from bdev */
+ 	bdi = &disk->queue->backing_dev_info;
+-	bdi_register_dev(bdi, disk_devt(disk));
++	bdi_register_owner(bdi, disk_to_dev(disk));
+ 
+ 	blk_register_region(disk_devt(disk), disk->minors, NULL,
+ 			    exact_match, exact_lock, disk);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 290d6f5..f4218df 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -101,6 +101,7 @@ enum ec_command {
+ #define ACPI_EC_UDELAY_POLL	550	/* Wait 1ms for EC transaction polling */
+ #define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
+ 					 * when trying to clear the EC */
++#define ACPI_EC_MAX_QUERIES	16	/* Maximum number of parallel queries */
+ 
+ enum {
+ 	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
+@@ -121,6 +122,10 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+ module_param(ec_delay, uint, 0644);
+ MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+ 
++static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
++module_param(ec_max_queries, uint, 0644);
++MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
++
+ static bool ec_busy_polling __read_mostly;
+ module_param(ec_busy_polling, bool, 0644);
+ MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
+@@ -174,6 +179,7 @@ static void acpi_ec_event_processor(struct work_struct *work);
+ 
+ struct acpi_ec *boot_ec, *first_ec;
+ EXPORT_SYMBOL(first_ec);
++static struct workqueue_struct *ec_query_wq;
+ 
+ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
+ static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
+@@ -1098,7 +1104,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
+ 	 * work queue execution.
+ 	 */
+ 	ec_dbg_evt("Query(0x%02x) scheduled", value);
+-	if (!schedule_work(&q->work)) {
++	if (!queue_work(ec_query_wq, &q->work)) {
+ 		ec_dbg_evt("Query(0x%02x) overlapped", value);
+ 		result = -EBUSY;
+ 	}
+@@ -1660,15 +1666,41 @@ static struct acpi_driver acpi_ec_driver = {
+ 		},
+ };
+ 
++static inline int acpi_ec_query_init(void)
++{
++	if (!ec_query_wq) {
++		ec_query_wq = alloc_workqueue("kec_query", 0,
++					      ec_max_queries);
++		if (!ec_query_wq)
++			return -ENODEV;
++	}
++	return 0;
++}
++
++static inline void acpi_ec_query_exit(void)
++{
++	if (ec_query_wq) {
++		destroy_workqueue(ec_query_wq);
++		ec_query_wq = NULL;
++	}
++}
++
+ int __init acpi_ec_init(void)
+ {
+-	int result = 0;
++	int result;
+ 
++	/* register workqueue for _Qxx evaluations */
++	result = acpi_ec_query_init();
++	if (result)
++		goto err_exit;
+ 	/* Now register the driver for the EC */
+ 	result = acpi_bus_register_driver(&acpi_ec_driver);
+-	if (result < 0)
+-		return -ENODEV;
++	if (result)
++		goto err_exit;
+ 
++err_exit:
++	if (result)
++		acpi_ec_query_exit();
+ 	return result;
+ }
+ 
+@@ -1678,5 +1710,6 @@ static void __exit acpi_ec_exit(void)
+ {
+ 
+ 	acpi_bus_unregister_driver(&acpi_ec_driver);
++	acpi_ec_query_exit();
+ }
+ #endif	/* 0 */
+diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
+index cae5385..bd46569 100644
+--- a/drivers/bcma/host_pci.c
++++ b/drivers/bcma/host_pci.c
+@@ -295,6 +295,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
+ 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 84708a5..a1dcf12 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -3663,11 +3663,6 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
+ 
+ 	opened_bdev[drive] = bdev;
+ 
+-	if (!(mode & (FMODE_READ|FMODE_WRITE))) {
+-		res = -EINVAL;
+-		goto out;
+-	}
+-
+ 	res = -ENXIO;
+ 
+ 	if (!floppy_track_buffer) {
+@@ -3711,13 +3706,15 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
+ 	if (UFDCS->rawcmd == 1)
+ 		UFDCS->rawcmd = 2;
+ 
+-	UDRS->last_checked = 0;
+-	clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+-	check_disk_change(bdev);
+-	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+-		goto out;
+-	if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+-		goto out;
++	if (mode & (FMODE_READ|FMODE_WRITE)) {
++		UDRS->last_checked = 0;
++		clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
++		check_disk_change(bdev);
++		if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
++			goto out;
++		if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
++			goto out;
++	}
+ 
+ 	res = -EROFS;
+ 
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 2589468..fadba88 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -123,6 +123,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3472) },
+ 	{ USB_DEVICE(0x13d3, 0x3474) },
+ 	{ USB_DEVICE(0x13d3, 0x3487) },
++	{ USB_DEVICE(0x13d3, 0x3490) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE02C) },
+@@ -190,6 +191,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index a3be65e..9f40c34 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -237,6 +237,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 87ab9f6..d72c6d1 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -949,6 +949,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
+ 	/* award one bit for the contents of the fast pool */
+ 	credit_entropy_bits(r, credit + 1);
+ }
++EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+ 
+ #ifdef CONFIG_BLOCK
+ void add_disk_randomness(struct gendisk *disk)
+@@ -1461,12 +1462,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+ static ssize_t
+ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+ {
++	static int maxwarn = 10;
+ 	int ret;
+ 
+-	if (unlikely(nonblocking_pool.initialized == 0))
+-		printk_once(KERN_NOTICE "random: %s urandom read "
+-			    "with %d bits of entropy available\n",
+-			    current->comm, nonblocking_pool.entropy_total);
++	if (unlikely(nonblocking_pool.initialized == 0) &&
++	    maxwarn > 0) {
++		maxwarn--;
++		printk(KERN_NOTICE "random: %s: uninitialized urandom read "
++		       "(%zd bytes read, %d bits of entropy available)\n",
++		       current->comm, nbytes, nonblocking_pool.entropy_total);
++	}
+ 
+ 	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
+ 	ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
+@@ -1774,13 +1779,15 @@ int random_int_secret_init(void)
+ 	return 0;
+ }
+ 
++static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
++		__aligned(sizeof(unsigned long));
++
+ /*
+  * Get a random word for internal kernel use only. Similar to urandom but
+  * with the goal of minimal entropy pool depletion. As a result, the random
+  * value is not cryptographically secure but for several uses the cost of
+  * depleting entropy is too high
+  */
+-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+ unsigned int get_random_int(void)
+ {
+ 	__u32 *hash;
+@@ -1850,12 +1857,18 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
+ {
+ 	struct entropy_store *poolp = &input_pool;
+ 
+-	/* Suspend writing if we're above the trickle threshold.
+-	 * We'll be woken up again once below random_write_wakeup_thresh,
+-	 * or when the calling thread is about to terminate.
+-	 */
+-	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
++	if (unlikely(nonblocking_pool.initialized == 0))
++		poolp = &nonblocking_pool;
++	else {
++		/* Suspend writing if we're above the trickle
++		 * threshold.  We'll be woken up again once below
++		 * random_write_wakeup_thresh, or when the calling
++		 * thread is about to terminate.
++		 */
++		wait_event_interruptible(random_write_wait,
++					 kthread_should_stop() ||
+ 			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
++	}
+ 	mix_pool_bytes(poolp, buffer, count);
+ 	credit_entropy_bits(poolp, entropy);
+ }
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index a12b319..e9fd1d8 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -246,7 +246,7 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+ 
+ 	/* Detect a 64 bit address on a 32 bit system */
+ 	if (start != new_res.start)
+-		return ERR_PTR(-EINVAL);
++		return (void __iomem *) ERR_PTR(-EINVAL);
+ 
+ 	if (!resource_contains(&priv->res, &new_res))
+ 		return devm_ioremap_resource(dev, &new_res);
+diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
+index 8059a8d..31b77f7 100644
+--- a/drivers/clk/rockchip/clk-rk3399.c
++++ b/drivers/clk/rockchip/clk-rk3399.c
+@@ -586,7 +586,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
+ 			RK3399_CLKGATE_CON(8), 15, GFLAGS),
+ 
+ 	COMPOSITE(SCLK_SPDIF_REC_DPTX, "clk_spdif_rec_dptx", mux_pll_src_cpll_gpll_p, 0,
+-			RK3399_CLKSEL_CON(32), 15, 1, MFLAGS, 0, 5, DFLAGS,
++			RK3399_CLKSEL_CON(32), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ 			RK3399_CLKGATE_CON(10), 6, GFLAGS),
+ 	/* i2s */
+ 	COMPOSITE(0, "clk_i2s0_div", mux_pll_src_cpll_gpll_p, 0,
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 1fa1a32..1b15917 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -944,7 +944,7 @@ static int core_get_max_pstate(void)
+ 			if (err)
+ 				goto skip_tar;
+ 
+-			tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
++			tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
+ 			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
+ 			if (err)
+ 				goto skip_tar;
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 10c305b..4e0f8e7 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -313,7 +313,6 @@ static struct device_type csrow_attr_type = {
+  * possible dynamic channel DIMM Label attribute files
+  *
+  */
+-
+ DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
+ 	channel_dimm_label_show, channel_dimm_label_store, 0);
+ DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
+@@ -326,6 +325,10 @@ DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
+ 	channel_dimm_label_show, channel_dimm_label_store, 4);
+ DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
+ 	channel_dimm_label_show, channel_dimm_label_store, 5);
++DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
++	channel_dimm_label_show, channel_dimm_label_store, 6);
++DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
++	channel_dimm_label_show, channel_dimm_label_store, 7);
+ 
+ /* Total possible dynamic DIMM Label attribute file table */
+ static struct attribute *dynamic_csrow_dimm_attr[] = {
+@@ -335,6 +338,8 @@ static struct attribute *dynamic_csrow_dimm_attr[] = {
+ 	&dev_attr_legacy_ch3_dimm_label.attr.attr,
+ 	&dev_attr_legacy_ch4_dimm_label.attr.attr,
+ 	&dev_attr_legacy_ch5_dimm_label.attr.attr,
++	&dev_attr_legacy_ch6_dimm_label.attr.attr,
++	&dev_attr_legacy_ch7_dimm_label.attr.attr,
+ 	NULL
+ };
+ 
+@@ -351,6 +356,10 @@ DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
+ 		   channel_ce_count_show, NULL, 4);
+ DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
+ 		   channel_ce_count_show, NULL, 5);
++DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
++		   channel_ce_count_show, NULL, 6);
++DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
++		   channel_ce_count_show, NULL, 7);
+ 
+ /* Total possible dynamic ce_count attribute file table */
+ static struct attribute *dynamic_csrow_ce_count_attr[] = {
+@@ -360,6 +369,8 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = {
+ 	&dev_attr_legacy_ch3_ce_count.attr.attr,
+ 	&dev_attr_legacy_ch4_ce_count.attr.attr,
+ 	&dev_attr_legacy_ch5_ce_count.attr.attr,
++	&dev_attr_legacy_ch6_ce_count.attr.attr,
++	&dev_attr_legacy_ch7_ce_count.attr.attr,
+ 	NULL
+ };
+ 
+@@ -371,9 +382,16 @@ static umode_t csrow_dev_is_visible(struct kobject *kobj,
+ 
+ 	if (idx >= csrow->nr_channels)
+ 		return 0;
++
++	if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
++		WARN_ONCE(1, "idx: %d\n", idx);
++		return 0;
++	}
++
+ 	/* Only expose populated DIMMs */
+ 	if (!csrow->channels[idx]->dimm->nr_pages)
+ 		return 0;
++
+ 	return attr->mode;
+ }
+ 
+diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
+index cdaba13..c0f7cce 100644
+--- a/drivers/gpio/gpio-intel-mid.c
++++ b/drivers/gpio/gpio-intel-mid.c
+@@ -17,7 +17,6 @@
+  * Moorestown platform Langwell chip.
+  * Medfield platform Penwell chip.
+  * Clovertrail platform Cloverview chip.
+- * Merrifield platform Tangier chip.
+  */
+ 
+ #include <linux/module.h>
+@@ -64,10 +63,6 @@ enum GPIO_REG {
+ /* intel_mid gpio driver data */
+ struct intel_mid_gpio_ddata {
+ 	u16 ngpio;		/* number of gpio pins */
+-	u32 gplr_offset;	/* offset of first GPLR register from base */
+-	u32 flis_base;		/* base address of FLIS registers */
+-	u32 flis_len;		/* length of FLIS registers */
+-	u32 (*get_flis_offset)(int gpio);
+ 	u32 chip_irq_type;	/* chip interrupt type */
+ };
+ 
+@@ -252,15 +247,6 @@ static const struct intel_mid_gpio_ddata gpio_cloverview_core = {
+ 	.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+ };
+ 
+-static const struct intel_mid_gpio_ddata gpio_tangier = {
+-	.ngpio = 192,
+-	.gplr_offset = 4,
+-	.flis_base = 0xff0c0000,
+-	.flis_len = 0x8000,
+-	.get_flis_offset = NULL,
+-	.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+-};
+-
+ static const struct pci_device_id intel_gpio_ids[] = {
+ 	{
+ 		/* Lincroft */
+@@ -287,11 +273,6 @@ static const struct pci_device_id intel_gpio_ids[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
+ 		.driver_data = (kernel_ulong_t)&gpio_cloverview_core,
+ 	},
+-	{
+-		/* Tangier */
+-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
+-		.driver_data = (kernel_ulong_t)&gpio_tangier,
+-	},
+ 	{ 0 }
+ };
+ MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
+@@ -401,7 +382,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
+ 	spin_lock_init(&priv->lock);
+ 
+ 	pci_set_drvdata(pdev, priv);
+-	retval = gpiochip_add_data(&priv->chip, priv);
++	retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
+ 	if (retval) {
+ 		dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
+ 		return retval;
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 5e3be32..3745de6 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -90,7 +90,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
+ #define MAX_BANK 5
+ #define BANK_SZ 8
+ 
+-#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
++#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
+ 
+ struct pca953x_chip {
+ 	unsigned gpio_start;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+index 9df1bcb..9831753 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -551,28 +551,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
+ 		    le16_to_cpu(firmware_info->info.usReferenceClock);
+ 		ppll->reference_div = 0;
+ 
+-		if (crev < 2)
+-			ppll->pll_out_min =
+-				le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+-		else
+-			ppll->pll_out_min =
+-				le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
++		ppll->pll_out_min =
++			le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
+ 		ppll->pll_out_max =
+ 		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+ 
+-		if (crev >= 4) {
+-			ppll->lcd_pll_out_min =
+-				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+-			if (ppll->lcd_pll_out_min == 0)
+-				ppll->lcd_pll_out_min = ppll->pll_out_min;
+-			ppll->lcd_pll_out_max =
+-				le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+-			if (ppll->lcd_pll_out_max == 0)
+-				ppll->lcd_pll_out_max = ppll->pll_out_max;
+-		} else {
++		ppll->lcd_pll_out_min =
++			le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
++		if (ppll->lcd_pll_out_min == 0)
+ 			ppll->lcd_pll_out_min = ppll->pll_out_min;
++		ppll->lcd_pll_out_max =
++			le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
++		if (ppll->lcd_pll_out_max == 0)
+ 			ppll->lcd_pll_out_max = ppll->pll_out_max;
+-		}
+ 
+ 		if (ppll->pll_out_min == 0)
+ 			ppll->pll_out_min = 64800;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 35a1248..1b4c069 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -10,6 +10,7 @@
+ #include <linux/slab.h>
+ #include <linux/acpi.h>
+ #include <linux/pci.h>
++#include <linux/delay.h>
+ 
+ #include "amd_acpi.h"
+ 
+@@ -259,6 +260,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
+ 		if (!info)
+ 			return -EIO;
+ 		kfree(info);
++
++		/* 200ms delay is required after off */
++		if (state == 0)
++			msleep(200);
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+index cb07da4..ff0b55a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+@@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
+ 						   DRM_MODE_SCALE_NONE);
+ 			/* no HPD on analog connectors */
+ 			amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ 			connector->interlace_allowed = true;
+ 			connector->doublescan_allowed = true;
+ 			break;
+@@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
+ 	}
+ 
+ 	if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
+-		if (i2c_bus->valid)
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++		if (i2c_bus->valid) {
++			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++			                    DRM_CONNECTOR_POLL_DISCONNECT;
++		}
+ 	} else
+ 		connector->polled = DRM_CONNECTOR_POLL_HPD;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 6e92008..b7f5650 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1841,7 +1841,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ 	}
+ 
+ 	drm_kms_helper_poll_enable(dev);
++
++	/*
++	 * Most of the connector probing functions try to acquire runtime pm
++	 * refs to ensure that the GPU is powered on when connector polling is
++	 * performed. Since we're calling this from a runtime PM callback,
++	 * trying to acquire rpm refs will cause us to deadlock.
++	 *
++	 * Since we're guaranteed to be holding the rpm lock, it's safe to
++	 * temporarily disable the rpm helpers so this doesn't deadlock us.
++	 */
++#ifdef CONFIG_PM
++	dev->dev->power.disable_depth++;
++#endif
+ 	drm_helper_hpd_irq_event(dev);
++#ifdef CONFIG_PM
++	dev->dev->power.disable_depth--;
++#endif
+ 
+ 	if (fbcon) {
+ 		amdgpu_fbdev_set_suspend(adev, 0);
+diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+index 48b6bd6..c32eca2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
+ 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ 			if (dig->backlight_level == 0)
+ 				amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
+ 								       ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index 1feb643..9269548 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -167,6 +167,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
+ 		break;
+ 	case CHIP_KAVERI:
+ 	case CHIP_KABINI:
++	case CHIP_MULLINS:
+ 		return 0;
+ 	default: BUG();
+ 	}
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+index 90b35c5..ffc7c0d 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+@@ -592,12 +592,12 @@ bool atomctrl_get_pp_assign_pin(
+ 		const uint32_t pinId,
+ 		pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
+ {
+-	bool bRet = 0;
++	bool bRet = false;
+ 	ATOM_GPIO_PIN_LUT *gpio_lookup_table =
+ 		get_gpio_lookup_table(hwmgr->device);
+ 
+ 	PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
+-			"Could not find GPIO lookup Table in BIOS.", return -1);
++			"Could not find GPIO lookup Table in BIOS.", return false);
+ 
+ 	bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
+ 		gpio_pin_assignment);
+diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
+index 059f7c3..a7916e5 100644
+--- a/drivers/gpu/drm/drm_cache.c
++++ b/drivers/gpu/drm/drm_cache.c
+@@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
+ 		mb();
+ 		for (; addr < end; addr += size)
+ 			clflushopt(addr);
++		clflushopt(end - 1); /* force serialisation */
+ 		mb();
+ 		return;
+ 	}
+diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
+index eeaf5a7..67b28f8 100644
+--- a/drivers/gpu/drm/drm_dp_helper.c
++++ b/drivers/gpu/drm/drm_dp_helper.c
+@@ -203,7 +203,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
+ 
+ 		ret = aux->transfer(aux, &msg);
+ 
+-		if (ret > 0) {
++		if (ret >= 0) {
+ 			native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
+ 			if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
+ 				if (ret == size)
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 7df26d4..2cb472b 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -74,6 +74,8 @@
+ #define EDID_QUIRK_FORCE_8BPC			(1 << 8)
+ /* Force 12bpc */
+ #define EDID_QUIRK_FORCE_12BPC			(1 << 9)
++/* Force 6bpc */
++#define EDID_QUIRK_FORCE_6BPC			(1 << 10)
+ 
+ struct detailed_mode_closure {
+ 	struct drm_connector *connector;
+@@ -100,6 +102,9 @@ static struct edid_quirk {
+ 	/* Unknown Acer */
+ 	{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
+ 
++	/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
++	{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
++
+ 	/* Belinea 10 15 55 */
+ 	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+ 	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+@@ -4082,6 +4087,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+ 
+ 	drm_add_display_info(edid, &connector->display_info, connector);
+ 
++	if (quirks & EDID_QUIRK_FORCE_6BPC)
++		connector->display_info.bpc = 6;
++
+ 	if (quirks & EDID_QUIRK_FORCE_8BPC)
+ 		connector->display_info.bpc = 8;
+ 
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index bc3f2e6..227a63e 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2591,6 +2591,8 @@ struct drm_i915_cmd_table {
+ #define SKL_REVID_D0		0x3
+ #define SKL_REVID_E0		0x4
+ #define SKL_REVID_F0		0x5
++#define SKL_REVID_G0		0x6
++#define SKL_REVID_H0		0x7
+ 
+ #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
+ 
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 3fcf7dd..bc3b6dd 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1672,6 +1672,9 @@ enum skl_disp_power_wells {
+ 
+ #define GEN7_TLB_RD_ADDR	_MMIO(0x4700)
+ 
++#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
++#define   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS	(1<<18)
++
+ #define GAMT_CHKN_BIT_REG	_MMIO(0x4ab8)
+ #define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING	(1<<28)
+ 
+@@ -7538,6 +7541,8 @@ enum skl_disp_power_wells {
+ 
+ #define CDCLK_FREQ			_MMIO(0x46200)
+ 
++#define CDCLK_FREQ			_MMIO(0x46200)
++
+ #define _TRANSA_MSA_MISC		0x60410
+ #define _TRANSB_MSA_MISC		0x61410
+ #define _TRANSC_MSA_MISC		0x62410
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 3074c56..3289319 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9700,6 +9700,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
+ 
+ 	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+ 
++	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
++
+ 	intel_update_cdclk(dev);
+ 
+ 	WARN(cdclk != dev_priv->cdclk_freq,
+@@ -12095,21 +12097,11 @@ connected_sink_compute_bpp(struct intel_connector *connector,
+ 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ 	}
+ 
+-	/* Clamp bpp to default limit on screens without EDID 1.4 */
+-	if (connector->base.display_info.bpc == 0) {
+-		int type = connector->base.connector_type;
+-		int clamp_bpp = 24;
+-
+-		/* Fall back to 18 bpp when DP sink capability is unknown. */
+-		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
+-		    type == DRM_MODE_CONNECTOR_eDP)
+-			clamp_bpp = 18;
+-
+-		if (bpp > clamp_bpp) {
+-			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
+-				      bpp, clamp_bpp);
+-			pipe_config->pipe_bpp = clamp_bpp;
+-		}
++	/* Clamp bpp to 8 on screens without EDID 1.4 */
++	if (connector->base.display_info.bpc == 0 && bpp > 24) {
++		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
++			      bpp);
++		pipe_config->pipe_bpp = 24;
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 2863b92..c1ca5a7 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4563,7 +4563,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
+ 		else
+ 			gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ 		dev_priv->rps.last_adj = 0;
+-		I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
++		I915_WRITE(GEN6_PMINTRMSK,
++			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
+ 	}
+ 	mutex_unlock(&dev_priv->rps.hw_lock);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 68c5af0..9d778f3 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -1135,6 +1135,11 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
+ 	/* WaDisableGafsUnitClkGating:skl */
+ 	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ 
++	/* WaInPlaceDecompressionHang:skl */
++	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
++		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
++			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
++
+ 	/* WaDisableLSQCROPERFforOCL:skl */
+ 	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ 	if (ret)
+@@ -1194,6 +1199,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
+ 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+ 
++	/* WaInPlaceDecompressionHang:bxt */
++	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
++		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
++			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
++
+ 	return 0;
+ }
+ 
+@@ -1241,6 +1251,10 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
+ 		GEN7_HALF_SLICE_CHICKEN1,
+ 		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ 
++	/* WaInPlaceDecompressionHang:kbl */
++	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
++		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
++
+ 	/* WaDisableLSQCROPERFforOCL:kbl */
+ 	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ 	if (ret)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index 11f8dd9..d6c134b 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -324,7 +324,16 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
+ 	    !vga_switcheroo_handler_flags())
+ 		return -EPROBE_DEFER;
+ 
+-	/* remove conflicting drivers (vesafb, efifb etc) */
++	/* We need to check that the chipset is supported before booting
++	 * fbdev off the hardware, as there's no way to put it back.
++	 */
++	ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
++	if (ret)
++		return ret;
++
++	nvkm_device_del(&device);
++
++	/* Remove conflicting drivers (vesafb, efifb etc). */
+ 	aper = alloc_apertures(3);
+ 	if (!aper)
+ 		return -ENOMEM;
+diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+index 7d9248b..da8fd5f 100644
+--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+@@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 			 ((image->dx + image->width) & 0xffff));
+ 	OUT_RING(chan, bg);
+ 	OUT_RING(chan, fg);
+-	OUT_RING(chan, (image->height << 16) | image->width);
++	OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8));
+ 	OUT_RING(chan, (image->height << 16) | image->width);
+ 	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ 
+-	dsize = ALIGN(image->width * image->height, 32) >> 5;
++	dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
+ 	while (dsize) {
+ 		int iter_len = dsize > 128 ? 128 : dsize;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
+index 1aeb698..af3d3c4 100644
+--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
+@@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	OUT_RING(chan, 0);
+ 	OUT_RING(chan, image->dy);
+ 
+-	dwords = ALIGN(image->width * image->height, 32) >> 5;
++	dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
+ 	while (dwords) {
+ 		int push = dwords > 2047 ? 2047 : dwords;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+index 839f4c8..054b6a0 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+@@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	OUT_RING  (chan, 0);
+ 	OUT_RING  (chan, image->dy);
+ 
+-	dwords = ALIGN(image->width * image->height, 32) >> 5;
++	dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
+ 	while (dwords) {
+ 		int push = dwords > 2047 ? 2047 : dwords;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+index 69de8c6..f1e15a4 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+@@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ 		nvkm_wo32(chan->inst, i, 0x00040004);
+ 	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
+ 		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+-		nvkm_wo32(chan->inst, i + 1, 0x0436086c);
+-		nvkm_wo32(chan->inst, i + 2, 0x000c001b);
++		nvkm_wo32(chan->inst, i + 4, 0x0436086c);
++		nvkm_wo32(chan->inst, i + 8, 0x000c001b);
+ 	}
+ 	for (i = 0x30b8; i < 0x30c8; i += 4)
+ 		nvkm_wo32(chan->inst, i, 0x0000ffff);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+index 2207dac2..300f5ed 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+@@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
+ 		nvkm_wo32(chan->inst, i, 0x00040004);
+ 	for (i = 0x15ac; i <= 0x271c ; i += 16) {
+ 		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
+-		nvkm_wo32(chan->inst, i + 1, 0x0436086c);
+-		nvkm_wo32(chan->inst, i + 2, 0x000c001b);
++		nvkm_wo32(chan->inst, i + 4, 0x0436086c);
++		nvkm_wo32(chan->inst, i + 8, 0x000c001b);
+ 	}
+ 	for (i = 0x274c; i < 0x275c; i += 4)
+ 		nvkm_wo32(chan->inst, i, 0x0000ffff);
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 587cae4..56bb758 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+ 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ 			if (dig->backlight_level == 0)
+ 				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+ 			else {
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index f8097a0..5df3ec7 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ 		    le16_to_cpu(firmware_info->info.usReferenceClock);
+ 		p1pll->reference_div = 0;
+ 
+-		if (crev < 2)
++		if ((frev < 2) && (crev < 2))
+ 			p1pll->pll_out_min =
+ 				le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+ 		else
+@@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ 		p1pll->pll_out_max =
+ 		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+ 
+-		if (crev >= 4) {
++		if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
+ 			p1pll->lcd_pll_out_min =
+ 				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ 			if (p1pll->lcd_pll_out_min == 0)
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 95f4fea..1b3f4e5 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -10,6 +10,7 @@
+ #include <linux/slab.h>
+ #include <linux/acpi.h>
+ #include <linux/pci.h>
++#include <linux/delay.h>
+ 
+ #include "radeon_acpi.h"
+ 
+@@ -258,6 +259,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
+ 		if (!info)
+ 			return -EIO;
+ 		kfree(info);
++
++		/* 200ms delay is required after off */
++		if (state == 0)
++			msleep(200);
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 81a63d7..b79f3b0 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -2064,7 +2064,6 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 							   RADEON_OUTPUT_CSC_BYPASS);
+ 			/* no HPD on analog connectors */
+ 			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ 			connector->interlace_allowed = true;
+ 			connector->doublescan_allowed = true;
+ 			break;
+@@ -2314,8 +2313,10 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 	}
+ 
+ 	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+-		if (i2c_bus->valid)
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++		if (i2c_bus->valid) {
++			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++			                    DRM_CONNECTOR_POLL_DISCONNECT;
++		}
+ 	} else
+ 		connector->polled = DRM_CONNECTOR_POLL_HPD;
+ 
+@@ -2391,7 +2392,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 					      1);
+ 		/* no HPD on analog connectors */
+ 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+-		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ 		connector->interlace_allowed = true;
+ 		connector->doublescan_allowed = true;
+ 		break;
+@@ -2476,10 +2476,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 	}
+ 
+ 	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+-		if (i2c_bus->valid)
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++		if (i2c_bus->valid) {
++			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++			                    DRM_CONNECTOR_POLL_DISCONNECT;
++		}
+ 	} else
+ 		connector->polled = DRM_CONNECTOR_POLL_HPD;
++
+ 	connector->display_info.subpixel_order = subpixel_order;
+ 	drm_connector_register(connector);
+ }
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index 1c4d5b5..b167323 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -1048,6 +1048,17 @@ static void vop_crtc_destroy(struct drm_crtc *crtc)
+ 	drm_crtc_cleanup(crtc);
+ }
+ 
++static void vop_crtc_reset(struct drm_crtc *crtc)
++{
++	if (crtc->state)
++		__drm_atomic_helper_crtc_destroy_state(crtc->state);
++	kfree(crtc->state);
++
++	crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
++	if (crtc->state)
++		crtc->state->crtc = crtc;
++}
++
+ static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
+ {
+ 	struct rockchip_crtc_state *rockchip_state;
+@@ -1073,7 +1084,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
+ 	.set_config = drm_atomic_helper_set_config,
+ 	.page_flip = drm_atomic_helper_page_flip,
+ 	.destroy = vop_crtc_destroy,
+-	.reset = drm_atomic_helper_crtc_reset,
++	.reset = vop_crtc_reset,
+ 	.atomic_duplicate_state = vop_crtc_duplicate_state,
+ 	.atomic_destroy_state = vop_crtc_destroy_state,
+ };
+diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
+index 16b6f11..99ec3ff 100644
+--- a/drivers/hid/uhid.c
++++ b/drivers/hid/uhid.c
+@@ -51,10 +51,26 @@ struct uhid_device {
+ 	u32 report_id;
+ 	u32 report_type;
+ 	struct uhid_event report_buf;
++	struct work_struct worker;
+ };
+ 
+ static struct miscdevice uhid_misc;
+ 
++static void uhid_device_add_worker(struct work_struct *work)
++{
++	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
++	int ret;
++
++	ret = hid_add_device(uhid->hid);
++	if (ret) {
++		hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);
++
++		hid_destroy_device(uhid->hid);
++		uhid->hid = NULL;
++		uhid->running = false;
++	}
++}
++
+ static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
+ {
+ 	__u8 newhead;
+@@ -498,18 +514,14 @@ static int uhid_dev_create2(struct uhid_device *uhid,
+ 	uhid->hid = hid;
+ 	uhid->running = true;
+ 
+-	ret = hid_add_device(hid);
+-	if (ret) {
+-		hid_err(hid, "Cannot register HID device\n");
+-		goto err_hid;
+-	}
++	/* Adding of a HID device is done through a worker, to allow HID drivers
++	 * which use feature requests during .probe to work, without they would
++	 * be blocked on devlock, which is held by uhid_char_write.
++	 */
++	schedule_work(&uhid->worker);
+ 
+ 	return 0;
+ 
+-err_hid:
+-	hid_destroy_device(hid);
+-	uhid->hid = NULL;
+-	uhid->running = false;
+ err_free:
+ 	kfree(uhid->rd_data);
+ 	uhid->rd_data = NULL;
+@@ -550,6 +562,8 @@ static int uhid_dev_destroy(struct uhid_device *uhid)
+ 	uhid->running = false;
+ 	wake_up_interruptible(&uhid->report_wait);
+ 
++	cancel_work_sync(&uhid->worker);
++
+ 	hid_destroy_device(uhid->hid);
+ 	kfree(uhid->rd_data);
+ 
+@@ -612,6 +626,7 @@ static int uhid_char_open(struct inode *inode, struct file *file)
+ 	init_waitqueue_head(&uhid->waitq);
+ 	init_waitqueue_head(&uhid->report_wait);
+ 	uhid->running = false;
++	INIT_WORK(&uhid->worker, uhid_device_add_worker);
+ 
+ 	file->private_data = uhid;
+ 	nonseekable_open(inode, file);
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 952f20f..e82f7e1 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -42,6 +42,7 @@
+ #include <linux/screen_info.h>
+ #include <linux/kdebug.h>
+ #include <linux/efi.h>
++#include <linux/random.h>
+ #include "hyperv_vmbus.h"
+ 
+ static struct acpi_device  *hv_acpi_dev;
+@@ -806,6 +807,8 @@ static void vmbus_isr(void)
+ 		else
+ 			tasklet_schedule(hv_context.msg_dpc[cpu]);
+ 	}
++
++	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
+ }
+ 
+ 
+diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
+index 8eff627..e253598 100644
+--- a/drivers/i2c/busses/i2c-efm32.c
++++ b/drivers/i2c/busses/i2c-efm32.c
+@@ -433,7 +433,7 @@ static int efm32_i2c_probe(struct platform_device *pdev)
+ 	ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to request irq (%d)\n", ret);
+-		return ret;
++		goto err_disable_clk;
+ 	}
+ 
+ 	ret = i2c_add_adapter(&ddata->adapter);
+diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
+index 1eb9b12..dbfd854 100644
+--- a/drivers/infiniband/core/rw.c
++++ b/drivers/infiniband/core/rw.c
+@@ -58,19 +58,13 @@ static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
+ 	return false;
+ }
+ 
+-static inline u32 rdma_rw_max_sge(struct ib_device *dev,
+-		enum dma_data_direction dir)
+-{
+-	return dir == DMA_TO_DEVICE ?
+-		dev->attrs.max_sge : dev->attrs.max_sge_rd;
+-}
+-
+ static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
+ {
+ 	/* arbitrary limit to avoid allocating gigantic resources */
+ 	return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
+ }
+ 
++/* Caller must have zero-initialized *reg. */
+ static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
+ 		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
+ 		u32 sg_cnt, u32 offset)
+@@ -114,6 +108,7 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ 		u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
+ 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+ {
++	struct rdma_rw_reg_ctx *prev = NULL;
+ 	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+ 	int i, j, ret = 0, count = 0;
+ 
+@@ -125,7 +120,6 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ 	}
+ 
+ 	for (i = 0; i < ctx->nr_ops; i++) {
+-		struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
+ 		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
+ 		u32 nents = min(sg_cnt, pages_per_mr);
+ 
+@@ -162,9 +156,13 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ 		sg_cnt -= nents;
+ 		for (j = 0; j < nents; j++)
+ 			sg = sg_next(sg);
++		prev = reg;
+ 		offset = 0;
+ 	}
+ 
++	if (prev)
++		prev->wr.wr.next = NULL;
++
+ 	ctx->type = RDMA_RW_MR;
+ 	return count;
+ 
+@@ -181,7 +179,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+ {
+ 	struct ib_device *dev = qp->pd->device;
+-	u32 max_sge = rdma_rw_max_sge(dev, dir);
++	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
++		      qp->max_read_sge;
+ 	struct ib_sge *sge;
+ 	u32 total_len = 0, i, j;
+ 
+@@ -205,11 +204,10 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ 			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+ 		rdma_wr->remote_addr = remote_addr + total_len;
+ 		rdma_wr->rkey = rkey;
++		rdma_wr->wr.num_sge = nr_sge;
+ 		rdma_wr->wr.sg_list = sge;
+ 
+ 		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
+-			rdma_wr->wr.num_sge++;
+-
+ 			sge->addr = ib_sg_dma_address(dev, sg) + offset;
+ 			sge->length = ib_sg_dma_len(dev, sg) - offset;
+ 			sge->lkey = qp->pd->local_dma_lkey;
+@@ -220,8 +218,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ 			offset = 0;
+ 		}
+ 
+-		if (i + 1 < ctx->nr_ops)
+-			rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
++		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
++			&ctx->map.wrs[i + 1].wr : NULL;
+ 	}
+ 
+ 	ctx->type = RDMA_RW_MULTI_WR;
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 6298f54..e39a0b5 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -814,6 +814,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
+ 		}
+ 	}
+ 
++	/*
++	 * Note: all hw drivers guarantee that max_send_sge is lower than
++	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
++	 * max_send_sge <= max_sge_rd.
++	 */
++	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
++	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
++				 device->attrs.max_sge_rd);
++
+ 	return qp;
+ }
+ EXPORT_SYMBOL(ib_create_qp);
+diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
+index 53e03c8..79e6309 100644
+--- a/drivers/infiniband/hw/mlx5/gsi.c
++++ b/drivers/infiniband/hw/mlx5/gsi.c
+@@ -69,15 +69,6 @@ static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
+ 	return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
+ }
+ 
+-static u32 next_outstanding(struct mlx5_ib_gsi_qp *gsi, u32 index)
+-{
+-	return ++index % gsi->cap.max_send_wr;
+-}
+-
+-#define for_each_outstanding_wr(gsi, index) \
+-	for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; \
+-	     index = next_outstanding(gsi, index))
+-
+ /* Call with gsi->lock locked */
+ static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
+ {
+@@ -85,8 +76,9 @@ static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
+ 	struct mlx5_ib_gsi_wr *wr;
+ 	u32 index;
+ 
+-	for_each_outstanding_wr(gsi, index) {
+-		wr = &gsi->outstanding_wrs[index];
++	for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
++	     index++) {
++		wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
+ 
+ 		if (!wr->completed)
+ 			break;
+@@ -430,8 +422,9 @@ static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
+ 		return -ENOMEM;
+ 	}
+ 
+-	gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi];
+-	gsi->outstanding_pi = next_outstanding(gsi, gsi->outstanding_pi);
++	gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
++				       gsi->cap.max_send_wr];
++	gsi->outstanding_pi++;
+ 
+ 	if (!wc) {
+ 		memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
+diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
+index 11aa6a3..1da8d01 100644
+--- a/drivers/infiniband/sw/rdmavt/Kconfig
++++ b/drivers/infiniband/sw/rdmavt/Kconfig
+@@ -1,6 +1,5 @@
+ config INFINIBAND_RDMAVT
+ 	tristate "RDMA verbs transport library"
+ 	depends on 64BIT
+-	default m
+ 	---help---
+ 	This is a common software verbs provider for RDMA networks.
+diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
+index 6ca6fa8..f2f229e 100644
+--- a/drivers/infiniband/sw/rdmavt/cq.c
++++ b/drivers/infiniband/sw/rdmavt/cq.c
+@@ -510,6 +510,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
+ 
+ 	if (rdi->worker)
+ 		return 0;
++	spin_lock_init(&rdi->n_cqs_lock);
+ 	rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
+ 	if (!rdi->worker)
+ 		return -ENOMEM;
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 4a41556..9a3b954 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1601,6 +1601,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+ 	struct ib_qp_init_attr *qp_init;
+ 	struct srpt_port *sport = ch->sport;
+ 	struct srpt_device *sdev = sport->sdev;
++	const struct ib_device_attr *attrs = &sdev->device->attrs;
+ 	u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+ 	int ret;
+ 
+@@ -1638,7 +1639,7 @@ retry:
+ 	 */
+ 	qp_init->cap.max_send_wr = srp_sq_size / 2;
+ 	qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
+-	qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
++	qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
+ 	qp_init->port_num = ch->sport->port;
+ 
+ 	ch->qp = ib_create_qp(sdev->pd, qp_init);
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
+index 3890304..5818787 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
+@@ -106,7 +106,11 @@ enum {
+ 	SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
+ 
+ 	SRPT_DEF_SG_TABLESIZE = 128,
+-	SRPT_DEF_SG_PER_WQE = 16,
++	/*
++	 * An experimentally determined value that avoids that QP creation
++	 * fails due to "swiotlb buffer is full" on systems using the swiotlb.
++	 */
++	SRPT_MAX_SG_PER_WQE = 16,
+ 
+ 	MIN_SRPT_SQ_SIZE = 16,
+ 	DEF_SRPT_SQ_SIZE = 4096,
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 2f58985..d15b338 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -4,7 +4,8 @@
+  * Copyright (c) 2013 ELAN Microelectronics Corp.
+  *
+  * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
+- * Version: 1.6.0
++ * Author: KT Liao <kt.liao@emc.com.tw>
++ * Version: 1.6.2
+  *
+  * Based on cyapa driver:
+  * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
+@@ -40,7 +41,7 @@
+ #include "elan_i2c.h"
+ 
+ #define DRIVER_NAME		"elan_i2c"
+-#define ELAN_DRIVER_VERSION	"1.6.1"
++#define ELAN_DRIVER_VERSION	"1.6.2"
+ #define ELAN_VENDOR_ID		0x04f3
+ #define ETP_MAX_PRESSURE	255
+ #define ETP_FWIDTH_REDUCE	90
+@@ -199,9 +200,41 @@ static int elan_sleep(struct elan_tp_data *data)
+ 	return error;
+ }
+ 
++static int elan_query_product(struct elan_tp_data *data)
++{
++	int error;
++
++	error = data->ops->get_product_id(data->client, &data->product_id);
++	if (error)
++		return error;
++
++	error = data->ops->get_sm_version(data->client, &data->ic_type,
++					  &data->sm_version);
++	if (error)
++		return error;
++
++	return 0;
++}
++
++static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
++{
++	if (data->ic_type != 0x0E)
++		return false;
++
++	switch (data->product_id) {
++	case 0x05 ... 0x07:
++	case 0x09:
++	case 0x13:
++		return true;
++	default:
++		return false;
++	}
++}
++
+ static int __elan_initialize(struct elan_tp_data *data)
+ {
+ 	struct i2c_client *client = data->client;
++	bool woken_up = false;
+ 	int error;
+ 
+ 	error = data->ops->initialize(client);
+@@ -210,6 +243,27 @@ static int __elan_initialize(struct elan_tp_data *data)
+ 		return error;
+ 	}
+ 
++	error = elan_query_product(data);
++	if (error)
++		return error;
++
++	/*
++	 * Some ASUS devices were shipped with firmware that requires
++	 * touchpads to be woken up first, before attempting to switch
++	 * them into absolute reporting mode.
++	 */
++	if (elan_check_ASUS_special_fw(data)) {
++		error = data->ops->sleep_control(client, false);
++		if (error) {
++			dev_err(&client->dev,
++				"failed to wake device up: %d\n", error);
++			return error;
++		}
++
++		msleep(200);
++		woken_up = true;
++	}
++
+ 	data->mode |= ETP_ENABLE_ABS;
+ 	error = data->ops->set_mode(client, data->mode);
+ 	if (error) {
+@@ -218,11 +272,13 @@ static int __elan_initialize(struct elan_tp_data *data)
+ 		return error;
+ 	}
+ 
+-	error = data->ops->sleep_control(client, false);
+-	if (error) {
+-		dev_err(&client->dev,
+-			"failed to wake device up: %d\n", error);
+-		return error;
++	if (!woken_up) {
++		error = data->ops->sleep_control(client, false);
++		if (error) {
++			dev_err(&client->dev,
++				"failed to wake device up: %d\n", error);
++			return error;
++		}
+ 	}
+ 
+ 	return 0;
+@@ -248,10 +304,6 @@ static int elan_query_device_info(struct elan_tp_data *data)
+ {
+ 	int error;
+ 
+-	error = data->ops->get_product_id(data->client, &data->product_id);
+-	if (error)
+-		return error;
+-
+ 	error = data->ops->get_version(data->client, false, &data->fw_version);
+ 	if (error)
+ 		return error;
+@@ -261,11 +313,6 @@ static int elan_query_device_info(struct elan_tp_data *data)
+ 	if (error)
+ 		return error;
+ 
+-	error = data->ops->get_sm_version(data->client, &data->ic_type,
+-					  &data->sm_version);
+-	if (error)
+-		return error;
+-
+ 	error = data->ops->get_version(data->client, true, &data->iap_version);
+ 	if (error)
+ 		return error;
+diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
+index 880c40b..b7e8c11 100644
+--- a/drivers/input/touchscreen/sur40.c
++++ b/drivers/input/touchscreen/sur40.c
+@@ -126,7 +126,7 @@ struct sur40_image_header {
+ #define VIDEO_PACKET_SIZE  16384
+ 
+ /* polling interval (ms) */
+-#define POLL_INTERVAL 4
++#define POLL_INTERVAL 1
+ 
+ /* maximum number of contacts FIXME: this is a guess? */
+ #define MAX_CONTACTS 64
+@@ -448,7 +448,7 @@ static void sur40_process_video(struct sur40_state *sur40)
+ 
+ 	/* return error if streaming was stopped in the meantime */
+ 	if (sur40->sequence == -1)
+-		goto err_poll;
++		return;
+ 
+ 	/* mark as finished */
+ 	new_buf->vb.vb2_buf.timestamp = ktime_get_ns();
+@@ -736,6 +736,7 @@ static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count)
+ static void sur40_stop_streaming(struct vb2_queue *vq)
+ {
+ 	struct sur40_state *sur40 = vb2_get_drv_priv(vq);
++	vb2_wait_for_all_buffers(vq);
+ 	sur40->sequence = -1;
+ 
+ 	/* Release all active buffers */
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 634f636..2511c8b 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -466,9 +466,11 @@ static void init_iommu_group(struct device *dev)
+ 	if (!domain)
+ 		goto out;
+ 
+-	dma_domain = to_pdomain(domain)->priv;
++	if (to_pdomain(domain)->flags == PD_DMA_OPS_MASK) {
++		dma_domain = to_pdomain(domain)->priv;
++		init_unity_mappings_for_device(dev, dma_domain);
++	}
+ 
+-	init_unity_mappings_for_device(dev, dma_domain);
+ out:
+ 	iommu_group_put(group);
+ }
+@@ -2512,8 +2514,15 @@ static void update_device_table(struct protection_domain *domain)
+ {
+ 	struct iommu_dev_data *dev_data;
+ 
+-	list_for_each_entry(dev_data, &domain->dev_list, list)
++	list_for_each_entry(dev_data, &domain->dev_list, list) {
+ 		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
++
++		if (dev_data->devid == dev_data->alias)
++			continue;
++
++		/* There is an alias, update device table entry for it */
++		set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled);
++	}
+ }
+ 
+ static void update_domain(struct protection_domain *domain)
+@@ -3103,9 +3112,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+ static void amd_iommu_domain_free(struct iommu_domain *dom)
+ {
+ 	struct protection_domain *domain;
+-
+-	if (!dom)
+-		return;
++	struct dma_ops_domain *dma_dom;
+ 
+ 	domain = to_pdomain(dom);
+ 
+@@ -3114,13 +3121,24 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
+ 
+ 	BUG_ON(domain->dev_cnt != 0);
+ 
+-	if (domain->mode != PAGE_MODE_NONE)
+-		free_pagetable(domain);
++	if (!dom)
++		return;
++
++	switch (dom->type) {
++	case IOMMU_DOMAIN_DMA:
++		dma_dom = domain->priv;
++		dma_ops_domain_free(dma_dom);
++		break;
++	default:
++		if (domain->mode != PAGE_MODE_NONE)
++			free_pagetable(domain);
+ 
+-	if (domain->flags & PD_IOMMUV2_MASK)
+-		free_gcr3_table(domain);
++		if (domain->flags & PD_IOMMUV2_MASK)
++			free_gcr3_table(domain);
+ 
+-	protection_domain_free(domain);
++		protection_domain_free(domain);
++		break;
++	}
+ }
+ 
+ static void amd_iommu_detach_device(struct iommu_domain *dom,
+diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
+index 5ecc86c..e27e3b7df 100644
+--- a/drivers/iommu/exynos-iommu.c
++++ b/drivers/iommu/exynos-iommu.c
+@@ -709,6 +709,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
+ 		.name		= "exynos-sysmmu",
+ 		.of_match_table	= sysmmu_of_match,
+ 		.pm		= &sysmmu_pm_ops,
++		.suppress_bind_attrs = true,
+ 	}
+ };
+ 
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 323dac9..d416242 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2076,7 +2076,7 @@ out_unlock:
+ 	spin_unlock(&iommu->lock);
+ 	spin_unlock_irqrestore(&device_domain_lock, flags);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ struct domain_context_mapping_data {
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index a1ed1b7..f5c90e1 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -576,7 +576,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
+ 	return 0;
+ 
+ found_translation:
+-	iova &= (ARM_LPAE_GRANULE(data) - 1);
++	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
+ 	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
+ }
+ 
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index b7341de..4bb49cd 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -289,10 +289,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 		pb->bio_submitted = true;
+ 
+ 		/*
+-		 * Map reads as normal.
++		 * Map reads as normal only if corrupt_bio_byte set.
+ 		 */
+-		if (bio_data_dir(bio) == READ)
+-			goto map_bio;
++		if (bio_data_dir(bio) == READ) {
++			/* If flags were specified, only corrupt those that match. */
++			if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
++			    all_corrupt_bio_flags_match(bio, fc))
++				goto map_bio;
++			else
++				return -EIO;
++		}
+ 
+ 		/*
+ 		 * Drop writes?
+@@ -330,12 +336,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
+ 
+ 	/*
+ 	 * Corrupt successful READs while in down state.
+-	 * If flags were specified, only corrupt those that match.
+ 	 */
+-	if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
+-	    (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
+-	    all_corrupt_bio_flags_match(bio, fc))
+-		corrupt_bio_data(bio, fc);
++	if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
++		if (fc->corrupt_bio_byte)
++			corrupt_bio_data(bio, fc);
++		else
++			return -EIO;
++	}
+ 
+ 	return error;
+ }
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 459a9f8..0f0eb8a 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -453,9 +453,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+ 	 */
+ 
+ 	offset = block << v->data_dev_block_bits;
+-
+-	res = offset;
+-	div64_u64(res, v->fec->rounds << v->data_dev_block_bits);
++	res = div64_u64(offset, v->fec->rounds << v->data_dev_block_bits);
+ 
+ 	/*
+ 	 * The base RS block we can feed to the interleaver to find out all
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 1b2f962..fd40bcb 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2175,7 +2175,7 @@ static void dm_request_fn(struct request_queue *q)
+ 		     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ 		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
+ 		    (ti->type->busy && ti->type->busy(ti))) {
+-			blk_delay_queue(q, HZ / 100);
++			blk_delay_queue(q, 10);
+ 			return;
+ 		}
+ 
+@@ -3128,7 +3128,8 @@ static void unlock_fs(struct mapped_device *md)
+  * Caller must hold md->suspend_lock
+  */
+ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+-			unsigned suspend_flags, int interruptible)
++			unsigned suspend_flags, int interruptible,
++			int dmf_suspended_flag)
+ {
+ 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
+ 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
+@@ -3195,6 +3196,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+ 	 * to finish.
+ 	 */
+ 	r = dm_wait_for_completion(md, interruptible);
++	if (!r)
++		set_bit(dmf_suspended_flag, &md->flags);
+ 
+ 	if (noflush)
+ 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
+@@ -3256,12 +3259,10 @@ retry:
+ 
+ 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+ 
+-	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
++	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
+ 	if (r)
+ 		goto out_unlock;
+ 
+-	set_bit(DMF_SUSPENDED, &md->flags);
+-
+ 	dm_table_postsuspend_targets(map);
+ 
+ out_unlock:
+@@ -3355,9 +3356,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
+ 	 * would require changing .presuspend to return an error -- avoid this
+ 	 * until there is a need for more elaborate variants of internal suspend.
+ 	 */
+-	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
+-
+-	set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
++	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
++			    DMF_SUSPENDED_INTERNALLY);
+ 
+ 	dm_table_postsuspend_targets(map);
+ }
+diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
+index 1100e98..7df7fb3 100644
+--- a/drivers/media/dvb-core/dvb_ringbuffer.c
++++ b/drivers/media/dvb-core/dvb_ringbuffer.c
+@@ -55,7 +55,13 @@ void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
+ 
+ int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
+ {
+-	return (rbuf->pread==rbuf->pwrite);
++	/* smp_load_acquire() to load write pointer on reader side
++	 * this pairs with smp_store_release() in dvb_ringbuffer_write(),
++	 * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
++	 *
++	 * for memory barriers also see Documentation/circular-buffers.txt
++	 */
++	return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));
+ }
+ 
+ 
+@@ -64,7 +70,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
+ {
+ 	ssize_t free;
+ 
+-	free = rbuf->pread - rbuf->pwrite;
++	/* ACCESS_ONCE() to load read pointer on writer side
++	 * this pairs with smp_store_release() in dvb_ringbuffer_read(),
++	 * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
++	 * or dvb_ringbuffer_reset()
++	 */
++	free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite;
+ 	if (free <= 0)
+ 		free += rbuf->size;
+ 	return free-1;
+@@ -76,7 +87,11 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
+ {
+ 	ssize_t avail;
+ 
+-	avail = rbuf->pwrite - rbuf->pread;
++	/* smp_load_acquire() to load write pointer on reader side
++	 * this pairs with smp_store_release() in dvb_ringbuffer_write(),
++	 * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
++	 */
++	avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;
+ 	if (avail < 0)
+ 		avail += rbuf->size;
+ 	return avail;
+@@ -86,14 +101,25 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
+ 
+ void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
+ {
+-	rbuf->pread = rbuf->pwrite;
++	/* dvb_ringbuffer_flush() counts as read operation
++	 * smp_load_acquire() to load write pointer
++	 * smp_store_release() to update read pointer, this ensures that the
++	 * correct pointer is visible for subsequent dvb_ringbuffer_free()
++	 * calls on other cpu cores
++	 */
++	smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));
+ 	rbuf->error = 0;
+ }
+ EXPORT_SYMBOL(dvb_ringbuffer_flush);
+ 
+ void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf)
+ {
+-	rbuf->pread = rbuf->pwrite = 0;
++	/* dvb_ringbuffer_reset() counts as read and write operation
++	 * smp_store_release() to update read pointer
++	 */
++	smp_store_release(&rbuf->pread, 0);
++	/* smp_store_release() to update write pointer */
++	smp_store_release(&rbuf->pwrite, 0);
+ 	rbuf->error = 0;
+ }
+ 
+@@ -119,12 +145,17 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si
+ 			return -EFAULT;
+ 		buf += split;
+ 		todo -= split;
+-		rbuf->pread = 0;
++		/* smp_store_release() for read pointer update to ensure
++		 * that buf is not overwritten until read is complete,
++		 * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
++		 */
++		smp_store_release(&rbuf->pread, 0);
+ 	}
+ 	if (copy_to_user(buf, rbuf->data+rbuf->pread, todo))
+ 		return -EFAULT;
+ 
+-	rbuf->pread = (rbuf->pread + todo) % rbuf->size;
++	/* smp_store_release() to update read pointer, see above */
++	smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
+ 
+ 	return len;
+ }
+@@ -139,11 +170,16 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
+ 		memcpy(buf, rbuf->data+rbuf->pread, split);
+ 		buf += split;
+ 		todo -= split;
+-		rbuf->pread = 0;
++		/* smp_store_release() for read pointer update to ensure
++		 * that buf is not overwritten until read is complete,
++		 * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
++		 */
++		smp_store_release(&rbuf->pread, 0);
+ 	}
+ 	memcpy(buf, rbuf->data+rbuf->pread, todo);
+ 
+-	rbuf->pread = (rbuf->pread + todo) % rbuf->size;
++	/* smp_store_release() to update read pointer, see above */
++	smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
+ }
+ 
+ 
+@@ -158,10 +194,16 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t
+ 		memcpy(rbuf->data+rbuf->pwrite, buf, split);
+ 		buf += split;
+ 		todo -= split;
+-		rbuf->pwrite = 0;
++		/* smp_store_release() for write pointer update to ensure that
++		 * written data is visible on other cpu cores before the pointer
++		 * update, this pairs with smp_load_acquire() in
++		 * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
++		 */
++		smp_store_release(&rbuf->pwrite, 0);
+ 	}
+ 	memcpy(rbuf->data+rbuf->pwrite, buf, todo);
+-	rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
++	/* smp_store_release() for write pointer update, see above */
++	smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
+ 
+ 	return len;
+ }
+@@ -181,12 +223,18 @@ ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
+ 			return len - todo;
+ 		buf += split;
+ 		todo -= split;
+-		rbuf->pwrite = 0;
++		/* smp_store_release() for write pointer update to ensure that
++		 * written data is visible on other cpu cores before the pointer
++		 * update, this pairs with smp_load_acquire() in
++		 * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
++		 */
++		smp_store_release(&rbuf->pwrite, 0);
+ 	}
+ 	status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
+ 	if (status)
+ 		return len - todo;
+-	rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
++	/* smp_store_release() for write pointer update, see above */
++	smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
+ 
+ 	return len;
+ }
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+index b16466f..beb4fd5 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+@@ -1050,6 +1050,11 @@ static int match_child(struct device *dev, void *data)
+ 	return !strcmp(dev_name(dev), (char *)data);
+ }
+ 
++static void s5p_mfc_memdev_release(struct device *dev)
++{
++	dma_release_declared_memory(dev);
++}
++
+ static void *mfc_get_drv_data(struct platform_device *pdev);
+ 
+ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+@@ -1062,6 +1067,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+ 		mfc_err("Not enough memory\n");
+ 		return -ENOMEM;
+ 	}
++
++	dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l");
++	dev->mem_dev_l->release = s5p_mfc_memdev_release;
+ 	device_initialize(dev->mem_dev_l);
+ 	of_property_read_u32_array(dev->plat_dev->dev.of_node,
+ 			"samsung,mfc-l", mem_info, 2);
+@@ -1079,6 +1087,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+ 		mfc_err("Not enough memory\n");
+ 		return -ENOMEM;
+ 	}
++
++	dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r");
++	dev->mem_dev_r->release = s5p_mfc_memdev_release;
+ 	device_initialize(dev->mem_dev_r);
+ 	of_property_read_u32_array(dev->plat_dev->dev.of_node,
+ 			"samsung,mfc-r", mem_info, 2);
+diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
+index 6ffe776..a0fd4e6 100644
+--- a/drivers/media/rc/ir-rc5-decoder.c
++++ b/drivers/media/rc/ir-rc5-decoder.c
+@@ -29,7 +29,7 @@
+ #define RC5_BIT_START		(1 * RC5_UNIT)
+ #define RC5_BIT_END		(1 * RC5_UNIT)
+ #define RC5X_SPACE		(4 * RC5_UNIT)
+-#define RC5_TRAILER		(10 * RC5_UNIT) /* In reality, approx 100 */
++#define RC5_TRAILER		(6 * RC5_UNIT) /* In reality, approx 100 */
+ 
+ enum rc5_state {
+ 	STATE_INACTIVE,
+diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
+index 99b303b..e8ceb0e 100644
+--- a/drivers/media/rc/nuvoton-cir.c
++++ b/drivers/media/rc/nuvoton-cir.c
+@@ -401,6 +401,7 @@ static int nvt_hw_detect(struct nvt_dev *nvt)
+ 	/* Check if we're wired for the alternate EFER setup */
+ 	nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
+ 	if (nvt->chip_major == 0xff) {
++		nvt_efm_disable(nvt);
+ 		nvt->cr_efir = CR_EFIR2;
+ 		nvt->cr_efdr = CR_EFDR2;
+ 		nvt_efm_enable(nvt);
+diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
+index 78c12d2..5dab024 100644
+--- a/drivers/media/usb/usbtv/usbtv-audio.c
++++ b/drivers/media/usb/usbtv/usbtv-audio.c
+@@ -278,6 +278,9 @@ static void snd_usbtv_trigger(struct work_struct *work)
+ {
+ 	struct usbtv *chip = container_of(work, struct usbtv, snd_trigger);
+ 
++	if (!chip->snd)
++		return;
++
+ 	if (atomic_read(&chip->snd_stream))
+ 		usbtv_audio_start(chip);
+ 	else
+@@ -378,6 +381,8 @@ err:
+ 
+ void usbtv_audio_free(struct usbtv *usbtv)
+ {
++	cancel_work_sync(&usbtv->snd_trigger);
++
+ 	if (usbtv->snd && usbtv->udev) {
+ 		snd_card_free(usbtv->snd);
+ 		usbtv->snd = NULL;
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index 9fbcb67..633fc1a 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -1648,7 +1648,7 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+ 			     void *pb, int nonblocking)
+ {
+ 	unsigned long flags;
+-	int ret;
++	int ret = 0;
+ 
+ 	/*
+ 	 * Wait for at least one buffer to become available on the done_list.
+@@ -1664,10 +1664,12 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+ 	spin_lock_irqsave(&q->done_lock, flags);
+ 	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
+ 	/*
+-	 * Only remove the buffer from done_list if v4l2_buffer can handle all
+-	 * the planes.
++	 * Only remove the buffer from done_list if all planes can be
++	 * handled. Some cases such as V4L2 file I/O and DVB have pb
++	 * == NULL; skip the check then as there's nothing to verify.
+ 	 */
+-	ret = call_bufop(q, verify_planes_array, *vb, pb);
++	if (pb)
++		ret = call_bufop(q, verify_planes_array, *vb, pb);
+ 	if (!ret)
+ 		list_del(&(*vb)->done_entry);
+ 	spin_unlock_irqrestore(&q->done_lock, flags);
+diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
+index 0b1b8c7..7f366f1 100644
+--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
+@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
+ 	return 0;
+ }
+ 
++static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
++{
++	return __verify_planes_array(vb, pb);
++}
++
+ /**
+  * __verify_length() - Verify that the bytesused value for each plane fits in
+  * the plane length and that the data offset doesn't exceed the bytesused value.
+@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
+ }
+ 
+ static const struct vb2_buf_ops v4l2_buf_ops = {
++	.verify_planes_array	= __verify_planes_array_core,
+ 	.fill_user_buffer	= __fill_v4l2_buffer,
+ 	.fill_vb2_buffer	= __fill_vb2_buffer,
+ 	.copy_timestamp		= __copy_timestamp,
+diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
+index 1be47ad..880d469 100644
+--- a/drivers/mfd/qcom_rpm.c
++++ b/drivers/mfd/qcom_rpm.c
+@@ -34,7 +34,13 @@ struct qcom_rpm_resource {
+ struct qcom_rpm_data {
+ 	u32 version;
+ 	const struct qcom_rpm_resource *resource_table;
+-	unsigned n_resources;
++	unsigned int n_resources;
++	unsigned int req_ctx_off;
++	unsigned int req_sel_off;
++	unsigned int ack_ctx_off;
++	unsigned int ack_sel_off;
++	unsigned int req_sel_size;
++	unsigned int ack_sel_size;
+ };
+ 
+ struct qcom_rpm {
+@@ -61,11 +67,7 @@ struct qcom_rpm {
+ 
+ #define RPM_REQUEST_TIMEOUT	(5 * HZ)
+ 
+-#define RPM_REQUEST_CONTEXT	3
+-#define RPM_REQ_SELECT		11
+-#define RPM_ACK_CONTEXT		15
+-#define RPM_ACK_SELECTOR	23
+-#define RPM_SELECT_SIZE		7
++#define RPM_MAX_SEL_SIZE	7
+ 
+ #define RPM_NOTIFICATION	BIT(30)
+ #define RPM_REJECTED		BIT(31)
+@@ -157,6 +159,12 @@ static const struct qcom_rpm_data apq8064_template = {
+ 	.version = 3,
+ 	.resource_table = apq8064_rpm_resource_table,
+ 	.n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
++	.req_ctx_off = 3,
++	.req_sel_off = 11,
++	.ack_ctx_off = 15,
++	.ack_sel_off = 23,
++	.req_sel_size = 4,
++	.ack_sel_size = 7,
+ };
+ 
+ static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
+@@ -240,6 +248,12 @@ static const struct qcom_rpm_data msm8660_template = {
+ 	.version = 2,
+ 	.resource_table = msm8660_rpm_resource_table,
+ 	.n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
++	.req_ctx_off = 3,
++	.req_sel_off = 11,
++	.ack_ctx_off = 19,
++	.ack_sel_off = 27,
++	.req_sel_size = 7,
++	.ack_sel_size = 7,
+ };
+ 
+ static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
+@@ -322,6 +336,12 @@ static const struct qcom_rpm_data msm8960_template = {
+ 	.version = 3,
+ 	.resource_table = msm8960_rpm_resource_table,
+ 	.n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
++	.req_ctx_off = 3,
++	.req_sel_off = 11,
++	.ack_ctx_off = 15,
++	.ack_sel_off = 23,
++	.req_sel_size = 4,
++	.ack_sel_size = 7,
+ };
+ 
+ static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
+@@ -362,6 +382,12 @@ static const struct qcom_rpm_data ipq806x_template = {
+ 	.version = 3,
+ 	.resource_table = ipq806x_rpm_resource_table,
+ 	.n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table),
++	.req_ctx_off = 3,
++	.req_sel_off = 11,
++	.ack_ctx_off = 15,
++	.ack_sel_off = 23,
++	.req_sel_size = 4,
++	.ack_sel_size = 7,
+ };
+ 
+ static const struct of_device_id qcom_rpm_of_match[] = {
+@@ -380,7 +406,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
+ {
+ 	const struct qcom_rpm_resource *res;
+ 	const struct qcom_rpm_data *data = rpm->data;
+-	u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
++	u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 };
+ 	int left;
+ 	int ret = 0;
+ 	int i;
+@@ -398,12 +424,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
+ 		writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
+ 
+ 	bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
+-	for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
++	for (i = 0; i < rpm->data->req_sel_size; i++) {
+ 		writel_relaxed(sel_mask[i],
+-			       RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
++			       RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i));
+ 	}
+ 
+-	writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
++	writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off));
+ 
+ 	reinit_completion(&rpm->ack);
+ 	regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
+@@ -426,10 +452,11 @@ static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev)
+ 	u32 ack;
+ 	int i;
+ 
+-	ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
+-	for (i = 0; i < RPM_SELECT_SIZE; i++)
+-		writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
+-	writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
++	ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
++	for (i = 0; i < rpm->data->ack_sel_size; i++)
++		writel_relaxed(0,
++			RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
++	writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
+ 
+ 	if (ack & RPM_NOTIFICATION) {
+ 		dev_warn(rpm->dev, "ignoring notification!\n");
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index 0b0dc29..77533f7 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2610,7 +2610,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+ 		int cached = writelen > bytes && page != blockmask;
+ 		uint8_t *wbuf = buf;
+ 		int use_bufpoi;
+-		int part_pagewr = (column || writelen < (mtd->writesize - 1));
++		int part_pagewr = (column || writelen < mtd->writesize);
+ 
+ 		if (part_pagewr)
+ 			use_bufpoi = 1;
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index ef36182..0680516 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -874,7 +874,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 	for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ 		ubi = ubi_devices[i];
+ 		if (ubi && mtd->index == ubi->mtd->index) {
+-			ubi_err(ubi, "mtd%d is already attached to ubi%d",
++			pr_err("ubi: mtd%d is already attached to ubi%d",
+ 				mtd->index, i);
+ 			return -EEXIST;
+ 		}
+@@ -889,7 +889,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 	 * no sense to attach emulated MTD devices, so we prohibit this.
+ 	 */
+ 	if (mtd->type == MTD_UBIVOLUME) {
+-		ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
++		pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI",
+ 			mtd->index);
+ 		return -EINVAL;
+ 	}
+@@ -900,7 +900,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 			if (!ubi_devices[ubi_num])
+ 				break;
+ 		if (ubi_num == UBI_MAX_DEVICES) {
+-			ubi_err(ubi, "only %d UBI devices may be created",
++			pr_err("ubi: only %d UBI devices may be created",
+ 				UBI_MAX_DEVICES);
+ 			return -ENFILE;
+ 		}
+@@ -910,7 +910,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 
+ 		/* Make sure ubi_num is not busy */
+ 		if (ubi_devices[ubi_num]) {
+-			ubi_err(ubi, "already exists");
++			pr_err("ubi: ubi%i already exists", ubi_num);
+ 			return -EEXIST;
+ 		}
+ 	}
+@@ -992,6 +992,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 			goto out_detach;
+ 	}
+ 
++	/* Make device "available" before it becomes accessible via sysfs */
++	ubi_devices[ubi_num] = ubi;
++
+ 	err = uif_init(ubi, &ref);
+ 	if (err)
+ 		goto out_detach;
+@@ -1036,7 +1039,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 	wake_up_process(ubi->bgt_thread);
+ 	spin_unlock(&ubi->wl_lock);
+ 
+-	ubi_devices[ubi_num] = ubi;
+ 	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
+ 	return ubi_num;
+ 
+@@ -1047,6 +1049,7 @@ out_uif:
+ 	ubi_assert(ref);
+ 	uif_close(ubi);
+ out_detach:
++	ubi_devices[ubi_num] = NULL;
+ 	ubi_wl_close(ubi);
+ 	ubi_free_internal_volumes(ubi);
+ 	vfree(ubi->vtbl);
+diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
+index 10059df..0138f52 100644
+--- a/drivers/mtd/ubi/vmt.c
++++ b/drivers/mtd/ubi/vmt.c
+@@ -488,13 +488,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+ 		spin_unlock(&ubi->volumes_lock);
+ 	}
+ 
+-	/* Change volume table record */
+-	vtbl_rec = ubi->vtbl[vol_id];
+-	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+-	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+-	if (err)
+-		goto out_acc;
+-
+ 	if (pebs < 0) {
+ 		for (i = 0; i < -pebs; i++) {
+ 			err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
+@@ -512,6 +505,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+ 		spin_unlock(&ubi->volumes_lock);
+ 	}
+ 
++	/*
++	 * When we shrink a volume we have to flush all pending (erase) work.
++	 * Otherwise it can happen that upon next attach UBI finds a LEB with
++	 * lnum > highest_lnum and refuses to attach.
++	 */
++	if (pebs < 0) {
++		err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
++		if (err)
++			goto out_acc;
++	}
++
++	/* Change volume table record */
++	vtbl_rec = ubi->vtbl[vol_id];
++	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
++	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
++	if (err)
++		goto out_acc;
++
+ 	vol->reserved_pebs = reserved_pebs;
+ 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ 		vol->used_ebs = reserved_pebs;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+index 5b30922..2ce3199 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+@@ -2469,10 +2469,22 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
+ void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
+ {
+ 	struct brcmf_fws_info *fws = drvr->fws;
++	struct brcmf_if *ifp;
++	int i;
+ 
+-	fws->bus_flow_blocked = flow_blocked;
+-	if (!flow_blocked)
+-		brcmf_fws_schedule_deq(fws);
+-	else
+-		fws->stats.bus_flow_block++;
++	if (fws->avoid_queueing) {
++		for (i = 0; i < BRCMF_MAX_IFS; i++) {
++			ifp = drvr->iflist[i];
++			if (!ifp || !ifp->ndev)
++				continue;
++			brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW,
++					     flow_blocked);
++		}
++	} else {
++		fws->bus_flow_blocked = flow_blocked;
++		if (!flow_blocked)
++			brcmf_fws_schedule_deq(fws);
++		else
++			fws->stats.bus_flow_block++;
++	}
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index a588b05..6f020e4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -433,6 +433,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ /* 8000 Series */
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
+@@ -454,6 +455,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
+@@ -481,6 +484,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
+@@ -491,6 +496,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
+ 
+ /* 9000 Series */
+ 	{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+index de6974f..2d8cce2 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+@@ -496,7 +496,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
+ /*****************************************************
+ * Helpers
+ ******************************************************/
+-static inline void iwl_disable_interrupts(struct iwl_trans *trans)
++static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ 
+@@ -519,7 +519,16 @@ static inline void iwl_disable_interrupts(struct iwl_trans *trans)
+ 	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
+ }
+ 
+-static inline void iwl_enable_interrupts(struct iwl_trans *trans)
++static inline void iwl_disable_interrupts(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	spin_lock(&trans_pcie->irq_lock);
++	_iwl_disable_interrupts(trans);
++	spin_unlock(&trans_pcie->irq_lock);
++}
++
++static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ 
+@@ -542,6 +551,14 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
+ 	}
+ }
+ 
++static inline void iwl_enable_interrupts(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	spin_lock(&trans_pcie->irq_lock);
++	_iwl_enable_interrupts(trans);
++	spin_unlock(&trans_pcie->irq_lock);
++}
+ static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
+ {
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index 0a4a3c5..aaaf2ad 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1507,7 +1507,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
+ 		 * have anything to service
+ 		 */
+ 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
+-			iwl_enable_interrupts(trans);
++			_iwl_enable_interrupts(trans);
+ 		spin_unlock(&trans_pcie->irq_lock);
+ 		lock_map_release(&trans->sync_cmd_lockdep_map);
+ 		return IRQ_NONE;
+@@ -1699,15 +1699,17 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
+ 			 inta & ~trans_pcie->inta_mask);
+ 	}
+ 
++	spin_lock(&trans_pcie->irq_lock);
++	/* only Re-enable all interrupt if disabled by irq */
++	if (test_bit(STATUS_INT_ENABLED, &trans->status))
++		_iwl_enable_interrupts(trans);
+ 	/* we are loading the firmware, enable FH_TX interrupt only */
+-	if (handled & CSR_INT_BIT_FH_TX)
++	else if (handled & CSR_INT_BIT_FH_TX)
+ 		iwl_enable_fw_load_int(trans);
+-	/* only Re-enable all interrupt if disabled by irq */
+-	else if (test_bit(STATUS_INT_ENABLED, &trans->status))
+-		iwl_enable_interrupts(trans);
+ 	/* Re-enable RF_KILL if it occurred */
+ 	else if (handled & CSR_INT_BIT_RF_KILL)
+ 		iwl_enable_rfkill_int(trans);
++	spin_unlock(&trans_pcie->irq_lock);
+ 
+ out:
+ 	lock_map_release(&trans->sync_cmd_lockdep_map);
+@@ -1771,7 +1773,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
+ 		return;
+ 
+ 	spin_lock(&trans_pcie->irq_lock);
+-	iwl_disable_interrupts(trans);
++	_iwl_disable_interrupts(trans);
+ 
+ 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
+ 
+@@ -1787,7 +1789,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
+ 	trans_pcie->use_ict = true;
+ 	trans_pcie->ict_index = 0;
+ 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
+-	iwl_enable_interrupts(trans);
++	_iwl_enable_interrupts(trans);
+ 	spin_unlock(&trans_pcie->irq_lock);
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index f603d78..d9f1394 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -801,6 +801,8 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
+ 
+ 	*first_ucode_section = last_read_idx;
+ 
++	iwl_enable_interrupts(trans);
++
+ 	if (cpu == 1)
+ 		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
+ 	else
+@@ -980,6 +982,8 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
+ 		iwl_pcie_apply_destination(trans);
+ 	}
+ 
++	iwl_enable_interrupts(trans);
++
+ 	/* release CPU reset */
+ 	iwl_write32(trans, CSR_RESET, 0);
+ 
+@@ -1033,9 +1037,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+ 	was_hw_rfkill = iwl_is_rfkill_set(trans);
+ 
+ 	/* tell the device to stop sending interrupts */
+-	spin_lock(&trans_pcie->irq_lock);
+ 	iwl_disable_interrupts(trans);
+-	spin_unlock(&trans_pcie->irq_lock);
+ 
+ 	/* device going down, Stop using ICT table */
+ 	iwl_pcie_disable_ict(trans);
+@@ -1079,9 +1081,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+ 	 * the time, unless the interrupt is ACKed even if the interrupt
+ 	 * should be masked. Re-ACK all the interrupts here.
+ 	 */
+-	spin_lock(&trans_pcie->irq_lock);
+ 	iwl_disable_interrupts(trans);
+-	spin_unlock(&trans_pcie->irq_lock);
+ 
+ 	/* clear all status bits */
+ 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+@@ -1215,7 +1215,6 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ 		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
+ 	else
+ 		ret = iwl_pcie_load_given_ucode(trans, fw);
+-	iwl_enable_interrupts(trans);
+ 
+ 	/* re-check RF-Kill state since we may have missed the interrupt */
+ 	hw_rfkill = iwl_is_rfkill_set(trans);
+@@ -1567,15 +1566,11 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
+ 	mutex_lock(&trans_pcie->mutex);
+ 
+ 	/* disable interrupts - don't enable HW RF kill interrupt */
+-	spin_lock(&trans_pcie->irq_lock);
+ 	iwl_disable_interrupts(trans);
+-	spin_unlock(&trans_pcie->irq_lock);
+ 
+ 	iwl_pcie_apm_stop(trans, true);
+ 
+-	spin_lock(&trans_pcie->irq_lock);
+ 	iwl_disable_interrupts(trans);
+-	spin_unlock(&trans_pcie->irq_lock);
+ 
+ 	iwl_pcie_disable_ict(trans);
+ 
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index ebf84e3..8bb3d1a 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -112,6 +112,7 @@ static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
+ 	return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
+ }
+ 
++/* always return newly allocated name, caller must free after use */
+ static const char *safe_name(struct kobject *kobj, const char *orig_name)
+ {
+ 	const char *name = orig_name;
+@@ -126,9 +127,12 @@ static const char *safe_name(struct kobject *kobj, const char *orig_name)
+ 		name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
+ 	}
+ 
+-	if (name != orig_name)
++	if (name == orig_name) {
++		name = kstrdup(orig_name, GFP_KERNEL);
++	} else {
+ 		pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
+ 			kobject_name(kobj), name);
++	}
+ 	return name;
+ }
+ 
+@@ -159,6 +163,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+ int __of_attach_node_sysfs(struct device_node *np)
+ {
+ 	const char *name;
++	struct kobject *parent;
+ 	struct property *pp;
+ 	int rc;
+ 
+@@ -171,15 +176,16 @@ int __of_attach_node_sysfs(struct device_node *np)
+ 	np->kobj.kset = of_kset;
+ 	if (!np->parent) {
+ 		/* Nodes without parents are new top level trees */
+-		rc = kobject_add(&np->kobj, NULL, "%s",
+-				 safe_name(&of_kset->kobj, "base"));
++		name = safe_name(&of_kset->kobj, "base");
++		parent = NULL;
+ 	} else {
+ 		name = safe_name(&np->parent->kobj, kbasename(np->full_name));
+-		if (!name || !name[0])
+-			return -EINVAL;
+-
+-		rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name);
++		parent = &np->parent->kobj;
+ 	}
++	if (!name)
++		return -ENOMEM;
++	rc = kobject_add(&np->kobj, parent, "%s", name);
++	kfree(name);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -1815,6 +1821,12 @@ int __of_remove_property(struct device_node *np, struct property *prop)
+ 	return 0;
+ }
+ 
++void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
++{
++	sysfs_remove_bin_file(&np->kobj, &prop->attr);
++	kfree(prop->attr.attr.name);
++}
++
+ void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
+ {
+ 	if (!IS_ENABLED(CONFIG_SYSFS))
+@@ -1822,7 +1834,7 @@ void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
+ 
+ 	/* at early boot, bail here and defer setup to of_init() */
+ 	if (of_kset && of_node_is_attached(np))
+-		sysfs_remove_bin_file(&np->kobj, &prop->attr);
++		__of_sysfs_remove_bin_file(np, prop);
+ }
+ 
+ /**
+@@ -1895,7 +1907,7 @@ void __of_update_property_sysfs(struct device_node *np, struct property *newprop
+ 		return;
+ 
+ 	if (oldprop)
+-		sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
++		__of_sysfs_remove_bin_file(np, oldprop);
+ 	__of_add_property_sysfs(np, newprop);
+ }
+ 
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index 3033fa3..a201559 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -55,7 +55,7 @@ void __of_detach_node_sysfs(struct device_node *np)
+ 	/* only remove properties if on sysfs */
+ 	if (of_node_is_attached(np)) {
+ 		for_each_property_of_node(np, pp)
+-			sysfs_remove_bin_file(&np->kobj, &pp->attr);
++			__of_sysfs_remove_bin_file(np, pp);
+ 		kobject_del(&np->kobj);
+ 	}
+ 
+diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
+index 829469f..18bbb451 100644
+--- a/drivers/of/of_private.h
++++ b/drivers/of/of_private.h
+@@ -83,6 +83,9 @@ extern int __of_attach_node_sysfs(struct device_node *np);
+ extern void __of_detach_node(struct device_node *np);
+ extern void __of_detach_node_sysfs(struct device_node *np);
+ 
++extern void __of_sysfs_remove_bin_file(struct device_node *np,
++				       struct property *prop);
++
+ /* iterators for transactions, used for overlays */
+ /* forward iterator */
+ #define for_each_transaction_entry(_oft, _te) \
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index ee72ebe..7902fbf 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3189,13 +3189,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
+ }
+ 
+ /*
+- * Atheros AR93xx chips do not behave after a bus reset.  The device will
+- * throw a Link Down error on AER-capable systems and regardless of AER,
+- * config space of the device is never accessible again and typically
+- * causes the system to hang or reset when access is attempted.
++ * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
++ * The device will throw a Link Down error on AER-capable systems and
++ * regardless of AER, config space of the device is never accessible again
++ * and typically causes the system to hang or reset when access is attempted.
+  * http://www.spinics.net/lists/linux-pci/msg34797.html
+  */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
+ 
+ static void quirk_no_pm_reset(struct pci_dev *dev)
+ {
+diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
+index 4be3f5d..31156c9 100644
+--- a/drivers/phy/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/phy-rcar-gen3-usb2.c
+@@ -21,6 +21,7 @@
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
+ #include <linux/regulator/consumer.h>
++#include <linux/workqueue.h>
+ 
+ /******* USB2.0 Host registers (original offset is +0x200) *******/
+ #define USB2_INT_ENABLE		0x000
+@@ -81,9 +82,25 @@ struct rcar_gen3_chan {
+ 	struct extcon_dev *extcon;
+ 	struct phy *phy;
+ 	struct regulator *vbus;
++	struct work_struct work;
++	bool extcon_host;
+ 	bool has_otg;
+ };
+ 
++static void rcar_gen3_phy_usb2_work(struct work_struct *work)
++{
++	struct rcar_gen3_chan *ch = container_of(work, struct rcar_gen3_chan,
++						 work);
++
++	if (ch->extcon_host) {
++		extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, true);
++		extcon_set_cable_state_(ch->extcon, EXTCON_USB, false);
++	} else {
++		extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, false);
++		extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
++	}
++}
++
+ static void rcar_gen3_set_host_mode(struct rcar_gen3_chan *ch, int host)
+ {
+ 	void __iomem *usb2_base = ch->base;
+@@ -130,8 +147,8 @@ static void rcar_gen3_init_for_host(struct rcar_gen3_chan *ch)
+ 	rcar_gen3_set_host_mode(ch, 1);
+ 	rcar_gen3_enable_vbus_ctrl(ch, 1);
+ 
+-	extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, true);
+-	extcon_set_cable_state_(ch->extcon, EXTCON_USB, false);
++	ch->extcon_host = true;
++	schedule_work(&ch->work);
+ }
+ 
+ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
+@@ -140,8 +157,8 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
+ 	rcar_gen3_set_host_mode(ch, 0);
+ 	rcar_gen3_enable_vbus_ctrl(ch, 0);
+ 
+-	extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, false);
+-	extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
++	ch->extcon_host = false;
++	schedule_work(&ch->work);
+ }
+ 
+ static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
+@@ -301,6 +318,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ 	if (irq >= 0) {
+ 		int ret;
+ 
++		INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
+ 		irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
+ 				       IRQF_SHARED, dev_name(dev), channel);
+ 		if (irq < 0)
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index ac4f564..bf65c94 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -160,7 +160,6 @@ struct chv_pin_context {
+  * @pctldev: Pointer to the pin controller device
+  * @chip: GPIO chip in this pin controller
+  * @regs: MMIO registers
+- * @lock: Lock to serialize register accesses
+  * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
+  *		offset (in GPIO number space)
+  * @community: Community this pinctrl instance represents
+@@ -174,7 +173,6 @@ struct chv_pinctrl {
+ 	struct pinctrl_dev *pctldev;
+ 	struct gpio_chip chip;
+ 	void __iomem *regs;
+-	raw_spinlock_t lock;
+ 	unsigned intr_lines[16];
+ 	const struct chv_community *community;
+ 	u32 saved_intmask;
+@@ -657,6 +655,17 @@ static const struct chv_community *chv_communities[] = {
+ 	&southeast_community,
+ };
+ 
++/*
++ * Lock to serialize register accesses
++ *
++ * Due to a silicon issue, a shared lock must be used to prevent
++ * concurrent accesses across the 4 GPIO controllers.
++ *
++ * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005),
++ * errata #CHT34, for further information.
++ */
++static DEFINE_RAW_SPINLOCK(chv_lock);
++
+ static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset,
+ 				unsigned reg)
+ {
+@@ -718,13 +727,13 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ 	u32 ctrl0, ctrl1;
+ 	bool locked;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+ 	ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
+ 	locked = chv_pad_locked(pctrl, offset);
+ 
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
+ 		seq_puts(s, "GPIO ");
+@@ -787,14 +796,14 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
+ 
+ 	grp = &pctrl->community->groups[group];
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	/* Check first that the pad is not locked */
+ 	for (i = 0; i < grp->npins; i++) {
+ 		if (chv_pad_locked(pctrl, grp->pins[i])) {
+ 			dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
+ 				 grp->pins[i]);
+-			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++			raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 			return -EBUSY;
+ 		}
+ 	}
+@@ -837,7 +846,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
+ 			pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
+ 	}
+ 
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -851,13 +860,13 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 	void __iomem *reg;
+ 	u32 value;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	if (chv_pad_locked(pctrl, offset)) {
+ 		value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+ 		if (!(value & CHV_PADCTRL0_GPIOEN)) {
+ 			/* Locked so cannot enable */
+-			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++			raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 			return -EBUSY;
+ 		}
+ 	} else {
+@@ -897,7 +906,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 		chv_writel(value, reg);
+ 	}
+ 
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -911,13 +920,13 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
+ 	void __iomem *reg;
+ 	u32 value;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
+ 	value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
+ 	chv_writel(value, reg);
+ 
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+ 
+ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
+@@ -929,7 +938,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
+ 	unsigned long flags;
+ 	u32 ctrl0;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
+ 	if (input)
+@@ -938,7 +947,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
+ 		ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
+ 	chv_writel(ctrl0, reg);
+ 
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -963,10 +972,10 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
+ 	u16 arg = 0;
+ 	u32 term;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ 	ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
+ 
+@@ -1040,7 +1049,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
+ 	unsigned long flags;
+ 	u32 ctrl0, pull;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 	ctrl0 = readl(reg);
+ 
+ 	switch (param) {
+@@ -1063,7 +1072,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
+ 			pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
+ 			break;
+ 		default:
+-			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++			raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -1081,7 +1090,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
+ 			pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
+ 			break;
+ 		default:
+-			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++			raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -1089,12 +1098,12 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
+ 		break;
+ 
+ 	default:
+-		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++		raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 		return -EINVAL;
+ 	}
+ 
+ 	chv_writel(ctrl0, reg);
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -1160,9 +1169,9 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned offset)
+ 	unsigned long flags;
+ 	u32 ctrl0, cfg;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ 	cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+@@ -1180,7 +1189,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ 	void __iomem *reg;
+ 	u32 ctrl0;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
+ 	ctrl0 = readl(reg);
+@@ -1192,7 +1201,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ 
+ 	chv_writel(ctrl0, reg);
+ 
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+ 
+ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+@@ -1202,9 +1211,9 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+ 	u32 ctrl0, direction;
+ 	unsigned long flags;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ 	direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+@@ -1242,14 +1251,14 @@ static void chv_gpio_irq_ack(struct irq_data *d)
+ 	int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
+ 	u32 intr_line;
+ 
+-	raw_spin_lock(&pctrl->lock);
++	raw_spin_lock(&chv_lock);
+ 
+ 	intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ 	intr_line &= CHV_PADCTRL0_INTSEL_MASK;
+ 	intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
+ 	chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
+ 
+-	raw_spin_unlock(&pctrl->lock);
++	raw_spin_unlock(&chv_lock);
+ }
+ 
+ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+@@ -1260,7 +1269,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+ 	u32 value, intr_line;
+ 	unsigned long flags;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ 	intr_line &= CHV_PADCTRL0_INTSEL_MASK;
+@@ -1273,7 +1282,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+ 		value |= BIT(intr_line);
+ 	chv_writel(value, pctrl->regs + CHV_INTMASK);
+ 
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+ 
+ static void chv_gpio_irq_mask(struct irq_data *d)
+@@ -1307,7 +1316,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
+ 		unsigned long flags;
+ 		u32 intsel, value;
+ 
+-		raw_spin_lock_irqsave(&pctrl->lock, flags);
++		raw_spin_lock_irqsave(&chv_lock, flags);
+ 		intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ 		intsel &= CHV_PADCTRL0_INTSEL_MASK;
+ 		intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
+@@ -1322,7 +1331,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
+ 			irq_set_handler_locked(d, handler);
+ 			pctrl->intr_lines[intsel] = offset;
+ 		}
+-		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++		raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 	}
+ 
+ 	chv_gpio_irq_unmask(d);
+@@ -1338,7 +1347,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
+ 	unsigned long flags;
+ 	u32 value;
+ 
+-	raw_spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	/*
+ 	 * Pins which can be used as shared interrupt are configured in
+@@ -1387,7 +1396,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
+ 	else if (type & IRQ_TYPE_LEVEL_MASK)
+ 		irq_set_handler_locked(d, handle_level_irq);
+ 
+-	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -1499,7 +1508,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
+ 	if (i == ARRAY_SIZE(chv_communities))
+ 		return -ENODEV;
+ 
+-	raw_spin_lock_init(&pctrl->lock);
+ 	pctrl->dev = &pdev->dev;
+ 
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
+index 6f145f2..96ffda4 100644
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -718,6 +718,11 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device)
+ 	if (err)
+ 		return err;
+ 
++	err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless,
++				   sizeof(wireless), 0);
++	if (err)
++		return err;
++
+ 	if (wireless & 0x1) {
+ 		wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
+ 					   RFKILL_TYPE_WLAN,
+@@ -882,7 +887,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
+ 	wwan_rfkill = NULL;
+ 	rfkill2_count = 0;
+ 
+-	if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device))
++	if (hp_wmi_rfkill_setup(device))
+ 		hp_wmi_rfkill2_setup(device);
+ 
+ 	err = device_create_file(&device->dev, &dev_attr_display);
+diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
+index 02fb6b4..d838e77 100644
+--- a/drivers/regulator/s2mps11.c
++++ b/drivers/regulator/s2mps11.c
+@@ -750,7 +750,7 @@ static const struct regulator_linear_range s2mps15_ldo_voltage_ranges3[] = {
+ 
+ /* voltage range for s2mps15 LDO 7, 8, 9 and 10 */
+ static const struct regulator_linear_range s2mps15_ldo_voltage_ranges4[] = {
+-	REGULATOR_LINEAR_RANGE(700000, 0xc, 0x18, 25000),
++	REGULATOR_LINEAR_RANGE(700000, 0x10, 0x20, 25000),
+ };
+ 
+ /* voltage range for s2mps15 LDO 1 */
+@@ -760,12 +760,12 @@ static const struct regulator_linear_range s2mps15_ldo_voltage_ranges5[] = {
+ 
+ /* voltage range for s2mps15 BUCK 1, 2, 3, 4, 5, 6 and 7 */
+ static const struct regulator_linear_range s2mps15_buck_voltage_ranges1[] = {
+-	REGULATOR_LINEAR_RANGE(500000, 0x20, 0xb0, 6250),
++	REGULATOR_LINEAR_RANGE(500000, 0x20, 0xc0, 6250),
+ };
+ 
+ /* voltage range for s2mps15 BUCK 8, 9 and 10 */
+ static const struct regulator_linear_range s2mps15_buck_voltage_ranges2[] = {
+-	REGULATOR_LINEAR_RANGE(1000000, 0x20, 0xc0, 12500),
++	REGULATOR_LINEAR_RANGE(1000000, 0x20, 0x78, 12500),
+ };
+ 
+ static const struct regulator_desc s2mps15_regulators[] = {
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index db3958b..fe0539e 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -1264,11 +1264,6 @@ int rproc_add(struct rproc *rproc)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	/* expose to rproc_get_by_phandle users */
+-	mutex_lock(&rproc_list_mutex);
+-	list_add(&rproc->node, &rproc_list);
+-	mutex_unlock(&rproc_list_mutex);
+-
+ 	dev_info(dev, "%s is available\n", rproc->name);
+ 
+ 	dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
+@@ -1276,8 +1271,16 @@ int rproc_add(struct rproc *rproc)
+ 
+ 	/* create debugfs entries */
+ 	rproc_create_debug_dir(rproc);
++	ret = rproc_add_virtio_devices(rproc);
++	if (ret < 0)
++		return ret;
+ 
+-	return rproc_add_virtio_devices(rproc);
++	/* expose to rproc_get_by_phandle users */
++	mutex_lock(&rproc_list_mutex);
++	list_add(&rproc->node, &rproc_list);
++	mutex_unlock(&rproc_list_mutex);
++
++	return 0;
+ }
+ EXPORT_SYMBOL(rproc_add);
+ 
+diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
+index d01ad7e..4e823c4 100644
+--- a/drivers/rtc/rtc-s3c.c
++++ b/drivers/rtc/rtc-s3c.c
+@@ -149,12 +149,14 @@ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
+ 	if (!is_power_of_2(freq))
+ 		return -EINVAL;
+ 
++	s3c_rtc_enable_clk(info);
+ 	spin_lock_irq(&info->pie_lock);
+ 
+ 	if (info->data->set_freq)
+ 		info->data->set_freq(info, freq);
+ 
+ 	spin_unlock_irq(&info->pie_lock);
++	s3c_rtc_disable_clk(info);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
+index b2afad5..2a34eb5 100644
+--- a/drivers/s390/cio/cmf.c
++++ b/drivers/s390/cio/cmf.c
+@@ -753,6 +753,17 @@ static void reset_cmb(struct ccw_device *cdev)
+ 	cmf_generic_reset(cdev);
+ }
+ 
++static int cmf_enabled(struct ccw_device *cdev)
++{
++	int enabled;
++
++	spin_lock_irq(cdev->ccwlock);
++	enabled = !!cdev->private->cmb;
++	spin_unlock_irq(cdev->ccwlock);
++
++	return enabled;
++}
++
+ static struct attribute_group cmf_attr_group;
+ 
+ static struct cmb_operations cmbops_basic = {
+@@ -1153,13 +1164,8 @@ static ssize_t cmb_enable_show(struct device *dev,
+ 			       char *buf)
+ {
+ 	struct ccw_device *cdev = to_ccwdev(dev);
+-	int enabled;
+ 
+-	spin_lock_irq(cdev->ccwlock);
+-	enabled = !!cdev->private->cmb;
+-	spin_unlock_irq(cdev->ccwlock);
+-
+-	return sprintf(buf, "%d\n", enabled);
++	return sprintf(buf, "%d\n", cmf_enabled(cdev));
+ }
+ 
+ static ssize_t cmb_enable_store(struct device *dev,
+@@ -1199,15 +1205,20 @@ int ccw_set_cmf(struct ccw_device *cdev, int enable)
+  *  @cdev:	The ccw device to be enabled
+  *
+  *  Returns %0 for success or a negative error value.
+- *
++ *  Note: If this is called on a device for which channel measurement is already
++ *	  enabled a reset of the measurement data is triggered.
+  *  Context:
+  *    non-atomic
+  */
+ int enable_cmf(struct ccw_device *cdev)
+ {
+-	int ret;
++	int ret = 0;
+ 
+ 	device_lock(&cdev->dev);
++	if (cmf_enabled(cdev)) {
++		cmbops->reset(cdev);
++		goto out_unlock;
++	}
+ 	get_device(&cdev->dev);
+ 	ret = cmbops->alloc(cdev);
+ 	if (ret)
+@@ -1226,7 +1237,7 @@ int enable_cmf(struct ccw_device *cdev)
+ out:
+ 	if (ret)
+ 		put_device(&cdev->dev);
+-
++out_unlock:
+ 	device_unlock(&cdev->dev);
+ 	return ret;
+ }
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 3bd0be6..c7e5695 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -3874,7 +3874,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+ 	uint32_t tag;
+ 	uint16_t hwq;
+ 
+-	if (shost_use_blk_mq(cmnd->device->host)) {
++	if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
+ 		tag = blk_mq_unique_tag(cmnd->request);
+ 		hwq = blk_mq_unique_tag_to_hwq(tag);
+ 
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index f4b0690..2dab3dc 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -4079,6 +4079,12 @@ megasas_get_pd_list(struct megasas_instance *instance)
+ 	struct MR_PD_ADDRESS *pd_addr;
+ 	dma_addr_t ci_h = 0;
+ 
++	if (instance->pd_list_not_supported) {
++		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
++		"not supported by firmware\n");
++		return ret;
++	}
++
+ 	cmd = megasas_get_cmd(instance);
+ 
+ 	if (!cmd) {
+diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
+index f1eed7f..9c2788b 100644
+--- a/drivers/soc/qcom/smp2p.c
++++ b/drivers/soc/qcom/smp2p.c
+@@ -344,11 +344,12 @@ static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
+ 	/* Allocate an entry from the smem item */
+ 	strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
+ 	memcpy_toio(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
+-	out->valid_entries++;
+ 
+ 	/* Make the logical entry reference the physical value */
+ 	entry->value = &out->entries[out->valid_entries].value;
+ 
++	out->valid_entries++;
++
+ 	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
+ 	if (IS_ERR(entry->state)) {
+ 		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index fe07c05..daf2844 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -585,7 +585,14 @@ static void reset_sccr1(struct driver_data *drv_data)
+ 	u32 sccr1_reg;
+ 
+ 	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
+-	sccr1_reg &= ~SSCR1_RFT;
++	switch (drv_data->ssp_type) {
++	case QUARK_X1000_SSP:
++		sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
++		break;
++	default:
++		sccr1_reg &= ~SSCR1_RFT;
++		break;
++	}
+ 	sccr1_reg |= chip->threshold;
+ 	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
+ }
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 50f3d3a..39b928c 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -492,7 +492,8 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ 	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+ 
+ 	spin_lock_bh(&conn->cmd_lock);
+-	if (!list_empty(&cmd->i_conn_node))
++	if (!list_empty(&cmd->i_conn_node) &&
++	    !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
+ 		list_del_init(&cmd->i_conn_node);
+ 	spin_unlock_bh(&conn->cmd_lock);
+ 
+@@ -4034,6 +4035,7 @@ int iscsi_target_rx_thread(void *arg)
+ 
+ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+ {
++	LIST_HEAD(tmp_list);
+ 	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
+ 	struct iscsi_session *sess = conn->sess;
+ 	/*
+@@ -4042,18 +4044,26 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+ 	 * has been reset -> returned sleeping pre-handler state.
+ 	 */
+ 	spin_lock_bh(&conn->cmd_lock);
+-	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
++	list_splice_init(&conn->conn_cmd_list, &tmp_list);
+ 
++	list_for_each_entry(cmd, &tmp_list, i_conn_node) {
++		struct se_cmd *se_cmd = &cmd->se_cmd;
++
++		if (se_cmd->se_tfo != NULL) {
++			spin_lock(&se_cmd->t_state_lock);
++			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++			spin_unlock(&se_cmd->t_state_lock);
++		}
++	}
++	spin_unlock_bh(&conn->cmd_lock);
++
++	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
+ 		list_del_init(&cmd->i_conn_node);
+-		spin_unlock_bh(&conn->cmd_lock);
+ 
+ 		iscsit_increment_maxcmdsn(cmd, sess);
+-
+ 		iscsit_free_cmd(cmd, true);
+ 
+-		spin_lock_bh(&conn->cmd_lock);
+ 	}
+-	spin_unlock_bh(&conn->cmd_lock);
+ }
+ 
+ static void iscsit_stop_timers_for_cmds(
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index b5212f0..adf419f 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1371,8 +1371,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ 	}
+ 	login->zero_tsih = zero_tsih;
+ 
+-	conn->sess->se_sess->sup_prot_ops =
+-		conn->conn_transport->iscsit_get_sup_prot_ops(conn);
++	if (conn->sess)
++		conn->sess->se_sess->sup_prot_ops =
++			conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+ 
+ 	tpg = conn->tpg;
+ 	if (!tpg) {
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index a4046ca..6b42348 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -821,13 +821,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+  * in ATA and we need to set TPE=1
+  */
+ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+-				       struct request_queue *q, int block_size)
++				       struct request_queue *q)
+ {
++	int block_size = queue_logical_block_size(q);
++
+ 	if (!blk_queue_discard(q))
+ 		return false;
+ 
+-	attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
+-								block_size;
++	attrib->max_unmap_lba_count =
++		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
+ 	/*
+ 	 * Currently hardcoded to 1 in Linux/SCSI code..
+ 	 */
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 75f0f08..7929186 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev)
+ 			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
+ 			fd_dev->fd_block_size);
+ 
+-		if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+-						      fd_dev->fd_block_size))
++		if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
+ 			pr_debug("IFILE: BLOCK Discard support available,"
+ 				 " disabled by default\n");
+ 		/*
+diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
+index 7c4efb4..2077bc2 100644
+--- a/drivers/target/target_core_iblock.c
++++ b/drivers/target/target_core_iblock.c
+@@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev)
+ 	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+ 	dev->dev_attrib.hw_queue_depth = q->nr_requests;
+ 
+-	if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+-					      dev->dev_attrib.hw_block_size))
++	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
+ 		pr_debug("IBLOCK: BLOCK Discard support available,"
+ 			 " disabled by default\n");
+ 
+diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
+index fc91e85..e2c970a 100644
+--- a/drivers/target/target_core_internal.h
++++ b/drivers/target/target_core_internal.h
+@@ -146,6 +146,7 @@ sense_reason_t	target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+ void	target_qf_do_work(struct work_struct *work);
+ bool	target_check_wce(struct se_device *dev);
+ bool	target_check_fua(struct se_device *dev);
++void	__target_execute_cmd(struct se_cmd *, bool);
+ 
+ /* target_core_stat.c */
+ void	target_stat_setup_dev_default_groups(struct se_device *);
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index a9057aa..04f616b 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -602,7 +602,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
+ 	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+ 	spin_unlock_irq(&cmd->t_state_lock);
+ 
+-	__target_execute_cmd(cmd);
++	__target_execute_cmd(cmd, false);
+ 
+ 	kfree(buf);
+ 	return ret;
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 5ab3967..42c2a44 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1303,23 +1303,6 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
+ 
+ 	trace_target_sequencer_start(cmd);
+ 
+-	/*
+-	 * Check for an existing UNIT ATTENTION condition
+-	 */
+-	ret = target_scsi3_ua_check(cmd);
+-	if (ret)
+-		return ret;
+-
+-	ret = target_alua_state_check(cmd);
+-	if (ret)
+-		return ret;
+-
+-	ret = target_check_reservation(cmd);
+-	if (ret) {
+-		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+-		return ret;
+-	}
+-
+ 	ret = dev->transport->parse_cdb(cmd);
+ 	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
+ 		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
+@@ -1761,20 +1744,45 @@ queue_full:
+ }
+ EXPORT_SYMBOL(transport_generic_request_failure);
+ 
+-void __target_execute_cmd(struct se_cmd *cmd)
++void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
+ {
+ 	sense_reason_t ret;
+ 
+-	if (cmd->execute_cmd) {
+-		ret = cmd->execute_cmd(cmd);
+-		if (ret) {
+-			spin_lock_irq(&cmd->t_state_lock);
+-			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+-			spin_unlock_irq(&cmd->t_state_lock);
++	if (!cmd->execute_cmd) {
++		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
++		goto err;
++	}
++	if (do_checks) {
++		/*
++		 * Check for an existing UNIT ATTENTION condition after
++		 * target_handle_task_attr() has done SAM task attr
++		 * checking, and possibly have already defered execution
++		 * out to target_restart_delayed_cmds() context.
++		 */
++		ret = target_scsi3_ua_check(cmd);
++		if (ret)
++			goto err;
++
++		ret = target_alua_state_check(cmd);
++		if (ret)
++			goto err;
+ 
+-			transport_generic_request_failure(cmd, ret);
++		ret = target_check_reservation(cmd);
++		if (ret) {
++			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
++			goto err;
+ 		}
+ 	}
++
++	ret = cmd->execute_cmd(cmd);
++	if (!ret)
++		return;
++err:
++	spin_lock_irq(&cmd->t_state_lock);
++	cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
++	spin_unlock_irq(&cmd->t_state_lock);
++
++	transport_generic_request_failure(cmd, ret);
+ }
+ 
+ static int target_write_prot_action(struct se_cmd *cmd)
+@@ -1819,6 +1827,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
+ 	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ 		return false;
+ 
++	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
++
+ 	/*
+ 	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
+ 	 * to allow the passed struct se_cmd list of tasks to the front of the list.
+@@ -1899,7 +1909,7 @@ void target_execute_cmd(struct se_cmd *cmd)
+ 		return;
+ 	}
+ 
+-	__target_execute_cmd(cmd);
++	__target_execute_cmd(cmd, true);
+ }
+ EXPORT_SYMBOL(target_execute_cmd);
+ 
+@@ -1923,7 +1933,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
+ 		list_del(&cmd->se_delayed_node);
+ 		spin_unlock(&dev->delayed_cmd_lock);
+ 
+-		__target_execute_cmd(cmd);
++		__target_execute_cmd(cmd, true);
+ 
+ 		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
+ 			break;
+@@ -1941,6 +1951,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
+ 	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ 		return;
+ 
++	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
++		goto restart;
++
+ 	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
+ 		atomic_dec_mb(&dev->simple_cmds);
+ 		dev->dev_cur_ordered_id++;
+@@ -1957,7 +1970,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
+ 		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
+ 			 dev->dev_cur_ordered_id);
+ 	}
+-
++restart:
+ 	target_restart_delayed_cmds(dev);
+ }
+ 
+@@ -2557,15 +2570,10 @@ static void target_release_cmd_kref(struct kref *kref)
+ 	bool fabric_stop;
+ 
+ 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+-	if (list_empty(&se_cmd->se_cmd_list)) {
+-		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+-		target_free_cmd_mem(se_cmd);
+-		se_cmd->se_tfo->release_cmd(se_cmd);
+-		return;
+-	}
+ 
+ 	spin_lock(&se_cmd->t_state_lock);
+-	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
++	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
++		      (se_cmd->transport_state & CMD_T_ABORTED);
+ 	spin_unlock(&se_cmd->t_state_lock);
+ 
+ 	if (se_cmd->cmd_wait_set || fabric_stop) {
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 954941d..f9c798c 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -482,19 +482,21 @@ static void atmel_start_tx(struct uart_port *port)
+ {
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ 
+-	if (atmel_use_pdc_tx(port)) {
+-		if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
+-			/* The transmitter is already running.  Yes, we
+-			   really need this.*/
+-			return;
++	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
++				       & ATMEL_PDC_TXTEN))
++		/* The transmitter is already running.  Yes, we
++		   really need this.*/
++		return;
+ 
++	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
+ 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ 			atmel_stop_rx(port);
+ 
++	if (atmel_use_pdc_tx(port))
+ 		/* re-enable PDC transmit */
+ 		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
+-	}
++
+ 	/* Enable interrupts */
+ 	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+ }
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index b7d80bd..7d62610 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -726,7 +726,7 @@ static void msm_handle_tx(struct uart_port *port)
+ 		return;
+ 	}
+ 
+-	pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
++	pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ 	dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ 
+ 	dma_min = 1;	/* Always DMA */
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 99bb231..f0bd2ec 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1684,7 +1684,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+ 		return -ENODEV;
+ 
+ 	if (port->mapbase != 0)
+-		return 0;
++		return -EINVAL;
+ 
+ 	/* setup info for port */
+ 	port->dev	= &platdev->dev;
+@@ -1738,22 +1738,25 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+ 		ourport->dma = devm_kzalloc(port->dev,
+ 					    sizeof(*ourport->dma),
+ 					    GFP_KERNEL);
+-		if (!ourport->dma)
+-			return -ENOMEM;
++		if (!ourport->dma) {
++			ret = -ENOMEM;
++			goto err;
++		}
+ 	}
+ 
+ 	ourport->clk	= clk_get(&platdev->dev, "uart");
+ 	if (IS_ERR(ourport->clk)) {
+ 		pr_err("%s: Controller clock not found\n",
+ 				dev_name(&platdev->dev));
+-		return PTR_ERR(ourport->clk);
++		ret = PTR_ERR(ourport->clk);
++		goto err;
+ 	}
+ 
+ 	ret = clk_prepare_enable(ourport->clk);
+ 	if (ret) {
+ 		pr_err("uart: clock failed to prepare+enable: %d\n", ret);
+ 		clk_put(ourport->clk);
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	/* Keep all interrupts masked and cleared */
+@@ -1769,7 +1772,12 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+ 
+ 	/* reset the fifos (and setup the uart) */
+ 	s3c24xx_serial_resetport(port, cfg);
++
+ 	return 0;
++
++err:
++	port->mapbase = 0;
++	return ret;
+ }
+ 
+ /* Device driver serial port probe */
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 944a6dc..d2e50a2 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -128,6 +128,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
+ 			USB_QUIRK_DEVICE_QUALIFIER },
+ 
++	{ USB_DEVICE(0x04f3, 0x0381), .driver_info =
++			USB_QUIRK_NO_LPM },
++
+ 	{ USB_DEVICE(0x04f3, 0x21b8), .driver_info =
+ 			USB_QUIRK_DEVICE_QUALIFIER },
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 07248ff..716f4f0 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -258,11 +258,13 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ 	 * We will also set SUSPHY bit to what it was before returning as stated
+ 	 * by the same section on Synopsys databook.
+ 	 */
+-	reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+-	if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+-		susphy = true;
+-		reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+-		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
++	if (dwc->gadget.speed <= USB_SPEED_HIGH) {
++		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
++		if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
++			susphy = true;
++			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
++			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
++		}
+ 	}
+ 
+ 	if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
+@@ -2023,6 +2025,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 		return 1;
+ 	}
+ 
++	if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
++		if ((event->status & DEPEVT_STATUS_IOC) &&
++				(trb->ctrl & DWC3_TRB_CTRL_IOC))
++			return 0;
+ 	return 1;
+ }
+ 
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
+index 18569de..bb1f6c8 100644
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
+@@ -1920,6 +1920,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
+ 
+ 	udc->errata = match->data;
+ 	udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
++	if (IS_ERR(udc->pmc))
++		udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
+ 	if (udc->errata && IS_ERR(udc->pmc))
+ 		return ERR_CAST(udc->pmc);
+ 
+diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
+index ebc51ec..7175142 100644
+--- a/drivers/usb/gadget/udc/pch_udc.c
++++ b/drivers/usb/gadget/udc/pch_udc.c
+@@ -1477,11 +1477,11 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
+ 		req->dma_mapped = 0;
+ 	}
+ 	ep->halted = 1;
+-	spin_lock(&dev->lock);
++	spin_unlock(&dev->lock);
+ 	if (!ep->in)
+ 		pch_udc_ep_clear_rrdy(ep);
+ 	usb_gadget_giveback_request(&ep->ep, &req->req);
+-	spin_unlock(&dev->lock);
++	spin_lock(&dev->lock);
+ 	ep->halted = halted;
+ }
+ 
+@@ -2573,9 +2573,9 @@ static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
+ 		empty_req_queue(ep);
+ 	}
+ 	if (dev->driver) {
+-		spin_lock(&dev->lock);
+-		usb_gadget_udc_reset(&dev->gadget, dev->driver);
+ 		spin_unlock(&dev->lock);
++		usb_gadget_udc_reset(&dev->gadget, dev->driver);
++		spin_lock(&dev->lock);
+ 	}
+ }
+ 
+@@ -2654,9 +2654,9 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
+ 		dev->ep[i].halted = 0;
+ 	}
+ 	dev->stall = 0;
+-	spin_lock(&dev->lock);
+-	dev->driver->setup(&dev->gadget, &dev->setup_data);
+ 	spin_unlock(&dev->lock);
++	dev->driver->setup(&dev->gadget, &dev->setup_data);
++	spin_lock(&dev->lock);
+ }
+ 
+ /**
+@@ -2691,9 +2691,9 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
+ 	dev->stall = 0;
+ 
+ 	/* call gadget zero with setup data received */
+-	spin_lock(&dev->lock);
+-	dev->driver->setup(&dev->gadget, &dev->setup_data);
+ 	spin_unlock(&dev->lock);
++	dev->driver->setup(&dev->gadget, &dev->setup_data);
++	spin_lock(&dev->lock);
+ }
+ 
+ /**
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 7be4e7d..280ed5f 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -810,20 +810,27 @@ static void xfer_work(struct work_struct *work)
+ {
+ 	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
+ 	struct usbhs_pipe *pipe = pkt->pipe;
+-	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
++	struct usbhs_fifo *fifo;
+ 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ 	struct dma_async_tx_descriptor *desc;
+-	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
++	struct dma_chan *chan;
+ 	struct device *dev = usbhs_priv_to_dev(priv);
+ 	enum dma_transfer_direction dir;
++	unsigned long flags;
+ 
++	usbhs_lock(priv, flags);
++	fifo = usbhs_pipe_to_fifo(pipe);
++	if (!fifo)
++		goto xfer_work_end;
++
++	chan = usbhsf_dma_chan_get(fifo, pkt);
+ 	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+ 
+ 	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
+ 					pkt->trans, dir,
+ 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ 	if (!desc)
+-		return;
++		goto xfer_work_end;
+ 
+ 	desc->callback		= usbhsf_dma_complete;
+ 	desc->callback_param	= pipe;
+@@ -831,7 +838,7 @@ static void xfer_work(struct work_struct *work)
+ 	pkt->cookie = dmaengine_submit(desc);
+ 	if (pkt->cookie < 0) {
+ 		dev_err(dev, "Failed to submit dma descriptor\n");
+-		return;
++		goto xfer_work_end;
+ 	}
+ 
+ 	dev_dbg(dev, "  %s %d (%d/ %d)\n",
+@@ -842,6 +849,9 @@ static void xfer_work(struct work_struct *work)
+ 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
+ 	dma_async_issue_pending(chan);
+ 	usbhs_pipe_enable(pipe);
++
++xfer_work_end:
++	usbhs_unlock(priv, flags);
+ }
+ 
+ /*
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index 30345c2..50f3363 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -585,6 +585,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
+ 	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
+ 	struct usbhs_pipe *pipe;
+ 	int ret = -EIO;
++	unsigned long flags;
++
++	usbhs_lock(priv, flags);
+ 
+ 	/*
+ 	 * if it already have pipe,
+@@ -593,7 +596,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
+ 	if (uep->pipe) {
+ 		usbhs_pipe_clear(uep->pipe);
+ 		usbhs_pipe_sequence_data0(uep->pipe);
+-		return 0;
++		ret = 0;
++		goto usbhsg_ep_enable_end;
+ 	}
+ 
+ 	pipe = usbhs_pipe_malloc(priv,
+@@ -621,6 +625,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
+ 		ret = 0;
+ 	}
+ 
++usbhsg_ep_enable_end:
++	usbhs_unlock(priv, flags);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index d96d423..8e07536 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+ #define TELIT_PRODUCT_LE920			0x1200
+ #define TELIT_PRODUCT_LE910			0x1201
++#define TELIT_PRODUCT_LE910_USBCFG4		0x1206
+ 
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID				0x19d2
+@@ -1198,6 +1199,8 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ 		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 476c0e3..f6ea8f4 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -202,6 +202,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
+ 	num = min(num, ARRAY_SIZE(vb->pfns));
+ 
+ 	mutex_lock(&vb->balloon_lock);
++	/* We can't release more pages than taken */
++	num = min(num, (size_t)vb->num_pages);
+ 	for (vb->num_pfns = 0; vb->num_pfns < num;
+ 	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ 		page = balloon_page_dequeue(vb_dev_info);
+diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
+index a2eec97..bb09de6 100644
+--- a/drivers/w1/masters/omap_hdq.c
++++ b/drivers/w1/masters/omap_hdq.c
+@@ -390,8 +390,6 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
+ 		goto out;
+ 	}
+ 
+-	hdq_data->hdq_irqstatus = 0;
+-
+ 	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
+ 		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
+ 			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 75533ad..92fe3f8 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2696,12 +2696,6 @@ struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
+ 		btrfs_bio->csum = NULL;
+ 		btrfs_bio->csum_allocated = NULL;
+ 		btrfs_bio->end_io = NULL;
+-
+-#ifdef CONFIG_BLK_CGROUP
+-		/* FIXME, put this into bio_clone_bioset */
+-		if (bio->bi_css)
+-			bio_associate_blkcg(new, bio->bi_css);
+-#endif
+ 	}
+ 	return new;
+ }
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 2234e88..b56887b 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1629,13 +1629,11 @@ again:
+ 		 * managed to copy.
+ 		 */
+ 		if (num_sectors > dirty_sectors) {
+-			/*
+-			 * we round down because we don't want to count
+-			 * any partial blocks actually sent through the
+-			 * IO machines
+-			 */
+-			release_bytes = round_down(release_bytes - copied,
+-				      root->sectorsize);
++
++			/* release everything except the sectors we dirtied */
++			release_bytes -= dirty_sectors <<
++				root->fs_info->sb->s_blocksize_bits;
++
+ 			if (copied > 0) {
+ 				spin_lock(&BTRFS_I(inode)->lock);
+ 				BTRFS_I(inode)->outstanding_extents++;
+diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
+index 4ae7500..3f7c2cd 100644
+--- a/fs/cachefiles/namei.c
++++ b/fs/cachefiles/namei.c
+@@ -263,6 +263,8 @@ requeue:
+ void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
+ 				     struct cachefiles_object *object)
+ {
++	blkcnt_t i_blocks = d_backing_inode(object->dentry)->i_blocks;
++
+ 	write_lock(&cache->active_lock);
+ 	rb_erase(&object->active_node, &cache->active_nodes);
+ 	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
+@@ -273,8 +275,7 @@ void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
+ 	/* This object can now be culled, so we need to let the daemon know
+ 	 * that there is something it can remove if it needs to.
+ 	 */
+-	atomic_long_add(d_backing_inode(object->dentry)->i_blocks,
+-			&cache->b_released);
++	atomic_long_add(i_blocks, &cache->b_released);
+ 	if (atomic_inc_return(&cache->f_released))
+ 		cachefiles_state_changed(cache);
+ }
+diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
+index 3182273..1418daa 100644
+--- a/fs/cifs/cifs_fs_sb.h
++++ b/fs/cifs/cifs_fs_sb.h
+@@ -46,6 +46,9 @@
+ #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
+ #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
+ #define CIFS_MOUNT_MAP_SFM_CHR	0x800000 /* SFM/MAC mapping for illegal chars */
++#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible
++					      * root mountable
++					      */
+ 
+ struct cifs_sb_info {
+ 	struct rb_root tlink_tree;
+@@ -67,5 +70,6 @@ struct cifs_sb_info {
+ 	struct backing_dev_info bdi;
+ 	struct delayed_work prune_tlinks;
+ 	struct rcu_head rcu;
++	char *prepath;
+ };
+ #endif				/* _CIFS_FS_SB_H */
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index 6aeb8d4..8347c90 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -743,24 +743,26 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 
+ 	memcpy(ses->auth_key.response + baselen, tiblob, tilen);
+ 
++	mutex_lock(&ses->server->srv_mutex);
++
+ 	rc = crypto_hmacmd5_alloc(ses->server);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	/* calculate ntlmv2_hash */
+ 	rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	/* calculate first part of the client response (CR1) */
+ 	rc = CalcNTLMv2_response(ses, ntlmv2_hash);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	/* now calculate the session key for NTLMv2 */
+@@ -769,13 +771,13 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	if (rc) {
+ 		cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
+ 			 __func__);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
+@@ -783,7 +785,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 		CIFS_HMAC_MD5_HASH_SIZE);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
+@@ -791,6 +793,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	if (rc)
+ 		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+ 
++unlock:
++	mutex_unlock(&ses->server->srv_mutex);
+ setup_ntlmv2_rsp_ret:
+ 	kfree(tiblob);
+ 
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 5d841f3..6bbec5e 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -689,6 +689,14 @@ cifs_do_mount(struct file_system_type *fs_type,
+ 		goto out_cifs_sb;
+ 	}
+ 
++	if (volume_info->prepath) {
++		cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL);
++		if (cifs_sb->prepath == NULL) {
++			root = ERR_PTR(-ENOMEM);
++			goto out_cifs_sb;
++		}
++	}
++
+ 	cifs_setup_cifs_sb(volume_info, cifs_sb);
+ 
+ 	rc = cifs_mount(cifs_sb, volume_info);
+@@ -727,7 +735,11 @@ cifs_do_mount(struct file_system_type *fs_type,
+ 		sb->s_flags |= MS_ACTIVE;
+ 	}
+ 
+-	root = cifs_get_root(volume_info, sb);
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++		root = dget(sb->s_root);
++	else
++		root = cifs_get_root(volume_info, sb);
++
+ 	if (IS_ERR(root))
+ 		goto out_super;
+ 
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 7d2b15c..7ae0328 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1228,6 +1228,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
+ 	vol->ops = &smb1_operations;
+ 	vol->vals = &smb1_values;
+ 
++	vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
++
+ 	if (!mountdata)
+ 		goto cifs_parse_mount_err;
+ 
+@@ -2049,7 +2051,7 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
+ 	if (!match_security(server, vol))
+ 		return 0;
+ 
+-	if (server->echo_interval != vol->echo_interval)
++	if (server->echo_interval != vol->echo_interval * HZ)
+ 		return 0;
+ 
+ 	return 1;
+@@ -3483,6 +3485,44 @@ cifs_get_volume_info(char *mount_data, const char *devname)
+ 	return volume_info;
+ }
+ 
++static int
++cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
++					unsigned int xid,
++					struct cifs_tcon *tcon,
++					struct cifs_sb_info *cifs_sb,
++					char *full_path)
++{
++	int rc;
++	char *s;
++	char sep, tmp;
++
++	sep = CIFS_DIR_SEP(cifs_sb);
++	s = full_path;
++
++	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
++	while (rc == 0) {
++		/* skip separators */
++		while (*s == sep)
++			s++;
++		if (!*s)
++			break;
++		/* next separator */
++		while (*s && *s != sep)
++			s++;
++
++		/*
++		 * temporarily null-terminate the path at the end of
++		 * the current component
++		 */
++		tmp = *s;
++		*s = 0;
++		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
++						     full_path);
++		*s = tmp;
++	}
++	return rc;
++}
++
+ int
+ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
+ {
+@@ -3620,6 +3660,16 @@ remote_path_check:
+ 			kfree(full_path);
+ 			goto mount_fail_check;
+ 		}
++
++		rc = cifs_are_all_path_components_accessible(server,
++							     xid, tcon, cifs_sb,
++							     full_path);
++		if (rc != 0) {
++			cifs_dbg(VFS, "cannot query dirs between root and final path, "
++				 "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
++			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
++			rc = 0;
++		}
+ 		kfree(full_path);
+ 	}
+ 
+@@ -3889,6 +3939,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
+ 
+ 	bdi_destroy(&cifs_sb->bdi);
+ 	kfree(cifs_sb->mountdata);
++	kfree(cifs_sb->prepath);
+ 	call_rcu(&cifs_sb->rcu, delayed_free);
+ }
+ 
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index fb0903f..6f7333d 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -84,6 +84,7 @@ build_path_from_dentry(struct dentry *direntry)
+ 	struct dentry *temp;
+ 	int namelen;
+ 	int dfsplen;
++	int pplen = 0;
+ 	char *full_path;
+ 	char dirsep;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+@@ -95,8 +96,12 @@ build_path_from_dentry(struct dentry *direntry)
+ 		dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
+ 	else
+ 		dfsplen = 0;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++		pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
++
+ cifs_bp_rename_retry:
+-	namelen = dfsplen;
++	namelen = dfsplen + pplen;
+ 	seq = read_seqbegin(&rename_lock);
+ 	rcu_read_lock();
+ 	for (temp = direntry; !IS_ROOT(temp);) {
+@@ -137,7 +142,7 @@ cifs_bp_rename_retry:
+ 		}
+ 	}
+ 	rcu_read_unlock();
+-	if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
++	if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) {
+ 		cifs_dbg(FYI, "did not end path lookup where expected. namelen=%ddfsplen=%d\n",
+ 			 namelen, dfsplen);
+ 		/* presumably this is only possible if racing with a rename
+@@ -153,6 +158,17 @@ cifs_bp_rename_retry:
+ 	   those safely to '/' if any are found in the middle of the prepath */
+ 	/* BB test paths to Windows with '/' in the midst of prepath */
+ 
++	if (pplen) {
++		int i;
++
++		cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
++		memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
++		full_path[dfsplen] = '\\';
++		for (i = 0; i < pplen-1; i++)
++			if (full_path[dfsplen+1+i] == '/')
++				full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
++	}
++
+ 	if (dfsplen) {
+ 		strncpy(full_path, tcon->treeName, dfsplen);
+ 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
+@@ -229,6 +245,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
+ 				goto cifs_create_get_file_info;
+ 			}
+ 
++			if (S_ISDIR(newinode->i_mode)) {
++				CIFSSMBClose(xid, tcon, fid->netfid);
++				iput(newinode);
++				rc = -EISDIR;
++				goto out;
++			}
++
+ 			if (!S_ISREG(newinode->i_mode)) {
+ 				/*
+ 				 * The server may allow us to open things like
+@@ -399,10 +422,14 @@ cifs_create_set_dentry:
+ 	if (rc != 0) {
+ 		cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
+ 			 rc);
+-		if (server->ops->close)
+-			server->ops->close(xid, tcon, fid);
+-		goto out;
++		goto out_err;
+ 	}
++
++	if (S_ISDIR(newinode->i_mode)) {
++		rc = -EISDIR;
++		goto out_err;
++	}
++
+ 	d_drop(direntry);
+ 	d_add(direntry, newinode);
+ 
+@@ -410,6 +437,13 @@ out:
+ 	kfree(buf);
+ 	kfree(full_path);
+ 	return rc;
++
++out_err:
++	if (server->ops->close)
++		server->ops->close(xid, tcon, fid);
++	if (newinode)
++		iput(newinode);
++	goto out;
+ }
+ 
+ int
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 514dadb..b87efd0 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -1002,10 +1002,26 @@ struct inode *cifs_root_iget(struct super_block *sb)
+ 	struct inode *inode = NULL;
+ 	long rc;
+ 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	char *path = NULL;
++	int len;
++
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++	    && cifs_sb->prepath) {
++		len = strlen(cifs_sb->prepath);
++		path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
++		if (path == NULL)
++			return ERR_PTR(-ENOMEM);
++		path[0] = '/';
++		memcpy(path+1, cifs_sb->prepath, len);
++	} else {
++		path = kstrdup("", GFP_KERNEL);
++		if (path == NULL)
++			return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	xid = get_xid();
+ 	if (tcon->unix_ext) {
+-		rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
++		rc = cifs_get_inode_info_unix(&inode, path, sb, xid);
+ 		/* some servers mistakenly claim POSIX support */
+ 		if (rc != -EOPNOTSUPP)
+ 			goto iget_no_retry;
+@@ -1013,7 +1029,8 @@ struct inode *cifs_root_iget(struct super_block *sb)
+ 		tcon->unix_ext = false;
+ 	}
+ 
+-	rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
++	convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
++	rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL);
+ 
+ iget_no_retry:
+ 	if (!inode) {
+@@ -1042,6 +1059,7 @@ iget_no_retry:
+ 	}
+ 
+ out:
++	kfree(path);
+ 	/* can not call macro free_xid here since in a void func
+ 	 * TODO: This is no longer true
+ 	 */
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 3525ed7..505e6d6 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1044,6 +1044,9 @@ smb2_new_lease_key(struct cifs_fid *fid)
+ 	get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
+ }
+ 
++#define SMB2_SYMLINK_STRUCT_SIZE \
++	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
++
+ static int
+ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		   const char *full_path, char **target_path,
+@@ -1056,7 +1059,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_fid fid;
+ 	struct smb2_err_rsp *err_buf = NULL;
+ 	struct smb2_symlink_err_rsp *symlink;
+-	unsigned int sub_len, sub_offset;
++	unsigned int sub_len;
++	unsigned int sub_offset;
++	unsigned int print_len;
++	unsigned int print_offset;
+ 
+ 	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+ 
+@@ -1077,11 +1083,33 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		kfree(utf16_path);
+ 		return -ENOENT;
+ 	}
++
++	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
++	    get_rfc1002_length(err_buf) + 4 < SMB2_SYMLINK_STRUCT_SIZE) {
++		kfree(utf16_path);
++		return -ENOENT;
++	}
++
+ 	/* open must fail on symlink - reset rc */
+ 	rc = 0;
+ 	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
+ 	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
+ 	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
++	print_len = le16_to_cpu(symlink->PrintNameLength);
++	print_offset = le16_to_cpu(symlink->PrintNameOffset);
++
++	if (get_rfc1002_length(err_buf) + 4 <
++			SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
++		kfree(utf16_path);
++		return -ENOENT;
++	}
++
++	if (get_rfc1002_length(err_buf) + 4 <
++			SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
++		kfree(utf16_path);
++		return -ENOENT;
++	}
++
+ 	*target_path = cifs_strndup_from_utf16(
+ 				(char *)symlink->PathBuffer + sub_offset,
+ 				sub_len, true, cifs_sb->local_nls);
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 7007809..78313ad 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -124,7 +124,7 @@ static int journal_submit_commit_record(journal_t *journal,
+ 	struct commit_header *tmp;
+ 	struct buffer_head *bh;
+ 	int ret;
+-	struct timespec now = current_kernel_time();
++	struct timespec64 now = current_kernel_time64();
+ 
+ 	*cbh = NULL;
+ 
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index ff416d0..7796bea 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -427,6 +427,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
+ 		case -NFS4ERR_DELAY:
+ 			nfs_inc_server_stats(server, NFSIOS_DELAY);
+ 		case -NFS4ERR_GRACE:
++		case -NFS4ERR_LAYOUTTRYLATER:
+ 		case -NFS4ERR_RECALLCONFLICT:
+ 			exception->delay = 1;
+ 			return 0;
+@@ -7869,11 +7870,13 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
+ 	struct inode *inode = lgp->args.inode;
+ 	struct nfs_server *server = NFS_SERVER(inode);
+ 	struct pnfs_layout_hdr *lo;
+-	int status = task->tk_status;
++	int nfs4err = task->tk_status;
++	int err, status = 0;
++	LIST_HEAD(head);
+ 
+ 	dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
+ 
+-	switch (status) {
++	switch (nfs4err) {
+ 	case 0:
+ 		goto out;
+ 
+@@ -7905,45 +7908,43 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
+ 			status = -EOVERFLOW;
+ 			goto out;
+ 		}
+-		/* Fallthrough */
++		status = -EBUSY;
++		break;
+ 	case -NFS4ERR_RECALLCONFLICT:
+-		nfs4_handle_exception(server, -NFS4ERR_RECALLCONFLICT,
+-					exception);
+ 		status = -ERECALLCONFLICT;
+-		goto out;
++		break;
+ 	case -NFS4ERR_EXPIRED:
+ 	case -NFS4ERR_BAD_STATEID:
+ 		exception->timeout = 0;
+ 		spin_lock(&inode->i_lock);
+-		if (nfs4_stateid_match(&lgp->args.stateid,
++		lo = NFS_I(inode)->layout;
++		/* If the open stateid was bad, then recover it. */
++		if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
++		    nfs4_stateid_match_other(&lgp->args.stateid,
+ 					&lgp->args.ctx->state->stateid)) {
+ 			spin_unlock(&inode->i_lock);
+-			/* If the open stateid was bad, then recover it. */
+ 			exception->state = lgp->args.ctx->state;
+ 			break;
+ 		}
+-		lo = NFS_I(inode)->layout;
+-		if (lo && !test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) &&
+-		    nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
+-			LIST_HEAD(head);
+-
+-			/*
+-			 * Mark the bad layout state as invalid, then retry
+-			 * with the current stateid.
+-			 */
+-			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+-			pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
+-			spin_unlock(&inode->i_lock);
+-			pnfs_free_lseg_list(&head);
+-			status = -EAGAIN;
+-			goto out;
+-		} else
+-			spin_unlock(&inode->i_lock);
+-	}
+ 
+-	status = nfs4_handle_exception(server, status, exception);
+-	if (exception->retry)
++		/*
++		 * Mark the bad layout state as invalid, then retry
++		 */
++		set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
++		pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
++		spin_unlock(&inode->i_lock);
++		pnfs_free_lseg_list(&head);
+ 		status = -EAGAIN;
++		goto out;
++	}
++
++	err = nfs4_handle_exception(server, nfs4err, exception);
++	if (!status) {
++		if (exception->retry)
++			status = -EAGAIN;
++		else
++			status = err;
++	}
+ out:
+ 	dprintk("<-- %s\n", __func__);
+ 	return status;
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 0fbe734..7d99236 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1505,7 +1505,7 @@ pnfs_update_layout(struct inode *ino,
+ 	struct pnfs_layout_segment *lseg = NULL;
+ 	nfs4_stateid stateid;
+ 	long timeout = 0;
+-	unsigned long giveup = jiffies + rpc_get_timeout(server->client);
++	unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
+ 	bool first;
+ 
+ 	if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
+@@ -1645,33 +1645,44 @@ lookup_again:
+ 	lseg = send_layoutget(lo, ctx, &stateid, &arg, &timeout, gfp_flags);
+ 	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+ 				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
++	atomic_dec(&lo->plh_outstanding);
+ 	if (IS_ERR(lseg)) {
+ 		switch(PTR_ERR(lseg)) {
+-		case -ERECALLCONFLICT:
++		case -EBUSY:
+ 			if (time_after(jiffies, giveup))
+ 				lseg = NULL;
+-			/* Fallthrough */
+-		case -EAGAIN:
+-			pnfs_put_layout_hdr(lo);
+-			if (first)
+-				pnfs_clear_first_layoutget(lo);
+-			if (lseg) {
+-				trace_pnfs_update_layout(ino, pos, count,
+-					iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
+-				goto lookup_again;
++			break;
++		case -ERECALLCONFLICT:
++			/* Huh? We hold no layouts, how is there a recall? */
++			if (first) {
++				lseg = NULL;
++				break;
+ 			}
++			/* Destroy the existing layout and start over */
++			if (time_after(jiffies, giveup))
++				pnfs_destroy_layout(NFS_I(ino));
+ 			/* Fallthrough */
++		case -EAGAIN:
++			break;
+ 		default:
+ 			if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
+ 				pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
+ 				lseg = NULL;
+ 			}
++			goto out_put_layout_hdr;
++		}
++		if (lseg) {
++			if (first)
++				pnfs_clear_first_layoutget(lo);
++			trace_pnfs_update_layout(ino, pos, count,
++				iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
++			pnfs_put_layout_hdr(lo);
++			goto lookup_again;
+ 		}
+ 	} else {
+ 		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
+ 	}
+ 
+-	atomic_dec(&lo->plh_outstanding);
+ out_put_layout_hdr:
+ 	if (first)
+ 		pnfs_clear_first_layoutget(lo);
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index e1c74d3..649fa5e 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1289,6 +1289,9 @@ int nfs_updatepage(struct file *file, struct page *page,
+ 	dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
+ 		file, count, (long long)(page_file_offset(page) + offset));
+ 
++	if (!count)
++		goto out;
++
+ 	if (nfs_can_extend_write(file, page, inode)) {
+ 		count = max(count + offset, nfs_page_length(page));
+ 		offset = 0;
+@@ -1299,7 +1302,7 @@ int nfs_updatepage(struct file *file, struct page *page,
+ 		nfs_set_pageerror(page);
+ 	else
+ 		__set_page_dirty_nobuffers(page);
+-
++out:
+ 	dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
+ 			status, (long long)i_size_read(inode));
+ 	return status;
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 70d0b9b..806eda1 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4906,6 +4906,32 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	return nfs_ok;
+ }
+ 
++static __be32
++nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
++{
++	struct nfs4_ol_stateid *stp = openlockstateid(s);
++	__be32 ret;
++
++	mutex_lock(&stp->st_mutex);
++
++	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
++	if (ret)
++		goto out;
++
++	ret = nfserr_locks_held;
++	if (check_for_locks(stp->st_stid.sc_file,
++			    lockowner(stp->st_stateowner)))
++		goto out;
++
++	release_lock_stateid(stp);
++	ret = nfs_ok;
++
++out:
++	mutex_unlock(&stp->st_mutex);
++	nfs4_put_stid(s);
++	return ret;
++}
++
+ __be32
+ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		   struct nfsd4_free_stateid *free_stateid)
+@@ -4913,7 +4939,6 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	stateid_t *stateid = &free_stateid->fr_stateid;
+ 	struct nfs4_stid *s;
+ 	struct nfs4_delegation *dp;
+-	struct nfs4_ol_stateid *stp;
+ 	struct nfs4_client *cl = cstate->session->se_client;
+ 	__be32 ret = nfserr_bad_stateid;
+ 
+@@ -4932,18 +4957,9 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		ret = nfserr_locks_held;
+ 		break;
+ 	case NFS4_LOCK_STID:
+-		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
+-		if (ret)
+-			break;
+-		stp = openlockstateid(s);
+-		ret = nfserr_locks_held;
+-		if (check_for_locks(stp->st_stid.sc_file,
+-				    lockowner(stp->st_stateowner)))
+-			break;
+-		WARN_ON(!unhash_lock_stateid(stp));
++		atomic_inc(&s->sc_count);
+ 		spin_unlock(&cl->cl_lock);
+-		nfs4_put_stid(s);
+-		ret = nfs_ok;
++		ret = nfsd4_free_lock_stateid(stateid, s);
+ 		goto out;
+ 	case NFS4_REVOKED_DELEG_STID:
+ 		dp = delegstateid(s);
+@@ -5510,7 +5526,7 @@ static __be32
+ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
+ 			    struct nfs4_ol_stateid *ost,
+ 			    struct nfsd4_lock *lock,
+-			    struct nfs4_ol_stateid **lst, bool *new)
++			    struct nfs4_ol_stateid **plst, bool *new)
+ {
+ 	__be32 status;
+ 	struct nfs4_file *fi = ost->st_stid.sc_file;
+@@ -5518,7 +5534,9 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
+ 	struct nfs4_client *cl = oo->oo_owner.so_client;
+ 	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
+ 	struct nfs4_lockowner *lo;
++	struct nfs4_ol_stateid *lst;
+ 	unsigned int strhashval;
++	bool hashed;
+ 
+ 	lo = find_lockowner_str(cl, &lock->lk_new_owner);
+ 	if (!lo) {
+@@ -5534,12 +5552,27 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
+ 			goto out;
+ 	}
+ 
+-	*lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
+-	if (*lst == NULL) {
++retry:
++	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
++	if (lst == NULL) {
+ 		status = nfserr_jukebox;
+ 		goto out;
+ 	}
++
++	mutex_lock(&lst->st_mutex);
++
++	/* See if it's still hashed to avoid race with FREE_STATEID */
++	spin_lock(&cl->cl_lock);
++	hashed = !list_empty(&lst->st_perfile);
++	spin_unlock(&cl->cl_lock);
++
++	if (!hashed) {
++		mutex_unlock(&lst->st_mutex);
++		nfs4_put_stid(&lst->st_stid);
++		goto retry;
++	}
+ 	status = nfs_ok;
++	*plst = lst;
+ out:
+ 	nfs4_put_stateowner(&lo->lo_owner);
+ 	return status;
+@@ -5606,8 +5639,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 			goto out;
+ 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
+ 							&lock_stp, &new);
+-		if (status == nfs_ok)
+-			mutex_lock(&lock_stp->st_mutex);
+ 	} else {
+ 		status = nfs4_preprocess_seqid_op(cstate,
+ 				       lock->lk_old_lock_seqid,
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 9a7693d..6db75cb 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -404,7 +404,8 @@ static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
+ static bool ovl_dentry_remote(struct dentry *dentry)
+ {
+ 	return dentry->d_flags &
+-		(DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
++		(DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE |
++		 DCACHE_OP_REAL);
+ }
+ 
+ static bool ovl_dentry_weird(struct dentry *dentry)
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 4c463b9..a36a5a4 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -87,6 +87,12 @@ xfs_find_bdev_for_inode(
+  * We're now finished for good with this page.  Update the page state via the
+  * associated buffer_heads, paying attention to the start and end offsets that
+  * we need to process on the page.
++ *
++ * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
++ * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
++ * the page at all, as we may be racing with memory reclaim and it can free both
++ * the bufferhead chain and the page as it will see the page as clean and
++ * unused.
+  */
+ static void
+ xfs_finish_page_writeback(
+@@ -95,8 +101,9 @@ xfs_finish_page_writeback(
+ 	int			error)
+ {
+ 	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
+-	struct buffer_head	*head, *bh;
++	struct buffer_head	*head, *bh, *next;
+ 	unsigned int		off = 0;
++	unsigned int		bsize;
+ 
+ 	ASSERT(bvec->bv_offset < PAGE_SIZE);
+ 	ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
+@@ -105,15 +112,17 @@ xfs_finish_page_writeback(
+ 
+ 	bh = head = page_buffers(bvec->bv_page);
+ 
++	bsize = bh->b_size;
+ 	do {
++		next = bh->b_this_page;
+ 		if (off < bvec->bv_offset)
+ 			goto next_bh;
+ 		if (off > end)
+ 			break;
+ 		bh->b_end_io(bh, !error);
+ next_bh:
+-		off += bh->b_size;
+-	} while ((bh = bh->b_this_page) != head);
++		off += bsize;
++	} while ((bh = next) != head);
+ }
+ 
+ /*
+diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
+index 3f10307..c357f27 100644
+--- a/include/linux/backing-dev-defs.h
++++ b/include/linux/backing-dev-defs.h
+@@ -163,6 +163,7 @@ struct backing_dev_info {
+ 	wait_queue_head_t wb_waitq;
+ 
+ 	struct device *dev;
++	struct device *owner;
+ 
+ 	struct timer_list laptop_mode_wb_timer;
+ 
+diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
+index c82794f..89d3de3 100644
+--- a/include/linux/backing-dev.h
++++ b/include/linux/backing-dev.h
+@@ -24,6 +24,7 @@ __printf(3, 4)
+ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+ 		const char *fmt, ...);
+ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
++int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
+ void bdi_unregister(struct backing_dev_info *bdi);
+ 
+ int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index 9faebf7..75fadd2 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -527,11 +527,14 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
+ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
+ int bio_associate_current(struct bio *bio);
+ void bio_disassociate_task(struct bio *bio);
++void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
+ #else	/* CONFIG_BLK_CGROUP */
+ static inline int bio_associate_blkcg(struct bio *bio,
+ 			struct cgroup_subsys_state *blkcg_css) { return 0; }
+ static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
+ static inline void bio_disassociate_task(struct bio *bio) { }
++static inline void bio_clone_blkcg_association(struct bio *dst,
++			struct bio *src) { }
+ #endif	/* CONFIG_BLK_CGROUP */
+ 
+ #ifdef CONFIG_HIGHMEM
+diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
+index ab31081..7879bf4 100644
+--- a/include/linux/mlx5/qp.h
++++ b/include/linux/mlx5/qp.h
+@@ -556,9 +556,9 @@ struct mlx5_destroy_qp_mbox_out {
+ struct mlx5_modify_qp_mbox_in {
+ 	struct mlx5_inbox_hdr	hdr;
+ 	__be32			qpn;
+-	u8			rsvd1[4];
+-	__be32			optparam;
+ 	u8			rsvd0[4];
++	__be32			optparam;
++	u8			rsvd1[4];
+ 	struct mlx5_qp_context	ctx;
+ 	u8			rsvd2[16];
+ };
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 7e440d4..e694f02 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -1428,6 +1428,10 @@ struct ib_srq {
+ 	} ext;
+ };
+ 
++/*
++ * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
++ * @max_read_sge:  Maximum SGE elements per RDMA READ request.
++ */
+ struct ib_qp {
+ 	struct ib_device       *device;
+ 	struct ib_pd	       *pd;
+@@ -1449,6 +1453,8 @@ struct ib_qp {
+ 	void                  (*event_handler)(struct ib_event *, void *);
+ 	void		       *qp_context;
+ 	u32			qp_num;
++	u32			max_write_sge;
++	u32			max_read_sge;
+ 	enum ib_qp_type		qp_type;
+ };
+ 
+diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
+index d8ab510..f6f3bc5 100644
+--- a/include/target/target_core_backend.h
++++ b/include/target/target_core_backend.h
+@@ -95,6 +95,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
+ bool target_sense_desc_format(struct se_device *dev);
+ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
+ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+-				       struct request_queue *q, int block_size);
++				       struct request_queue *q);
+ 
+ #endif /* TARGET_CORE_BACKEND_H */
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index b316b44..fb8e3b6 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -142,6 +142,7 @@ enum se_cmd_flags_table {
+ 	SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
+ 	SCF_ACK_KREF			= 0x00400000,
+ 	SCF_USE_CPUID			= 0x00800000,
++	SCF_TASK_ATTR_SET		= 0x01000000,
+ };
+ 
+ /*
+diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
+index de44462..5cd6faa 100644
+--- a/include/target/target_core_fabric.h
++++ b/include/target/target_core_fabric.h
+@@ -163,7 +163,6 @@ int	core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
+ void	core_tmr_release_req(struct se_tmr_req *);
+ int	transport_generic_handle_tmr(struct se_cmd *);
+ void	transport_generic_request_failure(struct se_cmd *, sense_reason_t);
+-void	__target_execute_cmd(struct se_cmd *);
+ int	transport_lookup_tmr_lun(struct se_cmd *, u64);
+ void	core_allocate_nexus_loss_ua(struct se_node_acl *acl);
+ 
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index 003dca9..5664ca0 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -529,20 +529,27 @@ TRACE_EVENT(svc_xprt_do_enqueue,
+ 
+ 	TP_STRUCT__entry(
+ 		__field(struct svc_xprt *, xprt)
+-		__field_struct(struct sockaddr_storage, ss)
+ 		__field(int, pid)
+ 		__field(unsigned long, flags)
++		__dynamic_array(unsigned char, addr, xprt != NULL ?
++			xprt->xpt_remotelen : 0)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->xprt = xprt;
+-		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+ 		__entry->pid = rqst? rqst->rq_task->pid : 0;
+-		__entry->flags = xprt ? xprt->xpt_flags : 0;
++		if (xprt) {
++			memcpy(__get_dynamic_array(addr),
++				&xprt->xpt_remote,
++				xprt->xpt_remotelen);
++			__entry->flags = xprt->xpt_flags;
++		} else
++			__entry->flags = 0;
+ 	),
+ 
+ 	TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
+-		(struct sockaddr *)&__entry->ss,
++		__get_dynamic_array_len(addr) != 0 ?
++			(struct sockaddr *)__get_dynamic_array(addr) : NULL,
+ 		__entry->pid, show_svc_xprt_flags(__entry->flags))
+ );
+ 
+@@ -553,18 +560,25 @@ TRACE_EVENT(svc_xprt_dequeue,
+ 
+ 	TP_STRUCT__entry(
+ 		__field(struct svc_xprt *, xprt)
+-		__field_struct(struct sockaddr_storage, ss)
+ 		__field(unsigned long, flags)
++		__dynamic_array(unsigned char, addr, xprt != NULL ?
++			xprt->xpt_remotelen : 0)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__entry->xprt = xprt,
+-		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+-		__entry->flags = xprt ? xprt->xpt_flags : 0;
++		__entry->xprt = xprt;
++		if (xprt) {
++			memcpy(__get_dynamic_array(addr),
++					&xprt->xpt_remote,
++					xprt->xpt_remotelen);
++			__entry->flags = xprt->xpt_flags;
++		} else
++			__entry->flags = 0;
+ 	),
+ 
+ 	TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt,
+-		(struct sockaddr *)&__entry->ss,
++		__get_dynamic_array_len(addr) != 0 ?
++			(struct sockaddr *)__get_dynamic_array(addr) : NULL,
+ 		show_svc_xprt_flags(__entry->flags))
+ );
+ 
+@@ -592,19 +606,26 @@ TRACE_EVENT(svc_handle_xprt,
+ 	TP_STRUCT__entry(
+ 		__field(struct svc_xprt *, xprt)
+ 		__field(int, len)
+-		__field_struct(struct sockaddr_storage, ss)
+ 		__field(unsigned long, flags)
++		__dynamic_array(unsigned char, addr, xprt != NULL ?
++			xprt->xpt_remotelen : 0)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->xprt = xprt;
+-		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+ 		__entry->len = len;
+-		__entry->flags = xprt ? xprt->xpt_flags : 0;
++		if (xprt) {
++			memcpy(__get_dynamic_array(addr),
++					&xprt->xpt_remote,
++					xprt->xpt_remotelen);
++			__entry->flags = xprt->xpt_flags;
++		} else
++			__entry->flags = 0;
+ 	),
+ 
+ 	TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
+-		(struct sockaddr *)&__entry->ss,
++		__get_dynamic_array_len(addr) != 0 ?
++			(struct sockaddr *)__get_dynamic_array(addr) : NULL,
+ 		__entry->len, show_svc_xprt_flags(__entry->flags))
+ );
+ #endif /* _TRACE_SUNRPC_H */
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 2672d10..b334128 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -72,6 +72,7 @@
+ #include <linux/compat.h>
+ #include <linux/ctype.h>
+ #include <linux/string.h>
++#include <linux/uaccess.h>
+ #include <uapi/linux/limits.h>
+ 
+ #include "audit.h"
+@@ -81,7 +82,8 @@
+ #define AUDITSC_SUCCESS 1
+ #define AUDITSC_FAILURE 2
+ 
+-/* no execve audit message should be longer than this (userspace limits) */
++/* no execve audit message should be longer than this (userspace limits),
++ * see the note near the top of audit_log_execve_info() about this value */
+ #define MAX_EXECVE_AUDIT_LEN 7500
+ 
+ /* max length to print of cmdline/proctitle value during audit */
+@@ -987,184 +989,178 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
+ 	return rc;
+ }
+ 
+-/*
+- * to_send and len_sent accounting are very loose estimates.  We aren't
+- * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being
+- * within about 500 bytes (next page boundary)
+- *
+- * why snprintf?  an int is up to 12 digits long.  if we just assumed when
+- * logging that a[%d]= was going to be 16 characters long we would be wasting
+- * space in every audit message.  In one 7500 byte message we can log up to
+- * about 1000 min size arguments.  That comes down to about 50% waste of space
+- * if we didn't do the snprintf to find out how long arg_num_len was.
+- */
+-static int audit_log_single_execve_arg(struct audit_context *context,
+-					struct audit_buffer **ab,
+-					int arg_num,
+-					size_t *len_sent,
+-					const char __user *p,
+-					char *buf)
++static void audit_log_execve_info(struct audit_context *context,
++				  struct audit_buffer **ab)
+ {
+-	char arg_num_len_buf[12];
+-	const char __user *tmp_p = p;
+-	/* how many digits are in arg_num? 5 is the length of ' a=""' */
+-	size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5;
+-	size_t len, len_left, to_send;
+-	size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;
+-	unsigned int i, has_cntl = 0, too_long = 0;
+-	int ret;
+-
+-	/* strnlen_user includes the null we don't want to send */
+-	len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1;
+-
+-	/*
+-	 * We just created this mm, if we can't find the strings
+-	 * we just copied into it something is _very_ wrong. Similar
+-	 * for strings that are too long, we should not have created
+-	 * any.
+-	 */
+-	if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {
+-		send_sig(SIGKILL, current, 0);
+-		return -1;
++	long len_max;
++	long len_rem;
++	long len_full;
++	long len_buf;
++	long len_abuf;
++	long len_tmp;
++	bool require_data;
++	bool encode;
++	unsigned int iter;
++	unsigned int arg;
++	char *buf_head;
++	char *buf;
++	const char __user *p = (const char __user *)current->mm->arg_start;
++
++	/* NOTE: this buffer needs to be large enough to hold all the non-arg
++	 *       data we put in the audit record for this argument (see the
++	 *       code below) ... at this point in time 96 is plenty */
++	char abuf[96];
++
++	/* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the
++	 *       current value of 7500 is not as important as the fact that it
++	 *       is less than 8k, a setting of 7500 gives us plenty of wiggle
++	 *       room if we go over a little bit in the logging below */
++	WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500);
++	len_max = MAX_EXECVE_AUDIT_LEN;
++
++	/* scratch buffer to hold the userspace args */
++	buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
++	if (!buf_head) {
++		audit_panic("out of memory for argv string");
++		return;
+ 	}
++	buf = buf_head;
+ 
+-	/* walk the whole argument looking for non-ascii chars */
++	audit_log_format(*ab, "argc=%d", context->execve.argc);
++
++	len_rem = len_max;
++	len_buf = 0;
++	len_full = 0;
++	require_data = true;
++	encode = false;
++	iter = 0;
++	arg = 0;
+ 	do {
+-		if (len_left > MAX_EXECVE_AUDIT_LEN)
+-			to_send = MAX_EXECVE_AUDIT_LEN;
+-		else
+-			to_send = len_left;
+-		ret = copy_from_user(buf, tmp_p, to_send);
+-		/*
+-		 * There is no reason for this copy to be short. We just
+-		 * copied them here, and the mm hasn't been exposed to user-
+-		 * space yet.
+-		 */
+-		if (ret) {
+-			WARN_ON(1);
+-			send_sig(SIGKILL, current, 0);
+-			return -1;
+-		}
+-		buf[to_send] = '\0';
+-		has_cntl = audit_string_contains_control(buf, to_send);
+-		if (has_cntl) {
+-			/*
+-			 * hex messages get logged as 2 bytes, so we can only
+-			 * send half as much in each message
+-			 */
+-			max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2;
+-			break;
+-		}
+-		len_left -= to_send;
+-		tmp_p += to_send;
+-	} while (len_left > 0);
+-
+-	len_left = len;
+-
+-	if (len > max_execve_audit_len)
+-		too_long = 1;
+-
+-	/* rewalk the argument actually logging the message */
+-	for (i = 0; len_left > 0; i++) {
+-		int room_left;
+-
+-		if (len_left > max_execve_audit_len)
+-			to_send = max_execve_audit_len;
+-		else
+-			to_send = len_left;
+-
+-		/* do we have space left to send this argument in this ab? */
+-		room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent;
+-		if (has_cntl)
+-			room_left -= (to_send * 2);
+-		else
+-			room_left -= to_send;
+-		if (room_left < 0) {
+-			*len_sent = 0;
+-			audit_log_end(*ab);
+-			*ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE);
+-			if (!*ab)
+-				return 0;
+-		}
++		/* NOTE: we don't ever want to trust this value for anything
++		 *       serious, but the audit record format insists we
++		 *       provide an argument length for really long arguments,
++		 *       e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but
++		 *       to use strncpy_from_user() to obtain this value for
++		 *       recording in the log, although we don't use it
++		 *       anywhere here to avoid a double-fetch problem */
++		if (len_full == 0)
++			len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1;
++
++		/* read more data from userspace */
++		if (require_data) {
++			/* can we make more room in the buffer? */
++			if (buf != buf_head) {
++				memmove(buf_head, buf, len_buf);
++				buf = buf_head;
++			}
++
++			/* fetch as much as we can of the argument */
++			len_tmp = strncpy_from_user(&buf_head[len_buf], p,
++						    len_max - len_buf);
++			if (len_tmp == -EFAULT) {
++				/* unable to copy from userspace */
++				send_sig(SIGKILL, current, 0);
++				goto out;
++			} else if (len_tmp == (len_max - len_buf)) {
++				/* buffer is not large enough */
++				require_data = true;
++				/* NOTE: if we are going to span multiple
++				 *       buffers force the encoding so we stand
++				 *       a chance at a sane len_full value and
++				 *       consistent record encoding */
++				encode = true;
++				len_full = len_full * 2;
++				p += len_tmp;
++			} else {
++				require_data = false;
++				if (!encode)
++					encode = audit_string_contains_control(
++								buf, len_tmp);
++				/* try to use a trusted value for len_full */
++				if (len_full < len_max)
++					len_full = (encode ?
++						    len_tmp * 2 : len_tmp);
++				p += len_tmp + 1;
++			}
++			len_buf += len_tmp;
++			buf_head[len_buf] = '\0';
+ 
+-		/*
+-		 * first record needs to say how long the original string was
+-		 * so we can be sure nothing was lost.
+-		 */
+-		if ((i == 0) && (too_long))
+-			audit_log_format(*ab, " a%d_len=%zu", arg_num,
+-					 has_cntl ? 2*len : len);
+-
+-		/*
+-		 * normally arguments are small enough to fit and we already
+-		 * filled buf above when we checked for control characters
+-		 * so don't bother with another copy_from_user
+-		 */
+-		if (len >= max_execve_audit_len)
+-			ret = copy_from_user(buf, p, to_send);
+-		else
+-			ret = 0;
+-		if (ret) {
+-			WARN_ON(1);
+-			send_sig(SIGKILL, current, 0);
+-			return -1;
++			/* length of the buffer in the audit record? */
++			len_abuf = (encode ? len_buf * 2 : len_buf + 2);
+ 		}
+-		buf[to_send] = '\0';
+-
+-		/* actually log it */
+-		audit_log_format(*ab, " a%d", arg_num);
+-		if (too_long)
+-			audit_log_format(*ab, "[%d]", i);
+-		audit_log_format(*ab, "=");
+-		if (has_cntl)
+-			audit_log_n_hex(*ab, buf, to_send);
+-		else
+-			audit_log_string(*ab, buf);
+-
+-		p += to_send;
+-		len_left -= to_send;
+-		*len_sent += arg_num_len;
+-		if (has_cntl)
+-			*len_sent += to_send * 2;
+-		else
+-			*len_sent += to_send;
+-	}
+-	/* include the null we didn't log */
+-	return len + 1;
+-}
+ 
+-static void audit_log_execve_info(struct audit_context *context,
+-				  struct audit_buffer **ab)
+-{
+-	int i, len;
+-	size_t len_sent = 0;
+-	const char __user *p;
+-	char *buf;
++		/* write as much as we can to the audit log */
++		if (len_buf > 0) {
++			/* NOTE: some magic numbers here - basically if we
++			 *       can't fit a reasonable amount of data into the
++			 *       existing audit buffer, flush it and start with
++			 *       a new buffer */
++			if ((sizeof(abuf) + 8) > len_rem) {
++				len_rem = len_max;
++				audit_log_end(*ab);
++				*ab = audit_log_start(context,
++						      GFP_KERNEL, AUDIT_EXECVE);
++				if (!*ab)
++					goto out;
++			}
+ 
+-	p = (const char __user *)current->mm->arg_start;
++			/* create the non-arg portion of the arg record */
++			len_tmp = 0;
++			if (require_data || (iter > 0) ||
++			    ((len_abuf + sizeof(abuf)) > len_rem)) {
++				if (iter == 0) {
++					len_tmp += snprintf(&abuf[len_tmp],
++							sizeof(abuf) - len_tmp,
++							" a%d_len=%lu",
++							arg, len_full);
++				}
++				len_tmp += snprintf(&abuf[len_tmp],
++						    sizeof(abuf) - len_tmp,
++						    " a%d[%d]=", arg, iter++);
++			} else
++				len_tmp += snprintf(&abuf[len_tmp],
++						    sizeof(abuf) - len_tmp,
++						    " a%d=", arg);
++			WARN_ON(len_tmp >= sizeof(abuf));
++			abuf[sizeof(abuf) - 1] = '\0';
++
++			/* log the arg in the audit record */
++			audit_log_format(*ab, "%s", abuf);
++			len_rem -= len_tmp;
++			len_tmp = len_buf;
++			if (encode) {
++				if (len_abuf > len_rem)
++					len_tmp = len_rem / 2; /* encoding */
++				audit_log_n_hex(*ab, buf, len_tmp);
++				len_rem -= len_tmp * 2;
++				len_abuf -= len_tmp * 2;
++			} else {
++				if (len_abuf > len_rem)
++					len_tmp = len_rem - 2; /* quotes */
++				audit_log_n_string(*ab, buf, len_tmp);
++				len_rem -= len_tmp + 2;
++				/* don't subtract the "2" because we still need
++				 * to add quotes to the remaining string */
++				len_abuf -= len_tmp;
++			}
++			len_buf -= len_tmp;
++			buf += len_tmp;
++		}
+ 
+-	audit_log_format(*ab, "argc=%d", context->execve.argc);
++		/* ready to move to the next argument? */
++		if ((len_buf == 0) && !require_data) {
++			arg++;
++			iter = 0;
++			len_full = 0;
++			require_data = true;
++			encode = false;
++		}
++	} while (arg < context->execve.argc);
+ 
+-	/*
+-	 * we need some kernel buffer to hold the userspace args.  Just
+-	 * allocate one big one rather than allocating one of the right size
+-	 * for every single argument inside audit_log_single_execve_arg()
+-	 * should be <8k allocation so should be pretty safe.
+-	 */
+-	buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
+-	if (!buf) {
+-		audit_panic("out of memory for argv string");
+-		return;
+-	}
++	/* NOTE: the caller handles the final audit_log_end() call */
+ 
+-	for (i = 0; i < context->execve.argc; i++) {
+-		len = audit_log_single_execve_arg(context, ab, i,
+-						  &len_sent, p, buf);
+-		if (len <= 0)
+-			break;
+-		p += len;
+-	}
+-	kfree(buf);
++out:
++	kfree(buf_head);
+ }
+ 
+ static void show_special(struct audit_context *context, int *call_panic)
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 75c0ff0..e0be49f 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2215,12 +2215,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
+ 		goto out_unlock;
+ 	}
+ 
+-	/*
+-	 * We know this subsystem has not yet been bound.  Users in a non-init
+-	 * user namespace may only mount hierarchies with no bound subsystems,
+-	 * i.e. 'none,name=user1'
+-	 */
+-	if (!opts.none && !capable(CAP_SYS_ADMIN)) {
++	/* Hierarchies may only be created in the initial cgroup namespace. */
++	if (ns != &init_cgroup_ns) {
+ 		ret = -EPERM;
+ 		goto out_unlock;
+ 	}
+@@ -2962,6 +2958,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
+ 	int retval = 0;
+ 
+ 	mutex_lock(&cgroup_mutex);
++	percpu_down_write(&cgroup_threadgroup_rwsem);
+ 	for_each_root(root) {
+ 		struct cgroup *from_cgrp;
+ 
+@@ -2976,6 +2973,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
+ 		if (retval)
+ 			break;
+ 	}
++	percpu_up_write(&cgroup_threadgroup_rwsem);
+ 	mutex_unlock(&cgroup_mutex);
+ 
+ 	return retval;
+@@ -4343,6 +4341,8 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+ 
+ 	mutex_lock(&cgroup_mutex);
+ 
++	percpu_down_write(&cgroup_threadgroup_rwsem);
++
+ 	/* all tasks in @from are being moved, all csets are source */
+ 	spin_lock_irq(&css_set_lock);
+ 	list_for_each_entry(link, &from->cset_links, cset_link)
+@@ -4371,6 +4371,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+ 	} while (task && !ret);
+ out_err:
+ 	cgroup_migrate_finish(&preloaded_csets);
++	percpu_up_write(&cgroup_threadgroup_rwsem);
+ 	mutex_unlock(&cgroup_mutex);
+ 	return ret;
+ }
+@@ -6309,14 +6310,11 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
+ 	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+ 		return ERR_PTR(-EPERM);
+ 
+-	mutex_lock(&cgroup_mutex);
++	/* It is not safe to take cgroup_mutex here */
+ 	spin_lock_irq(&css_set_lock);
+-
+ 	cset = task_css_set(current);
+ 	get_css_set(cset);
+-
+ 	spin_unlock_irq(&css_set_lock);
+-	mutex_unlock(&cgroup_mutex);
+ 
+ 	new_ns = alloc_cgroup_ns();
+ 	if (IS_ERR(new_ns)) {
+diff --git a/kernel/module.c b/kernel/module.c
+index 5f71aa6..6458a2f 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2687,13 +2687,18 @@ static inline void kmemleak_load_module(const struct module *mod,
+ #endif
+ 
+ #ifdef CONFIG_MODULE_SIG
+-static int module_sig_check(struct load_info *info)
++static int module_sig_check(struct load_info *info, int flags)
+ {
+ 	int err = -ENOKEY;
+ 	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
+ 	const void *mod = info->hdr;
+ 
+-	if (info->len > markerlen &&
++	/*
++	 * Require flags == 0, as a module with version information
++	 * removed is no longer the module that was signed
++	 */
++	if (flags == 0 &&
++	    info->len > markerlen &&
+ 	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
+ 		/* We truncate the module to discard the signature */
+ 		info->len -= markerlen;
+@@ -2712,7 +2717,7 @@ static int module_sig_check(struct load_info *info)
+ 	return err;
+ }
+ #else /* !CONFIG_MODULE_SIG */
+-static int module_sig_check(struct load_info *info)
++static int module_sig_check(struct load_info *info, int flags)
+ {
+ 	return 0;
+ }
+@@ -3498,7 +3503,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ 	long err;
+ 	char *after_dashes;
+ 
+-	err = module_sig_check(info);
++	err = module_sig_check(info, flags);
+ 	if (err)
+ 		goto free_copy;
+ 
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index ed173b8..9269911 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -825,6 +825,20 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
+ }
+ EXPORT_SYMBOL(bdi_register_dev);
+ 
++int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
++{
++	int rc;
++
++	rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
++			MINOR(owner->devt));
++	if (rc)
++		return rc;
++	bdi->owner = owner;
++	get_device(owner);
++	return 0;
++}
++EXPORT_SYMBOL(bdi_register_owner);
++
+ /*
+  * Remove bdi from bdi_list, and ensure that it is no longer visible
+  */
+@@ -849,6 +863,11 @@ void bdi_unregister(struct backing_dev_info *bdi)
+ 		device_unregister(bdi->dev);
+ 		bdi->dev = NULL;
+ 	}
++
++	if (bdi->owner) {
++		put_device(bdi->owner);
++		bdi->owner = NULL;
++	}
+ }
+ 
+ void bdi_exit(struct backing_dev_info *bdi)
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index addfe4ac..d9ec1a5 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2214,6 +2214,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
+ 		 * and reducing the surplus.
+ 		 */
+ 		spin_unlock(&hugetlb_lock);
++
++		/* yield cpu to avoid soft lockup */
++		cond_resched();
++
+ 		if (hstate_is_gigantic(h))
+ 			ret = alloc_fresh_gigantic_page(h, nodes_allowed);
+ 		else
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 388ee8b..1842141 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -927,7 +927,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		if (get_user(opt, (u32 __user *) optval)) {
++		if (get_user(opt, (u16 __user *) optval)) {
+ 			err = -EFAULT;
+ 			break;
+ 		}
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index 1325776..bd007a9 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -824,7 +824,11 @@ socket_setattr_return:
+  */
+ void netlbl_sock_delattr(struct sock *sk)
+ {
+-	cipso_v4_sock_delattr(sk);
++	switch (sk->sk_family) {
++	case AF_INET:
++		cipso_v4_sock_delattr(sk);
++		break;
++	}
+ }
+ 
+ /**
+@@ -987,7 +991,11 @@ req_setattr_return:
+ */
+ void netlbl_req_delattr(struct request_sock *req)
+ {
+-	cipso_v4_req_delattr(req);
++	switch (req->rsk_ops->family) {
++	case AF_INET:
++		cipso_v4_req_delattr(req);
++		break;
++	}
+ }
+ 
+ /**
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index e1675927..42396a7 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -33,10 +33,17 @@
+ #include <string.h>
+ #include <unistd.h>
+ 
++/*
++ * glibc synced up and added the metag number but didn't add the relocations.
++ * Work around this in a crude manner for now.
++ */
+ #ifndef EM_METAG
+-/* Remove this when these make it to the standard system elf.h. */
+ #define EM_METAG      174
++#endif
++#ifndef R_METAG_ADDR32
+ #define R_METAG_ADDR32                   2
++#endif
++#ifndef R_METAG_NONE
+ #define R_METAG_NONE                     3
+ #endif
+ 
+diff --git a/sound/hda/array.c b/sound/hda/array.c
+index 516795b..5dfa610 100644
+--- a/sound/hda/array.c
++++ b/sound/hda/array.c
+@@ -21,13 +21,15 @@ void *snd_array_new(struct snd_array *array)
+ 		return NULL;
+ 	if (array->used >= array->alloced) {
+ 		int num = array->alloced + array->alloc_align;
++		int oldsize = array->alloced * array->elem_size;
+ 		int size = (num + 1) * array->elem_size;
+ 		void *nlist;
+ 		if (snd_BUG_ON(num >= 4096))
+ 			return NULL;
+-		nlist = krealloc(array->list, size, GFP_KERNEL | __GFP_ZERO);
++		nlist = krealloc(array->list, size, GFP_KERNEL);
+ 		if (!nlist)
+ 			return NULL;
++		memset(nlist + oldsize, 0, size - oldsize);
+ 		array->list = nlist;
+ 		array->alloced = num;
+ 	}
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 6f8ea13..89dacf9 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2265,6 +2265,8 @@ static const struct pci_device_id azx_ids[] = {
+ 	{ PCI_DEVICE(0x1022, 0x780d),
+ 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ 	/* ATI HDMI */
++	{ PCI_DEVICE(0x1002, 0x0002),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0x1308),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0x157a),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index abcb5a6..f25479b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4674,6 +4674,22 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc298_fixup_speaker_volume(struct hda_codec *codec,
++					const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		/* The speaker is routed to the Node 0x06 by a mistake, as a result
++		   we can't adjust the speaker's volume since this node does not has
++		   Amp-out capability. we change the speaker's route to:
++		   Node 0x02 (Audio Output) -> Node 0x0c (Audio Mixer) -> Node 0x17 (
++		   Pin Complex), since Node 0x02 has Amp-out caps, we can adjust
++		   speaker's volume now. */
++
++		hda_nid_t conn1[1] = { 0x0c };
++		snd_hda_override_conn_list(codec, 0x17, 1, conn1);
++	}
++}
++
+ /* Hook to update amp GPIO4 for automute */
+ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
+ 					  struct hda_jack_callback *jack)
+@@ -4823,6 +4839,7 @@ enum {
+ 	ALC280_FIXUP_HP_HEADSET_MIC,
+ 	ALC221_FIXUP_HP_FRONT_MIC,
+ 	ALC292_FIXUP_TPT460,
++	ALC298_FIXUP_SPK_VOLUME,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5478,6 +5495,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
+ 	},
++	[ALC298_FIXUP_SPK_VOLUME] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc298_fixup_speaker_volume,
++		.chained = true,
++		.chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5524,6 +5547,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
++	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -5799,6 +5823,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x1b, 0x01014020},
+ 		{0x21, 0x0221103f}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x14, 0x90170130},
++		{0x1b, 0x02011020},
++		{0x21, 0x0221103f}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		{0x14, 0x90170150},
+ 		{0x1b, 0x02011020},
+ 		{0x21, 0x0221105f}),
+diff --git a/tools/objtool/.gitignore b/tools/objtool/.gitignore
+index a0b3128..d3102c8 100644
+--- a/tools/objtool/.gitignore
++++ b/tools/objtool/.gitignore
+@@ -1,2 +1,3 @@
+ arch/x86/insn/inat-tables.c
+ objtool
++fixdep
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 48bd520..dd25346 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -148,6 +148,7 @@ int vcpu_load(struct kvm_vcpu *vcpu)
+ 	put_cpu();
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(vcpu_load);
+ 
+ void vcpu_put(struct kvm_vcpu *vcpu)
+ {
+@@ -157,6 +158,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
+ 	preempt_enable();
+ 	mutex_unlock(&vcpu->mutex);
+ }
++EXPORT_SYMBOL_GPL(vcpu_put);
+ 
+ static void ack_flush(void *_completed)
+ {

diff --git a/4.7.1/4420_grsecurity-3.1-4.7.1-201608161813.patch b/4.7.2/4420_grsecurity-3.1-4.7.2-201608211829.patch
similarity index 99%
rename from 4.7.1/4420_grsecurity-3.1-4.7.1-201608161813.patch
rename to 4.7.2/4420_grsecurity-3.1-4.7.2-201608211829.patch
index d01aa5c..6aabc5c 100644
--- a/4.7.1/4420_grsecurity-3.1-4.7.1-201608161813.patch
+++ b/4.7.2/4420_grsecurity-3.1-4.7.2-201608211829.patch
@@ -420,7 +420,7 @@ index a3683ce..5ec8bf4 100644
  
  A toggle value indicating if modules are allowed to be loaded
 diff --git a/Makefile b/Makefile
-index 84335c0..6cb42d3 100644
+index bb98f1c..eca0654 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -4775,7 +4775,7 @@ index a5bc92d..0bb4730 100644
 +	pax_close_kernel();
  }
 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 5a0a691..658577c 100644
+index 2038492..b81b162 100644
 --- a/arch/arm64/Kconfig
 +++ b/arch/arm64/Kconfig
 @@ -76,6 +76,7 @@ config ARM64
@@ -31946,7 +31946,7 @@ index 16ef31b..23496f1 100644
  	.disabled_by_bios = is_disabled,
  	.hardware_setup = svm_hardware_setup,
 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 64a79f2..d392b19 100644
+index 8326d68..3cc3895 100644
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
 @@ -1589,14 +1589,14 @@ static __always_inline void vmcs_writel(unsigned long field, unsigned long value
@@ -32051,7 +32051,7 @@ index 64a79f2..d392b19 100644
  	}
  
  	kvm_set_posted_intr_wakeup_handler(wakeup_handler);
-@@ -8718,6 +8737,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8719,6 +8738,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  		"jmp 2f \n\t"
  		"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
  		"2: "
@@ -32064,7 +32064,7 @@ index 64a79f2..d392b19 100644
  		/* Save guest registers, load host registers, keep flags */
  		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
  		"pop %0 \n\t"
-@@ -8770,6 +8795,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8771,6 +8796,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  #endif
  		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
  		[wordsize]"i"(sizeof(ulong))
@@ -32076,7 +32076,7 @@ index 64a79f2..d392b19 100644
  	      : "cc", "memory"
  #ifdef CONFIG_X86_64
  		, "rax", "rbx", "rdi", "rsi"
-@@ -8783,7 +8813,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8784,7 +8814,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  	if (debugctlmsr)
  		update_debugctlmsr(debugctlmsr);
  
@@ -32085,7 +32085,7 @@ index 64a79f2..d392b19 100644
  	/*
  	 * The sysexit path does not restore ds/es, so we must set them to
  	 * a reasonable value ourselves.
-@@ -8792,8 +8822,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8793,8 +8823,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  	 * may be executed in interrupt context, which saves and restore segments
  	 * around it, nullifying its effect.
  	 */
@@ -32106,7 +32106,7 @@ index 64a79f2..d392b19 100644
  #endif
  
  	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
-@@ -10905,7 +10945,7 @@ out:
+@@ -10921,7 +10961,7 @@ out:
  	return ret;
  }
  
@@ -32116,10 +32116,10 @@ index 64a79f2..d392b19 100644
  	.disabled_by_bios = vmx_disabled_by_bios,
  	.hardware_setup = hardware_setup,
 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 7da5dd2..b667a94 100644
+index fea2c57..19b3e60 100644
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -1940,8 +1940,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -1941,8 +1941,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
  {
  	struct kvm *kvm = vcpu->kvm;
  	int lm = is_long_mode(vcpu);
@@ -32130,7 +32130,7 @@ index 7da5dd2..b667a94 100644
  	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
  		: kvm->arch.xen_hvm_config.blob_size_32;
  	u32 page_num = data & ~PAGE_MASK;
-@@ -2646,6 +2646,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -2647,6 +2647,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
  		if (n < msr_list.nmsrs)
  			goto out;
  		r = -EFAULT;
@@ -32139,7 +32139,7 @@ index 7da5dd2..b667a94 100644
  		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
  				 num_msrs_to_save * sizeof(u32)))
  			goto out;
-@@ -3054,7 +3056,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+@@ -3055,7 +3057,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
  
  static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
  {
@@ -32148,7 +32148,7 @@ index 7da5dd2..b667a94 100644
  	u64 xstate_bv = xsave->header.xfeatures;
  	u64 valid;
  
-@@ -3090,7 +3092,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+@@ -3091,7 +3093,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
  
  static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
  {
@@ -32157,7 +32157,7 @@ index 7da5dd2..b667a94 100644
  	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
  	u64 valid;
  
-@@ -3134,7 +3136,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+@@ -3135,7 +3137,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
  		fill_xsave((u8 *) guest_xsave->region, vcpu);
  	} else {
  		memcpy(guest_xsave->region,
@@ -32166,7 +32166,7 @@ index 7da5dd2..b667a94 100644
  			sizeof(struct fxregs_state));
  		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
  			XFEATURE_MASK_FPSSE;
-@@ -3159,7 +3161,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
+@@ -3160,7 +3162,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
  	} else {
  		if (xstate_bv & ~XFEATURE_MASK_FPSSE)
  			return -EINVAL;
@@ -32183,7 +32183,7 @@ index 7da5dd2..b667a94 100644
  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
  {
  	int r;
-@@ -6720,6 +6723,7 @@ out:
+@@ -6732,6 +6735,7 @@ out:
  	return r;
  }
  
@@ -32191,7 +32191,7 @@ index 7da5dd2..b667a94 100644
  static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
  {
  	if (!kvm_arch_vcpu_runnable(vcpu) &&
-@@ -7267,7 +7271,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+@@ -7279,7 +7283,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
  int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  {
  	struct fxregs_state *fxsave =
@@ -32200,7 +32200,7 @@ index 7da5dd2..b667a94 100644
  
  	memcpy(fpu->fpr, fxsave->st_space, 128);
  	fpu->fcw = fxsave->cwd;
-@@ -7284,7 +7288,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+@@ -7296,7 +7300,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  {
  	struct fxregs_state *fxsave =
@@ -32209,7 +32209,7 @@ index 7da5dd2..b667a94 100644
  
  	memcpy(fxsave->st_space, fpu->fpr, 128);
  	fxsave->cwd = fpu->fcw;
-@@ -7300,9 +7304,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+@@ -7312,9 +7316,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  
  static void fx_init(struct kvm_vcpu *vcpu)
  {
@@ -32221,7 +32221,7 @@ index 7da5dd2..b667a94 100644
  			host_xcr0 | XSTATE_COMPACTION_ENABLED;
  
  	/*
-@@ -7325,7 +7329,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+@@ -7337,7 +7341,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
  	 */
  	vcpu->guest_fpu_loaded = 1;
  	__kernel_fpu_begin();
@@ -32230,7 +32230,7 @@ index 7da5dd2..b667a94 100644
  	trace_kvm_fpu(1);
  }
  
-@@ -7627,6 +7631,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+@@ -7640,6 +7644,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
  struct static_key kvm_no_apic_vcpu __read_mostly;
  EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
  
@@ -32239,7 +32239,7 @@ index 7da5dd2..b667a94 100644
  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
  {
  	struct page *page;
-@@ -7644,11 +7650,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+@@ -7657,11 +7663,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
  	else
  		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
  
@@ -32258,7 +32258,7 @@ index 7da5dd2..b667a94 100644
  	vcpu->arch.pio_data = page_address(page);
  
  	kvm_set_tsc_khz(vcpu, max_tsc_khz);
-@@ -7706,6 +7715,9 @@ fail_mmu_destroy:
+@@ -7719,6 +7728,9 @@ fail_mmu_destroy:
  	kvm_mmu_destroy(vcpu);
  fail_free_pio_data:
  	free_page((unsigned long)vcpu->arch.pio_data);
@@ -32268,7 +32268,7 @@ index 7da5dd2..b667a94 100644
  fail:
  	return r;
  }
-@@ -7724,6 +7736,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+@@ -7737,6 +7749,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
  	free_page((unsigned long)vcpu->arch.pio_data);
  	if (!lapic_in_kernel(vcpu))
  		static_key_slow_dec(&kvm_no_apic_vcpu);
@@ -38100,10 +38100,10 @@ index 71e8a67..6a313bb 100644
  struct op_counter_config;
  
 diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
-index 8b93e63..9cf21c3 100644
+index ae97f24..d042e97 100644
 --- a/arch/x86/pci/intel_mid_pci.c
 +++ b/arch/x86/pci/intel_mid_pci.c
-@@ -280,7 +280,7 @@ int __init intel_mid_pci_init(void)
+@@ -288,7 +288,7 @@ int __init intel_mid_pci_init(void)
  	pci_mmcfg_late_init();
  	pcibios_enable_irq = intel_mid_pci_irq_enable;
  	pcibios_disable_irq = intel_mid_pci_irq_disable;
@@ -39565,10 +39565,10 @@ index 2f33760..835e50a 100644
  #define XCHAL_ICACHE_SIZE		8192	/* I-cache size in bytes or 0 */
  #define XCHAL_DCACHE_SIZE		8192	/* D-cache size in bytes or 0 */
 diff --git a/block/bio.c b/block/bio.c
-index 0e4aa42..a05fa6e 100644
+index 4623869..7acfd5c 100644
 --- a/block/bio.c
 +++ b/block/bio.c
-@@ -1138,7 +1138,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+@@ -1142,7 +1142,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
  		/*
  		 * Overflow, abort
  		 */
@@ -39577,7 +39577,7 @@ index 0e4aa42..a05fa6e 100644
  			return ERR_PTR(-EINVAL);
  
  		nr_pages += end - start;
-@@ -1263,7 +1263,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+@@ -1267,7 +1267,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
  		/*
  		 * Overflow, abort
  		 */
@@ -39735,7 +39735,7 @@ index 556826a..4e7c5fd 100644
  			err = -EFAULT;
  			goto out;
 diff --git a/block/genhd.c b/block/genhd.c
-index 3eebd25..e8524d8 100644
+index 086f1a3..ee669cd 100644
 --- a/block/genhd.c
 +++ b/block/genhd.c
 @@ -471,21 +471,24 @@ static char *bdevt_str(dev_t devt, char *buf)
@@ -40287,10 +40287,10 @@ index 993fd31..cc15d14 100644
  }
  EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
-index 290d6f5..888bde8 100644
+index f4218df..0d3109e 100644
 --- a/drivers/acpi/ec.c
 +++ b/drivers/acpi/ec.c
-@@ -1536,7 +1536,7 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
+@@ -1542,7 +1542,7 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
  	return 0;
  }
  
@@ -40299,7 +40299,7 @@ index 290d6f5..888bde8 100644
  	{
  	ec_correct_ecdt, "Asus L4R", {
  	DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
-@@ -1613,7 +1613,7 @@ error:
+@@ -1619,7 +1619,7 @@ error:
  	return ret;
  }
  
@@ -40308,7 +40308,7 @@ index 290d6f5..888bde8 100644
  {
  	int result = 0;
  
-@@ -1631,7 +1631,7 @@ static int param_set_event_clearing(const char *val, struct kernel_param *kp)
+@@ -1637,7 +1637,7 @@ static int param_set_event_clearing(const char *val, struct kernel_param *kp)
  	return result;
  }
  
@@ -42613,7 +42613,7 @@ index 4d87499..1e2bcce 100644
  	device->rs_last_events =
  		(int)part_stat_read(&disk->part0, sectors[0]) +
 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
-index 84708a5..95c0e55 100644
+index a1dcf12..3e2ccbd 100644
 --- a/drivers/block/floppy.c
 +++ b/drivers/block/floppy.c
 @@ -961,6 +961,10 @@ static void empty(void)
@@ -43318,7 +43318,7 @@ index d28922d..3c343d6 100644
  
  	if (cmd != SIOCWANDEV)
 diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 87ab9f6..4ce0b84 100644
+index d72c6d1..b85990a 100644
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
 @@ -290,9 +290,6 @@
@@ -43353,7 +43353,7 @@ index 87ab9f6..4ce0b84 100644
  			unsigned int add =
  				((pool_size - entropy_count)*anfrac*3) >> s;
  
-@@ -1231,7 +1228,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+@@ -1232,7 +1229,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
  
  		extract_buf(r, tmp);
  		i = min_t(int, nbytes, EXTRACT_SIZE);
@@ -43362,7 +43362,7 @@ index 87ab9f6..4ce0b84 100644
  			ret = -EFAULT;
  			break;
  		}
-@@ -1650,7 +1647,7 @@ static char sysctl_bootid[16];
+@@ -1655,7 +1652,7 @@ static char sysctl_bootid[16];
  static int proc_do_uuid(struct ctl_table *table, int write,
  			void __user *buffer, size_t *lenp, loff_t *ppos)
  {
@@ -43371,7 +43371,7 @@ index 87ab9f6..4ce0b84 100644
  	unsigned char buf[64], tmp_uuid[16], *uuid;
  
  	uuid = table->data;
-@@ -1680,7 +1677,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
+@@ -1685,7 +1682,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
  static int proc_do_entropy(struct ctl_table *table, int write,
  			   void __user *buffer, size_t *lenp, loff_t *ppos)
  {
@@ -43912,7 +43912,7 @@ index 3001634..911f7d1 100644
  }
  EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
-index 1fa1a32..0f3e23e 100644
+index 1b15917..14b4b83 100644
 --- a/drivers/cpufreq/intel_pstate.c
 +++ b/drivers/cpufreq/intel_pstate.c
 @@ -276,13 +276,13 @@ struct pstate_funcs {
@@ -44537,7 +44537,7 @@ index 93da1a4..5e2c149 100644
  				goto err_out;
  
 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
-index 10c305b..4e92041 100644
+index 4e0f8e7..0eb9499 100644
 --- a/drivers/edac/edac_mc_sysfs.c
 +++ b/drivers/edac/edac_mc_sysfs.c
 @@ -50,7 +50,7 @@ int edac_mc_get_poll_msec(void)
@@ -45202,10 +45202,10 @@ index e055d5be..45982ec 100644
  	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
  };
 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
-index 35a1248..fd2510a 100644
+index 1b4c069..4cb9768 100644
 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
-@@ -496,7 +496,7 @@ static int amdgpu_atpx_init(void)
+@@ -501,7 +501,7 @@ static int amdgpu_atpx_init(void)
   * look up whether we are the integrated or discrete GPU (all asics).
   * Returns the client id.
   */
@@ -45311,7 +45311,7 @@ index cf6f49f..dffb8ab 100644
  
  struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
-index cb07da4..e7cd288 100644
+index ff0b55a..c58880e 100644
 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
 @@ -701,7 +701,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
@@ -45351,7 +45351,7 @@ index cb07da4..e7cd288 100644
  {
  	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
-index 6e92008..6051afc 100644
+index b7f5650..1f1f11a 100644
 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
 @@ -1054,7 +1054,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
@@ -46963,10 +46963,10 @@ index 85c4deb..1b6fe7d 100644
  	return drm_pci_init(&driver, &i915_pci_driver);
  }
 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index bc3f2e6..f8e9150 100644
+index 227a63e..705363d 100644
 --- a/drivers/gpu/drm/i915/i915_drv.h
 +++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -2747,7 +2747,7 @@ struct drm_i915_cmd_table {
+@@ -2749,7 +2749,7 @@ struct drm_i915_cmd_table {
  #include "i915_trace.h"
  
  extern const struct drm_ioctl_desc i915_ioctls[];
@@ -47197,10 +47197,10 @@ index aab47f7..28aabbe 100644
  
  /**
 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 3074c56..2f67a37 100644
+index 3289319..c42af93 100644
 --- a/drivers/gpu/drm/i915/intel_display.c
 +++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -15190,13 +15190,13 @@ struct intel_quirk {
+@@ -15182,13 +15182,13 @@ struct intel_quirk {
  	int subsystem_vendor;
  	int subsystem_device;
  	void (*hook)(struct drm_device *dev);
@@ -47217,7 +47217,7 @@ index 3074c56..2f67a37 100644
  
  static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
  {
-@@ -15204,18 +15204,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -15196,18 +15196,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
  	return 1;
  }
  
@@ -47248,7 +47248,7 @@ index 3074c56..2f67a37 100644
  		.hook = quirk_invert_brightness,
  	},
  };
-@@ -15298,7 +15300,7 @@ static void intel_init_quirks(struct drm_device *dev)
+@@ -15290,7 +15292,7 @@ static void intel_init_quirks(struct drm_device *dev)
  			q->hook(dev);
  	}
  	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
@@ -47467,7 +47467,7 @@ index c108408..575750a 100644
  			     struct drm_display_mode *mode)
  {
 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
-index 11f8dd9..8efc333 100644
+index d6c134b..57fa370 100644
 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
 @@ -82,9 +82,8 @@ MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1
@@ -47481,7 +47481,7 @@ index 11f8dd9..8efc333 100644
  
  static u64
  nouveau_pci_name(struct pci_dev *pdev)
-@@ -931,7 +930,7 @@ nouveau_driver_fops = {
+@@ -940,7 +939,7 @@ nouveau_driver_fops = {
  };
  
  static struct drm_driver
@@ -47490,7 +47490,7 @@ index 11f8dd9..8efc333 100644
  	.driver_features =
  		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
  		DRIVER_KMS_LEGACY_CONTEXT,
-@@ -943,6 +942,8 @@ driver_stub = {
+@@ -952,6 +951,8 @@ driver_stub = {
  	.postclose = nouveau_drm_postclose,
  	.lastclose = nouveau_vga_lastclose,
  
@@ -47499,7 +47499,7 @@ index 11f8dd9..8efc333 100644
  #if defined(CONFIG_DEBUG_FS)
  	.debugfs_init = nouveau_drm_debugfs_init,
  	.debugfs_cleanup = nouveau_drm_debugfs_cleanup,
-@@ -1075,10 +1076,10 @@ err_free:
+@@ -1084,10 +1085,10 @@ err_free:
  static int __init
  nouveau_drm_init(void)
  {
@@ -48133,10 +48133,10 @@ index b928c17..e5d9400 100644
  	if (regcomp
  	    (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
 diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
-index 95f4fea..acecfdf 100644
+index 1b3f4e5..fb17055 100644
 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
 +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
-@@ -494,7 +494,7 @@ static int radeon_atpx_init(void)
+@@ -499,7 +499,7 @@ static int radeon_atpx_init(void)
   * look up whether we are the integrated or discrete GPU (all asics).
   * Returns the client id.
   */
@@ -48146,7 +48146,7 @@ index 95f4fea..acecfdf 100644
  	if (radeon_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
  		return VGA_SWITCHEROO_IGD;
 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
-index 81a63d7..5c7f8e7 100644
+index b79f3b0..a1fd177 100644
 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
 @@ -857,7 +857,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
@@ -51340,7 +51340,7 @@ index cdc7df4..a2fdfdb 100644
  	.maxtype	= IFLA_IPOIB_MAX,
  	.policy		= ipoib_policy,
 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
-index 4a41556..8fcf256 100644
+index 9a3b954..8cd0080 100644
 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
 @@ -80,7 +80,7 @@ module_param(srpt_srq_size, int, 0444);
@@ -51363,7 +51363,7 @@ index 4a41556..8fcf256 100644
  	pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
  		 event->event, ch->cm_id, ch->sess_name, ch->state);
  
-@@ -1622,8 +1623,7 @@ retry:
+@@ -1623,8 +1624,7 @@ retry:
  	}
  
  	qp_init->qp_context = (void *)ch;
@@ -51547,10 +51547,10 @@ index 92e2243..8fd9092 100644
  		.ident = "Shift",
  		.matches = {
 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
-index 634f6363..cfe6f70 100644
+index 2511c8b..e069daf 100644
 --- a/drivers/iommu/amd_iommu.c
 +++ b/drivers/iommu/amd_iommu.c
-@@ -825,11 +825,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
+@@ -827,11 +827,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
  
  static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
  {
@@ -52005,7 +52005,7 @@ index 8c61399..a141537 100644
  	selftest_running = false;
  
 diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
-index a1ed1b7..3df0f22 100644
+index f5c90e1..90a737c 100644
 --- a/drivers/iommu/io-pgtable-arm.c
 +++ b/drivers/iommu/io-pgtable-arm.c
 @@ -39,9 +39,6 @@
@@ -55810,7 +55810,7 @@ index 43824d7..fb6a7b0 100644
  	pmd->bl_info.value_type.inc = data_block_inc;
  	pmd->bl_info.value_type.dec = data_block_dec;
 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index 1b2f962..e2efabd 100644
+index fd40bcb..13b3f293 100644
 --- a/drivers/md/dm.c
 +++ b/drivers/md/dm.c
 @@ -170,8 +170,8 @@ struct mapped_device {
@@ -59540,10 +59540,10 @@ index 3692dd5..b731a9b 100644
  	struct sm_sysfs_attribute *vendor_attribute;
  	char *vendor;
 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
-index ef36182..d5736e4 100644
+index 0680516..eb890f3 100644
 --- a/drivers/mtd/ubi/build.c
 +++ b/drivers/mtd/ubi/build.c
-@@ -1386,7 +1386,7 @@ static int __init bytes_str_to_int(const char *str)
+@@ -1389,7 +1389,7 @@ static int __init bytes_str_to_int(const char *str)
   * This function returns zero in case of success and a negative error code in
   * case of error.
   */
@@ -66288,10 +66288,10 @@ index 779bafc..6ec6ea1 100644
  		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
  		break;
 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-index f603d78..4a66baa 100644
+index d9f1394..ba6ee91 100644
 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
-@@ -2299,7 +2299,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+@@ -2294,7 +2294,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
  	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
  
  	char buf[8];
@@ -66300,7 +66300,7 @@ index f603d78..4a66baa 100644
  	u32 reset_flag;
  
  	memset(buf, 0, sizeof(buf));
-@@ -2320,7 +2320,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
+@@ -2315,7 +2315,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
  {
  	struct iwl_trans *trans = file->private_data;
  	char buf[8];
@@ -70316,7 +70316,7 @@ index 0d17c92..ce5897e 100644
  
  	mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
 diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
-index db3958b..4d1f2ae 100644
+index fe0539e..247590f 100644
 --- a/drivers/remoteproc/remoteproc_core.c
 +++ b/drivers/remoteproc/remoteproc_core.c
 @@ -329,9 +329,10 @@ void rproc_free_vring(struct rproc_vring *rvring)
@@ -72882,7 +72882,7 @@ index b43f7ac..a635694 100644
  	lpfc_transport_template =
  				fc_attach_transport(&lpfc_transport_functions);
 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
-index 3bd0be6..f9b0725e 100644
+index c7e5695..ea80cbc 100644
 --- a/drivers/scsi/lpfc/lpfc_scsi.c
 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
 @@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
@@ -77687,7 +77687,7 @@ index a260cde..604fce9 100644
  /* This is only available if kgdboc is a built in for early debugging */
  static int __init kgdboc_early_init(char *opt)
 diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
-index b7d80bd..50fc855 100644
+index 7d62610..0d0981b 100644
 --- a/drivers/tty/serial/msm_serial.c
 +++ b/drivers/tty/serial/msm_serial.c
 @@ -1561,7 +1561,7 @@ static struct uart_driver msm_uart_driver = {
@@ -77709,7 +77709,7 @@ index b7d80bd..50fc855 100644
  	if (unlikely(line < 0 || line >= UART_NR))
  		return -ENXIO;
 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
-index 99bb231..ba80b07 100644
+index f0bd2ec..da28e78 100644
 --- a/drivers/tty/serial/samsung.c
 +++ b/drivers/tty/serial/samsung.c
 @@ -978,11 +978,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
@@ -97230,10 +97230,10 @@ index e0715fc..76857e9 100644
  		RB_CLEAR_NODE(&merge->rb_node);
  		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
-index 2234e88..6f43c26 100644
+index b56887b..d032076 100644
 --- a/fs/btrfs/file.c
 +++ b/fs/btrfs/file.c
-@@ -1937,7 +1937,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+@@ -1935,7 +1935,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
  		return ret;
  
  	inode_lock(inode);
@@ -97242,7 +97242,7 @@ index 2234e88..6f43c26 100644
  	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
  			     &BTRFS_I(inode)->runtime_flags);
  	/*
-@@ -1991,7 +1991,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+@@ -1989,7 +1989,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
  		inode_unlock(inode);
  		goto out;
  	}
@@ -97791,22 +97791,21 @@ index 2fcde1a..5986a27 100644
  
  #else
 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
-index 4ae7500..fef2d10 100644
+index 3f7c2cd..6014026 100644
 --- a/fs/cachefiles/namei.c
 +++ b/fs/cachefiles/namei.c
-@@ -273,9 +273,9 @@ void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
+@@ -275,8 +275,8 @@ void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
  	/* This object can now be culled, so we need to let the daemon know
  	 * that there is something it can remove if it needs to.
  	 */
--	atomic_long_add(d_backing_inode(object->dentry)->i_blocks,
-+	atomic_long_add_unchecked(d_backing_inode(object->dentry)->i_blocks,
- 			&cache->b_released);
+-	atomic_long_add(i_blocks, &cache->b_released);
 -	if (atomic_inc_return(&cache->f_released))
++	atomic_long_add_unchecked(i_blocks, &cache->b_released);
 +	if (atomic_inc_return_unchecked(&cache->f_released))
  		cachefiles_state_changed(cache);
  }
  
-@@ -334,7 +334,7 @@ try_again:
+@@ -335,7 +335,7 @@ try_again:
  	/* first step is to make up a grave dentry in the graveyard */
  	sprintf(nbuffer, "%08x%08x",
  		(uint32_t) get_seconds(),
@@ -97936,10 +97935,10 @@ index 788e191..ca209d6 100644
  					server->ops->print_stats(m, tcon);
  			}
 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
-index 5d841f3..761ed68 100644
+index 6bbec5e..fc3f5cd 100644
 --- a/fs/cifs/cifsfs.c
 +++ b/fs/cifs/cifsfs.c
-@@ -1143,7 +1143,7 @@ cifs_init_request_bufs(void)
+@@ -1155,7 +1155,7 @@ cifs_init_request_bufs(void)
  */
  	cifs_req_cachep = kmem_cache_create("cifs_request",
  					    CIFSMaxBufSize + max_hdr_size, 0,
@@ -97948,7 +97947,7 @@ index 5d841f3..761ed68 100644
  	if (cifs_req_cachep == NULL)
  		return -ENOMEM;
  
-@@ -1170,7 +1170,7 @@ cifs_init_request_bufs(void)
+@@ -1182,7 +1182,7 @@ cifs_init_request_bufs(void)
  	efficient to alloc 1 per page off the slab compared to 17K (5page)
  	alloc of large cifs buffers even when page debugging is on */
  	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
@@ -97957,7 +97956,7 @@ index 5d841f3..761ed68 100644
  			NULL);
  	if (cifs_sm_req_cachep == NULL) {
  		mempool_destroy(cifs_req_poolp);
-@@ -1255,8 +1255,8 @@ init_cifs(void)
+@@ -1267,8 +1267,8 @@ init_cifs(void)
  	atomic_set(&bufAllocCount, 0);
  	atomic_set(&smBufAllocCount, 0);
  #ifdef CONFIG_CIFS_STATS2
@@ -98224,7 +98223,7 @@ index fc537c2..47d654c 100644
  }
  
 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
-index 3525ed7..ac8afb7 100644
+index 505e6d6..7ef9511 100644
 --- a/fs/cifs/smb2ops.c
 +++ b/fs/cifs/smb2ops.c
 @@ -427,8 +427,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
@@ -102458,7 +102457,7 @@ index 9ea4219..68587e2 100644
  	put_cpu_var(last_ino);
  	return res;
 diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
-index 7007809..8b69fb9 100644
+index 78313ad..3f7bce4 100644
 --- a/fs/jbd2/commit.c
 +++ b/fs/jbd2/commit.c
 @@ -1077,7 +1077,7 @@ restart_loop:
@@ -109966,7 +109965,7 @@ index de1ff1d..bd4c347 100644
  		.pc_ressize = sizeof(struct nfsd4_compoundres),
  		.pc_release = nfsd4_release_compoundargs,
 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
-index 70d0b9b..351c298 100644
+index 806eda1..125792c 100644
 --- a/fs/nfsd/nfs4state.c
 +++ b/fs/nfsd/nfs4state.c
 @@ -2362,8 +2362,9 @@ static bool client_has_state(struct nfs4_client *clp)
@@ -110101,7 +110100,7 @@ index 70d0b9b..351c298 100644
  	struct nfsd4_test_stateid_id *stateid;
  	struct nfs4_client *cl = cstate->session->se_client;
  
-@@ -4908,8 +4920,9 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+@@ -4934,8 +4946,9 @@ out:
  
  __be32
  nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -110112,7 +110111,7 @@ index 70d0b9b..351c298 100644
  	stateid_t *stateid = &free_stateid->fr_stateid;
  	struct nfs4_stid *s;
  	struct nfs4_delegation *dp;
-@@ -5047,8 +5060,9 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
+@@ -5063,8 +5076,9 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
  
  __be32
  nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -110123,7 +110122,7 @@ index 70d0b9b..351c298 100644
  	__be32 status;
  	struct nfs4_openowner *oo;
  	struct nfs4_ol_stateid *stp;
-@@ -5116,8 +5130,9 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
+@@ -5132,8 +5146,9 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
  __be32
  nfsd4_open_downgrade(struct svc_rqst *rqstp,
  		     struct nfsd4_compound_state *cstate,
@@ -110134,7 +110133,7 @@ index 70d0b9b..351c298 100644
  	__be32 status;
  	struct nfs4_ol_stateid *stp;
  	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-@@ -5185,8 +5200,9 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
+@@ -5201,8 +5216,9 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
   */
  __be32
  nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -110145,7 +110144,7 @@ index 70d0b9b..351c298 100644
  	__be32 status;
  	struct nfs4_ol_stateid *stp;
  	struct net *net = SVC_NET(rqstp);
-@@ -5215,8 +5231,9 @@ out:
+@@ -5231,8 +5247,9 @@ out:
  
  __be32
  nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -110156,7 +110155,7 @@ index 70d0b9b..351c298 100644
  	struct nfs4_delegation *dp;
  	stateid_t *stateid = &dr->dr_stateid;
  	struct nfs4_stid *s;
-@@ -5550,8 +5567,9 @@ out:
+@@ -5583,8 +5600,9 @@ out:
   */
  __be32
  nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -110167,7 +110166,7 @@ index 70d0b9b..351c298 100644
  	struct nfs4_openowner *open_sop = NULL;
  	struct nfs4_lockowner *lock_sop = NULL;
  	struct nfs4_ol_stateid *lock_stp = NULL;
-@@ -5754,8 +5772,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
+@@ -5785,8 +5803,9 @@ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct
   */
  __be32
  nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -110178,7 +110177,7 @@ index 70d0b9b..351c298 100644
  	struct file_lock *file_lock = NULL;
  	struct nfs4_lockowner *lo = NULL;
  	__be32 status;
-@@ -5827,8 +5846,9 @@ out:
+@@ -5858,8 +5877,9 @@ out:
  
  __be32
  nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
@@ -110189,7 +110188,7 @@ index 70d0b9b..351c298 100644
  	struct nfs4_ol_stateid *stp;
  	struct file *filp = NULL;
  	struct file_lock *file_lock = NULL;
-@@ -5934,8 +5954,9 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+@@ -5965,8 +5985,9 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
  __be32
  nfsd4_release_lockowner(struct svc_rqst *rqstp,
  			struct nfsd4_compound_state *cstate,
@@ -110200,7 +110199,7 @@ index 70d0b9b..351c298 100644
  	clientid_t *clid = &rlockowner->rl_clientid;
  	struct nfs4_stateowner *sop;
  	struct nfs4_lockowner *lo = NULL;
-@@ -6879,26 +6900,34 @@ clear_current_stateid(struct nfsd4_compound_state *cstate)
+@@ -6910,26 +6931,34 @@ clear_current_stateid(struct nfsd4_compound_state *cstate)
   * functions to set current state id
   */
  void
@@ -110239,7 +110238,7 @@ index 70d0b9b..351c298 100644
  	put_stateid(cstate, &lock->lk_resp_stateid);
  }
  
-@@ -6907,49 +6936,65 @@ nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lo
+@@ -6938,49 +6967,65 @@ nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lo
   */
  
  void
@@ -113383,7 +113382,7 @@ index d1cdc60..38f2608 100644
  	if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
  		err = ovl_want_write(dentry);
 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
-index 9a7693d..ca30875 100644
+index 6db75cb..b2fe139 100644
 --- a/fs/overlayfs/super.c
 +++ b/fs/overlayfs/super.c
 @@ -196,7 +196,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
@@ -113395,7 +113394,7 @@ index 9a7693d..ca30875 100644
  }
  
  int ovl_want_write(struct dentry *dentry)
-@@ -952,8 +952,8 @@ static unsigned int ovl_split_lowerdirs(char *str)
+@@ -953,8 +953,8 @@ static unsigned int ovl_split_lowerdirs(char *str)
  
  static int ovl_fill_super(struct super_block *sb, void *data, int silent)
  {
@@ -137888,7 +137887,7 @@ index 92a7d85..1779570 100644
  		   const void *private_data,
  		   u8 private_data_len);
 diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
-index 7e440d4..2fc0a83 100644
+index e694f02..ea3d1e4 100644
 --- a/include/rdma/ib_verbs.h
 +++ b/include/rdma/ib_verbs.h
 @@ -1187,7 +1187,7 @@ struct ib_sge {
@@ -139353,19 +139352,10 @@ index 8d528f9..ec997a7 100644
  	.exit = audit_net_exit,
  	.id = &audit_net_id,
 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
-index 2672d10..63bd1ca 100644
+index b334128..579016e 100644
 --- a/kernel/auditsc.c
 +++ b/kernel/auditsc.c
-@@ -1023,7 +1023,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
- 	 * for strings that are too long, we should not have created
- 	 * any.
- 	 */
--	if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {
-+	if (WARN_ON_ONCE(len > MAX_ARG_STRLEN - 1)) {
- 		send_sig(SIGKILL, current, 0);
- 		return -1;
- 	}
-@@ -1954,7 +1954,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
+@@ -1950,7 +1950,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
  }
  
  /* global counter which is incremented every time something logs in */
@@ -139374,7 +139364,7 @@ index 2672d10..63bd1ca 100644
  
  static int audit_set_loginuid_perm(kuid_t loginuid)
  {
-@@ -2026,7 +2026,7 @@ int audit_set_loginuid(kuid_t loginuid)
+@@ -2022,7 +2022,7 @@ int audit_set_loginuid(kuid_t loginuid)
  
  	/* are we setting or clearing? */
  	if (uid_valid(loginuid))
@@ -139583,10 +139573,10 @@ index 45432b5..7d860f7 100644
 +}
 +EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index 75c0ff0..bf4be97 100644
+index e0be49f..d599a0e 100644
 --- a/kernel/cgroup.c
 +++ b/kernel/cgroup.c
-@@ -3634,7 +3634,7 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
+@@ -3632,7 +3632,7 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
  	key = &cft->lockdep_key;
  #endif
  	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
@@ -139595,7 +139585,7 @@ index 75c0ff0..bf4be97 100644
  				  NULL, key);
  	if (IS_ERR(kn))
  		return PTR_ERR(kn);
-@@ -3738,11 +3738,14 @@ static void cgroup_exit_cftypes(struct cftype *cfts)
+@@ -3736,11 +3736,14 @@ static void cgroup_exit_cftypes(struct cftype *cfts)
  		/* free copy for custom atomic_write_len, see init_cftypes() */
  		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
  			kfree(cft->kf_ops);
@@ -139613,7 +139603,7 @@ index 75c0ff0..bf4be97 100644
  	}
  }
  
-@@ -3773,8 +3776,10 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+@@ -3771,8 +3774,10 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
  			kf_ops->atomic_write_len = cft->max_write_len;
  		}
  
@@ -139626,7 +139616,7 @@ index 75c0ff0..bf4be97 100644
  	}
  
  	return 0;
-@@ -3787,7 +3792,7 @@ static int cgroup_rm_cftypes_locked(struct cftype *cfts)
+@@ -3785,7 +3790,7 @@ static int cgroup_rm_cftypes_locked(struct cftype *cfts)
  	if (!cfts || !cfts[0].ss)
  		return -ENOENT;
  
@@ -139635,7 +139625,7 @@ index 75c0ff0..bf4be97 100644
  	cgroup_apply_cftypes(cfts, false);
  	cgroup_exit_cftypes(cfts);
  	return 0;
-@@ -3844,7 +3849,7 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+@@ -3842,7 +3847,7 @@ static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
  
  	mutex_lock(&cgroup_mutex);
  
@@ -139644,7 +139634,7 @@ index 75c0ff0..bf4be97 100644
  	ret = cgroup_apply_cftypes(cfts, true);
  	if (ret)
  		cgroup_rm_cftypes_locked(cfts);
-@@ -3865,8 +3870,10 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+@@ -3863,8 +3868,10 @@ int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
  {
  	struct cftype *cft;
  
@@ -139656,7 +139646,7 @@ index 75c0ff0..bf4be97 100644
  	return cgroup_add_cftypes(ss, cfts);
  }
  
-@@ -3882,8 +3889,10 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+@@ -3880,8 +3887,10 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
  {
  	struct cftype *cft;
  
@@ -139668,7 +139658,7 @@ index 75c0ff0..bf4be97 100644
  	return cgroup_add_cftypes(ss, cfts);
  }
  
-@@ -6048,6 +6057,9 @@ static void cgroup_release_agent(struct work_struct *work)
+@@ -6049,6 +6058,9 @@ static void cgroup_release_agent(struct work_struct *work)
  	if (!pathbuf || !agentbuf)
  		goto out;
  
@@ -139678,7 +139668,7 @@ index 75c0ff0..bf4be97 100644
  	spin_lock_irq(&css_set_lock);
  	path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
  	spin_unlock_irq(&css_set_lock);
-@@ -6467,7 +6479,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
+@@ -6465,7 +6477,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
  		struct task_struct *task;
  		int count = 0;
  
@@ -141658,7 +141648,7 @@ index a0f61ef..b6aef3c 100644
  		seq_printf(m, "%40s %14lu %29s %pS\n",
  			   name, stats->contending_point[i],
 diff --git a/kernel/module.c b/kernel/module.c
-index 5f71aa6..cae0865 100644
+index 6458a2f..ebdeb641 100644
 --- a/kernel/module.c
 +++ b/kernel/module.c
 @@ -60,6 +60,7 @@
@@ -142212,7 +142202,7 @@ index 5f71aa6..cae0865 100644
  }
  #else
  static inline void layout_symtab(struct module *mod, struct load_info *info)
-@@ -2888,7 +2976,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
+@@ -2893,7 +2981,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
  	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
  
  	if (info->index.sym == 0) {
@@ -142228,7 +142218,7 @@ index 5f71aa6..cae0865 100644
  		return ERR_PTR(-ENOEXEC);
  	}
  
-@@ -2904,8 +3000,16 @@ static struct module *setup_load_info(struct load_info *info, int flags)
+@@ -2909,8 +3005,16 @@ static struct module *setup_load_info(struct load_info *info, int flags)
  static int check_modinfo(struct module *mod, struct load_info *info, int flags)
  {
  	const char *modmagic = get_modinfo(info, "vermagic");
@@ -142245,7 +142235,7 @@ index 5f71aa6..cae0865 100644
  	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
  		modmagic = NULL;
  
-@@ -2934,7 +3038,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
+@@ -2939,7 +3043,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
  		return err;
  
  	/* Set up license info based on the info section */
@@ -142254,7 +142244,7 @@ index 5f71aa6..cae0865 100644
  
  	return 0;
  }
-@@ -3031,7 +3135,7 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -3036,7 +3140,7 @@ static int move_module(struct module *mod, struct load_info *info)
  	void *ptr;
  
  	/* Do the allocs. */
@@ -142263,7 +142253,7 @@ index 5f71aa6..cae0865 100644
  	/*
  	 * The pointer to this block is stored in the module structure
  	 * which is inside the block. Just mark it as not being a
-@@ -3041,11 +3145,11 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -3046,11 +3150,11 @@ static int move_module(struct module *mod, struct load_info *info)
  	if (!ptr)
  		return -ENOMEM;
  
@@ -142279,7 +142269,7 @@ index 5f71aa6..cae0865 100644
  		/*
  		 * The pointer to this block is stored in the module structure
  		 * which is inside the block. This block doesn't need to be
-@@ -3054,13 +3158,45 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -3059,13 +3163,45 @@ static int move_module(struct module *mod, struct load_info *info)
  		 */
  		kmemleak_ignore(ptr);
  		if (!ptr) {
@@ -142329,7 +142319,7 @@ index 5f71aa6..cae0865 100644
  
  	/* Transfer each section which specifies SHF_ALLOC */
  	pr_debug("final section addresses:\n");
-@@ -3071,16 +3207,45 @@ static int move_module(struct module *mod, struct load_info *info)
+@@ -3076,16 +3212,45 @@ static int move_module(struct module *mod, struct load_info *info)
  		if (!(shdr->sh_flags & SHF_ALLOC))
  			continue;
  
@@ -142382,7 +142372,7 @@ index 5f71aa6..cae0865 100644
  		pr_debug("\t0x%lx %s\n",
  			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
  	}
-@@ -3137,12 +3302,12 @@ static void flush_module_icache(const struct module *mod)
+@@ -3142,12 +3307,12 @@ static void flush_module_icache(const struct module *mod)
  	 * Do it before processing of module parameters, so the module
  	 * can provide parameter accessor functions of its own.
  	 */
@@ -142401,7 +142391,7 @@ index 5f71aa6..cae0865 100644
  
  	set_fs(old_fs);
  }
-@@ -3200,8 +3365,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
+@@ -3205,8 +3370,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
  {
  	percpu_modfree(mod);
  	module_arch_freeing_init(mod);
@@ -142414,7 +142404,7 @@ index 5f71aa6..cae0865 100644
  }
  
  int __weak module_finalize(const Elf_Ehdr *hdr,
-@@ -3214,7 +3381,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
+@@ -3219,7 +3386,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
  static int post_relocation(struct module *mod, const struct load_info *info)
  {
  	/* Sort exception table now relocations are done. */
@@ -142424,7 +142414,7 @@ index 5f71aa6..cae0865 100644
  
  	/* Copy relocated percpu area over. */
  	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
-@@ -3262,13 +3431,15 @@ static void do_mod_ctors(struct module *mod)
+@@ -3267,13 +3436,15 @@ static void do_mod_ctors(struct module *mod)
  /* For freeing module_init on success, in case kallsyms traversing */
  struct mod_initfree {
  	struct rcu_head rcu;
@@ -142442,7 +142432,7 @@ index 5f71aa6..cae0865 100644
  	kfree(m);
  }
  
-@@ -3288,7 +3459,8 @@ static noinline int do_init_module(struct module *mod)
+@@ -3293,7 +3464,8 @@ static noinline int do_init_module(struct module *mod)
  		ret = -ENOMEM;
  		goto fail;
  	}
@@ -142452,7 +142442,7 @@ index 5f71aa6..cae0865 100644
  
  	/*
  	 * We want to find out whether @mod uses async during init.  Clear
-@@ -3347,10 +3519,10 @@ static noinline int do_init_module(struct module *mod)
+@@ -3352,10 +3524,10 @@ static noinline int do_init_module(struct module *mod)
  	mod_tree_remove_init(mod);
  	disable_ro_nx(&mod->init_layout);
  	module_arch_freeing_init(mod);
@@ -142467,7 +142457,7 @@ index 5f71aa6..cae0865 100644
  	/*
  	 * We want to free module_init, but be aware that kallsyms may be
  	 * walking this with preempt disabled.  In all the failure paths, we
-@@ -3550,9 +3722,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3555,9 +3727,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
  	if (err)
  		goto free_unload;
  
@@ -142506,7 +142496,7 @@ index 5f71aa6..cae0865 100644
  	/* Fix up syms, so that st_value is a pointer to location. */
  	err = simplify_symbols(mod, info);
  	if (err < 0)
-@@ -3568,13 +3769,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3573,13 +3774,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
  
  	flush_module_icache(mod);
  
@@ -142520,7 +142510,7 @@ index 5f71aa6..cae0865 100644
  	dynamic_debug_setup(info->debug, info->num_debug);
  
  	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
-@@ -3639,11 +3833,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3644,11 +3838,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
   ddebug_cleanup:
  	dynamic_debug_remove(info->debug);
  	synchronize_sched();
@@ -142533,7 +142523,7 @@ index 5f71aa6..cae0865 100644
   free_unload:
  	module_unload_free(mod);
   unlink_mod:
-@@ -3663,7 +3856,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3668,7 +3861,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
  	 */
  	ftrace_release_mod(mod);
  	/* Free lock-classes; relies on the preceding sync_rcu() */
@@ -142543,7 +142533,7 @@ index 5f71aa6..cae0865 100644
  
  	module_deallocate(mod, info);
   free_copy:
-@@ -3751,10 +3945,16 @@ static const char *get_ksymbol(struct module *mod,
+@@ -3756,10 +3950,16 @@ static const char *get_ksymbol(struct module *mod,
  	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
  
  	/* At worse, next value is at end of module */
@@ -142563,7 +142553,7 @@ index 5f71aa6..cae0865 100644
  
  	/* Scan for closest preceding symbol, and next symbol. (ELF
  	   starts real symbols at 1). */
-@@ -4007,7 +4207,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -4012,7 +4212,7 @@ static int m_show(struct seq_file *m, void *p)
  		return 0;
  
  	seq_printf(m, "%s %u",
@@ -142572,7 +142562,7 @@ index 5f71aa6..cae0865 100644
  	print_unload_info(m, mod);
  
  	/* Informative for users. */
-@@ -4016,7 +4216,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -4021,7 +4221,7 @@ static int m_show(struct seq_file *m, void *p)
  		   mod->state == MODULE_STATE_COMING ? "Loading" :
  		   "Live");
  	/* Used by oprofile and other similar tools. */
@@ -142581,7 +142571,7 @@ index 5f71aa6..cae0865 100644
  
  	/* Taints info */
  	if (mod->taints)
-@@ -4052,7 +4252,17 @@ static const struct file_operations proc_modules_operations = {
+@@ -4057,7 +4257,17 @@ static const struct file_operations proc_modules_operations = {
  
  static int __init proc_modules_init(void)
  {
@@ -142599,7 +142589,7 @@ index 5f71aa6..cae0865 100644
  	return 0;
  }
  module_init(proc_modules_init);
-@@ -4113,7 +4323,8 @@ struct module *__module_address(unsigned long addr)
+@@ -4118,7 +4328,8 @@ struct module *__module_address(unsigned long addr)
  {
  	struct module *mod;
  
@@ -142609,7 +142599,7 @@ index 5f71aa6..cae0865 100644
  		return NULL;
  
  	module_assert_mutex_or_preempt();
-@@ -4156,11 +4367,21 @@ bool is_module_text_address(unsigned long addr)
+@@ -4161,11 +4372,21 @@ bool is_module_text_address(unsigned long addr)
   */
  struct module *__module_text_address(unsigned long addr)
  {
@@ -147557,7 +147547,7 @@ index 22f4cd9..402f316 100644
  	depends on !KMEMCHECK
  	select PAGE_EXTENSION
 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
-index ed173b8..df5f896 100644
+index 9269911..f53557a 100644
 --- a/mm/backing-dev.c
 +++ b/mm/backing-dev.c
 @@ -12,7 +12,7 @@
@@ -147569,7 +147559,7 @@ index ed173b8..df5f896 100644
  
  struct backing_dev_info noop_backing_dev_info = {
  	.name		= "noop",
-@@ -879,7 +879,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
+@@ -898,7 +898,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
  		return err;
  
  	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
@@ -147718,7 +147708,7 @@ index 50b4ca6..cf64608 100644
  	pkmap_count[last_pkmap_nr] = 1;
  	set_page_address(page, (void *)vaddr);
 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index addfe4ac..bc76631 100644
+index d9ec1a5..70615e1 100644
 --- a/mm/hugetlb.c
 +++ b/mm/hugetlb.c
 @@ -38,7 +38,72 @@ int hugepages_treat_as_movable;
@@ -147795,7 +147785,7 @@ index addfe4ac..bc76631 100644
  /*
   * Minimum page order among possible hugepage sizes, set to a proper value
   * at boot time.
-@@ -2800,6 +2865,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+@@ -2804,6 +2869,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
  			 struct ctl_table *table, int write,
  			 void __user *buffer, size_t *length, loff_t *ppos)
  {
@@ -147803,7 +147793,7 @@ index addfe4ac..bc76631 100644
  	struct hstate *h = &default_hstate;
  	unsigned long tmp = h->max_huge_pages;
  	int ret;
-@@ -2807,9 +2873,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+@@ -2811,9 +2877,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
  	if (!hugepages_supported())
  		return -EOPNOTSUPP;
  
@@ -147817,7 +147807,7 @@ index addfe4ac..bc76631 100644
  	if (ret)
  		goto out;
  
-@@ -2844,6 +2911,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+@@ -2848,6 +2915,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
  	struct hstate *h = &default_hstate;
  	unsigned long tmp;
  	int ret;
@@ -147825,7 +147815,7 @@ index addfe4ac..bc76631 100644
  
  	if (!hugepages_supported())
  		return -EOPNOTSUPP;
-@@ -2853,9 +2921,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+@@ -2857,9 +2925,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
  	if (write && hstate_is_gigantic(h))
  		return -EINVAL;
  
@@ -147839,7 +147829,7 @@ index addfe4ac..bc76631 100644
  	if (ret)
  		goto out;
  
-@@ -3361,6 +3430,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3365,6 +3434,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  	i_mmap_unlock_write(mapping);
  }
  
@@ -147867,7 +147857,7 @@ index addfe4ac..bc76631 100644
  /*
   * Hugetlb_cow() should be called with page lock of the original hugepage held.
   * Called with hugetlb_instantiation_mutex held and pte_page locked so we
-@@ -3474,6 +3564,11 @@ retry_avoidcopy:
+@@ -3478,6 +3568,11 @@ retry_avoidcopy:
  				make_huge_pte(vma, new_page, 1));
  		page_remove_rmap(old_page, true);
  		hugepage_add_new_anon_rmap(new_page, vma, address);
@@ -147879,7 +147869,7 @@ index addfe4ac..bc76631 100644
  		/* Make the old page be freed below */
  		new_page = old_page;
  	}
-@@ -3647,6 +3742,10 @@ retry:
+@@ -3651,6 +3746,10 @@ retry:
  				&& (vma->vm_flags & VM_SHARED)));
  	set_huge_pte_at(mm, address, ptep, new_pte);
  
@@ -147890,7 +147880,7 @@ index addfe4ac..bc76631 100644
  	hugetlb_count_add(pages_per_huge_page(h), mm);
  	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
  		/* Optimization, do the COW without a second fault */
-@@ -3715,6 +3814,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3719,6 +3818,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	struct address_space *mapping;
  	int need_wait_lock = 0;
  
@@ -147901,7 +147891,7 @@ index addfe4ac..bc76631 100644
  	address &= huge_page_mask(h);
  
  	ptep = huge_pte_offset(mm, address);
-@@ -3732,6 +3835,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3736,6 +3839,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  			return VM_FAULT_OOM;
  	}
  
@@ -153892,7 +153882,7 @@ index eb4f5f2..2cc4c50 100644
  			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
  			    rfc.mode != chan->mode)
 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
-index 388ee8b..9edb2bc 100644
+index 1842141..1562f36 100644
 --- a/net/bluetooth/l2cap_sock.c
 +++ b/net/bluetooth/l2cap_sock.c
 @@ -633,7 +633,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
@@ -185457,10 +185447,10 @@ index 0000000..00c7430
 +}
 diff --git a/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash.data b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash.data
 new file mode 100644
-index 0000000..d64cc64
+index 0000000..9d7e744
 --- /dev/null
 +++ b/scripts/gcc-plugins/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,22254 @@
+@@ -0,0 +1,22256 @@
 +enable_so_recv_ctrl_pipe_us_data_0 recv_ctrl_pipe us_data 0 0 NULL
 +enable_so___earlyonly_bootmem_alloc_fndecl_3 __earlyonly_bootmem_alloc fndecl 2-3-4 3 NULL
 +enable_so_v9fs_xattr_get_acl_fndecl_4 v9fs_xattr_get_acl fndecl 5 4 NULL
@@ -198490,7 +198480,8 @@ index 0000000..d64cc64
 +enable_so__osd_req_cdb_len_fndecl_38259 _osd_req_cdb_len fndecl 0 38259 NULL
 +enable_so_security_set_fndecl_38262 security_set fndecl 5 38262 NULL
 +enable_so_i2c2_debugfs_read_fndecl_38270 i2c2_debugfs_read fndecl 3 38270 NULL
-+enable_so_prepare_header100_fndecl_38275 prepare_header100 fndecl 3-0 38275 NULL
++enable_so_prepare_header100_fndecl_38275 prepare_header100 fndecl 3-0 38275 NULL nohasharray
++enable_so_max_read_sge_ib_qp_38275 max_read_sge ib_qp 0 38275 &enable_so_prepare_header100_fndecl_38275
 +enable_so_min_pnp_port_38276 min pnp_port 0 38276 NULL
 +enable_so_numpmkid_host_if_pmkid_attr_38277 numpmkid host_if_pmkid_attr 0 38277 NULL nohasharray
 +enable_so_irnet_connect_confirm_fndecl_38277 irnet_connect_confirm fndecl 5 38277 &enable_so_numpmkid_host_if_pmkid_attr_38277
@@ -202841,6 +202832,7 @@ index 0000000..d64cc64
 +enable_so_new_read_fndecl_51123 new_read fndecl 2 51123 NULL
 +enable_so_adau1373_set_pll_fndecl_51130 adau1373_set_pll fndecl 4-5 51130 NULL
 +enable_so_igb_pci_enable_sriov_fndecl_51131 igb_pci_enable_sriov fndecl 2 51131 NULL
++enable_so_max_write_sge_ib_qp_51132 max_write_sge ib_qp 0 51132 NULL
 +enable_so_wqe_cnt_mlx4_ib_wq_51135 wqe_cnt mlx4_ib_wq 0 51135 NULL
 +enable_so_cachelsz_ath_common_51138 cachelsz ath_common 0 51138 NULL
 +enable_so_length_sky2_status_le_51139 length sky2_status_le 0 51139 NULL
@@ -216037,7 +216029,7 @@ index 0a578fe..b81f62d 100644
  })
  
 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 48bd520..88f2088 100644
+index dd25346..cf22106 100644
 --- a/virt/kvm/kvm_main.c
 +++ b/virt/kvm/kvm_main.c
 @@ -93,12 +93,17 @@ LIST_HEAD(vm_list);
@@ -216060,7 +216052,7 @@ index 48bd520..88f2088 100644
  
  struct dentry *kvm_debugfs_dir;
  EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
-@@ -902,7 +907,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+@@ -904,7 +909,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
  	/* We can read the guest memory with __xxx_user() later on. */
  	if ((id < KVM_USER_MEM_SLOTS) &&
  	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
@@ -216069,7 +216061,7 @@ index 48bd520..88f2088 100644
  			(void __user *)(unsigned long)mem->userspace_addr,
  			mem->memory_size)))
  		goto out;
-@@ -1964,9 +1969,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+@@ -1966,9 +1971,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
  
  int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
  {
@@ -216089,7 +216081,7 @@ index 48bd520..88f2088 100644
  }
  EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
  
-@@ -2317,7 +2330,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
+@@ -2319,7 +2332,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
  	return 0;
  }
  
@@ -216098,7 +216090,7 @@ index 48bd520..88f2088 100644
  	.release        = kvm_vcpu_release,
  	.unlocked_ioctl = kvm_vcpu_ioctl,
  #ifdef CONFIG_KVM_COMPAT
-@@ -3035,7 +3048,7 @@ out:
+@@ -3037,7 +3050,7 @@ out:
  }
  #endif
  
@@ -216107,7 +216099,7 @@ index 48bd520..88f2088 100644
  	.release        = kvm_vm_release,
  	.unlocked_ioctl = kvm_vm_ioctl,
  #ifdef CONFIG_KVM_COMPAT
-@@ -3113,7 +3126,7 @@ out:
+@@ -3115,7 +3128,7 @@ out:
  	return r;
  }
  
@@ -216116,7 +216108,7 @@ index 48bd520..88f2088 100644
  	.unlocked_ioctl = kvm_dev_ioctl,
  	.compat_ioctl   = kvm_dev_ioctl,
  	.llseek		= noop_llseek,
-@@ -3139,7 +3152,7 @@ static void hardware_enable_nolock(void *junk)
+@@ -3141,7 +3154,7 @@ static void hardware_enable_nolock(void *junk)
  
  	if (r) {
  		cpumask_clear_cpu(cpu, cpus_hardware_enabled);
@@ -216125,7 +216117,7 @@ index 48bd520..88f2088 100644
  		pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
  	}
  }
-@@ -3194,10 +3207,10 @@ static int hardware_enable_all(void)
+@@ -3196,10 +3209,10 @@ static int hardware_enable_all(void)
  
  	kvm_usage_count++;
  	if (kvm_usage_count == 1) {
@@ -216138,7 +216130,7 @@ index 48bd520..88f2088 100644
  			hardware_disable_all_nolock();
  			r = -EBUSY;
  		}
-@@ -3752,7 +3765,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3754,7 +3767,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
  	if (!vcpu_align)
  		vcpu_align = __alignof__(struct kvm_vcpu);
  	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
@@ -216147,7 +216139,7 @@ index 48bd520..88f2088 100644
  	if (!kvm_vcpu_cache) {
  		r = -ENOMEM;
  		goto out_free_3;
-@@ -3762,9 +3775,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3764,9 +3777,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
  	if (r)
  		goto out_free;
  
@@ -216159,7 +216151,7 @@ index 48bd520..88f2088 100644
  
  	r = misc_register(&kvm_dev);
  	if (r) {
-@@ -3774,9 +3789,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -3776,9 +3791,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
  
  	register_syscore_ops(&kvm_syscore_ops);
  

diff --git a/4.7.1/4425_grsec_remove_EI_PAX.patch b/4.7.2/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 4.7.1/4425_grsec_remove_EI_PAX.patch
rename to 4.7.2/4425_grsec_remove_EI_PAX.patch

diff --git a/4.7.1/4427_force_XATTR_PAX_tmpfs.patch b/4.7.2/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 4.7.1/4427_force_XATTR_PAX_tmpfs.patch
rename to 4.7.2/4427_force_XATTR_PAX_tmpfs.patch

diff --git a/4.7.1/4430_grsec-remove-localversion-grsec.patch b/4.7.2/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 4.7.1/4430_grsec-remove-localversion-grsec.patch
rename to 4.7.2/4430_grsec-remove-localversion-grsec.patch

diff --git a/4.7.1/4435_grsec-mute-warnings.patch b/4.7.2/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 4.7.1/4435_grsec-mute-warnings.patch
rename to 4.7.2/4435_grsec-mute-warnings.patch

diff --git a/4.7.1/4440_grsec-remove-protected-paths.patch b/4.7.2/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 4.7.1/4440_grsec-remove-protected-paths.patch
rename to 4.7.2/4440_grsec-remove-protected-paths.patch

diff --git a/4.7.1/4450_grsec-kconfig-default-gids.patch b/4.7.2/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 4.7.1/4450_grsec-kconfig-default-gids.patch
rename to 4.7.2/4450_grsec-kconfig-default-gids.patch

diff --git a/4.7.1/4465_selinux-avc_audit-log-curr_ip.patch b/4.7.2/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 4.7.1/4465_selinux-avc_audit-log-curr_ip.patch
rename to 4.7.2/4465_selinux-avc_audit-log-curr_ip.patch

diff --git a/4.7.1/4470_disable-compat_vdso.patch b/4.7.2/4470_disable-compat_vdso.patch
similarity index 100%
rename from 4.7.1/4470_disable-compat_vdso.patch
rename to 4.7.2/4470_disable-compat_vdso.patch

diff --git a/4.7.1/4475_emutramp_default_on.patch b/4.7.2/4475_emutramp_default_on.patch
similarity index 100%
rename from 4.7.1/4475_emutramp_default_on.patch
rename to 4.7.2/4475_emutramp_default_on.patch


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2016-08-22 10:17 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-08-22 10:17 [gentoo-commits] proj/hardened-patchset:master commit in: 4.7.1/, 4.7.2/ Anthony G. Basile

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox