From: "Alice Ferrazzi" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Content-Transfer-Encoding: 8bit
Content-type: text/plain; charset=UTF-8
Reply-To: gentoo-dev@lists.gentoo.org, "Alice Ferrazzi" <alicef@gentoo.org>
Message-ID: <1520613479.278b0fa2dd63fef732577b7d90ad5629e92b2596.alicef@gentoo>
Subject: [gentoo-commits] proj/linux-patches:4.15 commit in: /
X-VCS-Repository: proj/linux-patches
X-VCS-Files: 0000_README 1007_linux-4.15.8.patch
X-VCS-Directories: /
X-VCS-Committer: alicef
X-VCS-Committer-Name: Alice Ferrazzi
X-VCS-Revision: 278b0fa2dd63fef732577b7d90ad5629e92b2596
X-VCS-Branch: 4.15
Date: Fri,  9 Mar 2018 16:38:19 +0000 (UTC)

commit:     278b0fa2dd63fef732577b7d90ad5629e92b2596
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  9 16:37:59 2018 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Mar  9 16:37:59 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=278b0fa2

Linux kernel 4.15.8

 0000_README             |    4 +
 1007_linux-4.15.8.patch | 4922 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4926 insertions(+)

diff --git a/0000_README b/0000_README
index 994c735..bd1cdaf 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-4.15.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.15.7
 
+Patch:  1007_linux-4.15.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.15.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-4.15.8.patch b/1007_linux-4.15.8.patch
new file mode 100644
index 0000000..27ea8d6
--- /dev/null
+++ b/1007_linux-4.15.8.patch
@@ -0,0 +1,4922 @@
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index 46c7e1085efc..e269541a7d10 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -508,7 +508,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
+ 	min: Minimal size of receive buffer used by TCP sockets.
+ 	It is guaranteed to each TCP socket, even under moderate memory
+ 	pressure.
+-	Default: 1 page
++	Default: 4K
+ 
+ 	default: initial size of receive buffer used by TCP sockets.
+ 	This value overrides net.core.rmem_default used by other protocols.
+@@ -666,7 +666,7 @@ tcp_window_scaling - BOOLEAN
+ tcp_wmem - vector of 3 INTEGERs: min, default, max
+ 	min: Amount of memory reserved for send buffers for TCP sockets.
+ 	Each TCP socket has rights to use it due to fact of its birth.
+-	Default: 1 page
++	Default: 4K
+ 
+ 	default: initial size of send buffer used by TCP sockets.  This
+ 	value overrides net.core.wmem_default used by other protocols.
+diff --git a/Makefile b/Makefile
+index 49f524444050..eb18d200a603 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 15
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+index 29cb804d10cc..06cce72508a2 100644
+--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
++++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+@@ -98,6 +98,8 @@
+ };
+ 
+ &i2c1 {
++	pinctrl-names = "default";
++	pinctrl-0 = <&i2c1_pins>;
+ 	clock-frequency = <2600000>;
+ 
+ 	twl: twl@48 {
+@@ -216,7 +218,12 @@
+ 		>;
+ 	};
+ 
+-
++	i2c1_pins: pinmux_i2c1_pins {
++		pinctrl-single,pins = <
++			OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)        /* i2c1_scl.i2c1_scl */
++			OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)        /* i2c1_sda.i2c1_sda */
++		>;
++	};
+ };
+ 
+ &omap3_pmx_wkup {
+diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+index 6d89736c7b44..cf22b35f0a28 100644
+--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
++++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+@@ -104,6 +104,8 @@
+ };
+ 
+ &i2c1 {
++	pinctrl-names = "default";
++	pinctrl-0 = <&i2c1_pins>;
+ 	clock-frequency = <2600000>;
+ 
+ 	twl: twl@48 {
+@@ -211,6 +213,12 @@
+ 			OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0)	/* hsusb0_data7.hsusb0_data7 */
+ 		>;
+ 	};
++	i2c1_pins: pinmux_i2c1_pins {
++		pinctrl-single,pins = <
++			OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)        /* i2c1_scl.i2c1_scl */
++			OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)        /* i2c1_sda.i2c1_sda */
++		>;
++	};
+ };
+ 
+ &uart2 {
+diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
+index 99cfae875e12..5eae4776ffde 100644
+--- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi
++++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
+@@ -110,26 +110,6 @@
+ 	};
+ };
+ 
+-&cpu0 {
+-	cpu0-supply = <&vdd_cpu>;
+-	operating-points = <
+-		/* KHz    uV */
+-		1800000	1400000
+-		1608000	1350000
+-		1512000 1300000
+-		1416000 1200000
+-		1200000 1100000
+-		1008000 1050000
+-		 816000 1000000
+-		 696000  950000
+-		 600000  900000
+-		 408000  900000
+-		 312000  900000
+-		 216000  900000
+-		 126000  900000
+-	>;
+-};
+-
+ &emmc {
+ 	status = "okay";
+ 	bus-width = <8>;
+diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
+index 5638ce0c9524..63d6b404d88e 100644
+--- a/arch/arm/kvm/hyp/Makefile
++++ b/arch/arm/kvm/hyp/Makefile
+@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
+ 
+ KVM=../../../../virt/kvm
+ 
++CFLAGS_ARMV7VE		   :=$(call cc-option, -march=armv7ve)
++
+ obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+ obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
+ obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+ obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
+ obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
+ obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
++CFLAGS_banked-sr.o	   += $(CFLAGS_ARMV7VE)
++
+ obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+ obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
+ obj-$(CONFIG_KVM_ARM_HOST) += switch.o
++CFLAGS_switch.o		   += $(CFLAGS_ARMV7VE)
+ obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
+diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
+index 111bda8cdebd..be4b8b0a40ad 100644
+--- a/arch/arm/kvm/hyp/banked-sr.c
++++ b/arch/arm/kvm/hyp/banked-sr.c
+@@ -20,6 +20,10 @@
+ 
+ #include <asm/kvm_hyp.h>
+ 
++/*
++ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
++ * trick the assembler.
++ */
+ __asm__(".arch_extension     virt");
+ 
+ void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
+diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
+index 9b49867154bf..63fa79f9f121 100644
+--- a/arch/arm/mach-mvebu/Kconfig
++++ b/arch/arm/mach-mvebu/Kconfig
+@@ -42,7 +42,7 @@ config MACH_ARMADA_375
+ 	depends on ARCH_MULTI_V7
+ 	select ARMADA_370_XP_IRQ
+ 	select ARM_ERRATA_720789
+-	select ARM_ERRATA_753970
++	select PL310_ERRATA_753970
+ 	select ARM_GIC
+ 	select ARMADA_375_CLK
+ 	select HAVE_ARM_SCU
+@@ -58,7 +58,7 @@ config MACH_ARMADA_38X
+ 	bool "Marvell Armada 380/385 boards"
+ 	depends on ARCH_MULTI_V7
+ 	select ARM_ERRATA_720789
+-	select ARM_ERRATA_753970
++	select PL310_ERRATA_753970
+ 	select ARM_GIC
+ 	select ARM_GLOBAL_TIMER
+ 	select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index aff6994950ba..a2399fd66e97 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
+ /*****************************************************************************
+  * Ethernet switch
+  ****************************************************************************/
+-static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii";
+-static __initdata struct mdio_board_info
+-		  orion_ge00_switch_board_info;
++static __initdata struct mdio_board_info orion_ge00_switch_board_info = {
++	.bus_id   = "orion-mii",
++	.modalias = "mv88e6085",
++};
+ 
+ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
+ {
+-	struct mdio_board_info *bd;
+ 	unsigned int i;
+ 
+ 	if (!IS_BUILTIN(CONFIG_PHYLIB))
+ 		return;
+ 
+-	for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
+-		if (!strcmp(d->port_names[i], "cpu"))
++	for (i = 0; i < ARRAY_SIZE(d->port_names); i++) {
++		if (!strcmp(d->port_names[i], "cpu")) {
++			d->netdev[i] = &orion_ge00.dev;
+ 			break;
++		}
++	}
+ 
+-	bd = &orion_ge00_switch_board_info;
+-	bd->bus_id = orion_ge00_mvmdio_bus_name;
+-	bd->mdio_addr = d->sw_addr;
+-	d->netdev[i] = &orion_ge00.dev;
+-	strcpy(bd->modalias, "mv88e6085");
+-	bd->platform_data = d;
++	orion_ge00_switch_board_info.mdio_addr = d->sw_addr;
++	orion_ge00_switch_board_info.platform_data = d;
+ 
+ 	mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
+ }
+diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
+index 3742508cc534..bd5ce31936f5 100644
+--- a/arch/parisc/include/asm/cacheflush.h
++++ b/arch/parisc/include/asm/cacheflush.h
+@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
+ void flush_kernel_icache_range_asm(unsigned long, unsigned long);
+ void flush_user_dcache_range_asm(unsigned long, unsigned long);
+ void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
++void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
+ void flush_kernel_dcache_page_asm(void *);
+ void flush_kernel_icache_page(void *);
+ 
+diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
+index 0e6ab6e4a4e9..2dbe5580a1a4 100644
+--- a/arch/parisc/include/asm/processor.h
++++ b/arch/parisc/include/asm/processor.h
+@@ -316,6 +316,8 @@ extern int _parisc_requires_coherency;
+ #define parisc_requires_coherency()	(0)
+ #endif
+ 
++extern int running_on_qemu;
++
+ #endif /* __ASSEMBLY__ */
+ 
+ #endif /* __ASM_PARISC_PROCESSOR_H */
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 19c0c141bc3f..79089778725b 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page);
+ int __flush_tlb_range(unsigned long sid, unsigned long start,
+ 		      unsigned long end)
+ {
+-	unsigned long flags, size;
++	unsigned long flags;
+ 
+-	size = (end - start);
+-	if (size >= parisc_tlb_flush_threshold) {
++	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
++	    end - start >= parisc_tlb_flush_threshold) {
+ 		flush_tlb_all();
+ 		return 1;
+ 	}
+@@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm)
+ 	struct vm_area_struct *vma;
+ 	pgd_t *pgd;
+ 
+-	/* Flush the TLB to avoid speculation if coherency is required. */
+-	if (parisc_requires_coherency())
+-		flush_tlb_all();
+-
+ 	/* Flushing the whole cache on each cpu takes forever on
+ 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
+-	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
++	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
++	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
++		flush_tlb_all();
+ 		flush_cache_all();
+ 		return;
+ 	}
+@@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm)
+ 	if (mm->context == mfsp(3)) {
+ 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ 			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+-			if ((vma->vm_flags & VM_EXEC) == 0)
+-				continue;
+-			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
++			if (vma->vm_flags & VM_EXEC)
++				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
++			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
+ 		}
+ 		return;
+ 	}
+@@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm)
+ void flush_cache_range(struct vm_area_struct *vma,
+ 		unsigned long start, unsigned long end)
+ {
+-	BUG_ON(!vma->vm_mm->context);
+-
+-	/* Flush the TLB to avoid speculation if coherency is required. */
+-	if (parisc_requires_coherency())
++	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
++	    end - start >= parisc_cache_flush_threshold) {
+ 		flush_tlb_range(vma, start, end);
+-
+-	if ((end - start) >= parisc_cache_flush_threshold
+-	    || vma->vm_mm->context != mfsp(3)) {
+ 		flush_cache_all();
+ 		return;
+ 	}
+@@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_struct *vma,
+ 	flush_user_dcache_range_asm(start, end);
+ 	if (vma->vm_flags & VM_EXEC)
+ 		flush_user_icache_range_asm(start, end);
++	flush_tlb_range(vma, start, end);
+ }
+ 
+ void
+@@ -604,8 +598,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
+ 	BUG_ON(!vma->vm_mm->context);
+ 
+ 	if (pfn_valid(pfn)) {
+-		if (parisc_requires_coherency())
+-			flush_tlb_page(vma, vmaddr);
++		flush_tlb_page(vma, vmaddr);
+ 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ 	}
+ }
+@@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
+ void flush_kernel_vmap_range(void *vaddr, int size)
+ {
+ 	unsigned long start = (unsigned long)vaddr;
++	unsigned long end = start + size;
+ 
+-	if ((unsigned long)size > parisc_cache_flush_threshold)
++	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
++	    (unsigned long)size >= parisc_cache_flush_threshold) {
++		flush_tlb_kernel_range(start, end);
+ 		flush_data_cache();
+-	else
+-		flush_kernel_dcache_range_asm(start, start + size);
++		return;
++	}
++
++	flush_kernel_dcache_range_asm(start, end);
++	flush_tlb_kernel_range(start, end);
+ }
+ EXPORT_SYMBOL(flush_kernel_vmap_range);
+ 
+ void invalidate_kernel_vmap_range(void *vaddr, int size)
+ {
+ 	unsigned long start = (unsigned long)vaddr;
++	unsigned long end = start + size;
+ 
+-	if ((unsigned long)size > parisc_cache_flush_threshold)
++	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
++	    (unsigned long)size >= parisc_cache_flush_threshold) {
++		flush_tlb_kernel_range(start, end);
+ 		flush_data_cache();
+-	else
+-		flush_kernel_dcache_range_asm(start, start + size);
++		return;
++	}
++
++	purge_kernel_dcache_range_asm(start, end);
++	flush_tlb_kernel_range(start, end);
+ }
+ EXPORT_SYMBOL(invalidate_kernel_vmap_range);
+diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
+index 2d40c4ff3f69..67b0f7532e83 100644
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
+ 	.procend
+ ENDPROC_CFI(flush_kernel_dcache_range_asm)
+ 
++ENTRY_CFI(purge_kernel_dcache_range_asm)
++	.proc
++	.callinfo NO_CALLS
++	.entry
++
++	ldil		L%dcache_stride, %r1
++	ldw		R%dcache_stride(%r1), %r23
++	ldo		-1(%r23), %r21
++	ANDCM		%r26, %r21, %r26
++
++1:      cmpb,COND(<<),n	%r26, %r25,1b
++	pdc,m		%r23(%r26)
++
++	sync
++	syncdma
++	bv		%r0(%r2)
++	nop
++	.exit
++
++	.procend
++ENDPROC_CFI(purge_kernel_dcache_range_asm)
++
+ ENTRY_CFI(flush_user_icache_range_asm)
+ 	.proc
+ 	.callinfo NO_CALLS
+diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
+index 4b8fd6dc22da..f7e684560186 100644
+--- a/arch/parisc/kernel/time.c
++++ b/arch/parisc/kernel/time.c
+@@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
+ 	next_tick = cpuinfo->it_value;
+ 
+ 	/* Calculate how many ticks have elapsed. */
++	now = mfctl(16);
+ 	do {
+ 		++ticks_elapsed;
+ 		next_tick += cpt;
+-		now = mfctl(16);
+ 	} while (next_tick - now > cpt);
+ 
+ 	/* Store (in CR16 cycles) up to when we are accounting right now. */
+@@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
+ 	 * if one or the other wrapped. If "now" is "bigger" we'll end up
+ 	 * with a very large unsigned number.
+ 	 */
+-	while (next_tick - mfctl(16) > cpt)
++	now = mfctl(16);
++	while (next_tick - now > cpt)
+ 		next_tick += cpt;
+ 
+ 	/* Program the IT when to deliver the next interrupt.
+ 	 * Only bottom 32-bits of next_tick are writable in CR16!
+ 	 * Timer interrupt will be delivered at least a few hundred cycles
+-	 * after the IT fires, so if we are too close (<= 500 cycles) to the
++	 * after the IT fires, so if we are too close (<= 8000 cycles) to the
+ 	 * next cycle, simply skip it.
+ 	 */
+-	if (next_tick - mfctl(16) <= 500)
++	if (next_tick - now <= 8000)
+ 		next_tick += cpt;
+ 	mtctl(next_tick, 16);
+ 
+@@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void)
+ 	 * different sockets, so mark them unstable and lower rating on
+ 	 * multi-socket SMP systems.
+ 	 */
+-	if (num_online_cpus() > 1) {
++	if (num_online_cpus() > 1 && !running_on_qemu) {
+ 		int cpu;
+ 		unsigned long cpu0_loc;
+ 		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
+diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
+index 48f41399fc0b..cab32ee824d2 100644
+--- a/arch/parisc/mm/init.c
++++ b/arch/parisc/mm/init.c
+@@ -629,7 +629,12 @@ void __init mem_init(void)
+ #endif
+ 
+ 	mem_init_print_info(NULL);
+-#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
++
++#if 0
++	/*
++	 * Do not expose the virtual kernel memory layout to userspace.
++	 * But keep code for debugging purposes.
++	 */
+ 	printk("virtual kernel memory layout:\n"
+ 	       "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
+ 	       "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
+diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
+index 17ae5c15a9e0..804ba030d859 100644
+--- a/arch/powerpc/mm/pgtable-radix.c
++++ b/arch/powerpc/mm/pgtable-radix.c
+@@ -21,6 +21,7 @@
+ 
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
++#include <asm/mmu_context.h>
+ #include <asm/dma.h>
+ #include <asm/machdep.h>
+ #include <asm/mmu.h>
+@@ -334,6 +335,22 @@ static void __init radix_init_pgtable(void)
+ 		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
+ 	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ 	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
++
++	/*
++	 * The init_mm context is given the first available (non-zero) PID,
++	 * which is the "guard PID" and contains no page table. PIDR should
++	 * never be set to zero because that duplicates the kernel address
++	 * space at the 0x0... offset (quadrant 0)!
++	 *
++	 * An arbitrary PID that may later be allocated by the PID allocator
++	 * for userspace processes must not be used either, because that
++	 * would cause stale user mappings for that PID on CPUs outside of
++	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
++	 *
++	 * So permanently carve out one PID for the purpose of a guard PID.
++	 */
++	init_mm.context.id = mmu_base_pid;
++	mmu_base_pid++;
+ }
+ 
+ static void __init radix_init_partition_table(void)
+@@ -580,6 +597,8 @@ void __init radix__early_init_mmu(void)
+ 
+ 	radix_init_iamr();
+ 	radix_init_pgtable();
++	/* Switch to the guard PID before turning on MMU */
++	radix__switch_mmu_context(NULL, &init_mm);
+ }
+ 
+ void radix__early_init_mmu_secondary(void)
+@@ -601,6 +620,7 @@ void radix__early_init_mmu_secondary(void)
+ 		radix_init_amor();
+ 	}
+ 	radix_init_iamr();
++	radix__switch_mmu_context(NULL, &init_mm);
+ }
+ 
+ void radix__mmu_cleanup_all(void)
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 81d8614e7379..5e1ef9150182 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -48,6 +48,28 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
+ static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
+ 
+ 
++/*
++ * Enable the hotplug interrupt late because processing them may touch other
++ * devices or systems (e.g. hugepages) that have not been initialized at the
++ * subsys stage.
++ */
++int __init init_ras_hotplug_IRQ(void)
++{
++	struct device_node *np;
++
++	/* Hotplug Events */
++	np = of_find_node_by_path("/event-sources/hot-plug-events");
++	if (np != NULL) {
++		if (dlpar_workqueue_init() == 0)
++			request_event_sources_irqs(np, ras_hotplug_interrupt,
++						   "RAS_HOTPLUG");
++		of_node_put(np);
++	}
++
++	return 0;
++}
++machine_late_initcall(pseries, init_ras_hotplug_IRQ);
++
+ /*
+  * Initialize handlers for the set of interrupts caused by hardware errors
+  * and power system events.
+@@ -66,15 +88,6 @@ static int __init init_ras_IRQ(void)
+ 		of_node_put(np);
+ 	}
+ 
+-	/* Hotplug Events */
+-	np = of_find_node_by_path("/event-sources/hot-plug-events");
+-	if (np != NULL) {
+-		if (dlpar_workqueue_init() == 0)
+-			request_event_sources_irqs(np, ras_hotplug_interrupt,
+-					   "RAS_HOTPLUG");
+-		of_node_put(np);
+-	}
+-
+ 	/* EPOW Events */
+ 	np = of_find_node_by_path("/event-sources/epow-events");
+ 	if (np != NULL) {
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 024ad8bcc516..5b8089b0d3ee 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -170,8 +170,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
+ 
+ static int ckc_irq_pending(struct kvm_vcpu *vcpu)
+ {
+-	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
++	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
++	const u64 ckc = vcpu->arch.sie_block->ckc;
++
++	if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
++		if ((s64)ckc >= (s64)now)
++			return 0;
++	} else if (ckc >= now) {
+ 		return 0;
++	}
+ 	return ckc_interrupts_enabled(vcpu);
+ }
+ 
+@@ -1011,13 +1018,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+ 
+ static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
+ {
+-	u64 now, cputm, sltime = 0;
++	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
++	const u64 ckc = vcpu->arch.sie_block->ckc;
++	u64 cputm, sltime = 0;
+ 
+ 	if (ckc_interrupts_enabled(vcpu)) {
+-		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+-		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+-		/* already expired or overflow? */
+-		if (!sltime || vcpu->arch.sie_block->ckc <= now)
++		if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
++			if ((s64)now < (s64)ckc)
++				sltime = tod_to_ns((s64)ckc - (s64)now);
++		} else if (now < ckc) {
++			sltime = tod_to_ns(ckc - now);
++		}
++		/* already expired */
++		if (!sltime)
+ 			return 0;
+ 		if (cpu_timer_interrupts_enabled(vcpu)) {
+ 			cputm = kvm_s390_get_cpu_timer(vcpu);
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 1371dff2b90d..5c03e371b7b8 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -166,6 +166,28 @@ int kvm_arch_hardware_enable(void)
+ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
+ 			      unsigned long end);
+ 
++static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
++{
++	u8 delta_idx = 0;
++
++	/*
++	 * The TOD jumps by delta, we have to compensate this by adding
++	 * -delta to the epoch.
++	 */
++	delta = -delta;
++
++	/* sign-extension - we're adding to signed values below */
++	if ((s64)delta < 0)
++		delta_idx = -1;
++
++	scb->epoch += delta;
++	if (scb->ecd & ECD_MEF) {
++		scb->epdx += delta_idx;
++		if (scb->epoch < delta)
++			scb->epdx += 1;
++	}
++}
++
+ /*
+  * This callback is executed during stop_machine(). All CPUs are therefore
+  * temporarily stopped. In order not to change guest behavior, we have to
+@@ -181,13 +203,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
+ 	unsigned long long *delta = v;
+ 
+ 	list_for_each_entry(kvm, &vm_list, vm_list) {
+-		kvm->arch.epoch -= *delta;
+ 		kvm_for_each_vcpu(i, vcpu, kvm) {
+-			vcpu->arch.sie_block->epoch -= *delta;
++			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
++			if (i == 0) {
++				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
++				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
++			}
+ 			if (vcpu->arch.cputm_enabled)
+ 				vcpu->arch.cputm_start += *delta;
+ 			if (vcpu->arch.vsie_block)
+-				vcpu->arch.vsie_block->epoch -= *delta;
++				kvm_clock_sync_scb(vcpu->arch.vsie_block,
++						   *delta);
+ 		}
+ 	}
+ 	return NOTIFY_OK;
+@@ -889,12 +915,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
+ 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+ 		return -EFAULT;
+ 
+-	if (test_kvm_facility(kvm, 139))
+-		kvm_s390_set_tod_clock_ext(kvm, &gtod);
+-	else if (gtod.epoch_idx == 0)
+-		kvm_s390_set_tod_clock(kvm, gtod.tod);
+-	else
++	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
+ 		return -EINVAL;
++	kvm_s390_set_tod_clock(kvm, &gtod);
+ 
+ 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
+ 		gtod.epoch_idx, gtod.tod);
+@@ -919,13 +942,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
+ 
+ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
+ {
+-	u64 gtod;
++	struct kvm_s390_vm_tod_clock gtod = { 0 };
+ 
+-	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
++	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
++			   sizeof(gtod.tod)))
+ 		return -EFAULT;
+ 
+-	kvm_s390_set_tod_clock(kvm, gtod);
+-	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
++	kvm_s390_set_tod_clock(kvm, &gtod);
++	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
+ 	return 0;
+ }
+ 
+@@ -2361,6 +2385,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+ 	mutex_lock(&vcpu->kvm->lock);
+ 	preempt_disable();
+ 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
++	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
+ 	preempt_enable();
+ 	mutex_unlock(&vcpu->kvm->lock);
+ 	if (!kvm_is_ucontrol(vcpu->kvm)) {
+@@ -2947,8 +2972,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
+ 	return 0;
+ }
+ 
+-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
+-				 const struct kvm_s390_vm_tod_clock *gtod)
++void kvm_s390_set_tod_clock(struct kvm *kvm,
++			    const struct kvm_s390_vm_tod_clock *gtod)
+ {
+ 	struct kvm_vcpu *vcpu;
+ 	struct kvm_s390_tod_clock_ext htod;
+@@ -2960,10 +2985,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
+ 	get_tod_clock_ext((char *)&htod);
+ 
+ 	kvm->arch.epoch = gtod->tod - htod.tod;
+-	kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+-
+-	if (kvm->arch.epoch > gtod->tod)
+-		kvm->arch.epdx -= 1;
++	kvm->arch.epdx = 0;
++	if (test_kvm_facility(kvm, 139)) {
++		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
++		if (kvm->arch.epoch > gtod->tod)
++			kvm->arch.epdx -= 1;
++	}
+ 
+ 	kvm_s390_vcpu_block_all(kvm);
+ 	kvm_for_each_vcpu(i, vcpu, kvm) {
+@@ -2976,22 +3003,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
+ 	mutex_unlock(&kvm->lock);
+ }
+ 
+-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
+-{
+-	struct kvm_vcpu *vcpu;
+-	int i;
+-
+-	mutex_lock(&kvm->lock);
+-	preempt_disable();
+-	kvm->arch.epoch = tod - get_tod_clock();
+-	kvm_s390_vcpu_block_all(kvm);
+-	kvm_for_each_vcpu(i, vcpu, kvm)
+-		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+-	kvm_s390_vcpu_unblock_all(kvm);
+-	preempt_enable();
+-	mutex_unlock(&kvm->lock);
+-}
+-
+ /**
+  * kvm_arch_fault_in_page - fault-in guest page if necessary
+  * @vcpu: The corresponding virtual cpu
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index 5e46ba429bcb..efa186f065fb 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -268,9 +268,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
+ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
+ 
+ /* implemented in kvm-s390.c */
+-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
+-				 const struct kvm_s390_vm_tod_clock *gtod);
+-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
++void kvm_s390_set_tod_clock(struct kvm *kvm,
++			    const struct kvm_s390_vm_tod_clock *gtod);
+ long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
+ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
+ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
+diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
+index 0714bfa56da0..23bebdbbf490 100644
+--- a/arch/s390/kvm/priv.c
++++ b/arch/s390/kvm/priv.c
+@@ -81,9 +81,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
+ /* Handle SCK (SET CLOCK) interception */
+ static int handle_set_clock(struct kvm_vcpu *vcpu)
+ {
++	struct kvm_s390_vm_tod_clock gtod = { 0 };
+ 	int rc;
+ 	u8 ar;
+-	u64 op2, val;
++	u64 op2;
+ 
+ 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+@@ -91,12 +92,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
+ 	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
+ 	if (op2 & 7)	/* Operand must be on a doubleword boundary */
+ 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+-	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
++	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
+ 	if (rc)
+ 		return kvm_s390_inject_prog_cond(vcpu, rc);
+ 
+-	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
+-	kvm_s390_set_tod_clock(vcpu->kvm, val);
++	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
++	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
+ 
+ 	kvm_s390_set_psw_cc(vcpu, 0);
+ 	return 0;
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index e42b8943cb1a..cd0dba7a2293 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -350,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+ {
+ 	pmdval_t v = native_pmd_val(pmd);
+ 
+-	return __pmd(v | set);
++	return native_make_pmd(v | set);
+ }
+ 
+ static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+ {
+ 	pmdval_t v = native_pmd_val(pmd);
+ 
+-	return __pmd(v & ~clear);
++	return native_make_pmd(v & ~clear);
+ }
+ 
+ static inline pmd_t pmd_mkold(pmd_t pmd)
+@@ -409,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
+ {
+ 	pudval_t v = native_pud_val(pud);
+ 
+-	return __pud(v | set);
++	return native_make_pud(v | set);
+ }
+ 
+ static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
+ {
+ 	pudval_t v = native_pud_val(pud);
+ 
+-	return __pud(v & ~clear);
++	return native_make_pud(v & ~clear);
+ }
+ 
+ static inline pud_t pud_mkold(pud_t pud)
+diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
+index e55466760ff8..b3ec519e3982 100644
+--- a/arch/x86/include/asm/pgtable_32.h
++++ b/arch/x86/include/asm/pgtable_32.h
+@@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[];
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
++void sync_initial_page_table(void);
+ 
+ /*
+  * Define this if things work differently on an i386 and an i486:
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index 81462e9a34f6..1149d2112b2e 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[];
+ #define swapper_pg_dir init_top_pgt
+ 
+ extern void paging_init(void);
++static inline void sync_initial_page_table(void) { }
+ 
+ #define pte_ERROR(e)					\
+ 	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 3696398a9475..246f15b4e64c 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -323,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud)
+ #else
+ #include <asm-generic/pgtable-nopud.h>
+ 
++static inline pud_t native_make_pud(pudval_t val)
++{
++	return (pud_t) { .p4d.pgd = native_make_pgd(val) };
++}
++
+ static inline pudval_t native_pud_val(pud_t pud)
+ {
+ 	return native_pgd_val(pud.p4d.pgd);
+@@ -344,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ #else
+ #include <asm-generic/pgtable-nopmd.h>
+ 
++static inline pmd_t native_make_pmd(pmdval_t val)
++{
++	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
++}
++
+ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ {
+ 	return native_pgd_val(pmd.pud.p4d.pgd);
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 68d7ab81c62f..1fbe6b9fff37 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1205,20 +1205,13 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	kasan_init();
+ 
+-#ifdef CONFIG_X86_32
+-	/* sync back kernel address range */
+-	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+-			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+-			KERNEL_PGD_PTRS);
+-
+ 	/*
+-	 * sync back low identity map too.  It is used for example
+-	 * in the 32-bit EFI stub.
++	 * Sync back kernel address range.
++	 *
++	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
++	 * this call?
+ 	 */
+-	clone_pgd_range(initial_page_table,
+-			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+-			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+-#endif
++	sync_initial_page_table();
+ 
+ 	tboot_probe();
+ 
+diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
+index 497aa766fab3..ea554f812ee1 100644
+--- a/arch/x86/kernel/setup_percpu.c
++++ b/arch/x86/kernel/setup_percpu.c
+@@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void)
+ 	/* Setup cpu initialized, callin, callout masks */
+ 	setup_cpu_local_masks();
+ 
+-#ifdef CONFIG_X86_32
+ 	/*
+ 	 * Sync back kernel address range again.  We already did this in
+ 	 * setup_arch(), but percpu data also needs to be available in
+ 	 * the smpboot asm.  We can't reliably pick up percpu mappings
+ 	 * using vmalloc_fault(), because exception dispatch needs
+ 	 * percpu data.
++	 *
++	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
++	 * this call?
+ 	 */
+-	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+-			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+-			KERNEL_PGD_PTRS);
+-
+-	/*
+-	 * sync back low identity map too.  It is used for example
+-	 * in the 32-bit EFI stub.
+-	 */
+-	clone_pgd_range(initial_page_table,
+-			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+-			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+-#endif
++	sync_initial_page_table();
+ }
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index e2c1fb8d35ce..dbb8b476b41b 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1993,14 +1993,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
+ 
+ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ {
+-	struct kvm_lapic *apic;
++	struct kvm_lapic *apic = vcpu->arch.apic;
+ 	int i;
+ 
+-	apic_debug("%s\n", __func__);
++	if (!apic)
++		return;
+ 
+-	ASSERT(vcpu);
+-	apic = vcpu->arch.apic;
+-	ASSERT(apic != NULL);
++	apic_debug("%s\n", __func__);
+ 
+ 	/* Stop the timer in case it's a reset to an active apic */
+ 	hrtimer_cancel(&apic->lapic_timer.timer);
+@@ -2156,7 +2155,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
+ 	 */
+ 	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
+ 	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
+-	kvm_lapic_reset(vcpu, false);
+ 	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
+ 
+ 	return 0;
+@@ -2560,7 +2558,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
+ 
+ 	pe = xchg(&apic->pending_events, 0);
+ 	if (test_bit(KVM_APIC_INIT, &pe)) {
+-		kvm_lapic_reset(vcpu, true);
+ 		kvm_vcpu_reset(vcpu, true);
+ 		if (kvm_vcpu_is_bsp(apic->vcpu))
+ 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index cc83bdcb65d1..e080dbe55360 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3017,7 +3017,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
+ 		return RET_PF_RETRY;
+ 	}
+ 
+-	return -EFAULT;
++	return RET_PF_EMULATE;
+ }
+ 
+ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 4e3c79530526..3505afabce5d 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -45,6 +45,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/kvm_para.h>
+ #include <asm/irq_remapping.h>
++#include <asm/microcode.h>
+ #include <asm/nospec-branch.h>
+ 
+ #include <asm/virtext.h>
+@@ -5029,7 +5030,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ 	 * being speculatively taken.
+ 	 */
+ 	if (svm->spec_ctrl)
+-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
++		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+ 
+ 	asm volatile (
+ 		"push %%" _ASM_BP "; \n\t"
+@@ -5138,11 +5139,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
+ 	 * save it.
+ 	 */
+-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
++	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
++		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+ 
+ 	if (svm->spec_ctrl)
+-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
++		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ 
+ 	/* Eliminate branch target predictions from guest mode */
+ 	vmexit_fill_RSB();
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 561d8937fac5..87b453eeae40 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -51,6 +51,7 @@
+ #include <asm/apic.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/mmu_context.h>
++#include <asm/microcode.h>
+ #include <asm/nospec-branch.h>
+ 
+ #include "trace.h"
+@@ -9443,7 +9444,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 	 * being speculatively taken.
+ 	 */
+ 	if (vmx->spec_ctrl)
+-		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
++		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+ 
+ 	vmx->__launched = vmx->loaded_vmcs->launched;
+ 	asm(
+@@ -9578,11 +9579,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
+ 	 * save it.
+ 	 */
+-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+-		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
++	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
++		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+ 
+ 	if (vmx->spec_ctrl)
+-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
++		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ 
+ 	/* Eliminate branch target predictions from guest mode */
+ 	vmexit_fill_RSB();
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 17f4eca37d22..a10da5052072 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7835,6 +7835,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ 
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ {
++	kvm_lapic_reset(vcpu, init_event);
++
+ 	vcpu->arch.hflags = 0;
+ 
+ 	vcpu->arch.smi_pending = 0;
+@@ -8279,10 +8281,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
+ 			return r;
+ 	}
+ 
+-	if (!size) {
+-		r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+-		WARN_ON(r < 0);
+-	}
++	if (!size)
++		vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+ 
+ 	return 0;
+ }
+diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
+index b9283cc27622..476d810639a8 100644
+--- a/arch/x86/mm/cpu_entry_area.c
++++ b/arch/x86/mm/cpu_entry_area.c
+@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)
+ 
+ 	for_each_possible_cpu(cpu)
+ 		setup_cpu_entry_area(cpu);
++
++	/*
++	 * This is the last essential update to swapper_pgdir which needs
++	 * to be synchronized to initial_page_table on 32bit.
++	 */
++	sync_initial_page_table();
+ }
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index 135c9a7898c7..3141e67ec24c 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base)
+ }
+ #endif /* CONFIG_HIGHMEM */
+ 
++void __init sync_initial_page_table(void)
++{
++	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
++			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
++			KERNEL_PGD_PTRS);
++
++	/*
++	 * sync back low identity map too.  It is used for example
++	 * in the 32-bit EFI stub.
++	 */
++	clone_pgd_range(initial_page_table,
++			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
++			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
++}
++
+ void __init native_pagetable_init(void)
+ {
+ 	unsigned long pfn, va;
+diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
+index 86676cec99a1..09dd7f3cf621 100644
+--- a/arch/x86/platform/intel-mid/intel-mid.c
++++ b/arch/x86/platform/intel-mid/intel-mid.c
+@@ -79,7 +79,7 @@ static void intel_mid_power_off(void)
+ 
+ static void intel_mid_reboot(void)
+ {
+-	intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
++	intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
+ }
+ 
+ static unsigned long __init intel_mid_calibrate_tsc(void)
+diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
+index d9f96cc5d743..1d83152c761b 100644
+--- a/arch/x86/xen/suspend.c
++++ b/arch/x86/xen/suspend.c
+@@ -1,12 +1,15 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/types.h>
+ #include <linux/tick.h>
++#include <linux/percpu-defs.h>
+ 
+ #include <xen/xen.h>
+ #include <xen/interface/xen.h>
+ #include <xen/grant_table.h>
+ #include <xen/events.h>
+ 
++#include <asm/cpufeatures.h>
++#include <asm/msr-index.h>
+ #include <asm/xen/hypercall.h>
+ #include <asm/xen/page.h>
+ #include <asm/fixmap.h>
+@@ -15,6 +18,8 @@
+ #include "mmu.h"
+ #include "pmu.h"
+ 
++static DEFINE_PER_CPU(u64, spec_ctrl);
++
+ void xen_arch_pre_suspend(void)
+ {
+ 	xen_save_time_memory_area();
+@@ -35,6 +40,9 @@ void xen_arch_post_suspend(int cancelled)
+ 
+ static void xen_vcpu_notify_restore(void *data)
+ {
++	if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
++		wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
++
+ 	/* Boot processor notified via generic timekeeping_resume() */
+ 	if (smp_processor_id() == 0)
+ 		return;
+@@ -44,7 +52,15 @@ static void xen_vcpu_notify_restore(void *data)
+ 
+ static void xen_vcpu_notify_suspend(void *data)
+ {
++	u64 tmp;
++
+ 	tick_suspend_local();
++
++	if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
++		rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
++		this_cpu_write(spec_ctrl, tmp);
++		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
++	}
+ }
+ 
+ void xen_arch_resume(void)
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 82b92adf3477..b725d9e340c2 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2401,7 +2401,7 @@ blk_qc_t submit_bio(struct bio *bio)
+ 		unsigned int count;
+ 
+ 		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
+-			count = queue_logical_block_size(bio->bi_disk->queue);
++			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
+ 		else
+ 			count = bio_sectors(bio);
+ 
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 3d3797327491..5629f18b51bd 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -655,7 +655,6 @@ static void __blk_mq_requeue_request(struct request *rq)
+ 
+ 	trace_block_rq_requeue(q, rq);
+ 	wbt_requeue(q->rq_wb, &rq->issue_stat);
+-	blk_mq_sched_requeue_request(rq);
+ 
+ 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+ 		if (q->dma_drain_size && blk_rq_bytes(rq))
+@@ -667,6 +666,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
+ {
+ 	__blk_mq_requeue_request(rq);
+ 
++	/* this request will be re-inserted to io scheduler queue */
++	blk_mq_sched_requeue_request(rq);
++
+ 	BUG_ON(blk_queued_rq(rq));
+ 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
+ }
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 1668506d8ed8..3884d810efd2 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -225,7 +225,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
+ 
+ 	if (start + len > i_size_read(bdev->bd_inode))
+ 		return -EINVAL;
+-	truncate_inode_pages_range(mapping, start, start + len);
++	truncate_inode_pages_range(mapping, start, start + len - 1);
+ 	return blkdev_issue_discard(bdev, start >> 9, len >> 9,
+ 				    GFP_KERNEL, flags);
+ }
+diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
+index f95c60774ce8..0d6d25e32e1f 100644
+--- a/block/kyber-iosched.c
++++ b/block/kyber-iosched.c
+@@ -833,6 +833,7 @@ static struct elevator_type kyber_sched = {
+ 		.limit_depth = kyber_limit_depth,
+ 		.prepare_request = kyber_prepare_request,
+ 		.finish_request = kyber_finish_request,
++		.requeue_request = kyber_finish_request,
+ 		.completed_request = kyber_completed_request,
+ 		.dispatch_request = kyber_dispatch_request,
+ 		.has_work = kyber_has_work,
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 4d0979e02a28..b6d58cc58f5f 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -66,10 +66,37 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
+ 	return 0;
+ }
+ #endif
++static int set_gbl_term_list(const struct dmi_system_id *id)
++{
++	acpi_gbl_parse_table_as_term_list = 1;
++	return 0;
++}
+ 
+-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
++static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
++	/*
++	 * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C
++	 * mode.
++	 * https://bugzilla.kernel.org/show_bug.cgi?id=198515
++	 */
++	{
++		.callback = set_gbl_term_list,
++		.ident = "Dell Precision M5530",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"),
++		},
++	},
++	{
++		.callback = set_gbl_term_list,
++		.ident = "Dell XPS 15 9570",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
++		},
++	},
+ 	/*
+ 	 * Invoke DSDT corruption work-around on all Toshiba Satellite.
++	 * DSDT will be copied to memory.
+ 	 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
+ 	 */
+ 	{
+@@ -83,7 +110,7 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
+ 	{}
+ };
+ #else
+-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
++static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
+ 	{}
+ };
+ #endif
+@@ -1001,11 +1028,8 @@ void __init acpi_early_init(void)
+ 
+ 	acpi_permanent_mmap = true;
+ 
+-	/*
+-	 * If the machine falls into the DMI check table,
+-	 * DSDT will be copied to memory
+-	 */
+-	dmi_check_system(dsdt_dmi_table);
++	/* Check machine-specific quirks */
++	dmi_check_system(acpi_quirks_dmi_table);
+ 
+ 	status = acpi_reallocate_root_table();
+ 	if (ACPI_FAILURE(status)) {
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 76980e78ae56..e71e54c478da 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -21,6 +21,7 @@
+  *
+  */
+ 
++#include <linux/dmi.h>
+ #include <linux/module.h>
+ #include <linux/usb.h>
+ #include <linux/usb/quirks.h>
+@@ -376,6 +377,21 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ }	/* Terminating entry */
+ };
+ 
++/* The Bluetooth USB module build into some devices needs to be reset on resume,
++ * this is a problem with the platform (likely shutting off all power) not with
++ * the module itself. So we use a DMI list to match known broken platforms.
++ */
++static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
++	{
++		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
++		},
++	},
++	{}
++};
++
+ #define BTUSB_MAX_ISOC_FRAMES	10
+ 
+ #define BTUSB_INTR_RUNNING	0
+@@ -3031,6 +3047,9 @@ static int btusb_probe(struct usb_interface *intf,
+ 	hdev->send   = btusb_send_frame;
+ 	hdev->notify = btusb_notify;
+ 
++	if (dmi_check_system(btusb_needs_reset_resume_table))
++		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
++
+ #ifdef CONFIG_PM
+ 	err = btusb_config_oob_wake(hdev);
+ 	if (err)
+@@ -3117,12 +3136,6 @@ static int btusb_probe(struct usb_interface *intf,
+ 	if (id->driver_info & BTUSB_QCA_ROME) {
+ 		data->setup_on_usb = btusb_setup_qca;
+ 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+-
+-		/* QCA Rome devices lose their updated firmware over suspend,
+-		 * but the USB hub doesn't notice any status change.
+-		 * explicitly request a device reset on resume.
+-		 */
+-		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+ 	}
+ 
+ #ifdef CONFIG_BT_HCIBTUSB_RTL
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 71fad747c0c7..7499b0cd8326 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -2045,6 +2045,7 @@ static int try_smi_init(struct smi_info *new_smi)
+ 	int rv = 0;
+ 	int i;
+ 	char *init_name = NULL;
++	bool platform_device_registered = false;
+ 
+ 	pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
+ 		ipmi_addr_src_to_str(new_smi->io.addr_source),
+@@ -2173,6 +2174,7 @@ static int try_smi_init(struct smi_info *new_smi)
+ 				rv);
+ 			goto out_err;
+ 		}
++		platform_device_registered = true;
+ 	}
+ 
+ 	dev_set_drvdata(new_smi->io.dev, new_smi);
+@@ -2279,10 +2281,11 @@ static int try_smi_init(struct smi_info *new_smi)
+ 	}
+ 
+ 	if (new_smi->pdev) {
+-		platform_device_unregister(new_smi->pdev);
++		if (platform_device_registered)
++			platform_device_unregister(new_smi->pdev);
++		else
++			platform_device_put(new_smi->pdev);
+ 		new_smi->pdev = NULL;
+-	} else if (new_smi->pdev) {
+-		platform_device_put(new_smi->pdev);
+ 	}
+ 
+ 	kfree(init_name);
+diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
+index 4d1dc8b46877..f95b9c75175b 100644
+--- a/drivers/char/tpm/st33zp24/st33zp24.c
++++ b/drivers/char/tpm/st33zp24/st33zp24.c
+@@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
+ 			    size_t count)
+ {
+ 	int size = 0;
+-	int expected;
++	u32 expected;
+ 
+ 	if (!chip)
+ 		return -EBUSY;
+@@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
+ 	}
+ 
+ 	expected = be32_to_cpu(*(__be32 *)(buf + 2));
+-	if (expected > count) {
++	if (expected > count || expected < TPM_HEADER_SIZE) {
+ 		size = -EIO;
+ 		goto out;
+ 	}
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 1d6729be4cd6..3cec403a80b3 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -1228,6 +1228,10 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+ 			break;
+ 
+ 		recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
++		if (recd > num_bytes) {
++			total = -EFAULT;
++			break;
++		}
+ 
+ 		rlength = be32_to_cpu(tpm_cmd.header.out.length);
+ 		if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index f40d20671a78..f6be08483ae6 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ 	if (!rc) {
+ 		data_len = be16_to_cpup(
+ 			(__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
++		if (data_len < MIN_KEY_SIZE ||  data_len > MAX_KEY_SIZE + 1) {
++			rc = -EFAULT;
++			goto out;
++		}
+ 
+ 		rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
+ 					->header.out.length);
+diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
+index 79d6bbb58e39..d5b44cadac56 100644
+--- a/drivers/char/tpm/tpm_i2c_infineon.c
++++ b/drivers/char/tpm/tpm_i2c_infineon.c
+@@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+ 	int size = 0;
+-	int expected, status;
++	int status;
++	u32 expected;
+ 
+ 	if (count < TPM_HEADER_SIZE) {
+ 		size = -EIO;
+@@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	}
+ 
+ 	expected = be32_to_cpu(*(__be32 *)(buf + 2));
+-	if ((size_t) expected > count) {
++	if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
+ 		size = -EIO;
+ 		goto out;
+ 	}
+diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
+index c6428771841f..caa86b19c76d 100644
+--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
+@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	struct device *dev = chip->dev.parent;
+ 	struct i2c_client *client = to_i2c_client(dev);
+ 	s32 rc;
+-	int expected, status, burst_count, retries, size = 0;
++	int status;
++	int burst_count;
++	int retries;
++	int size = 0;
++	u32 expected;
+ 
+ 	if (count < TPM_HEADER_SIZE) {
+ 		i2c_nuvoton_ready(chip);    /* return to idle */
+@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ 		 * to machine native
+ 		 */
+ 		expected = be32_to_cpu(*(__be32 *) (buf + 2));
+-		if (expected > count) {
++		if (expected > count || expected < size) {
+ 			dev_err(dev, "%s() expected > count\n", __func__);
+ 			size = -EIO;
+ 			continue;
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index fdde971bc810..7561922bc8f8 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -202,7 +202,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ 	int size = 0;
+-	int expected, status;
++	int status;
++	u32 expected;
+ 
+ 	if (count < TPM_HEADER_SIZE) {
+ 		size = -EIO;
+@@ -217,7 +218,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	}
+ 
+ 	expected = be32_to_cpu(*(__be32 *) (buf + 2));
+-	if (expected > count) {
++	if (expected > count || expected < TPM_HEADER_SIZE) {
+ 		size = -EIO;
+ 		goto out;
+ 	}
+diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
+index 7b596fa38ad2..6bebc1f9f55a 100644
+--- a/drivers/cpufreq/s3c24xx-cpufreq.c
++++ b/drivers/cpufreq/s3c24xx-cpufreq.c
+@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
+ static int s3c_cpufreq_init(struct cpufreq_policy *policy)
+ {
+ 	policy->clk = clk_arm;
+-	return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
++
++	policy->cpuinfo.transition_latency = cpu_cur.info->latency;
++
++	if (ftab)
++		return cpufreq_table_validate_and_show(policy, ftab);
++
++	return 0;
+ }
+ 
+ static int __init s3c_cpufreq_initclks(void)
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index f34430f99fd8..872100215ca0 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
+  * sbridge structs
+  */
+ 
+-#define NUM_CHANNELS		4	/* Max channels per MC */
++#define NUM_CHANNELS		6	/* Max channels per MC */
+ #define MAX_DIMMS		3	/* Max DIMMS per channel */
+ #define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */
+ #define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 4e4dee0ec2de..926542fbc892 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8554,6 +8554,10 @@ static int remove_and_add_spares(struct mddev *mddev,
+ 	int removed = 0;
+ 	bool remove_some = false;
+ 
++	if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
++		/* Mustn't remove devices when resync thread is running */
++		return 0;
++
+ 	rdev_for_each(rdev, mddev) {
+ 		if ((this == NULL || rdev == this) &&
+ 		    rdev->raid_disk >= 0 &&
+diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
+index 50bce68ffd66..65d157fe76d1 100644
+--- a/drivers/media/dvb-frontends/m88ds3103.c
++++ b/drivers/media/dvb-frontends/m88ds3103.c
+@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
+  * New users must use I2C client binding directly!
+  */
+ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
+-		struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
++				      struct i2c_adapter *i2c,
++				      struct i2c_adapter **tuner_i2c_adapter)
+ {
+ 	struct i2c_client *client;
+ 	struct i2c_board_info board_info;
+-	struct m88ds3103_platform_data pdata;
++	struct m88ds3103_platform_data pdata = {};
+ 
+ 	pdata.clk = cfg->clock;
+ 	pdata.i2c_wr_max = cfg->i2c_wr_max;
+@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
+ 	case M88DS3103_CHIP_ID:
+ 		break;
+ 	default:
++		ret = -ENODEV;
++		dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
+ 		goto err_kfree;
+ 	}
+ 
+diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
+index 35026795be28..fa41d9422d57 100644
+--- a/drivers/mmc/host/dw_mmc-exynos.c
++++ b/drivers/mmc/host/dw_mmc-exynos.c
+@@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = {
+ 
+ static const struct dw_mci_drv_data exynos_drv_data = {
+ 	.caps			= exynos_dwmmc_caps,
++	.num_caps		= ARRAY_SIZE(exynos_dwmmc_caps),
+ 	.init			= dw_mci_exynos_priv_init,
+ 	.set_ios		= dw_mci_exynos_set_ios,
+ 	.parse_dt		= dw_mci_exynos_parse_dt,
+diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
+index 73fd75c3c824..89cdb3d533bb 100644
+--- a/drivers/mmc/host/dw_mmc-k3.c
++++ b/drivers/mmc/host/dw_mmc-k3.c
+@@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
+ 	if (priv->ctrl_id < 0)
+ 		priv->ctrl_id = 0;
+ 
++	if (priv->ctrl_id >= TIMING_MODE)
++		return -EINVAL;
++
+ 	host->priv = priv;
+ 	return 0;
+ }
+@@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+ 
+ static const struct dw_mci_drv_data hi6220_data = {
+ 	.caps			= dw_mci_hi6220_caps,
++	.num_caps		= ARRAY_SIZE(dw_mci_hi6220_caps),
+ 	.switch_voltage		= dw_mci_hi6220_switch_voltage,
+ 	.set_ios		= dw_mci_hi6220_set_ios,
+ 	.parse_dt		= dw_mci_hi6220_parse_dt,
+diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
+index a3f1c2b30145..339295212935 100644
+--- a/drivers/mmc/host/dw_mmc-rockchip.c
++++ b/drivers/mmc/host/dw_mmc-rockchip.c
+@@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = {
+ 
+ static const struct dw_mci_drv_data rk3288_drv_data = {
+ 	.caps			= dw_mci_rk3288_dwmmc_caps,
++	.num_caps		= ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
+ 	.set_ios		= dw_mci_rk3288_set_ios,
+ 	.execute_tuning		= dw_mci_rk3288_execute_tuning,
+ 	.parse_dt		= dw_mci_rk3288_parse_dt,
+diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c
+index d38e94ae2b85..c06b5393312f 100644
+--- a/drivers/mmc/host/dw_mmc-zx.c
++++ b/drivers/mmc/host/dw_mmc-zx.c
+@@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = {
+ 
+ static const struct dw_mci_drv_data zx_drv_data = {
+ 	.caps			= zx_dwmmc_caps,
++	.num_caps		= ARRAY_SIZE(zx_dwmmc_caps),
+ 	.execute_tuning		= dw_mci_zx_execute_tuning,
+ 	.prepare_hs400_tuning	= dw_mci_zx_prepare_hs400_tuning,
+ 	.parse_dt               = dw_mci_zx_parse_dt,
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index 0aa39975f33b..d9b4acefed31 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
+ {
+ 	struct dw_mci *host = s->private;
+ 
++	pm_runtime_get_sync(host->dev);
++
+ 	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
+ 	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
+ 	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
+@@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
+ 	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
+ 	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
+ 
++	pm_runtime_put_autosuspend(host->dev);
++
+ 	return 0;
+ }
+ 
+@@ -2778,12 +2782,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
++static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
++{
++	struct dw_mci *host = slot->host;
++	const struct dw_mci_drv_data *drv_data = host->drv_data;
++	struct mmc_host *mmc = slot->mmc;
++	int ctrl_id;
++
++	if (host->pdata->caps)
++		mmc->caps = host->pdata->caps;
++
++	/*
++	 * Support MMC_CAP_ERASE by default.
++	 * It needs to use trim/discard/erase commands.
++	 */
++	mmc->caps |= MMC_CAP_ERASE;
++
++	if (host->pdata->pm_caps)
++		mmc->pm_caps = host->pdata->pm_caps;
++
++	if (host->dev->of_node) {
++		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
++		if (ctrl_id < 0)
++			ctrl_id = 0;
++	} else {
++		ctrl_id = to_platform_device(host->dev)->id;
++	}
++
++	if (drv_data && drv_data->caps) {
++		if (ctrl_id >= drv_data->num_caps) {
++			dev_err(host->dev, "invalid controller id %d\n",
++				ctrl_id);
++			return -EINVAL;
++		}
++		mmc->caps |= drv_data->caps[ctrl_id];
++	}
++
++	if (host->pdata->caps2)
++		mmc->caps2 = host->pdata->caps2;
++
++	/* Process SDIO IRQs through the sdio_irq_work. */
++	if (mmc->caps & MMC_CAP_SDIO_IRQ)
++		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
++
++	return 0;
++}
++
+ static int dw_mci_init_slot(struct dw_mci *host)
+ {
+ 	struct mmc_host *mmc;
+ 	struct dw_mci_slot *slot;
+-	const struct dw_mci_drv_data *drv_data = host->drv_data;
+-	int ctrl_id, ret;
++	int ret;
+ 	u32 freq[2];
+ 
+ 	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
+@@ -2817,38 +2866,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
+ 	if (!mmc->ocr_avail)
+ 		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ 
+-	if (host->pdata->caps)
+-		mmc->caps = host->pdata->caps;
+-
+-	/*
+-	 * Support MMC_CAP_ERASE by default.
+-	 * It needs to use trim/discard/erase commands.
+-	 */
+-	mmc->caps |= MMC_CAP_ERASE;
+-
+-	if (host->pdata->pm_caps)
+-		mmc->pm_caps = host->pdata->pm_caps;
+-
+-	if (host->dev->of_node) {
+-		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
+-		if (ctrl_id < 0)
+-			ctrl_id = 0;
+-	} else {
+-		ctrl_id = to_platform_device(host->dev)->id;
+-	}
+-	if (drv_data && drv_data->caps)
+-		mmc->caps |= drv_data->caps[ctrl_id];
+-
+-	if (host->pdata->caps2)
+-		mmc->caps2 = host->pdata->caps2;
+-
+ 	ret = mmc_of_parse(mmc);
+ 	if (ret)
+ 		goto err_host_allocated;
+ 
+-	/* Process SDIO IRQs through the sdio_irq_work. */
+-	if (mmc->caps & MMC_CAP_SDIO_IRQ)
+-		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
++	ret = dw_mci_init_slot_caps(slot);
++	if (ret)
++		goto err_host_allocated;
+ 
+ 	/* Useful defaults if platform data is unset. */
+ 	if (host->use_dma == TRANS_MODE_IDMAC) {
+diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
+index e3124f06a47e..1424bd490dd1 100644
+--- a/drivers/mmc/host/dw_mmc.h
++++ b/drivers/mmc/host/dw_mmc.h
+@@ -543,6 +543,7 @@ struct dw_mci_slot {
+ /**
+  * dw_mci driver data - dw-mshc implementation specific driver data.
+  * @caps: mmc subsystem specified capabilities of the controller(s).
++ * @num_caps: number of capabilities specified by @caps.
+  * @init: early implementation specific initialization.
+  * @set_ios: handle bus specific extensions.
+  * @parse_dt: parse implementation specific device tree properties.
+@@ -554,6 +555,7 @@ struct dw_mci_slot {
+  */
+ struct dw_mci_drv_data {
+ 	unsigned long	*caps;
++	u32		num_caps;
+ 	int		(*init)(struct dw_mci *host);
+ 	void		(*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
+ 	int		(*parse_dt)(struct dw_mci *host);
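
The dw_mmc changes above all follow one pattern: each per-SoC caps array gains an explicit num_caps = ARRAY_SIZE(...), so the core can reject an out-of-range controller id instead of indexing past the array. A minimal sketch of the pattern (struct and function names illustrative):

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct drv_data {
	const unsigned long *caps;
	unsigned int num_caps;
};

static const unsigned long example_caps[4] = { 0x1, 0x2, 0x4, 0x8 };
static const struct drv_data example_drv = {
	.caps		= example_caps,
	.num_caps	= ARRAY_SIZE(example_caps),
};

static int lookup_caps(const struct drv_data *d, int ctrl_id,
		       unsigned long *out)
{
	if (ctrl_id < 0 || (unsigned int)ctrl_id >= d->num_caps)
		return -1;	/* mirrors the -EINVAL in dw_mci_init_slot_caps() */
	*out = d->caps[ctrl_id];
	return 0;
}

The hi6220 parse_dt hunk adds the analogous guard (ctrl_id >= TIMING_MODE) for its own private timing table.
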
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 3e4f04fd5175..bf93e8b0b191 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -593,9 +593,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot)
+ 	slot->chip->rpm_retune = intel_host->d3_retune;
+ }
+ 
+-static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
++static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
++{
++	int err = sdhci_execute_tuning(mmc, opcode);
++	struct sdhci_host *host = mmc_priv(mmc);
++
++	if (err)
++		return err;
++
++	/*
++	 * Tuning can leave the IP in an active state (Buffer Read Enable bit
++	 * set) which prevents the entry to low power states (i.e. S0i3). Data
++	 * reset will clear it.
++	 */
++	sdhci_reset(host, SDHCI_RESET_DATA);
++
++	return 0;
++}
++
++static void byt_probe_slot(struct sdhci_pci_slot *slot)
+ {
++	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
++
+ 	byt_read_dsm(slot);
++
++	ops->execute_tuning = intel_execute_tuning;
++}
++
++static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
++{
++	byt_probe_slot(slot);
+ 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ 				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
+ 				 MMC_CAP_CMD_DURING_TFR |
+@@ -650,7 +677,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	int err;
+ 
+-	byt_read_dsm(slot);
++	byt_probe_slot(slot);
+ 
+ 	err = ni_set_max_freq(slot);
+ 	if (err)
+@@ -663,7 +690,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+ 
+ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+ {
+-	byt_read_dsm(slot);
++	byt_probe_slot(slot);
+ 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
+ 				 MMC_CAP_WAIT_WHILE_BUSY;
+ 	return 0;
+@@ -671,7 +698,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
+ 
+ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+ {
+-	byt_read_dsm(slot);
++	byt_probe_slot(slot);
+ 	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
+ 				 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
+ 	slot->cd_idx = 0;
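
byt_probe_slot() above is a small op-override: every Intel BYT slot type now routes tuning through a wrapper that runs the generic routine and then clears the stuck Buffer Read Enable state. A sketch of the wrapper pattern, with stand-in names for the sdhci helpers:

struct host;
struct host_ops {
	int (*execute_tuning)(struct host *h, unsigned int opcode);
};

static int generic_execute_tuning(struct host *h, unsigned int opcode)
{
	return 0;		/* stand-in for sdhci_execute_tuning() */
}

static void reset_data_path(struct host *h)
{
	/* stand-in for sdhci_reset(host, SDHCI_RESET_DATA) */
}

static int wrapped_execute_tuning(struct host *h, unsigned int opcode)
{
	int err = generic_execute_tuning(h, opcode);

	if (err)
		return err;
	reset_data_path(h);	/* leave the IP idle so S0i3 entry is possible */
	return 0;
}

static void probe_slot(struct host_ops *ops)
{
	ops->execute_tuning = wrapped_execute_tuning;	/* install the override */
}
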
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index a74a8fbad53a..2e6075ce5dca 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -595,7 +595,7 @@ static void xgbe_isr_task(unsigned long data)
+ 
+ 		reissue_mask = 1 << 0;
+ 		if (!pdata->per_channel_irq)
+-			reissue_mask |= 0xffff < 4;
++			reissue_mask |= 0xffff << 4;
+ 
+ 		XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
+ 	}
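
The xgbe hunk above is a classic one-character fix: '<' (relational compare) where '<<' (shift) was meant, so the per-channel reissue bits were never set. The difference, as a self-contained check:

#include <assert.h>

int main(void)
{
	unsigned int mask = 1 << 0;

	assert((mask | (0xffff <  4)) == 0x00001);	/* buggy: compare yields 0 */
	assert((mask | (0xffff << 4)) == 0xffff1);	/* fixed: 16 channel bits at offset 4 */
	return 0;
}
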
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index 3e5833cf1fab..eb23f9ba1a9a 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
+ 	struct net_device *netdev = pdata->netdev;
+ 	int ret = 0;
+ 
++	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
++
+ 	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+ 	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+index d699bf88d18f..6044fdcf6056 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+@@ -156,7 +156,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
+ 
+ 	if (is_t6(padap->params.chip)) {
+ 		size = padap->params.cim_la_size / 10 + 1;
+-		size *= 11 * sizeof(u32);
++		size *= 10 * sizeof(u32);
+ 	} else {
+ 		size = padap->params.cim_la_size / 8;
+ 		size *= 8 * sizeof(u32);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+index 29cc625e9833..97465101e0b9 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+@@ -97,7 +97,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
+ 	case CUDBG_CIM_LA:
+ 		if (is_t6(adap->params.chip)) {
+ 			len = adap->params.cim_la_size / 10 + 1;
+-			len *= 11 * sizeof(u32);
++			len *= 10 * sizeof(u32);
+ 		} else {
+ 			len = adap->params.cim_la_size / 8;
+ 			len *= 8 * sizeof(u32);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 62a18914f00f..a7113e702f58 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1878,6 +1878,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+ 				     ixgbe_rx_pg_size(rx_ring),
+ 				     DMA_FROM_DEVICE,
+ 				     IXGBE_RX_DMA_ATTR);
++	} else if (ring_uses_build_skb(rx_ring)) {
++		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
++
++		dma_sync_single_range_for_cpu(rx_ring->dev,
++					      IXGBE_CB(skb)->dma,
++					      offset,
++					      skb_headlen(skb),
++					      DMA_FROM_DEVICE);
+ 	} else {
+ 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index d8aefeed124d..0d352d4cf48c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1911,13 +1911,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ 	param->wq.linear = 1;
+ }
+ 
+-static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
++static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
++				      struct mlx5e_rq_param *param)
+ {
+ 	void *rqc = param->rqc;
+ 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ 
+ 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ 	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
++
++	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+ }
+ 
+ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+@@ -2774,6 +2777,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
+ 			       struct mlx5e_cq *cq,
+ 			       struct mlx5e_cq_param *param)
+ {
++	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
++	param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);
++
+ 	return mlx5e_alloc_cq_common(mdev, param, cq);
+ }
+ 
+@@ -2785,7 +2791,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
+ 	struct mlx5e_cq *cq = &drop_rq->cq;
+ 	int err;
+ 
+-	mlx5e_build_drop_rq_param(&rq_param);
++	mlx5e_build_drop_rq_param(mdev, &rq_param);
+ 
+ 	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
+ 	if (err)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 5b499c7a698f..36611b64a91c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -36,6 +36,7 @@
+ #include <linux/tcp.h>
+ #include <linux/bpf_trace.h>
+ #include <net/busy_poll.h>
++#include <net/ip6_checksum.h>
+ #include "en.h"
+ #include "en_tc.h"
+ #include "eswitch.h"
+@@ -547,20 +548,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
+ 	return true;
+ }
+ 
++static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
++{
++	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
++	u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
++			 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
++
++	tcp->check                      = 0;
++	tcp->psh                        = get_cqe_lro_tcppsh(cqe);
++
++	if (tcp_ack) {
++		tcp->ack                = 1;
++		tcp->ack_seq            = cqe->lro_ack_seq_num;
++		tcp->window             = cqe->lro_tcp_win;
++	}
++}
++
+ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+ 				 u32 cqe_bcnt)
+ {
+ 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
+ 	struct tcphdr	*tcp;
+ 	int network_depth = 0;
++	__wsum check;
+ 	__be16 proto;
+ 	u16 tot_len;
+ 	void *ip_p;
+ 
+-	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+-	u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
+-		(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
+-
+ 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+ 
+ 	tot_len = cqe_bcnt - network_depth;
+@@ -577,23 +591,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+ 		ipv4->check             = 0;
+ 		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
+ 						       ipv4->ihl);
++
++		mlx5e_lro_update_tcp_hdr(cqe, tcp);
++		check = csum_partial(tcp, tcp->doff * 4,
++				     csum_unfold((__force __sum16)cqe->check_sum));
++		/* Almost done, don't forget the pseudo header */
++		tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
++					       tot_len - sizeof(struct iphdr),
++					       IPPROTO_TCP, check);
+ 	} else {
++		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
+ 		struct ipv6hdr *ipv6 = ip_p;
+ 
+ 		tcp = ip_p + sizeof(struct ipv6hdr);
+ 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ 
+ 		ipv6->hop_limit         = cqe->lro_min_ttl;
+-		ipv6->payload_len       = cpu_to_be16(tot_len -
+-						      sizeof(struct ipv6hdr));
+-	}
+-
+-	tcp->psh = get_cqe_lro_tcppsh(cqe);
+-
+-	if (tcp_ack) {
+-		tcp->ack                = 1;
+-		tcp->ack_seq            = cqe->lro_ack_seq_num;
+-		tcp->window             = cqe->lro_tcp_win;
++		ipv6->payload_len       = cpu_to_be16(payload_len);
++
++		mlx5e_lro_update_tcp_hdr(cqe, tcp);
++		check = csum_partial(tcp, tcp->doff * 4,
++				     csum_unfold((__force __sum16)cqe->check_sum));
++		/* Almost done, don't forget the pseudo header */
++		tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
++					     IPPROTO_TCP, check);
+ 	}
+ }
+ 
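
After the rework above, both the IPv4 and IPv6 LRO paths rebuild tcp->check explicitly: a partial sum over the rewritten TCP header is combined with the pseudo header (addresses, protocol, payload length) and folded. A minimal host-order sketch of the fold arithmetic involved — these are simplified stand-ins, not the kernel's csum helpers, and real packet fields are big-endian:

#include <stdint.h>

/* One's-complement fold of a 32-bit running sum down to a 16-bit checksum. */
static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* IPv4/TCP pseudo-header contribution: saddr, daddr, protocol, TCP length. */
static uint32_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint16_t tcp_len, uint8_t proto)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += tcp_len;
	return sum;	/* feed into csum_fold() together with the TCP segment sum */
}
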
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+index 5a4608281f38..707976482c09 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
+ 	if (iph->protocol != IPPROTO_UDP)
+ 		goto out;
+ 
+-	udph = udp_hdr(skb);
++	/* Don't assume skb_transport_header() was set */
++	udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
+ 	if (udph->dest != htons(9))
+ 		goto out;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index 569b42a01026..11b4f1089d1c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+ 	default:
+ 		hlen = mlx5e_skb_l2_header_offset(skb);
+ 	}
+-	return min_t(u16, hlen, skb->len);
++	return min_t(u16, hlen, skb_headlen(skb));
+ }
+ 
+ static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index dfaad9ecb2b8..a681693631aa 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -1755,8 +1755,11 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+ 
+ 	/* Collect all fgs which has a matching match_criteria */
+ 	err = build_match_list(&match_head, ft, spec);
+-	if (err)
++	if (err) {
++		if (take_write)
++			up_write_ref_node(&ft->node);
+ 		return ERR_PTR(err);
++	}
+ 
+ 	if (!take_write)
+ 		up_read_ref_node(&ft->node);
+@@ -1765,8 +1768,11 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+ 				      dest_num, version);
+ 	free_match_list(&match_head);
+ 	if (!IS_ERR(rule) ||
+-	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
++	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
++		if (take_write)
++			up_write_ref_node(&ft->node);
+ 		return rule;
++	}
+ 
+ 	if (!take_write) {
+ 		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 7042c855a5d6..7e50dbc8282c 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -737,6 +737,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
+ 					      u32 tb_id,
+ 					      struct netlink_ext_ack *extack)
+ {
++	struct mlxsw_sp_mr_table *mr4_table;
++	struct mlxsw_sp_fib *fib4;
++	struct mlxsw_sp_fib *fib6;
+ 	struct mlxsw_sp_vr *vr;
+ 	int err;
+ 
+@@ -745,29 +748,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
+ 		NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
+ 		return ERR_PTR(-EBUSY);
+ 	}
+-	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
+-	if (IS_ERR(vr->fib4))
+-		return ERR_CAST(vr->fib4);
+-	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
+-	if (IS_ERR(vr->fib6)) {
+-		err = PTR_ERR(vr->fib6);
++	fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
++	if (IS_ERR(fib4))
++		return ERR_CAST(fib4);
++	fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
++	if (IS_ERR(fib6)) {
++		err = PTR_ERR(fib6);
+ 		goto err_fib6_create;
+ 	}
+-	vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+-						 MLXSW_SP_L3_PROTO_IPV4);
+-	if (IS_ERR(vr->mr4_table)) {
+-		err = PTR_ERR(vr->mr4_table);
++	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
++					     MLXSW_SP_L3_PROTO_IPV4);
++	if (IS_ERR(mr4_table)) {
++		err = PTR_ERR(mr4_table);
+ 		goto err_mr_table_create;
+ 	}
++	vr->fib4 = fib4;
++	vr->fib6 = fib6;
++	vr->mr4_table = mr4_table;
+ 	vr->tb_id = tb_id;
+ 	return vr;
+ 
+ err_mr_table_create:
+-	mlxsw_sp_fib_destroy(vr->fib6);
+-	vr->fib6 = NULL;
++	mlxsw_sp_fib_destroy(fib6);
+ err_fib6_create:
+-	mlxsw_sp_fib_destroy(vr->fib4);
+-	vr->fib4 = NULL;
++	mlxsw_sp_fib_destroy(fib4);
+ 	return ERR_PTR(err);
+ }
+ 
+@@ -3761,6 +3765,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+ 	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
+ 	int i;
+ 
++	if (!list_is_singular(&nh_grp->fib_list))
++		return;
++
+ 	for (i = 0; i < nh_grp->count; i++) {
+ 		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ 
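
mlxsw_sp_vr_create() above switches to build-then-commit error handling: the FIBs and the MR table are created into locals and only assigned into *vr once every step has succeeded, so a failure can no longer leave the VR holding dangling, half-initialized pointers. The shape of the idiom, with hypothetical names:

#include <stdlib.h>

struct fib { int proto; };
struct vr { struct fib *fib4, *fib6; };

static struct fib *fib_create(int proto)
{
	struct fib *f = malloc(sizeof(*f));

	if (f)
		f->proto = proto;
	return f;
}

static int vr_init_sketch(struct vr *vr)
{
	struct fib *fib4, *fib6;

	fib4 = fib_create(4);
	if (!fib4)
		return -1;
	fib6 = fib_create(6);
	if (!fib6) {
		free(fib4);		/* unwind touches only locals */
		return -1;
	}

	vr->fib4 = fib4;		/* commit only after full success */
	vr->fib6 = fib6;
	return 0;
}
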
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 593ad31be749..161bcdc012f0 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ 				     bool dynamic)
+ {
+ 	char *sfd_pl;
++	u8 num_rec;
+ 	int err;
+ 
+ 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+@@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
+ 			      mac, fid, action, local_port);
++	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+-	kfree(sfd_pl);
++	if (err)
++		goto out;
++
++	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
++		err = -EBUSY;
+ 
++out:
++	kfree(sfd_pl);
+ 	return err;
+ }
+ 
+@@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
+ 				       bool adding, bool dynamic)
+ {
+ 	char *sfd_pl;
++	u8 num_rec;
+ 	int err;
+ 
+ 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+@@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
+ 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
+ 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
+ 				  lag_vid, lag_id);
++	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+-	kfree(sfd_pl);
++	if (err)
++		goto out;
++
++	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
++		err = -EBUSY;
+ 
++out:
++	kfree(sfd_pl);
+ 	return err;
+ }
+ 
+@@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
+ 				u16 fid, u16 mid_idx, bool adding)
+ {
+ 	char *sfd_pl;
++	u8 num_rec;
+ 	int err;
+ 
+ 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+@@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
+ 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
+ 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
+ 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
++	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+ 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
++	if (err)
++		goto out;
++
++	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
++		err = -EBUSY;
++
++out:
+ 	kfree(sfd_pl);
+ 	return err;
+ }
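
All three SFD hunks above add the same verification: the hardware echoes back the number of records it actually consumed, so sampling num_rec before the register write and comparing afterwards turns a silently dropped FDB record into -EBUSY. The shape of the check, with hypothetical stand-ins for the mlxsw accessors:

static unsigned char get_num_rec(const char *payload)
{
	return (unsigned char)payload[0];	/* stand-in for the num_rec field */
}

static int reg_write(char *payload)
{
	(void)payload;				/* stand-in for mlxsw_reg_write() */
	return 0;
}

static int write_fdb_record(char *sfd_pl)
{
	unsigned char num_rec = get_num_rec(sfd_pl);	/* records requested */
	int err = reg_write(sfd_pl);

	if (err)
		return err;
	if (num_rec != get_num_rec(sfd_pl))
		return -16;		/* ~ -EBUSY: device consumed fewer records */
	return 0;
}
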
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index a73600dceb8b..a1ffc3ed77f9 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1618,6 +1618,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
+ 		q_idx = q_idx % cpsw->tx_ch_num;
+ 
+ 	txch = cpsw->txv[q_idx].ch;
++	txq = netdev_get_tx_queue(ndev, q_idx);
+ 	ret = cpsw_tx_packet_submit(priv, skb, txch);
+ 	if (unlikely(ret != 0)) {
+ 		cpsw_err(priv, tx_err, "desc submit failed\n");
+@@ -1628,15 +1629,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
+ 	 * tell the kernel to stop sending us tx frames.
+ 	 */
+ 	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
+-		txq = netdev_get_tx_queue(ndev, q_idx);
+ 		netif_tx_stop_queue(txq);
++
++		/* Barrier, so that stop_queue is visible to other CPUs */
++		smp_mb__after_atomic();
++
++		if (cpdma_check_free_tx_desc(txch))
++			netif_tx_wake_queue(txq);
+ 	}
+ 
+ 	return NETDEV_TX_OK;
+ fail:
+ 	ndev->stats.tx_dropped++;
+-	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
+ 	netif_tx_stop_queue(txq);
++
++	/* Barrier, so that stop_queue is visible to other CPUs */
++	smp_mb__after_atomic();
++
++	if (cpdma_check_free_tx_desc(txch))
++		netif_tx_wake_queue(txq);
++
+ 	return NETDEV_TX_BUSY;
+ }
+ 
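
Both cpsw paths above now follow the stop/barrier/recheck idiom: after stopping the queue, the free-descriptor count must be re-read, because the completion handler may have freed descriptors between the first check and the stop; the barrier keeps that re-read from seeing a stale value. A userspace analogue of the ordering, a sketch assuming C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int free_desc;
static atomic_bool stopped;

static void xmit_tail(void)
{
	if (atomic_load(&free_desc) > 0)
		return;				/* room left, nothing to do */

	atomic_store(&stopped, true);		/* ~ netif_tx_stop_queue() */

	/* ~ smp_mb__after_atomic(): order the stop against the re-read,
	 * pairing with a completion path that frees descriptors and then
	 * tests 'stopped'. */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&free_desc) > 0)	/* completion raced with us */
		atomic_store(&stopped, false);	/* ~ netif_tx_wake_queue() */
}
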
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index ed10d1fc8f59..39de77a8bb63 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -841,10 +841,10 @@ void phy_start(struct phy_device *phydev)
+ 		break;
+ 	case PHY_HALTED:
+ 		/* if phy was suspended, bring the physical link up again */
+-		phy_resume(phydev);
++		__phy_resume(phydev);
+ 
+ 		/* make sure interrupts are re-enabled for the PHY */
+-		if (phydev->irq != PHY_POLL) {
++		if (phy_interrupt_is_valid(phydev)) {
+ 			err = phy_enable_interrupts(phydev);
+ 			if (err < 0)
+ 				break;
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index b15b31ca2618..d312b314825e 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev)
+ 	if (!mdio_bus_phy_may_suspend(phydev))
+ 		goto no_resume;
+ 
+-	mutex_lock(&phydev->lock);
+ 	ret = phy_resume(phydev);
+-	mutex_unlock(&phydev->lock);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1028,9 +1026,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ 	if (err)
+ 		goto error;
+ 
+-	mutex_lock(&phydev->lock);
+ 	phy_resume(phydev);
+-	mutex_unlock(&phydev->lock);
+ 	phy_led_triggers_register(phydev);
+ 
+ 	return err;
+@@ -1156,7 +1152,7 @@ int phy_suspend(struct phy_device *phydev)
+ }
+ EXPORT_SYMBOL(phy_suspend);
+ 
+-int phy_resume(struct phy_device *phydev)
++int __phy_resume(struct phy_device *phydev)
+ {
+ 	struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
+ 	int ret = 0;
+@@ -1173,6 +1169,18 @@ int phy_resume(struct phy_device *phydev)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL(__phy_resume);
++
++int phy_resume(struct phy_device *phydev)
++{
++	int ret;
++
++	mutex_lock(&phydev->lock);
++	ret = __phy_resume(phydev);
++	mutex_unlock(&phydev->lock);
++
++	return ret;
++}
+ EXPORT_SYMBOL(phy_resume);
+ 
+ int phy_loopback(struct phy_device *phydev, bool enable)
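
The phylib change above is the standard locked/unlocked split: phy_start() already runs under phydev->lock, so it calls the bare __phy_resume(), while all other callers use a phy_resume() wrapper that takes the lock itself — removing the deadlock-prone expectation that callers lock by hand. A pthread sketch of the pattern (the struct and field names are illustrative, and the mutex is assumed initialized elsewhere):

#include <pthread.h>

struct phy { pthread_mutex_t lock; int link_up; };

/* Caller must already hold p->lock (the double-underscore convention). */
static int __phy_resume_sketch(struct phy *p)
{
	p->link_up = 1;
	return 0;
}

/* Everyone else gets the locking wrapper. */
static int phy_resume_sketch(struct phy *p)
{
	int ret;

	pthread_mutex_lock(&p->lock);
	ret = __phy_resume_sketch(p);
	pthread_mutex_unlock(&p->lock);
	return ret;
}
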
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 264d4af0bf69..9f79f9274c50 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -3161,6 +3161,15 @@ ppp_connect_channel(struct channel *pch, int unit)
+ 		goto outl;
+ 
+ 	ppp_lock(ppp);
++	spin_lock_bh(&pch->downl);
++	if (!pch->chan) {
++		/* Don't connect unregistered channels */
++		spin_unlock_bh(&pch->downl);
++		ppp_unlock(ppp);
++		ret = -ENOTCONN;
++		goto outl;
++	}
++	spin_unlock_bh(&pch->downl);
+ 	if (pch->file.hdrlen > ppp->file.hdrlen)
+ 		ppp->file.hdrlen = pch->file.hdrlen;
+ 	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index e29cd5c7d39f..f50cf06c9353 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1471,6 +1471,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ 	else
+ 		*skb_xdp = 0;
+ 
++	preempt_disable();
+ 	rcu_read_lock();
+ 	xdp_prog = rcu_dereference(tun->xdp_prog);
+ 	if (xdp_prog && !*skb_xdp) {
+@@ -1490,9 +1491,11 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ 			get_page(alloc_frag->page);
+ 			alloc_frag->offset += buflen;
+ 			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
++			xdp_do_flush_map();
+ 			if (err)
+ 				goto err_redirect;
+ 			rcu_read_unlock();
++			preempt_enable();
+ 			return NULL;
+ 		case XDP_TX:
+ 			xdp_xmit = true;
+@@ -1514,6 +1517,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ 	skb = build_skb(buf, buflen);
+ 	if (!skb) {
+ 		rcu_read_unlock();
++		preempt_enable();
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+@@ -1526,10 +1530,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ 		skb->dev = tun->dev;
+ 		generic_xdp_tx(skb, xdp_prog);
+ 		rcu_read_unlock();
++		preempt_enable();
+ 		return NULL;
+ 	}
+ 
+ 	rcu_read_unlock();
++	preempt_enable();
+ 
+ 	return skb;
+ 
+@@ -1537,6 +1543,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ 	put_page(alloc_frag->page);
+ err_xdp:
+ 	rcu_read_unlock();
++	preempt_enable();
+ 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ 	return NULL;
+ }
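
The tun hunks above wrap the whole XDP section in preempt_disable()/preempt_enable(), because xdp_do_redirect() manipulates per-CPU redirect state — and every exit path of the function (redirect, allocation failure, generic XDP TX, success, error) must re-enable preemption. A runnable analogue of keeping such a critical section balanced across multiple exits, with a mutex standing in for the preemption/RCU protections:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t percpu_lock = PTHREAD_MUTEX_INITIALIZER;

/* ~ tun_build_skb(): several outcomes, one unlock point. */
static void *build_sketch(int fail_alloc, int redirected)
{
	void *skb = NULL;

	pthread_mutex_lock(&percpu_lock);	/* ~ preempt_disable() */

	if (redirected)
		goto out;			/* packet consumed, skb stays NULL */
	if (fail_alloc)
		goto out;			/* error path unlocks too */

	skb = &percpu_lock;			/* arbitrary non-NULL result */
out:
	pthread_mutex_unlock(&percpu_lock);	/* ~ preempt_enable() */
	return skb;
}
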
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 559b215c0169..5907a8d0e921 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2040,8 +2040,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ 	}
+ 
+ 	/* Make sure NAPI is not using any XDP TX queues for RX. */
+-	for (i = 0; i < vi->max_queue_pairs; i++)
+-		napi_disable(&vi->rq[i].napi);
++	if (netif_running(dev))
++		for (i = 0; i < vi->max_queue_pairs; i++)
++			napi_disable(&vi->rq[i].napi);
+ 
+ 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
+ 	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
+@@ -2060,7 +2061,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+ 		}
+ 		if (old_prog)
+ 			bpf_prog_put(old_prog);
+-		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
++		if (netif_running(dev))
++			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
+index afeca6bcdade..ab8b3cbbb205 100644
+--- a/drivers/net/wan/hdlc_ppp.c
++++ b/drivers/net/wan/hdlc_ppp.c
+@@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t)
+ 			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+ 				     0, NULL);
+ 			proto->restart_counter--;
+-		} else
++		} else if (netif_carrier_ok(proto->dev))
++			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
++				     0, NULL);
++		else
+ 			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
+ 				     0, NULL);
+ 		break;
+diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
+index cd4725e7e0b5..c864430b9fcf 100644
+--- a/drivers/platform/x86/dell-laptop.c
++++ b/drivers/platform/x86/dell-laptop.c
+@@ -78,7 +78,6 @@ static struct platform_driver platform_driver = {
+ 	}
+ };
+ 
+-static struct calling_interface_buffer *buffer;
+ static struct platform_device *platform_device;
+ static struct backlight_device *dell_backlight_device;
+ static struct rfkill *wifi_rfkill;
+@@ -286,7 +285,8 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
+ 	{ }
+ };
+ 
+-void dell_set_arguments(u32 arg0, u32 arg1, u32 arg2, u32 arg3)
++static void dell_fill_request(struct calling_interface_buffer *buffer,
++			      u32 arg0, u32 arg1, u32 arg2, u32 arg3)
+ {
+ 	memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ 	buffer->input[0] = arg0;
+@@ -295,7 +295,8 @@ void dell_set_arguments(u32 arg0, u32 arg1, u32 arg2, u32 arg3)
+ 	buffer->input[3] = arg3;
+ }
+ 
+-int dell_send_request(u16 class, u16 select)
++static int dell_send_request(struct calling_interface_buffer *buffer,
++			     u16 class, u16 select)
+ {
+ 	int ret;
+ 
+@@ -432,21 +433,22 @@ static int dell_rfkill_set(void *data, bool blocked)
+ 	int disable = blocked ? 1 : 0;
+ 	unsigned long radio = (unsigned long)data;
+ 	int hwswitch_bit = (unsigned long)data - 1;
++	struct calling_interface_buffer buffer;
+ 	int hwswitch;
+ 	int status;
+ 	int ret;
+ 
+-	dell_set_arguments(0, 0, 0, 0);
+-	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
++	dell_fill_request(&buffer, 0, 0, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ 	if (ret)
+ 		return ret;
+-	status = buffer->output[1];
++	status = buffer.output[1];
+ 
+-	dell_set_arguments(0x2, 0, 0, 0);
+-	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
++	dell_fill_request(&buffer, 0x2, 0, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ 	if (ret)
+ 		return ret;
+-	hwswitch = buffer->output[1];
++	hwswitch = buffer.output[1];
+ 
+ 	/* If the hardware switch controls this radio, and the hardware
+ 	   switch is disabled, always disable the radio */
+@@ -454,8 +456,8 @@ static int dell_rfkill_set(void *data, bool blocked)
+ 	    (status & BIT(0)) && !(status & BIT(16)))
+ 		disable = 1;
+ 
+-	dell_set_arguments(1 | (radio<<8) | (disable << 16), 0, 0, 0);
+-	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
++	dell_fill_request(&buffer, 1 | (radio<<8) | (disable << 16), 0, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ 	return ret;
+ }
+ 
+@@ -464,9 +466,11 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
+ {
+ 	if (status & BIT(0)) {
+ 		/* Has hw-switch, sync sw_state to BIOS */
++		struct calling_interface_buffer buffer;
+ 		int block = rfkill_blocked(rfkill);
+-		dell_set_arguments(1 | (radio << 8) | (block << 16), 0, 0, 0);
+-		dell_send_request(CLASS_INFO, SELECT_RFKILL);
++		dell_fill_request(&buffer,
++				   1 | (radio << 8) | (block << 16), 0, 0, 0);
++		dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ 	} else {
+ 		/* No hw-switch, sync BIOS state to sw_state */
+ 		rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
+@@ -483,21 +487,22 @@ static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
+ static void dell_rfkill_query(struct rfkill *rfkill, void *data)
+ {
+ 	int radio = ((unsigned long)data & 0xF);
++	struct calling_interface_buffer buffer;
+ 	int hwswitch;
+ 	int status;
+ 	int ret;
+ 
+-	dell_set_arguments(0, 0, 0, 0);
+-	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+-	status = buffer->output[1];
++	dell_fill_request(&buffer, 0, 0, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
++	status = buffer.output[1];
+ 
+ 	if (ret != 0 || !(status & BIT(0))) {
+ 		return;
+ 	}
+ 
+-	dell_set_arguments(0, 0x2, 0, 0);
+-	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+-	hwswitch = buffer->output[1];
++	dell_fill_request(&buffer, 0, 0x2, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
++	hwswitch = buffer.output[1];
+ 
+ 	if (ret != 0)
+ 		return;
+@@ -514,22 +519,23 @@ static struct dentry *dell_laptop_dir;
+ 
+ static int dell_debugfs_show(struct seq_file *s, void *data)
+ {
++	struct calling_interface_buffer buffer;
+ 	int hwswitch_state;
+ 	int hwswitch_ret;
+ 	int status;
+ 	int ret;
+ 
+-	dell_set_arguments(0, 0, 0, 0);
+-	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
++	dell_fill_request(&buffer, 0, 0, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ 	if (ret)
+ 		return ret;
+-	status = buffer->output[1];
++	status = buffer.output[1];
+ 
+-	dell_set_arguments(0, 0x2, 0, 0);
+-	hwswitch_ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
++	dell_fill_request(&buffer, 0, 0x2, 0, 0);
++	hwswitch_ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ 	if (hwswitch_ret)
+ 		return hwswitch_ret;
+-	hwswitch_state = buffer->output[1];
++	hwswitch_state = buffer.output[1];
+ 
+ 	seq_printf(s, "return:\t%d\n", ret);
+ 	seq_printf(s, "status:\t0x%X\n", status);
+@@ -610,22 +616,23 @@ static const struct file_operations dell_debugfs_fops = {
+ 
+ static void dell_update_rfkill(struct work_struct *ignored)
+ {
++	struct calling_interface_buffer buffer;
+ 	int hwswitch = 0;
+ 	int status;
+ 	int ret;
+ 
+-	dell_set_arguments(0, 0, 0, 0);
+-	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+-	status = buffer->output[1];
++	dell_fill_request(&buffer, 0, 0, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
++	status = buffer.output[1];
+ 
+ 	if (ret != 0)
+ 		return;
+ 
+-	dell_set_arguments(0, 0x2, 0, 0);
+-	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
++	dell_fill_request(&buffer, 0, 0x2, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
+ 
+ 	if (ret == 0 && (status & BIT(0)))
+-		hwswitch = buffer->output[1];
++		hwswitch = buffer.output[1];
+ 
+ 	if (wifi_rfkill) {
+ 		dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
+@@ -683,6 +690,7 @@ static struct notifier_block dell_laptop_rbtn_notifier = {
+ 
+ static int __init dell_setup_rfkill(void)
+ {
++	struct calling_interface_buffer buffer;
+ 	int status, ret, whitelisted;
+ 	const char *product;
+ 
+@@ -698,9 +706,9 @@ static int __init dell_setup_rfkill(void)
+ 	if (!force_rfkill && !whitelisted)
+ 		return 0;
+ 
+-	dell_set_arguments(0, 0, 0, 0);
+-	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+-	status = buffer->output[1];
++	dell_fill_request(&buffer, 0, 0, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
++	status = buffer.output[1];
+ 
+ 	/* dell wireless info smbios call is not supported */
+ 	if (ret != 0)
+@@ -853,6 +861,7 @@ static void dell_cleanup_rfkill(void)
+ 
+ static int dell_send_intensity(struct backlight_device *bd)
+ {
++	struct calling_interface_buffer buffer;
+ 	struct calling_interface_token *token;
+ 	int ret;
+ 
+@@ -860,17 +869,21 @@ static int dell_send_intensity(struct backlight_device *bd)
+ 	if (!token)
+ 		return -ENODEV;
+ 
+-	dell_set_arguments(token->location, bd->props.brightness, 0, 0);
++	dell_fill_request(&buffer,
++			   token->location, bd->props.brightness, 0, 0);
+ 	if (power_supply_is_system_supplied() > 0)
+-		ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_AC);
++		ret = dell_send_request(&buffer,
++					CLASS_TOKEN_WRITE, SELECT_TOKEN_AC);
+ 	else
+-		ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT);
++		ret = dell_send_request(&buffer,
++					CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT);
+ 
+ 	return ret;
+ }
+ 
+ static int dell_get_intensity(struct backlight_device *bd)
+ {
++	struct calling_interface_buffer buffer;
+ 	struct calling_interface_token *token;
+ 	int ret;
+ 
+@@ -878,14 +891,17 @@ static int dell_get_intensity(struct backlight_device *bd)
+ 	if (!token)
+ 		return -ENODEV;
+ 
+-	dell_set_arguments(token->location, 0, 0, 0);
++	dell_fill_request(&buffer, token->location, 0, 0, 0);
+ 	if (power_supply_is_system_supplied() > 0)
+-		ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC);
++		ret = dell_send_request(&buffer,
++					CLASS_TOKEN_READ, SELECT_TOKEN_AC);
+ 	else
+-		ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_BAT);
++		ret = dell_send_request(&buffer,
++					CLASS_TOKEN_READ, SELECT_TOKEN_BAT);
+ 
+ 	if (ret == 0)
+-		ret = buffer->output[1];
++		ret = buffer.output[1];
++
+ 	return ret;
+ }
+ 
+@@ -1149,31 +1165,33 @@ static DEFINE_MUTEX(kbd_led_mutex);
+ 
+ static int kbd_get_info(struct kbd_info *info)
+ {
++	struct calling_interface_buffer buffer;
+ 	u8 units;
+ 	int ret;
+ 
+-	dell_set_arguments(0, 0, 0, 0);
+-	ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
++	dell_fill_request(&buffer, 0, 0, 0, 0);
++	ret = dell_send_request(&buffer,
++				CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+ 	if (ret)
+ 		return ret;
+ 
+-	info->modes = buffer->output[1] & 0xFFFF;
+-	info->type = (buffer->output[1] >> 24) & 0xFF;
+-	info->triggers = buffer->output[2] & 0xFF;
+-	units = (buffer->output[2] >> 8) & 0xFF;
+-	info->levels = (buffer->output[2] >> 16) & 0xFF;
++	info->modes = buffer.output[1] & 0xFFFF;
++	info->type = (buffer.output[1] >> 24) & 0xFF;
++	info->triggers = buffer.output[2] & 0xFF;
++	units = (buffer.output[2] >> 8) & 0xFF;
++	info->levels = (buffer.output[2] >> 16) & 0xFF;
+ 
+ 	if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
+ 		info->levels--;
+ 
+ 	if (units & BIT(0))
+-		info->seconds = (buffer->output[3] >> 0) & 0xFF;
++		info->seconds = (buffer.output[3] >> 0) & 0xFF;
+ 	if (units & BIT(1))
+-		info->minutes = (buffer->output[3] >> 8) & 0xFF;
++		info->minutes = (buffer.output[3] >> 8) & 0xFF;
+ 	if (units & BIT(2))
+-		info->hours = (buffer->output[3] >> 16) & 0xFF;
++		info->hours = (buffer.output[3] >> 16) & 0xFF;
+ 	if (units & BIT(3))
+-		info->days = (buffer->output[3] >> 24) & 0xFF;
++		info->days = (buffer.output[3] >> 24) & 0xFF;
+ 
+ 	return ret;
+ }
+@@ -1233,31 +1251,34 @@ static int kbd_set_level(struct kbd_state *state, u8 level)
+ 
+ static int kbd_get_state(struct kbd_state *state)
+ {
++	struct calling_interface_buffer buffer;
+ 	int ret;
+ 
+-	dell_set_arguments(0x1, 0, 0, 0);
+-	ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
++	dell_fill_request(&buffer, 0x1, 0, 0, 0);
++	ret = dell_send_request(&buffer,
++				CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+ 	if (ret)
+ 		return ret;
+ 
+-	state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
++	state->mode_bit = ffs(buffer.output[1] & 0xFFFF);
+ 	if (state->mode_bit != 0)
+ 		state->mode_bit--;
+ 
+-	state->triggers = (buffer->output[1] >> 16) & 0xFF;
+-	state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
+-	state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
+-	state->als_setting = buffer->output[2] & 0xFF;
+-	state->als_value = (buffer->output[2] >> 8) & 0xFF;
+-	state->level = (buffer->output[2] >> 16) & 0xFF;
+-	state->timeout_value_ac = (buffer->output[2] >> 24) & 0x3F;
+-	state->timeout_unit_ac = (buffer->output[2] >> 30) & 0x3;
++	state->triggers = (buffer.output[1] >> 16) & 0xFF;
++	state->timeout_value = (buffer.output[1] >> 24) & 0x3F;
++	state->timeout_unit = (buffer.output[1] >> 30) & 0x3;
++	state->als_setting = buffer.output[2] & 0xFF;
++	state->als_value = (buffer.output[2] >> 8) & 0xFF;
++	state->level = (buffer.output[2] >> 16) & 0xFF;
++	state->timeout_value_ac = (buffer.output[2] >> 24) & 0x3F;
++	state->timeout_unit_ac = (buffer.output[2] >> 30) & 0x3;
+ 
+ 	return ret;
+ }
+ 
+ static int kbd_set_state(struct kbd_state *state)
+ {
++	struct calling_interface_buffer buffer;
+ 	int ret;
+ 	u32 input1;
+ 	u32 input2;
+@@ -1270,8 +1291,9 @@ static int kbd_set_state(struct kbd_state *state)
+ 	input2 |= (state->level & 0xFF) << 16;
+ 	input2 |= (state->timeout_value_ac & 0x3F) << 24;
+ 	input2 |= (state->timeout_unit_ac & 0x3) << 30;
+-	dell_set_arguments(0x2, input1, input2, 0);
+-	ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
++	dell_fill_request(&buffer, 0x2, input1, input2, 0);
++	ret = dell_send_request(&buffer,
++				CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+ 
+ 	return ret;
+ }
+@@ -1298,6 +1320,7 @@ static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
+ 
+ static int kbd_set_token_bit(u8 bit)
+ {
++	struct calling_interface_buffer buffer;
+ 	struct calling_interface_token *token;
+ 	int ret;
+ 
+@@ -1308,14 +1331,15 @@ static int kbd_set_token_bit(u8 bit)
+ 	if (!token)
+ 		return -EINVAL;
+ 
+-	dell_set_arguments(token->location, token->value, 0, 0);
+-	ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
++	dell_fill_request(&buffer, token->location, token->value, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
+ 
+ 	return ret;
+ }
+ 
+ static int kbd_get_token_bit(u8 bit)
+ {
++	struct calling_interface_buffer buffer;
+ 	struct calling_interface_token *token;
+ 	int ret;
+ 	int val;
+@@ -1327,9 +1351,9 @@ static int kbd_get_token_bit(u8 bit)
+ 	if (!token)
+ 		return -EINVAL;
+ 
+-	dell_set_arguments(token->location, 0, 0, 0);
+-	ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_STD);
+-	val = buffer->output[1];
++	dell_fill_request(&buffer, token->location, 0, 0, 0);
++	ret = dell_send_request(&buffer, CLASS_TOKEN_READ, SELECT_TOKEN_STD);
++	val = buffer.output[1];
+ 
+ 	if (ret)
+ 		return ret;
+@@ -2046,6 +2070,7 @@ static struct notifier_block dell_laptop_notifier = {
+ 
+ int dell_micmute_led_set(int state)
+ {
++	struct calling_interface_buffer buffer;
+ 	struct calling_interface_token *token;
+ 
+ 	if (state == 0)
+@@ -2058,8 +2083,8 @@ int dell_micmute_led_set(int state)
+ 	if (!token)
+ 		return -ENODEV;
+ 
+-	dell_set_arguments(token->location, token->value, 0, 0);
+-	dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
++	dell_fill_request(&buffer, token->location, token->value, 0, 0);
++	dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
+ 
+ 	return state;
+ }
+@@ -2090,13 +2115,6 @@ static int __init dell_init(void)
+ 	if (ret)
+ 		goto fail_platform_device2;
+ 
+-	buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+-	if (!buffer) {
+-		ret = -ENOMEM;
+-		goto fail_buffer;
+-	}
+-
+-
+ 	ret = dell_setup_rfkill();
+ 
+ 	if (ret) {
+@@ -2121,10 +2139,13 @@ static int __init dell_init(void)
+ 
+ 	token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
+ 	if (token) {
+-		dell_set_arguments(token->location, 0, 0, 0);
+-		ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC);
++		struct calling_interface_buffer buffer;
++
++		dell_fill_request(&buffer, token->location, 0, 0, 0);
++		ret = dell_send_request(&buffer,
++					CLASS_TOKEN_READ, SELECT_TOKEN_AC);
+ 		if (ret)
+-			max_intensity = buffer->output[3];
++			max_intensity = buffer.output[3];
+ 	}
+ 
+ 	if (max_intensity) {
+@@ -2158,8 +2179,6 @@ static int __init dell_init(void)
+ fail_get_brightness:
+ 	backlight_device_unregister(dell_backlight_device);
+ fail_backlight:
+-	kfree(buffer);
+-fail_buffer:
+ 	dell_cleanup_rfkill();
+ fail_rfkill:
+ 	platform_device_del(platform_device);
+@@ -2179,7 +2198,6 @@ static void __exit dell_exit(void)
+ 		touchpad_led_exit();
+ 	kbd_led_exit();
+ 	backlight_device_unregister(dell_backlight_device);
+-	kfree(buffer);
+ 	dell_cleanup_rfkill();
+ 	if (platform_device) {
+ 		platform_device_unregister(platform_device);
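
The dell-laptop refactor above retires the single static calling_interface_buffer (and its kzalloc/kfree lifecycle) in favor of a caller-owned stack buffer threaded through dell_fill_request()/dell_send_request(), so concurrent SMBIOS calls no longer race on shared state. The essence of the change, as a sketch with a stubbed dispatch function:

struct smi_buf { unsigned int input[4], output[4]; };

/* Stand-in for the SMBIOS dispatch; fills b->output from b->input. */
static int send_request(struct smi_buf *b, unsigned int cls, unsigned int sel)
{
	b->output[1] = b->input[0] + cls + sel;
	return 0;
}

static int rfkill_status_sketch(void)
{
	struct smi_buf b = { { 0 }, { 0 } };	/* private to this call chain */

	b.input[0] = 0;
	send_request(&b, 17, 11);		/* ~ CLASS_INFO, SELECT_RFKILL */
	return (int)b.output[1];
}
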
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index badf42acbf95..185b3cd48b88 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -581,6 +581,11 @@ struct qeth_cmd_buffer {
+ 	void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
+ };
+ 
++static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
++{
++	return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
++}
++
+ /**
+  * definition of a qeth channel, used for read and write
+  */
+@@ -836,7 +841,7 @@ struct qeth_trap_id {
+  */
+ static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
+ {
+-	return PFN_UP(end - 1) - PFN_DOWN(start);
++	return PFN_UP(end) - PFN_DOWN(start);
+ }
+ 
+ static inline int qeth_get_micros(void)
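
The qeth_get_elements_for_range() fix above matters when a buffer's last byte sits exactly at the start of a page: rounding up from end - 1 drops that final page from the count. A worked check, assuming 4 KiB pages and an exclusive end:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE   4096UL
#define PFN_DOWN(x) ((uintptr_t)(x) / PAGE_SIZE)
#define PFN_UP(x)   (((uintptr_t)(x) + PAGE_SIZE - 1) / PAGE_SIZE)

int main(void)
{
	uintptr_t start = 0x1f00, end = 0x2001;	/* last byte 0x2000 opens page 2 */

	assert(PFN_UP(end - 1) - PFN_DOWN(start) == 1);	/* old formula: one element short */
	assert(PFN_UP(end)     - PFN_DOWN(start) == 2);	/* fixed: both pages counted */
	return 0;
}

The companion hunks in qeth_core_main.c and qeth_l3_main.c add the matching start != end guards, since with the new formula an empty, non-aligned range would otherwise report one element.
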
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 3614df68830f..61e9d0bca197 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -2057,7 +2057,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ 	unsigned long flags;
+ 	struct qeth_reply *reply = NULL;
+ 	unsigned long timeout, event_timeout;
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = NULL;
+ 
+ 	QETH_CARD_TEXT(card, 2, "sendctl");
+ 
+@@ -2071,22 +2071,26 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ 	}
+ 	reply->callback = reply_cb;
+ 	reply->param = reply_param;
+-	if (card->state == CARD_STATE_DOWN)
+-		reply->seqno = QETH_IDX_COMMAND_SEQNO;
+-	else
+-		reply->seqno = card->seqno.ipa++;
++
+ 	init_waitqueue_head(&reply->wait_q);
+-	spin_lock_irqsave(&card->lock, flags);
+-	list_add_tail(&reply->list, &card->cmd_waiter_list);
+-	spin_unlock_irqrestore(&card->lock, flags);
+ 
+ 	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
+-	qeth_prepare_control_data(card, len, iob);
+ 
+-	if (IS_IPA(iob->data))
++	if (IS_IPA(iob->data)) {
++		cmd = __ipa_cmd(iob);
++		cmd->hdr.seqno = card->seqno.ipa++;
++		reply->seqno = cmd->hdr.seqno;
+ 		event_timeout = QETH_IPA_TIMEOUT;
+-	else
++	} else {
++		reply->seqno = QETH_IDX_COMMAND_SEQNO;
+ 		event_timeout = QETH_TIMEOUT;
++	}
++	qeth_prepare_control_data(card, len, iob);
++
++	spin_lock_irqsave(&card->lock, flags);
++	list_add_tail(&reply->list, &card->cmd_waiter_list);
++	spin_unlock_irqrestore(&card->lock, flags);
++
+ 	timeout = jiffies + event_timeout;
+ 
+ 	QETH_CARD_TEXT(card, 6, "noirqpnd");
+@@ -2111,9 +2115,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ 
+ 	/* we have only one long running ipassist, since we can ensure
+ 	   process context of this command we can sleep */
+-	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+-	if ((cmd->hdr.command == IPA_CMD_SETIP) &&
+-	    (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
++	if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
++	    cmd->hdr.prot_version == QETH_PROT_IPV4) {
+ 		if (!wait_event_timeout(reply->wait_q,
+ 		    atomic_read(&reply->received), event_timeout))
+ 			goto time_err;
+@@ -2868,7 +2871,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
+ 	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
+ 	cmd->hdr.command = command;
+ 	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
+-	cmd->hdr.seqno = card->seqno.ipa;
++	/* cmd->hdr.seqno is set by qeth_send_control_data() */
+ 	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
+ 	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
+ 	if (card->options.layer2)
+@@ -3833,10 +3836,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
+ int qeth_get_elements_no(struct qeth_card *card,
+ 		     struct sk_buff *skb, int extra_elems, int data_offset)
+ {
+-	int elements = qeth_get_elements_for_range(
+-				(addr_t)skb->data + data_offset,
+-				(addr_t)skb->data + skb_headlen(skb)) +
+-			qeth_get_elements_for_frags(skb);
++	addr_t end = (addr_t)skb->data + skb_headlen(skb);
++	int elements = qeth_get_elements_for_frags(skb);
++	addr_t start = (addr_t)skb->data + data_offset;
++
++	if (start != end)
++		elements += qeth_get_elements_for_range(start, end);
+ 
+ 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
+ 		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
+diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
+index e5833837b799..8727b9517de8 100644
+--- a/drivers/s390/net/qeth_l3.h
++++ b/drivers/s390/net/qeth_l3.h
+@@ -40,8 +40,40 @@ struct qeth_ipaddr {
+ 			unsigned int pfxlen;
+ 		} a6;
+ 	} u;
+-
+ };
++
++static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
++					 struct qeth_ipaddr *a2)
++{
++	if (a1->proto != a2->proto)
++		return false;
++	if (a1->proto == QETH_PROT_IPV6)
++		return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
++	return a1->u.a4.addr == a2->u.a4.addr;
++}
++
++static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
++					  struct qeth_ipaddr *a2)
++{
++	/* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
++	 * so 'proto' and 'addr' match for sure.
++	 *
++	 * For ucast:
++	 * -	'mac' is always 0.
++	 * -	'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
++	 *	values are required to avoid mixups in takeover eligibility.
++	 *
++	 * For mcast,
++	 * -	'mac' is mapped from the IP, and thus always matches.
++	 * -	'mask'/'pfxlen' is always 0.
++	 */
++	if (a1->type != a2->type)
++		return false;
++	if (a1->proto == QETH_PROT_IPV6)
++		return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
++	return a1->u.a4.mask == a2->u.a4.mask;
++}
++
+ static inline  u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
+ {
+ 	u64  ret = 0;
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index ef0961e18686..33131c594627 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -150,6 +150,24 @@ int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
+ 		return -EINVAL;
+ }
+ 
++static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
++						   struct qeth_ipaddr *query)
++{
++	u64 key = qeth_l3_ipaddr_hash(query);
++	struct qeth_ipaddr *addr;
++
++	if (query->is_multicast) {
++		hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
++			if (qeth_l3_addr_match_ip(addr, query))
++				return addr;
++	} else {
++		hash_for_each_possible(card->ip_htable,  addr, hnode, key)
++			if (qeth_l3_addr_match_ip(addr, query))
++				return addr;
++	}
++	return NULL;
++}
++
+ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
+ {
+ 	int i, j;
+@@ -203,34 +221,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+ 	return rc;
+ }
+ 
+-inline int
+-qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
+-{
+-	return addr1->proto == addr2->proto &&
+-		!memcmp(&addr1->u, &addr2->u, sizeof(addr1->u))  &&
+-		!memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
+-}
+-
+-static struct qeth_ipaddr *
+-qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
+-{
+-	struct qeth_ipaddr *addr;
+-
+-	if (tmp_addr->is_multicast) {
+-		hash_for_each_possible(card->ip_mc_htable,  addr,
+-				hnode, qeth_l3_ipaddr_hash(tmp_addr))
+-			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
+-				return addr;
+-	} else {
+-		hash_for_each_possible(card->ip_htable,  addr,
+-				hnode, qeth_l3_ipaddr_hash(tmp_addr))
+-			if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
+-				return addr;
+-	}
+-
+-	return NULL;
+-}
+-
+ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
+ {
+ 	int rc = 0;
+@@ -245,23 +235,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
+ 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
+ 	}
+ 
+-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
+-	if (!addr)
++	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
++	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
+ 		return -ENOENT;
+ 
+ 	addr->ref_counter--;
+-	if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
+-				      addr->type == QETH_IP_TYPE_RXIP))
++	if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
+ 		return rc;
+ 	if (addr->in_progress)
+ 		return -EINPROGRESS;
+ 
+-	if (!qeth_card_hw_is_reachable(card)) {
+-		addr->disp_flag = QETH_DISP_ADDR_DELETE;
+-		return 0;
+-	}
+-
+-	rc = qeth_l3_deregister_addr_entry(card, addr);
++	if (qeth_card_hw_is_reachable(card))
++		rc = qeth_l3_deregister_addr_entry(card, addr);
+ 
+ 	hash_del(&addr->hnode);
+ 	kfree(addr);
+@@ -273,6 +258,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
+ {
+ 	int rc = 0;
+ 	struct qeth_ipaddr *addr;
++	char buf[40];
+ 
+ 	QETH_CARD_TEXT(card, 4, "addip");
+ 
+@@ -283,8 +269,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
+ 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
+ 	}
+ 
+-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
+-	if (!addr) {
++	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
++	if (addr) {
++		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
++			return -EADDRINUSE;
++		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
++			addr->ref_counter++;
++			return 0;
++		}
++		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
++					 buf);
++		dev_warn(&card->gdev->dev,
++			 "Registering IP address %s failed\n", buf);
++		return -EADDRINUSE;
++	} else {
+ 		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
+ 		if (!addr)
+ 			return -ENOMEM;
+@@ -324,19 +322,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
+ 				(rc == IPA_RC_LAN_OFFLINE)) {
+ 			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ 			if (addr->ref_counter < 1) {
+-				qeth_l3_delete_ip(card, addr);
++				qeth_l3_deregister_addr_entry(card, addr);
++				hash_del(&addr->hnode);
+ 				kfree(addr);
+ 			}
+ 		} else {
+ 			hash_del(&addr->hnode);
+ 			kfree(addr);
+ 		}
+-	} else {
+-		if (addr->type == QETH_IP_TYPE_NORMAL ||
+-		    addr->type == QETH_IP_TYPE_RXIP)
+-			addr->ref_counter++;
+ 	}
+-
+ 	return rc;
+ }
+ 
+@@ -404,11 +398,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
+ 	spin_lock_bh(&card->ip_lock);
+ 
+ 	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
+-		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
+-			qeth_l3_deregister_addr_entry(card, addr);
+-			hash_del(&addr->hnode);
+-			kfree(addr);
+-		} else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
++		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+ 			if (addr->proto == QETH_PROT_IPV4) {
+ 				addr->in_progress = 1;
+ 				spin_unlock_bh(&card->ip_lock);
+@@ -724,12 +714,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+ 		return -ENOMEM;
+ 
+ 	spin_lock_bh(&card->ip_lock);
+-
+-	if (qeth_l3_ip_from_hash(card, ipaddr))
+-		rc = -EEXIST;
+-	else
+-		qeth_l3_add_ip(card, ipaddr);
+-
++	rc = qeth_l3_add_ip(card, ipaddr);
+ 	spin_unlock_bh(&card->ip_lock);
+ 
+ 	kfree(ipaddr);
+@@ -792,12 +777,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+ 		return -ENOMEM;
+ 
+ 	spin_lock_bh(&card->ip_lock);
+-
+-	if (qeth_l3_ip_from_hash(card, ipaddr))
+-		rc = -EEXIST;
+-	else
+-		qeth_l3_add_ip(card, ipaddr);
+-
++	rc = qeth_l3_add_ip(card, ipaddr);
+ 	spin_unlock_bh(&card->ip_lock);
+ 
+ 	kfree(ipaddr);
+@@ -1405,8 +1385,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
+ 		memcpy(tmp->mac, buf, sizeof(tmp->mac));
+ 		tmp->is_multicast = 1;
+ 
+-		ipm = qeth_l3_ip_from_hash(card, tmp);
++		ipm = qeth_l3_find_addr_by_ip(card, tmp);
+ 		if (ipm) {
++			/* for mcast, by-IP match means full match */
+ 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ 		} else {
+ 			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+@@ -1489,8 +1470,9 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
+ 		       sizeof(struct in6_addr));
+ 		tmp->is_multicast = 1;
+ 
+-		ipm = qeth_l3_ip_from_hash(card, tmp);
++		ipm = qeth_l3_find_addr_by_ip(card, tmp);
+ 		if (ipm) {
++			/* for mcast, by-IP match means full match */
+ 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ 			continue;
+ 		}
+@@ -2629,11 +2611,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
+ static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
+ 			struct sk_buff *skb, int extra_elems)
+ {
+-	addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+-	int elements = qeth_get_elements_for_range(
+-				tcpdptr,
+-				(addr_t)skb->data + skb_headlen(skb)) +
+-				qeth_get_elements_for_frags(skb);
++	addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
++	addr_t end = (addr_t)skb->data + skb_headlen(skb);
++	int elements = qeth_get_elements_for_frags(skb);
++
++	if (start != end)
++		elements += qeth_get_elements_for_range(start, end);
+ 
+ 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
+ 		QETH_DBF_MESSAGE(2,
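
The qeth change above splits lookup into a by-IP hash search (qeth_l3_find_addr_by_ip) plus an explicit full-match check, so an entry with the same IP but different attributes now yields -EADDRINUSE instead of being missed. A minimal userspace sketch of that lookup-then-verify pattern follows; the struct layout and helper names are invented stand-ins for the qeth types, not kernel API:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct qeth_ipaddr. */
struct ipaddr {
	unsigned int ip;
	unsigned char mac[6];
	int ref_counter;
	struct ipaddr *next;		/* hash-bucket chain */
};

/* By-IP lookup: first entry whose IP matches; attributes ignored. */
static struct ipaddr *find_addr_by_ip(struct ipaddr *bucket, unsigned int ip)
{
	for (struct ipaddr *e = bucket; e; e = e->next)
		if (e->ip == ip)
			return e;
	return NULL;
}

/* Full match: IP *and* the remaining attributes must agree. */
static int addr_match_all(const struct ipaddr *a, const struct ipaddr *b)
{
	return a->ip == b->ip && !memcmp(a->mac, b->mac, sizeof(a->mac));
}

/* Add: bump the refcount on a full match, refuse a same-IP conflict. */
static int add_ip(struct ipaddr *bucket, struct ipaddr *query)
{
	struct ipaddr *addr = find_addr_by_ip(bucket, query->ip);

	if (addr) {
		if (addr_match_all(addr, query)) {
			addr->ref_counter++;
			return 0;
		}
		return -EADDRINUSE;	/* same IP, conflicting attributes */
	}
	/* ...insert a fresh entry here... */
	return 0;
}

int main(void)
{
	struct ipaddr a = { .ip = 0x0a000001, .mac = { 1 }, .ref_counter = 1 };
	struct ipaddr dup = a;
	struct ipaddr clash = { .ip = 0x0a000001, .mac = { 2 } };

	printf("dup:   %d\n", add_ip(&a, &dup));	/* 0 */
	printf("clash: %d\n", add_ip(&a, &clash));	/* -EADDRINUSE */
	return 0;
}
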
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index e30e29ae4819..45657e2b1ff7 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
+ {
+ 	struct page *page[1];
+ 	struct vm_area_struct *vma;
++	struct vm_area_struct *vmas[1];
+ 	int ret;
+ 
+ 	if (mm == current->mm) {
+-		ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
+-					  page);
++		ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
++					      page, vmas);
+ 	} else {
+ 		unsigned int flags = 0;
+ 
+@@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
+ 
+ 		down_read(&mm->mmap_sem);
+ 		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
+-					    NULL, NULL);
++					    vmas, NULL);
++		/*
++		 * The lifetime of a vaddr_get_pfn() page pin is
++		 * userspace-controlled. In the fs-dax case this could
++		 * lead to indefinite stalls in filesystem operations.
++		 * Disallow attempts to pin fs-dax pages via this
++		 * interface.
++		 */
++		if (ret > 0 && vma_is_fsdax(vmas[0])) {
++			ret = -EOPNOTSUPP;
++			put_page(page[0]);
++		}
+ 		up_read(&mm->mmap_sem);
+ 	}
+ 
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index a28bba801264..27baaff96880 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -423,7 +423,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
+ {
+ 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
++	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->nodesize);
+ }
+ 
+ BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
+@@ -433,8 +433,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
+ {
+ 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%u\n",
+-			fs_info->super_copy->sectorsize);
++	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
+ }
+ 
+ BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
+@@ -444,8 +443,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
+ {
+ 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%u\n",
+-			fs_info->super_copy->sectorsize);
++	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
+ }
+ 
+ BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 5a8c2649af2f..10d12b3de001 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1723,19 +1723,23 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)
+ 
+ 	super = fs_info->super_copy;
+ 
++	/* update latest btrfs_super_block::chunk_root refs */
+ 	root_item = &fs_info->chunk_root->root_item;
+-	super->chunk_root = root_item->bytenr;
+-	super->chunk_root_generation = root_item->generation;
+-	super->chunk_root_level = root_item->level;
++	btrfs_set_super_chunk_root(super, root_item->bytenr);
++	btrfs_set_super_chunk_root_generation(super, root_item->generation);
++	btrfs_set_super_chunk_root_level(super, root_item->level);
+ 
++	/* update latest btrfs_super_block::root refs */
+ 	root_item = &fs_info->tree_root->root_item;
+-	super->root = root_item->bytenr;
+-	super->generation = root_item->generation;
+-	super->root_level = root_item->level;
++	btrfs_set_super_root(super, root_item->bytenr);
++	btrfs_set_super_generation(super, root_item->generation);
++	btrfs_set_super_root_level(super, root_item->level);
++
+ 	if (btrfs_test_opt(fs_info, SPACE_CACHE))
+-		super->cache_generation = root_item->generation;
++		btrfs_set_super_cache_generation(super, root_item->generation);
+ 	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
+-		super->uuid_tree_generation = root_item->generation;
++		btrfs_set_super_uuid_tree_generation(super,
++						     root_item->generation);
+ }
+ 
+ int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index 3aafb3343a65..b76110e96d62 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -1252,8 +1252,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ 	 */
+ 	if (dio->is_async && iov_iter_rw(iter) == WRITE) {
+ 		retval = 0;
+-		if ((iocb->ki_filp->f_flags & O_DSYNC) ||
+-		    IS_SYNC(iocb->ki_filp->f_mapping->host))
++		if (iocb->ki_flags & IOCB_DSYNC)
+ 			retval = dio_set_defer_completion(dio);
+ 		else if (!dio->inode->i_sb->s_dio_done_wq) {
+ 			/*
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 511fbaabf624..79421287ff5e 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3204,7 +3204,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
+ 	if (!vma_is_dax(vma))
+ 		return false;
+ 	inode = file_inode(vma->vm_file);
+-	if (inode->i_mode == S_IFCHR)
++	if (S_ISCHR(inode->i_mode))
+ 		return false; /* device-dax */
+ 	return true;
+ }
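
The one-line vma_is_fsdax() fix above is a classic mode-bits bug: i_mode carries permission bits alongside the file-type bits, so comparing the whole mode against S_IFCHR essentially never matches, and device-dax mappings were misclassified. S_ISCHR() masks with S_IFMT before comparing. A standalone demonstration:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* A typical character-device inode mode: type bits plus rw-r--r--. */
	mode_t mode = S_IFCHR | 0644;

	printf("mode == S_IFCHR : %d\n", mode == S_IFCHR);  /* 0: never true */
	printf("S_ISCHR(mode)   : %d\n", S_ISCHR(mode));    /* 1: masks S_IFMT first */
	return 0;
}
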
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index fbc98e2c8228..132e3f5a2e0d 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -72,7 +72,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ 	BUILD_BUG_ON(sizeof(_i) > sizeof(long));			\
+ 	BUILD_BUG_ON(sizeof(_s) > sizeof(long));			\
+ 									\
+-	_i &= _mask;							\
+-	_i;								\
++	(typeof(_i)) (_i & _mask);					\
+ })
+ #endif /* _LINUX_NOSPEC_H */
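
The nospec.h change above returns the masked index through a cast instead of assigning back into _i, keeping the macro argument unmodified and preserving the index's type. The core trick is branchless clamping: derive an all-ones/all-zero mask from the bounds check and AND it in. The sketch below mirrors the kernel's generic fallback in plain userspace C; it demonstrates only the masking arithmetic, not the architecture-specific speculation barriers real kernels use:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

/*
 * Branchless bounds mask in the shape of the generic kernel fallback:
 * ~0UL when index < size, 0UL when index >= size.  Relies on arithmetic
 * right shift of negative values (true on mainstream compilers; the
 * kernel provides per-architecture versions).
 */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

int main(void)
{
	int table[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	unsigned long i = 5, j = 9;

	/* The fix: mask and cast, never write back into the argument. */
	unsigned long safe_i = i & index_mask(i, 8);
	unsigned long safe_j = j & index_mask(j, 8);

	printf("%d %d\n", table[safe_i], table[safe_j]);   /* 15 10 */
	return 0;
}
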
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index dc82a07cb4fd..123cd703741d 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -819,6 +819,7 @@ void phy_device_remove(struct phy_device *phydev);
+ int phy_init_hw(struct phy_device *phydev);
+ int phy_suspend(struct phy_device *phydev);
+ int phy_resume(struct phy_device *phydev);
++int __phy_resume(struct phy_device *phydev);
+ int phy_loopback(struct phy_device *phydev, bool enable);
+ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
+ 			      phy_interface_t interface);
+diff --git a/include/net/udplite.h b/include/net/udplite.h
+index 81bdbf97319b..9185e45b997f 100644
+--- a/include/net/udplite.h
++++ b/include/net/udplite.h
+@@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
+ 		UDP_SKB_CB(skb)->cscov = cscov;
+ 		if (skb->ip_summed == CHECKSUM_COMPLETE)
+ 			skb->ip_summed = CHECKSUM_NONE;
++		skb->csum_valid = 0;
+         }
+ 
+ 	return 0;
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index aa9d2a2b1210..cf8e4df808cf 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1104,7 +1104,12 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ 
+ 	cpu_base = raw_cpu_ptr(&hrtimer_bases);
+ 
+-	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
++	/*
++	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
++	 * clock modifications, so they need to become CLOCK_MONOTONIC to
++	 * ensure POSIX compliance.
++	 */
++	if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
+ 		clock_id = CLOCK_MONOTONIC;
+ 
+ 	base = hrtimer_clockid_to_base(clock_id);
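
The hrtimer fix works because mode is a bag of flag bits, not an enumeration of exclusive values: HRTIMER_MODE_PINNED can be OR-ed into either variant, so `mode != HRTIMER_MODE_ABS` wrongly matched absolute pinned timers, while `mode & HRTIMER_MODE_REL` tests exactly the bit that encodes "relative". Illustrated with the flag values from 4.15's enum hrtimer_mode:

#include <stdio.h>

/* Stand-ins for the hrtimer mode bits (values as in 4.15). */
enum {
	HRTIMER_MODE_ABS	= 0x0,
	HRTIMER_MODE_REL	= 0x1,
	HRTIMER_MODE_PINNED	= 0x2,
	HRTIMER_MODE_ABS_PINNED	= HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
};

int main(void)
{
	int mode = HRTIMER_MODE_ABS_PINNED;

	/* Old test: wrongly treats an absolute pinned timer as relative. */
	printf("mode != ABS     : %d\n", mode != HRTIMER_MODE_ABS);    /* 1 */
	/* Fixed test: checks the one bit that encodes "relative". */
	printf("mode & MODE_REL : %d\n", !!(mode & HRTIMER_MODE_REL)); /* 0 */
	return 0;
}
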
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index 0bcf00e3ce48..e9eb29a0edc5 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1886,6 +1886,12 @@ int timers_dead_cpu(unsigned int cpu)
+ 		raw_spin_lock_irq(&new_base->lock);
+ 		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ 
++		/*
++		 * The current CPU's base clock might be stale. Update it
++		 * before moving the timers over.
++		 */
++		forward_timer_base(new_base);
++
+ 		BUG_ON(old_base->running_timer);
+ 
+ 		for (i = 0; i < WHEEL_SIZE; i++)
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 01c3957b2de6..062ac753a101 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -1849,7 +1849,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ {
+ 	const int default_width = 2 * sizeof(void *);
+ 
+-	if (!ptr && *fmt != 'K') {
++	if (!ptr && *fmt != 'K' && *fmt != 'x') {
+ 		/*
+ 		 * Print (null) with the same width as a pointer so it makes
+ 		 * tabular output look nice.
+diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
+index 0254c35b2bf0..126a8ea73c96 100644
+--- a/net/bridge/br_sysfs_if.c
++++ b/net/bridge/br_sysfs_if.c
+@@ -255,6 +255,9 @@ static ssize_t brport_show(struct kobject *kobj,
+ 	struct brport_attribute *brport_attr = to_brport_attr(attr);
+ 	struct net_bridge_port *p = to_brport(kobj);
+ 
++	if (!brport_attr->show)
++		return -EINVAL;
++
+ 	return brport_attr->show(p, buf);
+ }
+ 
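
The brport_show() hunk adds a guard because a bridge-port attribute may define only a store callback; reading such an attribute dispatched through a NULL show pointer. The defensive dispatch pattern in miniature (struct names are illustrative):

#include <errno.h>
#include <stdio.h>

struct attribute {
	const char *name;
	int (*show)(char *buf);		/* may legitimately be NULL */
};

static int attr_show(const struct attribute *attr, char *buf)
{
	if (!attr->show)		/* write-only attribute: refuse reads */
		return -EINVAL;
	return attr->show(buf);
}

int main(void)
{
	struct attribute wo = { .name = "flush", .show = NULL };
	char buf[64];

	printf("%d\n", attr_show(&wo, buf));	/* -EINVAL, not a crash */
	return 0;
}
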
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index 51935270c651..9896f4975353 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -168,6 +168,8 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
+ 		masterv = br_vlan_find(vg, vid);
+ 		if (WARN_ON(!masterv))
+ 			return NULL;
++		refcount_set(&masterv->refcnt, 1);
++		return masterv;
+ 	}
+ 	refcount_inc(&masterv->refcnt);
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index c8c102a3467f..a2a89acd0de8 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2366,8 +2366,11 @@ EXPORT_SYMBOL(netdev_set_num_tc);
+  */
+ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+ {
++	bool disabling;
+ 	int rc;
+ 
++	disabling = txq < dev->real_num_tx_queues;
++
+ 	if (txq < 1 || txq > dev->num_tx_queues)
+ 		return -EINVAL;
+ 
+@@ -2383,15 +2386,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+ 		if (dev->num_tc)
+ 			netif_setup_tc(dev, txq);
+ 
+-		if (txq < dev->real_num_tx_queues) {
++		dev->real_num_tx_queues = txq;
++
++		if (disabling) {
++			synchronize_net();
+ 			qdisc_reset_all_tx_gt(dev, txq);
+ #ifdef CONFIG_XPS
+ 			netif_reset_xps_queues_gt(dev, txq);
+ #endif
+ 		}
++	} else {
++		dev->real_num_tx_queues = txq;
+ 	}
+ 
+-	dev->real_num_tx_queues = txq;
+ 	return 0;
+ }
+ EXPORT_SYMBOL(netif_set_real_num_tx_queues);
+diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
+index 0a3f88f08727..98fd12721221 100644
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -66,6 +66,7 @@ struct net_rate_estimator {
+ static void est_fetch_counters(struct net_rate_estimator *e,
+ 			       struct gnet_stats_basic_packed *b)
+ {
++	memset(b, 0, sizeof(*b));
+ 	if (e->stats_lock)
+ 		spin_lock(e->stats_lock);
+ 
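
est_fetch_counters() now zeroes its output struct first, so every exit path leaves all counters defined even when no source is consulted. Zero-initializing an out-parameter at function entry is cheap insurance against reading stale stack contents later:

#include <stdio.h>
#include <string.h>

struct stats { unsigned long bytes, packets; };

static void fetch_counters(struct stats *b, int have_source)
{
	memset(b, 0, sizeof(*b));	/* all fields defined on every path */
	if (have_source) {
		b->bytes = 1500;
		b->packets = 1;
	}
}

int main(void)
{
	struct stats s;			/* deliberately uninitialized */

	fetch_counters(&s, 0);
	printf("%lu %lu\n", s.bytes, s.packets);   /* 0 0, never garbage */
	return 0;
}
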
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index c586597da20d..7d36a950d961 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -646,6 +646,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
+ 					    fi->fib_nh, cfg, extack))
+ 				return 1;
+ 		}
++#ifdef CONFIG_IP_ROUTE_CLASSID
++		if (cfg->fc_flow &&
++		    cfg->fc_flow != fi->fib_nh->nh_tclassid)
++			return 1;
++#endif
+ 		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
+ 		    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
+ 			return 0;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 4e153b23bcec..f746e49dd585 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -128,10 +128,13 @@ static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
+ static int ip_rt_error_cost __read_mostly	= HZ;
+ static int ip_rt_error_burst __read_mostly	= 5 * HZ;
+ static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
+-static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
++static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
+ static int ip_rt_min_advmss __read_mostly	= 256;
+ 
+ static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
++
++static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;
++
+ /*
+  *	Interface to generic destination cache.
+  */
+@@ -1829,6 +1832,8 @@ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
+ 				return skb_get_hash_raw(skb) >> 1;
+ 			memset(&hash_keys, 0, sizeof(hash_keys));
+ 			skb_flow_dissect_flow_keys(skb, &keys, flag);
++
++			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ 			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
+ 			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
+ 			hash_keys.ports.src = keys.ports.src;
+@@ -2934,7 +2939,8 @@ static struct ctl_table ipv4_route_table[] = {
+ 		.data		= &ip_rt_min_pmtu,
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &ip_min_valid_pmtu,
+ 	},
+ 	{
+ 		.procname	= "min_adv_mss",
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 45f750e85714..0228f494b0a5 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1977,11 +1977,6 @@ void tcp_enter_loss(struct sock *sk)
+ 	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
+ 	 * loss recovery is underway except recurring timeout(s) on
+ 	 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
+-	 *
+-	 * In theory F-RTO can be used repeatedly during loss recovery.
+-	 * In practice this interacts badly with broken middle-boxes that
+-	 * falsely raise the receive window, which results in repeated
+-	 * timeouts and stop-and-go behavior.
+ 	 */
+ 	tp->frto = net->ipv4.sysctl_tcp_frto &&
+ 		   (new_recovery || icsk->icsk_retransmits) &&
+@@ -2637,18 +2632,14 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
+ 	    tcp_try_undo_loss(sk, false))
+ 		return;
+ 
+-	/* The ACK (s)acks some never-retransmitted data meaning not all
+-	 * the data packets before the timeout were lost. Therefore we
+-	 * undo the congestion window and state. This is essentially
+-	 * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
+-	 * a retransmitted skb is permantly marked, we can apply such an
+-	 * operation even if F-RTO was not used.
+-	 */
+-	if ((flag & FLAG_ORIG_SACK_ACKED) &&
+-	    tcp_try_undo_loss(sk, tp->undo_marker))
+-		return;
+-
+ 	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
++		/* Step 3.b. A timeout is spurious if not all data are
++		 * lost, i.e., never-retransmitted data are (s)acked.
++		 */
++		if ((flag & FLAG_ORIG_SACK_ACKED) &&
++		    tcp_try_undo_loss(sk, true))
++			return;
++
+ 		if (after(tp->snd_nxt, tp->high_seq)) {
+ 			if (flag & FLAG_DATA_SACKED || is_dupack)
+ 				tp->frto = 0; /* Step 3.a. loss was real */
+@@ -3988,6 +3979,7 @@ void tcp_reset(struct sock *sk)
+ 	/* This barrier is coupled with smp_rmb() in tcp_poll() */
+ 	smp_wmb();
+ 
++	tcp_write_queue_purge(sk);
+ 	tcp_done(sk);
+ 
+ 	if (!sock_flag(sk, SOCK_DEAD))
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 94e28350f420..3b051b9b3743 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -705,7 +705,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ 	 */
+ 	if (sk) {
+ 		arg.bound_dev_if = sk->sk_bound_dev_if;
+-		trace_tcp_send_reset(sk, skb);
++		if (sk_fullsock(sk))
++			trace_tcp_send_reset(sk, skb);
+ 	}
+ 
+ 	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index a4d214c7b506..580912de16c2 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1730,7 +1730,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
+ 	 */
+ 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
+ 
+-	return min_t(u32, segs, sk->sk_gso_max_segs);
++	return segs;
+ }
+ EXPORT_SYMBOL(tcp_tso_autosize);
+ 
+@@ -1742,9 +1742,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
+ 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
+ 	u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
+ 
+-	return tso_segs ? :
+-		tcp_tso_autosize(sk, mss_now,
+-				 sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
++	if (!tso_segs)
++		tso_segs = tcp_tso_autosize(sk, mss_now,
++				sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
++	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
+ }
+ 
+ /* Returns the portion of skb which can be sent right away */
+@@ -2026,6 +2027,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
+ 	}
+ }
+ 
++static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
++{
++	struct sk_buff *skb, *next;
++
++	skb = tcp_send_head(sk);
++	tcp_for_write_queue_from_safe(skb, next, sk) {
++		if (len <= skb->len)
++			break;
++
++		if (unlikely(TCP_SKB_CB(skb)->eor))
++			return false;
++
++		len -= skb->len;
++	}
++
++	return true;
++}
++
+ /* Create a new MTU probe if we are ready.
+  * MTU probe is regularly attempting to increase the path MTU by
+  * deliberately sending larger packets.  This discovers routing
+@@ -2098,6 +2117,9 @@ static int tcp_mtu_probe(struct sock *sk)
+ 			return 0;
+ 	}
+ 
++	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
++		return -1;
++
+ 	/* We're allowed to probe.  Build it now. */
+ 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
+ 	if (!nskb)
+@@ -2133,6 +2155,10 @@ static int tcp_mtu_probe(struct sock *sk)
+ 			/* We've eaten all the data from this skb.
+ 			 * Throw it away. */
+ 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
++			/* If this is the last SKB we copy and eor is set,
++			 * we need to propagate it to the new skb.
++			 */
++			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
+ 			tcp_unlink_write_queue(skb, sk);
+ 			sk_wmem_free_skb(sk, skb);
+ 		} else {
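
The tcp_tso_segs() rework moves the sk_gso_max_segs clamp out of tcp_tso_autosize() and applies it to the final result, so a congestion-control module's tso_segs_goal() value is capped too, not just the autosized default. A sketch of why clamp placement matters, with hypothetical numbers:

#include <stdio.h>

#define GSO_MAX_SEGS 4	/* hypothetical per-socket cap */

static unsigned int autosize(void) { return 2; }	/* default path */
static unsigned int ca_goal(void)  { return 16; }	/* CC module's wish */

/* Old shape: only the default path was clamped. */
static unsigned int tso_segs_old(int have_ca_goal)
{
	unsigned int capped = autosize() < GSO_MAX_SEGS ? autosize()
							: GSO_MAX_SEGS;
	return have_ca_goal ? ca_goal() : capped;	/* 16 escapes the cap */
}

/* Fixed shape: pick a value first, clamp the result once. */
static unsigned int tso_segs_new(int have_ca_goal)
{
	unsigned int segs = have_ca_goal ? ca_goal() : autosize();

	return segs < GSO_MAX_SEGS ? segs : GSO_MAX_SEGS;
}

int main(void)
{
	printf("old: %u, new: %u\n", tso_segs_old(1), tso_segs_new(1)); /* 16, 4 */
	return 0;
}
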
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index e4ff25c947c5..590f9ed90c1f 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2031,6 +2031,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
+ 		err = udplite_checksum_init(skb, uh);
+ 		if (err)
+ 			return err;
++
++		if (UDP_SKB_CB(skb)->partial_cov) {
++			skb->csum = inet_compute_pseudo(skb, proto);
++			return 0;
++		}
+ 	}
+ 
+ 	/* Note, we are only interested in != 0 or == 0, thus the
+diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
+index ec43d18b5ff9..547515e8450a 100644
+--- a/net/ipv6/ip6_checksum.c
++++ b/net/ipv6/ip6_checksum.c
+@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
+ 		err = udplite_checksum_init(skb, uh);
+ 		if (err)
+ 			return err;
++
++		if (UDP_SKB_CB(skb)->partial_cov) {
++			skb->csum = ip6_compute_pseudo(skb, proto);
++			return 0;
++		}
+ 	}
+ 
+ 	/* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 3873d3877135..3a1775a62973 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -182,7 +182,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
+ #ifdef CONFIG_IPV6_SIT_6RD
+ 	struct ip_tunnel *t = netdev_priv(dev);
+ 
+-	if (t->dev == sitn->fb_tunnel_dev) {
++	if (dev == sitn->fb_tunnel_dev) {
+ 		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
+ 		t->ip6rd.relay_prefix = 0;
+ 		t->ip6rd.prefixlen = 16;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 7178476b3d2f..6378f6fbc89f 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -943,7 +943,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
+ 
+ 	if (sk) {
+ 		oif = sk->sk_bound_dev_if;
+-		trace_tcp_send_reset(sk, skb);
++		if (sk_fullsock(sk))
++			trace_tcp_send_reset(sk, skb);
+ 	}
+ 
+ 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 115918ad8eca..861b67c34191 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -136,51 +136,6 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
+ 
+ }
+ 
+-/* Lookup the tunnel socket, possibly involving the fs code if the socket is
+- * owned by userspace.  A struct sock returned from this function must be
+- * released using l2tp_tunnel_sock_put once you're done with it.
+- */
+-static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
+-{
+-	int err = 0;
+-	struct socket *sock = NULL;
+-	struct sock *sk = NULL;
+-
+-	if (!tunnel)
+-		goto out;
+-
+-	if (tunnel->fd >= 0) {
+-		/* Socket is owned by userspace, who might be in the process
+-		 * of closing it.  Look the socket up using the fd to ensure
+-		 * consistency.
+-		 */
+-		sock = sockfd_lookup(tunnel->fd, &err);
+-		if (sock)
+-			sk = sock->sk;
+-	} else {
+-		/* Socket is owned by kernelspace */
+-		sk = tunnel->sock;
+-		sock_hold(sk);
+-	}
+-
+-out:
+-	return sk;
+-}
+-
+-/* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */
+-static void l2tp_tunnel_sock_put(struct sock *sk)
+-{
+-	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+-	if (tunnel) {
+-		if (tunnel->fd >= 0) {
+-			/* Socket is owned by userspace */
+-			sockfd_put(sk->sk_socket);
+-		}
+-		sock_put(sk);
+-	}
+-	sock_put(sk);
+-}
+-
+ /* Session hash list.
+  * The session_id SHOULD be random according to RFC2661, but several
+  * L2TP implementations (Cisco and Microsoft) use incrementing
+@@ -193,6 +148,13 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
+ 	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
+ }
+ 
++void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
++{
++	sock_put(tunnel->sock);
++	/* the tunnel is freed in the socket destructor */
++}
++EXPORT_SYMBOL(l2tp_tunnel_free);
++
+ /* Lookup a tunnel. A new reference is held on the returned tunnel. */
+ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
+ {
+@@ -345,13 +307,11 @@ int l2tp_session_register(struct l2tp_session *session,
+ 			}
+ 
+ 		l2tp_tunnel_inc_refcount(tunnel);
+-		sock_hold(tunnel->sock);
+ 		hlist_add_head_rcu(&session->global_hlist, g_head);
+ 
+ 		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+ 	} else {
+ 		l2tp_tunnel_inc_refcount(tunnel);
+-		sock_hold(tunnel->sock);
+ 	}
+ 
+ 	hlist_add_head(&session->hlist, head);
+@@ -975,7 +935,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct l2tp_tunnel *tunnel;
+ 
+-	tunnel = l2tp_sock_to_tunnel(sk);
++	tunnel = l2tp_tunnel(sk);
+ 	if (tunnel == NULL)
+ 		goto pass_up;
+ 
+@@ -983,13 +943,10 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 		 tunnel->name, skb->len);
+ 
+ 	if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
+-		goto pass_up_put;
++		goto pass_up;
+ 
+-	sock_put(sk);
+ 	return 0;
+ 
+-pass_up_put:
+-	sock_put(sk);
+ pass_up:
+ 	return 1;
+ }
+@@ -1216,14 +1173,12 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
+ static void l2tp_tunnel_destruct(struct sock *sk)
+ {
+ 	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
+-	struct l2tp_net *pn;
+ 
+ 	if (tunnel == NULL)
+ 		goto end;
+ 
+ 	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
+ 
+-
+ 	/* Disable udp encapsulation */
+ 	switch (tunnel->encap) {
+ 	case L2TP_ENCAPTYPE_UDP:
+@@ -1240,18 +1195,11 @@ static void l2tp_tunnel_destruct(struct sock *sk)
+ 	sk->sk_destruct = tunnel->old_sk_destruct;
+ 	sk->sk_user_data = NULL;
+ 
+-	/* Remove the tunnel struct from the tunnel list */
+-	pn = l2tp_pernet(tunnel->l2tp_net);
+-	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+-	list_del_rcu(&tunnel->list);
+-	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+-
+-	tunnel->sock = NULL;
+-	l2tp_tunnel_dec_refcount(tunnel);
+-
+ 	/* Call the original destructor */
+ 	if (sk->sk_destruct)
+ 		(*sk->sk_destruct)(sk);
++
++	kfree_rcu(tunnel, rcu);
+ end:
+ 	return;
+ }
+@@ -1312,49 +1260,43 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
+ /* Tunnel socket destroy hook for UDP encapsulation */
+ static void l2tp_udp_encap_destroy(struct sock *sk)
+ {
+-	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+-	if (tunnel) {
+-		l2tp_tunnel_closeall(tunnel);
+-		sock_put(sk);
+-	}
++	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
++
++	if (tunnel)
++		l2tp_tunnel_delete(tunnel);
+ }
+ 
+ /* Workqueue tunnel deletion function */
+ static void l2tp_tunnel_del_work(struct work_struct *work)
+ {
+-	struct l2tp_tunnel *tunnel = NULL;
+-	struct socket *sock = NULL;
+-	struct sock *sk = NULL;
+-
+-	tunnel = container_of(work, struct l2tp_tunnel, del_work);
++	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
++						  del_work);
++	struct sock *sk = tunnel->sock;
++	struct socket *sock = sk->sk_socket;
++	struct l2tp_net *pn;
+ 
+ 	l2tp_tunnel_closeall(tunnel);
+ 
+-	sk = l2tp_tunnel_sock_lookup(tunnel);
+-	if (!sk)
+-		goto out;
+-
+-	sock = sk->sk_socket;
+-
+-	/* If the tunnel socket was created by userspace, then go through the
+-	 * inet layer to shut the socket down, and let userspace close it.
+-	 * Otherwise, if we created the socket directly within the kernel, use
++	/* If the tunnel socket was created within the kernel, use
+ 	 * the sk API to release it here.
+-	 * In either case the tunnel resources are freed in the socket
+-	 * destructor when the tunnel socket goes away.
+ 	 */
+-	if (tunnel->fd >= 0) {
+-		if (sock)
+-			inet_shutdown(sock, 2);
+-	} else {
++	if (tunnel->fd < 0) {
+ 		if (sock) {
+ 			kernel_sock_shutdown(sock, SHUT_RDWR);
+ 			sock_release(sock);
+ 		}
+ 	}
+ 
+-	l2tp_tunnel_sock_put(sk);
+-out:
++	/* Remove the tunnel struct from the tunnel list */
++	pn = l2tp_pernet(tunnel->l2tp_net);
++	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
++	list_del_rcu(&tunnel->list);
++	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
++
++	/* drop initial ref */
++	l2tp_tunnel_dec_refcount(tunnel);
++
++	/* drop workqueue ref */
+ 	l2tp_tunnel_dec_refcount(tunnel);
+ }
+ 
+@@ -1607,13 +1549,22 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ 		sk->sk_user_data = tunnel;
+ 	}
+ 
++	/* Bump the reference count. The tunnel context is deleted
++	 * only when this drops to zero. A reference is also held on
++	 * the tunnel socket to ensure that it is not released while
++	 * the tunnel is extant. Must be done before sk_destruct is
++	 * set.
++	 */
++	refcount_set(&tunnel->ref_count, 1);
++	sock_hold(sk);
++	tunnel->sock = sk;
++	tunnel->fd = fd;
++
+ 	/* Hook on the tunnel socket destructor so that we can cleanup
+ 	 * if the tunnel socket goes away.
+ 	 */
+ 	tunnel->old_sk_destruct = sk->sk_destruct;
+ 	sk->sk_destruct = &l2tp_tunnel_destruct;
+-	tunnel->sock = sk;
+-	tunnel->fd = fd;
+ 	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
+ 
+ 	sk->sk_allocation = GFP_ATOMIC;
+@@ -1623,11 +1574,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ 
+ 	/* Add tunnel to our list */
+ 	INIT_LIST_HEAD(&tunnel->list);
+-
+-	/* Bump the reference count. The tunnel context is deleted
+-	 * only when this drops to zero. Must be done before list insertion
+-	 */
+-	refcount_set(&tunnel->ref_count, 1);
+ 	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ 	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+ 	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+@@ -1668,8 +1614,6 @@ void l2tp_session_free(struct l2tp_session *session)
+ 
+ 	if (tunnel) {
+ 		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+-		sock_put(tunnel->sock);
+-		session->tunnel = NULL;
+ 		l2tp_tunnel_dec_refcount(tunnel);
+ 	}
+ 
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 9534e16965cc..8ecb1d357445 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -219,27 +219,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
+ 	return &session->priv[0];
+ }
+ 
+-static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
+-{
+-	struct l2tp_tunnel *tunnel;
+-
+-	if (sk == NULL)
+-		return NULL;
+-
+-	sock_hold(sk);
+-	tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
+-	if (tunnel == NULL) {
+-		sock_put(sk);
+-		goto out;
+-	}
+-
+-	BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+-
+-out:
+-	return tunnel;
+-}
+-
+ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
++void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+ 
+ struct l2tp_session *l2tp_session_get(const struct net *net,
+ 				      struct l2tp_tunnel *tunnel,
+@@ -288,7 +269,7 @@ static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
+ static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
+ {
+ 	if (refcount_dec_and_test(&tunnel->ref_count))
+-		kfree_rcu(tunnel, rcu);
++		l2tp_tunnel_free(tunnel);
+ }
+ 
+ /* Session reference counts. Incremented when code obtains a reference
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index ff61124fdf59..3428fba6f2b7 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -234,17 +234,13 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
+ static void l2tp_ip_destroy_sock(struct sock *sk)
+ {
+ 	struct sk_buff *skb;
+-	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
++	struct l2tp_tunnel *tunnel = sk->sk_user_data;
+ 
+ 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
+ 		kfree_skb(skb);
+ 
+-	if (tunnel) {
+-		l2tp_tunnel_closeall(tunnel);
+-		sock_put(sk);
+-	}
+-
+-	sk_refcnt_debug_dec(sk);
++	if (tunnel)
++		l2tp_tunnel_delete(tunnel);
+ }
+ 
+ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 192344688c06..6f009eaa5fbe 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -248,16 +248,14 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
+ 
+ static void l2tp_ip6_destroy_sock(struct sock *sk)
+ {
+-	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
++	struct l2tp_tunnel *tunnel = sk->sk_user_data;
+ 
+ 	lock_sock(sk);
+ 	ip6_flush_pending_frames(sk);
+ 	release_sock(sk);
+ 
+-	if (tunnel) {
+-		l2tp_tunnel_closeall(tunnel);
+-		sock_put(sk);
+-	}
++	if (tunnel)
++		l2tp_tunnel_delete(tunnel);
+ 
+ 	inet6_destroy_sock(sk);
+ }
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index b412fc3351dc..5ea718609fe8 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -416,20 +416,28 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+  * Session (and tunnel control) socket create/destroy.
+  *****************************************************************************/
+ 
++static void pppol2tp_put_sk(struct rcu_head *head)
++{
++	struct pppol2tp_session *ps;
++
++	ps = container_of(head, typeof(*ps), rcu);
++	sock_put(ps->__sk);
++}
++
+ /* Called by l2tp_core when a session socket is being closed.
+  */
+ static void pppol2tp_session_close(struct l2tp_session *session)
+ {
+-	struct sock *sk;
+-
+-	BUG_ON(session->magic != L2TP_SESSION_MAGIC);
++	struct pppol2tp_session *ps;
+ 
+-	sk = pppol2tp_session_get_sock(session);
+-	if (sk) {
+-		if (sk->sk_socket)
+-			inet_shutdown(sk->sk_socket, SEND_SHUTDOWN);
+-		sock_put(sk);
+-	}
++	ps = l2tp_session_priv(session);
++	mutex_lock(&ps->sk_lock);
++	ps->__sk = rcu_dereference_protected(ps->sk,
++					     lockdep_is_held(&ps->sk_lock));
++	RCU_INIT_POINTER(ps->sk, NULL);
++	if (ps->__sk)
++		call_rcu(&ps->rcu, pppol2tp_put_sk);
++	mutex_unlock(&ps->sk_lock);
+ }
+ 
+ /* Really kill the session socket. (Called from sock_put() if
+@@ -449,14 +457,6 @@ static void pppol2tp_session_destruct(struct sock *sk)
+ 	}
+ }
+ 
+-static void pppol2tp_put_sk(struct rcu_head *head)
+-{
+-	struct pppol2tp_session *ps;
+-
+-	ps = container_of(head, typeof(*ps), rcu);
+-	sock_put(ps->__sk);
+-}
+-
+ /* Called when the PPPoX socket (session) is closed.
+  */
+ static int pppol2tp_release(struct socket *sock)
+@@ -480,26 +480,17 @@ static int pppol2tp_release(struct socket *sock)
+ 	sock_orphan(sk);
+ 	sock->sk = NULL;
+ 
++	/* If the socket is associated with a session,
++	 * l2tp_session_delete will call pppol2tp_session_close which
++	 * will drop the session's ref on the socket.
++	 */
+ 	session = pppol2tp_sock_to_session(sk);
+-
+-	if (session != NULL) {
+-		struct pppol2tp_session *ps;
+-
++	if (session) {
+ 		l2tp_session_delete(session);
+-
+-		ps = l2tp_session_priv(session);
+-		mutex_lock(&ps->sk_lock);
+-		ps->__sk = rcu_dereference_protected(ps->sk,
+-						     lockdep_is_held(&ps->sk_lock));
+-		RCU_INIT_POINTER(ps->sk, NULL);
+-		mutex_unlock(&ps->sk_lock);
+-		call_rcu(&ps->rcu, pppol2tp_put_sk);
+-
+-		/* Rely on the sock_put() call at the end of the function for
+-		 * dropping the reference held by pppol2tp_sock_to_session().
+-		 * The last reference will be dropped by pppol2tp_put_sk().
+-		 */
++		/* drop the ref obtained by pppol2tp_sock_to_session */
++		sock_put(sk);
+ 	}
++
+ 	release_sock(sk);
+ 
+ 	/* This will delete the session context via
+@@ -796,6 +787,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 
+ out_no_ppp:
+ 	/* This is how we get the session context from the socket. */
++	sock_hold(sk);
+ 	sk->sk_user_data = session;
+ 	rcu_assign_pointer(ps->sk, sk);
+ 	mutex_unlock(&ps->sk_lock);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 84a4e4c3be4b..ca9c0544c856 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2275,7 +2275,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	if (cb->start) {
+ 		ret = cb->start(cb);
+ 		if (ret)
+-			goto error_unlock;
++			goto error_put;
+ 	}
+ 
+ 	nlk->cb_running = true;
+@@ -2295,6 +2295,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	 */
+ 	return -EINTR;
+ 
++error_put:
++	module_put(control->module);
+ error_unlock:
+ 	sock_put(sk);
+ 	mutex_unlock(nlk->cb_mutex);
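
The __netlink_dump_start() fix adds an error_put label so a failing cb->start() also drops the module reference taken earlier; previously only the socket reference and mutex were unwound. The idiom is one label per acquired resource, released in reverse order. A compact standalone sketch with counters standing in for the real refcounts:

#include <stdio.h>

static int module_refs, sock_refs;

static int try_module_get(void) { module_refs++; return 1; }
static void module_put(void)    { module_refs--; }
static void sock_hold(void)     { sock_refs++; }
static void sock_put(void)      { sock_refs--; }

static int dump_start(int start_fails)
{
	int ret = 0;

	sock_hold();
	if (!try_module_get()) {
		ret = -1;
		goto error_unlock;
	}
	if (start_fails) {		/* cb->start() failed */
		ret = -2;
		goto error_put;		/* the fix: also undo the module ref */
	}
	return 0;

error_put:
	module_put();
error_unlock:
	sock_put();
	return ret;
}

int main(void)
{
	dump_start(1);
	printf("module_refs=%d sock_refs=%d\n", module_refs, sock_refs); /* 0 0 */
	return 0;
}
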
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index d444daf1ac04..6f02499ef007 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1081,6 +1081,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+ {
+ 	struct sk_buff *tmp;
+ 	struct net *net, *prev = NULL;
++	bool delivered = false;
+ 	int err;
+ 
+ 	for_each_net_rcu(net) {
+@@ -1092,14 +1093,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+ 			}
+ 			err = nlmsg_multicast(prev->genl_sock, tmp,
+ 					      portid, group, flags);
+-			if (err)
++			if (!err)
++				delivered = true;
++			else if (err != -ESRCH)
+ 				goto error;
+ 		}
+ 
+ 		prev = net;
+ 	}
+ 
+-	return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
++	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
++	if (!err)
++		delivered = true;
++	else if (err != -ESRCH)
++		goto error;
++	return delivered ? 0 : -ESRCH;
+  error:
+ 	kfree_skb(skb);
+ 	return err;
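
genlmsg_mcast() now treats -ESRCH ("no listeners in this namespace") as non-fatal and reports success when at least one namespace received the message, failing with -ESRCH only if nobody did. The aggregation logic in isolation, with a stubbed per-namespace send:

#include <errno.h>
#include <stdio.h>

/* Stub: pretend only namespace 2 has listeners. */
static int send_to_ns(int ns) { return ns == 2 ? 0 : -ESRCH; }

static int mcast_all(int nr_ns)
{
	int delivered = 0;

	for (int ns = 0; ns < nr_ns; ns++) {
		int err = send_to_ns(ns);

		if (!err)
			delivered = 1;
		else if (err != -ESRCH)
			return err;	/* real error: propagate */
	}
	return delivered ? 0 : -ESRCH;	/* -ESRCH only if nobody listened */
}

int main(void)
{
	printf("%d\n", mcast_all(4));	/* 0: ns 2 got it, empty ns ignored */
	return 0;
}
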
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index 42410e910aff..cf73dc006c3b 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -445,7 +445,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ 					(char *)&opt, sizeof(opt));
+ 		if (ret == 0) {
+ 			ret = kernel_sendmsg(conn->params.local->socket, &msg,
+-					     iov, 1, iov[0].iov_len);
++					     iov, 2, len);
+ 
+ 			opt = IPV6_PMTUDISC_DO;
+ 			kernel_setsockopt(conn->params.local->socket,
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index e6b853f0ee4f..2e437bbd3358 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1054,13 +1054,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
+ 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
+ 			continue;
+ 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
+-				    index_start, &index))
++				    index_start, &index)) {
++			err = -EMSGSIZE;
+ 			break;
++		}
+ 	}
+ 
+ 	cb->args[0] = index;
+ 
+ out:
++	/* If we made no progress, the error (EMSGSIZE) is real */
++	if (skb->len == 0 && err)
++		return err;
+ 	return skb->len;
+ }
+ 
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index 33294b5b2c6a..425cc341fd41 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -397,10 +397,12 @@ static int u32_init(struct tcf_proto *tp)
+ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
+ 			   bool free_pf)
+ {
++	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
++
+ 	tcf_exts_destroy(&n->exts);
+ 	tcf_exts_put_net(&n->exts);
+-	if (n->ht_down)
+-		n->ht_down->refcnt--;
++	if (ht && --ht->refcnt == 0)
++		kfree(ht);
+ #ifdef CONFIG_CLS_U32_PERF
+ 	if (free_pf)
+ 		free_percpu(n->pf);
+@@ -653,16 +655,15 @@ static void u32_destroy(struct tcf_proto *tp)
+ 
+ 		hlist_del(&tp_c->hnode);
+ 
+-		for (ht = rtnl_dereference(tp_c->hlist);
+-		     ht;
+-		     ht = rtnl_dereference(ht->next)) {
+-			ht->refcnt--;
+-			u32_clear_hnode(tp, ht);
+-		}
+-
+ 		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
++			u32_clear_hnode(tp, ht);
+ 			RCU_INIT_POINTER(tp_c->hlist, ht->next);
+-			kfree_rcu(ht, rcu);
++
++			/* u32_destroy_key() will later free ht for us, if it's
++			 * still referenced by some knode
++			 */
++			if (--ht->refcnt == 0)
++				kfree_rcu(ht, rcu);
+ 		}
+ 
+ 		idr_destroy(&tp_c->handle_idr);
+@@ -928,7 +929,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 		if (TC_U32_KEY(n->handle) == 0)
+ 			return -EINVAL;
+ 
+-		if (n->flags != flags)
++		if ((n->flags ^ flags) &
++		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW))
+ 			return -EINVAL;
+ 
+ 		new = u32_init_knode(tp, n);
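
The cls_u32 hunks fix a leaked/dangling ht_down node by making every reference drop go through the same decrement-and-free test: the last holder frees, everyone else just decrements. The idiom standalone:

#include <stdio.h>
#include <stdlib.h>

struct hnode {
	int refcnt;
	int divisor;
};

/* Drop one reference; free only when the last one goes away. */
static void hnode_put(struct hnode *ht)
{
	if (ht && --ht->refcnt == 0) {
		printf("freeing hnode\n");
		free(ht);
	}
}

int main(void)
{
	struct hnode *ht = malloc(sizeof(*ht));

	ht->refcnt = 2;		/* one ref from the table, one from a knode */
	ht->divisor = 1;

	hnode_put(ht);		/* table ref dropped: still alive */
	hnode_put(ht);		/* knode ref dropped: "freeing hnode" */
	return 0;
}
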
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index 141c9c466ec1..0247cc432e02 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -897,15 +897,12 @@ int sctp_hash_transport(struct sctp_transport *t)
+ 	rhl_for_each_entry_rcu(transport, tmp, list, node)
+ 		if (transport->asoc->ep == t->asoc->ep) {
+ 			rcu_read_unlock();
+-			err = -EEXIST;
+-			goto out;
++			return -EEXIST;
+ 		}
+ 	rcu_read_unlock();
+ 
+ 	err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
+ 				  &t->node, sctp_hash_params);
+-
+-out:
+ 	if (err)
+ 		pr_err_once("insert transport fail, errno %d\n", err);
+ 
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 5d4c15bf66d2..e35d4f73d2df 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -326,8 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ 		final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+ 		bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
+ 
+-		if (!IS_ERR(bdst) &&
+-		    ipv6_chk_addr(dev_net(bdst->dev),
++		if (IS_ERR(bdst))
++			continue;
++
++		if (ipv6_chk_addr(dev_net(bdst->dev),
+ 				  &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
+ 			if (!IS_ERR_OR_NULL(dst))
+ 				dst_release(dst);
+@@ -336,8 +338,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ 		}
+ 
+ 		bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
+-		if (matchlen > bmatchlen)
++		if (matchlen > bmatchlen) {
++			dst_release(bdst);
+ 			continue;
++		}
+ 
+ 		if (!IS_ERR_OR_NULL(dst))
+ 			dst_release(dst);
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 6a38c2503649..91813e686c67 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -514,22 +514,20 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ 		if (IS_ERR(rt))
+ 			continue;
+ 
+-		if (!dst)
+-			dst = &rt->dst;
+-
+ 		/* Ensure the src address belongs to the output
+ 		 * interface.
+ 		 */
+ 		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
+ 				     false);
+ 		if (!odev || odev->ifindex != fl4->flowi4_oif) {
+-			if (&rt->dst != dst)
++			if (!dst)
++				dst = &rt->dst;
++			else
+ 				dst_release(&rt->dst);
+ 			continue;
+ 		}
+ 
+-		if (dst != &rt->dst)
+-			dst_release(dst);
++		dst_release(dst);
+ 		dst = &rt->dst;
+ 		break;
+ 	}
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 9bf575f2e8ed..ea4226e382f9 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -1378,9 +1378,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+ 	struct sctp_chunk *retval;
+ 	struct sk_buff *skb;
+ 	struct sock *sk;
++	int chunklen;
++
++	chunklen = SCTP_PAD4(sizeof(*chunk_hdr) + paylen);
++	if (chunklen > SCTP_MAX_CHUNK_LEN)
++		goto nodata;
+ 
+ 	/* No need to allocate LL here, as this is only a chunk. */
+-	skb = alloc_skb(SCTP_PAD4(sizeof(*chunk_hdr) + paylen), gfp);
++	skb = alloc_skb(chunklen, gfp);
+ 	if (!skb)
+ 		goto nodata;
+ 
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 736719c8314e..3a780337c393 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -45,17 +45,27 @@ MODULE_AUTHOR("Mellanox Technologies");
+ MODULE_DESCRIPTION("Transport Layer Security Support");
+ MODULE_LICENSE("Dual BSD/GPL");
+ 
++enum {
++	TLSV4,
++	TLSV6,
++	TLS_NUM_PROTS,
++};
++
+ enum {
+ 	TLS_BASE_TX,
+ 	TLS_SW_TX,
+ 	TLS_NUM_CONFIG,
+ };
+ 
+-static struct proto tls_prots[TLS_NUM_CONFIG];
++static struct proto *saved_tcpv6_prot;
++static DEFINE_MUTEX(tcpv6_prot_mutex);
++static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG];
+ 
+ static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
+ {
+-	sk->sk_prot = &tls_prots[ctx->tx_conf];
++	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
++
++	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf];
+ }
+ 
+ int wait_on_pending_writer(struct sock *sk, long *timeo)
+@@ -450,8 +460,21 @@ static int tls_setsockopt(struct sock *sk, int level, int optname,
+ 	return do_tls_setsockopt(sk, optname, optval, optlen);
+ }
+ 
++static void build_protos(struct proto *prot, struct proto *base)
++{
++	prot[TLS_BASE_TX] = *base;
++	prot[TLS_BASE_TX].setsockopt	= tls_setsockopt;
++	prot[TLS_BASE_TX].getsockopt	= tls_getsockopt;
++	prot[TLS_BASE_TX].close		= tls_sk_proto_close;
++
++	prot[TLS_SW_TX] = prot[TLS_BASE_TX];
++	prot[TLS_SW_TX].sendmsg		= tls_sw_sendmsg;
++	prot[TLS_SW_TX].sendpage	= tls_sw_sendpage;
++}
++
+ static int tls_init(struct sock *sk)
+ {
++	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct tls_context *ctx;
+ 	int rc = 0;
+@@ -476,6 +499,17 @@ static int tls_init(struct sock *sk)
+ 	ctx->getsockopt = sk->sk_prot->getsockopt;
+ 	ctx->sk_proto_close = sk->sk_prot->close;
+ 
++	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
++	if (ip_ver == TLSV6 &&
++	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
++		mutex_lock(&tcpv6_prot_mutex);
++		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
++			build_protos(tls_prots[TLSV6], sk->sk_prot);
++			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
++		}
++		mutex_unlock(&tcpv6_prot_mutex);
++	}
++
+ 	ctx->tx_conf = TLS_BASE_TX;
+ 	update_sk_prot(sk, ctx);
+ out:
+@@ -488,21 +522,9 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
+ 	.init			= tls_init,
+ };
+ 
+-static void build_protos(struct proto *prot, struct proto *base)
+-{
+-	prot[TLS_BASE_TX] = *base;
+-	prot[TLS_BASE_TX].setsockopt	= tls_setsockopt;
+-	prot[TLS_BASE_TX].getsockopt	= tls_getsockopt;
+-	prot[TLS_BASE_TX].close		= tls_sk_proto_close;
+-
+-	prot[TLS_SW_TX] = prot[TLS_BASE_TX];
+-	prot[TLS_SW_TX].sendmsg		= tls_sw_sendmsg;
+-	prot[TLS_SW_TX].sendpage	= tls_sw_sendpage;
+-}
+-
+ static int __init tls_register(void)
+ {
+-	build_protos(tls_prots, &tcp_prot);
++	build_protos(tls_prots[TLSV4], &tcp_prot);
+ 
+ 	tcp_register_ulp(&tcp_tls_ulp_ops);
+ 
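
The TLS rework keeps one proto table per address family and builds the IPv6 variant lazily on first use, since tcpv6_prot may come from a module whose address is unknown at registration time; a mutex plus smp_load_acquire/smp_store_release makes the one-time build safe. The sketch below shows the table shape and the double-checked build with a pthread mutex standing in; it omits the kernel's acquire/release ordering, so treat it as a single-threaded illustration only:

#include <pthread.h>
#include <stdio.h>

enum { TLSV4, TLSV6, NUM_PROTS };
enum { BASE_TX, SW_TX, NUM_CONFIG };

struct proto { const char *name; };

static struct proto tls_prots[NUM_PROTS][NUM_CONFIG];
static const struct proto *saved_v6_base;
static pthread_mutex_t v6_mutex = PTHREAD_MUTEX_INITIALIZER;

static void build_protos(struct proto *prot, const struct proto *base)
{
	prot[BASE_TX] = *base;		/* copy base, then override callbacks */
	prot[SW_TX] = prot[BASE_TX];
}

static void tls_init_for(int ip_ver, const struct proto *base)
{
	if (ip_ver == TLSV6 && base != saved_v6_base) {
		pthread_mutex_lock(&v6_mutex);
		if (base != saved_v6_base) {	/* double-checked build */
			build_protos(tls_prots[TLSV6], base);
			saved_v6_base = base;
		}
		pthread_mutex_unlock(&v6_mutex);
	}
}

int main(void)
{
	const struct proto tcpv6 = { "tcpv6" };

	tls_init_for(TLSV6, &tcpv6);
	tls_init_for(TLSV6, &tcpv6);	/* second call: no rebuild */
	printf("%s\n", tls_prots[TLSV6][SW_TX].name);
	return 0;
}
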
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 56b3e2d49c82..af7e6165e21e 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -888,7 +888,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
+ 
+ 	index_offset = snd_ctl_get_ioff(kctl, &control->id);
+ 	vd = &kctl->vd[index_offset];
+-	if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get == NULL)
++	if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
+ 		return -EPERM;
+ 
+ 	snd_ctl_build_ioff(&control->id, kctl, index_offset);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index c71dcacea807..96143df19b21 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -181,7 +181,7 @@ static const struct kernel_param_ops param_ops_xint = {
+ };
+ #define param_check_xint param_check_int
+ 
+-static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
++static int power_save = -1;
+ module_param(power_save, xint, 0644);
+ MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
+ 		 "(in second, 0 = disable).");
+@@ -2186,6 +2186,24 @@ static int azx_probe(struct pci_dev *pci,
+ 	return err;
+ }
+ 
++#ifdef CONFIG_PM
++/* On some boards setting power_save to a non-zero value leads to clicking /
++ * popping sounds whenever we enter/leave powersaving mode. Ideally we would
++ * figure out how to avoid these sounds, but that is not always feasible.
++ * So we keep a list of devices where we disable powersaving as it's known
++ * to cause problems on these devices.
++ */
++static struct snd_pci_quirk power_save_blacklist[] = {
++	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
++	SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
++	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
++	SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
++	SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
++	{}
++};
++#endif /* CONFIG_PM */
++
+ /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
+ static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
+ 	[AZX_DRIVER_NVIDIA] = 8,
+@@ -2198,6 +2216,7 @@ static int azx_probe_continue(struct azx *chip)
+ 	struct hdac_bus *bus = azx_bus(chip);
+ 	struct pci_dev *pci = chip->pci;
+ 	int dev = chip->dev_index;
++	int val;
+ 	int err;
+ 
+ 	hda->probe_continued = 1;
+@@ -2278,7 +2297,22 @@ static int azx_probe_continue(struct azx *chip)
+ 
+ 	chip->running = 1;
+ 	azx_add_card_list(chip);
+-	snd_hda_set_power_save(&chip->bus, power_save * 1000);
++
++	val = power_save;
++#ifdef CONFIG_PM
++	if (val == -1) {
++		const struct snd_pci_quirk *q;
++
++		val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
++		q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
++		if (q && val) {
++			dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
++				 q->subvendor, q->subdevice);
++			val = 0;
++		}
++	}
++#endif /* CONFIG_PM */
++	snd_hda_set_power_save(&chip->bus, val * 1000);
+ 	if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
+ 		pm_runtime_put_autosuspend(&pci->dev);
+ 
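
With the change above, power_save defaults to -1, meaning "use the Kconfig default unless the device is on the blacklist". A minimal subsystem-ID quirk lookup in the same spirit; the table entries echo two of the IDs above, and the default value is a stand-in:

#include <stdio.h>

struct quirk {
	unsigned short subvendor, subdevice;
	const char *name;
};

/* Zero-terminated table, like snd_pci_quirk lists. */
static const struct quirk power_save_denylist[] = {
	{ 0x1849, 0x0c0c, "Asrock B85M-ITX" },
	{ 0x1043, 0x8733, "Asus Prime X370-Pro" },
	{ 0 }
};

static const struct quirk *quirk_lookup(unsigned short sv, unsigned short sd)
{
	for (const struct quirk *q = power_save_denylist; q->subvendor; q++)
		if (q->subvendor == sv && q->subdevice == sd)
			return q;
	return NULL;
}

int main(void)
{
	int power_save = -1;		/* -1: defer to default plus quirks */
	int val = power_save;

	if (val == -1) {
		const struct quirk *q = quirk_lookup(0x1849, 0x0c0c);

		val = 1;		/* stand-in for the Kconfig default */
		if (q && val) {
			printf("%s is listed, forcing power_save=0\n", q->name);
			val = 0;
		}
	}
	printf("power_save=%d\n", val);
	return 0;
}
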
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 4ff1f0ca52fc..8fe38c18e29d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4875,13 +4875,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
+ 
+ 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
++		snd_hda_apply_pincfgs(codec, pincfgs);
++	} else if (action == HDA_FIXUP_ACT_INIT) {
+ 		/* Enable DOCK device */
+ 		snd_hda_codec_write(codec, 0x17, 0,
+ 			    AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
+ 		/* Enable DOCK device */
+ 		snd_hda_codec_write(codec, 0x19, 0,
+ 			    AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
+-		snd_hda_apply_pincfgs(codec, pincfgs);
+ 	}
+ }
+ 
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 8a59d4782a0f..69bf5cf1e91e 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3277,4 +3277,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 	}
+ },
+ 
++{
++	/*
++	 * Bower's & Wilkins PX headphones only support the 48 kHz sample rate
++	 * even though it advertises more. The capture interface doesn't work
++	 * even on windows.
++	 */
++	USB_DEVICE(0x19b5, 0x0021),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_MIXER,
++			},
++			/* Capture */
++			{
++				.ifnum = 1,
++				.type = QUIRK_IGNORE_INTERFACE,
++			},
++			/* Playback */
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S16_LE,
++					.channels = 2,
++					.iface = 2,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.attributes = UAC_EP_CS_ATTR_FILL_MAX |
++						UAC_EP_CS_ATTR_SAMPLE_RATE,
++					.endpoint = 0x03,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC,
++					.rates = SNDRV_PCM_RATE_48000,
++					.rate_min = 48000,
++					.rate_max = 48000,
++					.nr_rates = 1,
++					.rate_table = (unsigned int[]) {
++						48000
++					}
++				}
++			},
++		}
++	}
++},
++
+ #undef USB_DEVICE_VENDOR_SPEC
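
The quirk entry above leans on a C99 construction worth noting: `.data = &(const struct audioformat){ ... }` points at a compound literal, so deeply nested constant tables need no separately named objects; at file scope the literal has static storage duration, which makes its address usable in a static initializer. A self-contained example with invented types:

#include <stdio.h>

struct audioformat {
	unsigned int rate_min, rate_max, nr_rates;
	const unsigned int *rate_table;
};

struct quirk {
	int ifnum;
	const void *data;
};

/* Address of a file-scope compound literal: static storage duration. */
static const struct quirk playback_quirk = {
	.ifnum = 2,
	.data = &(const struct audioformat) {
		.rate_min = 48000,
		.rate_max = 48000,
		.nr_rates = 1,
		.rate_table = (const unsigned int[]) { 48000 },
	},
};

int main(void)
{
	const struct audioformat *fmt = playback_quirk.data;

	printf("ifnum %d, %u Hz\n", playback_quirk.ifnum, fmt->rate_min);
	return 0;
}
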
+diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
+index a0951505c7f5..697872d8308e 100644
+--- a/sound/x86/intel_hdmi_audio.c
++++ b/sound/x86/intel_hdmi_audio.c
+@@ -1827,6 +1827,8 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+ 		ctx->port = port;
+ 		ctx->pipe = -1;
+ 
++		spin_lock_init(&ctx->had_spinlock);
++		mutex_init(&ctx->mutex);
+ 		INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
+ 
+ 		ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS,
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 210bf820385a..e536977e7b6d 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -974,8 +974,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
+ 		/* Check for overlaps */
+ 		r = -EEXIST;
+ 		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
+-			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
+-			    (slot->id == id))
++			if (slot->id == id)
+ 				continue;
+ 			if (!((base_gfn + npages <= slot->base_gfn) ||
+ 			      (base_gfn >= slot->base_gfn + slot->npages)))