public inbox for gentoo-commits@lists.gentoo.org
From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sun, 24 Nov 2019 15:44:00 +0000 (UTC)
Message-ID: <1574610220.c80b40d79098833e09f11405179603a350680c8f.mpagano@gentoo>

commit:     c80b40d79098833e09f11405179603a350680c8f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Nov 24 15:43:40 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Nov 24 15:43:40 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c80b40d7

Linux patch 4.19.86

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1085_linux-4.19.86.patch | 8035 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8039 insertions(+)

diff --git a/0000_README b/0000_README
index ebd08d7..89fef48 100644
--- a/0000_README
+++ b/0000_README
@@ -379,6 +379,10 @@ Patch:  1084_linux-4.19.85.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.85
 
+Patch:  1085_linux-4.19.86.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.86
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1085_linux-4.19.86.patch b/1085_linux-4.19.86.patch
new file mode 100644
index 0000000..7237a30
--- /dev/null
+++ b/1085_linux-4.19.86.patch
@@ -0,0 +1,8035 @@
+diff --git a/Makefile b/Makefile
+index d6f7c5a323c0..feb0568e9535 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 85
++SUBLEVEL = 86
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+index 3b1baa8605a7..2214bfe7aa20 100644
+--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+@@ -92,13 +92,13 @@
+ 							reg = <0x40000 0xc0000>;
+ 						};
+ 
+-						bootloaderenv@0x100000 {
+-							label = "bootloader env";
++						bootloaderenvred@0x100000 {
++							label = "bootloader env redundant";
+ 							reg = <0x100000 0x40000>;
+ 						};
+ 
+-						bootloaderenvred@0x140000 {
+-							label = "bootloader env redundant";
++						bootloaderenv@0x140000 {
++							label = "bootloader env";
+ 							reg = <0x140000 0x40000>;
+ 						};
+ 
+diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+index 4b7c762d5f22..7d554b9ab27f 100644
+--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+@@ -252,7 +252,7 @@
+ 
+ 						rootfs@800000 {
+ 							label = "rootfs";
+-							reg = <0x800000 0x0f800000>;
++							reg = <0x800000 0x1f800000>;
+ 						};
+ 					};
+ 				};
+diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi
+index 4908ee07e628..993eabe1cf7a 100644
+--- a/arch/arm/boot/dts/at91sam9x5cm.dtsi
++++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi
+@@ -100,7 +100,7 @@
+ 
+ 						rootfs@800000 {
+ 							label = "rootfs";
+-							reg = <0x800000 0x1f800000>;
++							reg = <0x800000 0x0f800000>;
+ 						};
+ 					};
+ 				};
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index 9136b3cf9a2c..7ce24b282d42 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -336,6 +336,7 @@
+ 						<0 0 0 2 &pcie1_intc 2>,
+ 						<0 0 0 3 &pcie1_intc 3>,
+ 						<0 0 0 4 &pcie1_intc 4>;
++				ti,syscon-unaligned-access = <&scm_conf1 0x14 1>;
+ 				status = "disabled";
+ 				pcie1_intc: interrupt-controller {
+ 					interrupt-controller;
+@@ -387,6 +388,7 @@
+ 						<0 0 0 2 &pcie2_intc 2>,
+ 						<0 0 0 3 &pcie2_intc 3>,
+ 						<0 0 0 4 &pcie2_intc 4>;
++				ti,syscon-unaligned-access = <&scm_conf1 0x14 2>;
+ 				pcie2_intc: interrupt-controller {
+ 					interrupt-controller;
+ 					#address-cells = <0>;
+diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
+index 8b8db9d8e912..61a06f6add3c 100644
+--- a/arch/arm/boot/dts/omap5-board-common.dtsi
++++ b/arch/arm/boot/dts/omap5-board-common.dtsi
+@@ -703,6 +703,11 @@
+ 	vbus-supply = <&smps10_out1_reg>;
+ };
+ 
++&dwc3 {
++	extcon = <&extcon_usb3>;
++	dr_mode = "otg";
++};
++
+ &mcspi1 {
+ 
+ };
+diff --git a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
+index 30540dc8e0c5..bdda0d99128e 100644
+--- a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
++++ b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
+@@ -140,7 +140,7 @@
+ &external_mdio {
+ 	ext_rgmii_phy: ethernet-phy@1 {
+ 		compatible = "ethernet-phy-ieee802.3-c22";
+-		reg = <0>;
++		reg = <1>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+index fc6131315c47..4b1530ebe427 100644
+--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
++++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+@@ -816,7 +816,7 @@
+ 			clock-names = "apb", "ir";
+ 			resets = <&r_ccu RST_APB0_IR>;
+ 			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+-			reg = <0x01f02000 0x40>;
++			reg = <0x01f02000 0x400>;
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 746565a876dc..0465d65d23de 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -296,16 +296,15 @@ __sys_trace:
+ 	cmp	scno, #-1			@ skip the syscall?
+ 	bne	2b
+ 	add	sp, sp, #S_OFF			@ restore stack
+-	b	ret_slow_syscall
+ 
+-__sys_trace_return:
+-	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
++__sys_trace_return_nosave:
++	enable_irq_notrace
+ 	mov	r0, sp
+ 	bl	syscall_trace_exit
+ 	b	ret_slow_syscall
+ 
+-__sys_trace_return_nosave:
+-	enable_irq_notrace
++__sys_trace_return:
++	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+ 	mov	r0, sp
+ 	bl	syscall_trace_exit
+ 	b	ret_slow_syscall
+diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
+index 21ba0b29621b..4374020c824a 100644
+--- a/arch/arm64/lib/clear_user.S
++++ b/arch/arm64/lib/clear_user.S
+@@ -57,5 +57,6 @@ ENDPROC(__arch_clear_user)
+ 	.section .fixup,"ax"
+ 	.align	2
+ 9:	mov	x0, x2			// return the original size
++	uaccess_disable_not_uao x2, x3
+ 	ret
+ 	.previous
+diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
+index 20305d485046..96b22c0fa343 100644
+--- a/arch/arm64/lib/copy_from_user.S
++++ b/arch/arm64/lib/copy_from_user.S
+@@ -75,5 +75,6 @@ ENDPROC(__arch_copy_from_user)
+ 	.section .fixup,"ax"
+ 	.align	2
+ 9998:	sub	x0, end, dst			// bytes not copied
++	uaccess_disable_not_uao x3, x4
+ 	ret
+ 	.previous
+diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
+index 54b75deb1d16..e56c705f1f23 100644
+--- a/arch/arm64/lib/copy_in_user.S
++++ b/arch/arm64/lib/copy_in_user.S
+@@ -77,5 +77,6 @@ ENDPROC(__arch_copy_in_user)
+ 	.section .fixup,"ax"
+ 	.align	2
+ 9998:	sub	x0, end, dst			// bytes not copied
++	uaccess_disable_not_uao x3, x4
+ 	ret
+ 	.previous
+diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
+index fda6172d6b88..6b99b939c50f 100644
+--- a/arch/arm64/lib/copy_to_user.S
++++ b/arch/arm64/lib/copy_to_user.S
+@@ -74,5 +74,6 @@ ENDPROC(__arch_copy_to_user)
+ 	.section .fixup,"ax"
+ 	.align	2
+ 9998:	sub	x0, end, dst			// bytes not copied
++	uaccess_disable_not_uao x3, x4
+ 	ret
+ 	.previous
+diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
+index 146c04ceaa51..54529b4ed513 100644
+--- a/arch/arm64/mm/numa.c
++++ b/arch/arm64/mm/numa.c
+@@ -432,7 +432,7 @@ static int __init dummy_numa_init(void)
+ 	if (numa_off)
+ 		pr_info("NUMA disabled\n"); /* Forced off on command line. */
+ 	pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n",
+-		0LLU, PFN_PHYS(max_pfn) - 1);
++		memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1);
+ 
+ 	for_each_memblock(memory, mblk) {
+ 		ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size);
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 70f145e02487..7707990c4c16 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -984,10 +984,14 @@ static void register_decrementer_clockevent(int cpu)
+ 	*dec = decrementer_clockevent;
+ 	dec->cpumask = cpumask_of(cpu);
+ 
++	clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);
++
+ 	printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
+ 		    dec->name, dec->mult, dec->shift, cpu);
+ 
+-	clockevents_register_device(dec);
++	/* Set values for KVM, see kvm_emulate_dec() */
++	decrementer_clockevent.mult = dec->mult;
++	decrementer_clockevent.shift = dec->shift;
+ }
+ 
+ static void enable_large_decrementer(void)
+@@ -1035,18 +1039,7 @@ static void __init set_decrementer_max(void)
+ 
+ static void __init init_decrementer_clockevent(void)
+ {
+-	int cpu = smp_processor_id();
+-
+-	clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);
+-
+-	decrementer_clockevent.max_delta_ns =
+-		clockevent_delta2ns(decrementer_max, &decrementer_clockevent);
+-	decrementer_clockevent.max_delta_ticks = decrementer_max;
+-	decrementer_clockevent.min_delta_ns =
+-		clockevent_delta2ns(2, &decrementer_clockevent);
+-	decrementer_clockevent.min_delta_ticks = 2;
+-
+-	register_decrementer_clockevent(cpu);
++	register_decrementer_clockevent(smp_processor_id());
+ }
+ 
+ void secondary_cpu_time_init(void)
+diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
+index 281f074581a3..cc05f346e042 100644
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -78,8 +78,11 @@ void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
+ {
+ 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
+ 		ulong pc = kvmppc_get_pc(vcpu);
++		ulong lr = kvmppc_get_lr(vcpu);
+ 		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
+ 			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
++		if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
++			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
+ 		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
+ 	}
+ }
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 07a8004c3c23..65486c3d029b 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -401,7 +401,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
+ 	long ret;
+ 
+ 	if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
+-		return H_HARDWARE;
++		return H_TOO_HARD;
+ 
+ 	if (dir == DMA_NONE)
+ 		return H_SUCCESS;
+@@ -449,15 +449,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
+ 		return H_TOO_HARD;
+ 
+ 	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
+-		return H_HARDWARE;
++		return H_TOO_HARD;
+ 
+ 	if (mm_iommu_mapped_inc(mem))
+-		return H_CLOSED;
++		return H_TOO_HARD;
+ 
+ 	ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
+ 	if (WARN_ON_ONCE(ret)) {
+ 		mm_iommu_mapped_dec(mem);
+-		return H_HARDWARE;
++		return H_TOO_HARD;
+ 	}
+ 
+ 	if (dir != DMA_NONE)
+diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
+index eb8b11515a7f..d258ed4ef77c 100644
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -300,10 +300,10 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
+ 
+ 	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
+ 			&hpa)))
+-		return H_HARDWARE;
++		return H_TOO_HARD;
+ 
+ 	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
+-		return H_CLOSED;
++		return H_TOO_HARD;
+ 
+ 	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
+ 	if (ret) {
+@@ -501,7 +501,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ 
+ 		rmap = (void *) vmalloc_to_phys(rmap);
+ 		if (WARN_ON_ONCE_RM(!rmap))
+-			return H_HARDWARE;
++			return H_TOO_HARD;
+ 
+ 		/*
+ 		 * Synchronize with the MMU notifier callbacks in
+diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
+index 62be0e5732b7..796ff5de26d0 100644
+--- a/arch/powerpc/mm/tlb-radix.c
++++ b/arch/powerpc/mm/tlb-radix.c
+@@ -429,6 +429,7 @@ static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
+ 		__tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB);
+ 
+ 	asm volatile("ptesync": : :"memory");
++	asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+ }
+ 
+ 
+diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
+index 18014cdeb590..ef6595153642 100644
+--- a/arch/powerpc/platforms/pseries/dtl.c
++++ b/arch/powerpc/platforms/pseries/dtl.c
+@@ -149,7 +149,7 @@ static int dtl_start(struct dtl *dtl)
+ 
+ 	/* Register our dtl buffer with the hypervisor. The HV expects the
+ 	 * buffer size to be passed in the second word of the buffer */
+-	((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;
++	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);
+ 
+ 	hwcpu = get_hard_smp_processor_id(dtl->cpu);
+ 	addr = __pa(dtl->buf);
+@@ -184,7 +184,7 @@ static void dtl_stop(struct dtl *dtl)
+ 
+ static u64 dtl_current_index(struct dtl *dtl)
+ {
+-	return lppaca_of(dtl->cpu).dtl_idx;
++	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
+ }
+ #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+ 
+diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
+index 0b24b1031221..f3af53abd40f 100644
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -1009,12 +1009,13 @@ static void xive_ipi_eoi(struct irq_data *d)
+ {
+ 	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+ 
+-	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
+-		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
+-
+ 	/* Handle possible race with unplug and drop stale IPIs */
+ 	if (!xc)
+ 		return;
++
++	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
++		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
++
+ 	xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
+ 	xive_do_queue_eoi(xc);
+ }
+diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
+index 9e6668ee93de..f6a9b0c20355 100644
+--- a/arch/s390/boot/Makefile
++++ b/arch/s390/boot/Makefile
+@@ -6,6 +6,7 @@
+ KCOV_INSTRUMENT := n
+ GCOV_PROFILE := n
+ UBSAN_SANITIZE := n
++KASAN_SANITIZE := n
+ 
+ KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
+ KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
+diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
+index b375c6c5ae7b..9b3d821e5b46 100644
+--- a/arch/s390/boot/compressed/Makefile
++++ b/arch/s390/boot/compressed/Makefile
+@@ -8,6 +8,7 @@
+ KCOV_INSTRUMENT := n
+ GCOV_PROFILE := n
+ UBSAN_SANITIZE := n
++KASAN_SANITIZE := n
+ 
+ obj-y	:= $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o
+ targets	:= vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
+diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
+index b205c0ff0b22..762fc45376ff 100644
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -23,6 +23,8 @@ KCOV_INSTRUMENT_early_nobss.o	:= n
+ UBSAN_SANITIZE_early.o		:= n
+ UBSAN_SANITIZE_early_nobss.o	:= n
+ 
++KASAN_SANITIZE_early_nobss.o	:= n
++
+ #
+ # Passing null pointers is ok for smp code, since we access the lowcore here.
+ #
+diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
+index 04dd3e2c3bd9..e76309fbbcb3 100644
+--- a/arch/s390/kernel/vdso32/Makefile
++++ b/arch/s390/kernel/vdso32/Makefile
+@@ -28,9 +28,10 @@ obj-y += vdso32_wrapper.o
+ extra-y += vdso32.lds
+ CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
+ 
+-# Disable gcov profiling and ubsan for VDSO code
++# Disable gcov profiling, ubsan and kasan for VDSO code
+ GCOV_PROFILE := n
+ UBSAN_SANITIZE := n
++KASAN_SANITIZE := n
+ 
+ # Force dependency (incbin is bad)
+ $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
+diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
+index ddebc26cd949..f849ac61c5da 100644
+--- a/arch/s390/kernel/vdso64/Makefile
++++ b/arch/s390/kernel/vdso64/Makefile
+@@ -28,9 +28,10 @@ obj-y += vdso64_wrapper.o
+ extra-y += vdso64.lds
+ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
+ 
+-# Disable gcov profiling and ubsan for VDSO code
++# Disable gcov profiling, ubsan and kasan for VDSO code
+ GCOV_PROFILE := n
+ UBSAN_SANITIZE := n
++KASAN_SANITIZE := n
+ 
+ # Force dependency (incbin is bad)
+ $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
+diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
+index 57ab40188d4b..5418d10dc2a8 100644
+--- a/arch/s390/lib/Makefile
++++ b/arch/s390/lib/Makefile
+@@ -9,5 +9,9 @@ lib-$(CONFIG_SMP) += spinlock.o
+ lib-$(CONFIG_KPROBES) += probes.o
+ lib-$(CONFIG_UPROBES) += probes.o
+ 
++# Instrumenting memory accesses to __user data (in different address space)
++# produce false positives
++KASAN_SANITIZE_uaccess.o := n
++
+ chkbss := mem.o
+ include $(srctree)/arch/s390/scripts/Makefile.chkbss
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 5726b264036f..af35f5caadbe 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2771,8 +2771,7 @@ config OLPC
+ 
+ config OLPC_XO1_PM
+ 	bool "OLPC XO-1 Power Management"
+-	depends on OLPC && MFD_CS5535 && PM_SLEEP
+-	select MFD_CORE
++	depends on OLPC && MFD_CS5535=y && PM_SLEEP
+ 	---help---
+ 	  Add support for poweroff and suspend of the OLPC XO-1 laptop.
+ 
+diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
+index f327236f0fa7..5125fca472bb 100644
+--- a/arch/x86/include/asm/kexec.h
++++ b/arch/x86/include/asm/kexec.h
+@@ -67,7 +67,7 @@ struct kimage;
+ 
+ /* Memory to backup during crash kdump */
+ #define KEXEC_BACKUP_SRC_START	(0UL)
+-#define KEXEC_BACKUP_SRC_END	(640 * 1024UL)	/* 640K */
++#define KEXEC_BACKUP_SRC_END	(640 * 1024UL - 1)	/* 640K */
+ 
+ /*
+  * CPU does not save ss and sp on stack if execution is already
+diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
+index abb71ac70443..cc43c5abd187 100644
+--- a/arch/x86/kernel/cpu/intel_rdt.c
++++ b/arch/x86/kernel/cpu/intel_rdt.c
+@@ -421,7 +421,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
+ 	struct list_head *l;
+ 
+ 	if (id < 0)
+-		return ERR_PTR(id);
++		return ERR_PTR(-ENODEV);
+ 
+ 	list_for_each(l, &r->domains) {
+ 		d = list_entry(l, struct rdt_domain, list);
+diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+index 627e5c809b33..968ace3c6d73 100644
+--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
++++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+@@ -459,7 +459,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
+ 
+ 	r = &rdt_resources_all[resid];
+ 	d = rdt_find_domain(r, domid, NULL);
+-	if (!d) {
++	if (IS_ERR_OR_NULL(d)) {
+ 		ret = -ENOENT;
+ 		goto out;
+ 	}
+diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+index 2013699a5c54..ad64031e82dc 100644
+--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+@@ -965,7 +965,78 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
+ }
+ 
+ /**
+- * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
++ * rdt_cdp_peer_get - Retrieve CDP peer if it exists
++ * @r: RDT resource to which RDT domain @d belongs
++ * @d: Cache instance for which a CDP peer is requested
++ * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
++ *         Used to return the result.
++ * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
++ *         Used to return the result.
++ *
++ * RDT resources are managed independently and by extension the RDT domains
++ * (RDT resource instances) are managed independently also. The Code and
++ * Data Prioritization (CDP) RDT resources, while managed independently,
++ * could refer to the same underlying hardware. For example,
++ * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
++ *
++ * When provided with an RDT resource @r and an instance of that RDT
++ * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
++ * resource and the exact instance that shares the same hardware.
++ *
++ * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
++ *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
++ *         and @d_cdp will point to the peer RDT domain.
++ */
++static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
++			    struct rdt_resource **r_cdp,
++			    struct rdt_domain **d_cdp)
++{
++	struct rdt_resource *_r_cdp = NULL;
++	struct rdt_domain *_d_cdp = NULL;
++	int ret = 0;
++
++	switch (r->rid) {
++	case RDT_RESOURCE_L3DATA:
++		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
++		break;
++	case RDT_RESOURCE_L3CODE:
++		_r_cdp =  &rdt_resources_all[RDT_RESOURCE_L3DATA];
++		break;
++	case RDT_RESOURCE_L2DATA:
++		_r_cdp =  &rdt_resources_all[RDT_RESOURCE_L2CODE];
++		break;
++	case RDT_RESOURCE_L2CODE:
++		_r_cdp =  &rdt_resources_all[RDT_RESOURCE_L2DATA];
++		break;
++	default:
++		ret = -ENOENT;
++		goto out;
++	}
++
++	/*
++	 * When a new CPU comes online and CDP is enabled then the new
++	 * RDT domains (if any) associated with both CDP RDT resources
++	 * are added in the same CPU online routine while the
++	 * rdtgroup_mutex is held. It should thus not happen for one
++	 * RDT domain to exist and be associated with its RDT CDP
++	 * resource but there is no RDT domain associated with the
++	 * peer RDT CDP resource. Hence the WARN.
++	 */
++	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
++	if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
++		_r_cdp = NULL;
++		ret = -EINVAL;
++	}
++
++out:
++	*r_cdp = _r_cdp;
++	*d_cdp = _d_cdp;
++
++	return ret;
++}
++
++/**
++ * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
+  * @r: Resource to which domain instance @d belongs.
+  * @d: The domain instance for which @closid is being tested.
+  * @cbm: Capacity bitmask being tested.
+@@ -984,8 +1055,8 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
+  *
+  * Return: false if CBM does not overlap, true if it does.
+  */
+-bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+-			   unsigned long cbm, int closid, bool exclusive)
++static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
++				    unsigned long cbm, int closid, bool exclusive)
+ {
+ 	enum rdtgrp_mode mode;
+ 	unsigned long ctrl_b;
+@@ -1020,6 +1091,41 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+ 	return false;
+ }
+ 
++/**
++ * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
++ * @r: Resource to which domain instance @d belongs.
++ * @d: The domain instance for which @closid is being tested.
++ * @cbm: Capacity bitmask being tested.
++ * @closid: Intended closid for @cbm.
++ * @exclusive: Only check if overlaps with exclusive resource groups
++ *
++ * Resources that can be allocated using a CBM can use the CBM to control
++ * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test
++ * for overlap. Overlap test is not limited to the specific resource for
++ * which the CBM is intended though - when dealing with CDP resources that
++ * share the underlying hardware the overlap check should be performed on
++ * the CDP resource sharing the hardware also.
++ *
++ * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
++ * overlap test.
++ *
++ * Return: true if CBM overlap detected, false if there is no overlap
++ */
++bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
++			   unsigned long cbm, int closid, bool exclusive)
++{
++	struct rdt_resource *r_cdp;
++	struct rdt_domain *d_cdp;
++
++	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
++		return true;
++
++	if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
++		return false;
++
++	return  __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
++}
++
+ /**
+  * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
+  *
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index 516ec7586a5f..8d4d50645310 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -40,6 +40,7 @@
+ #include <asm/hw_breakpoint.h>
+ #include <asm/traps.h>
+ #include <asm/syscall.h>
++#include <asm/mmu_context.h>
+ 
+ #include "tls.h"
+ 
+@@ -343,6 +344,49 @@ static int set_segment_reg(struct task_struct *task,
+ 	return 0;
+ }
+ 
++static unsigned long task_seg_base(struct task_struct *task,
++				   unsigned short selector)
++{
++	unsigned short idx = selector >> 3;
++	unsigned long base;
++
++	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
++		if (unlikely(idx >= GDT_ENTRIES))
++			return 0;
++
++		/*
++		 * There are no user segments in the GDT with nonzero bases
++		 * other than the TLS segments.
++		 */
++		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++			return 0;
++
++		idx -= GDT_ENTRY_TLS_MIN;
++		base = get_desc_base(&task->thread.tls_array[idx]);
++	} else {
++#ifdef CONFIG_MODIFY_LDT_SYSCALL
++		struct ldt_struct *ldt;
++
++		/*
++		 * If performance here mattered, we could protect the LDT
++		 * with RCU.  This is a slow path, though, so we can just
++		 * take the mutex.
++		 */
++		mutex_lock(&task->mm->context.lock);
++		ldt = task->mm->context.ldt;
++		if (unlikely(idx >= ldt->nr_entries))
++			base = 0;
++		else
++			base = get_desc_base(ldt->entries + idx);
++		mutex_unlock(&task->mm->context.lock);
++#else
++		base = 0;
++#endif
++	}
++
++	return base;
++}
++
+ #endif	/* CONFIG_X86_32 */
+ 
+ static unsigned long get_flags(struct task_struct *task)
+@@ -436,18 +480,16 @@ static unsigned long getreg(struct task_struct *task, unsigned long offset)
+ 
+ #ifdef CONFIG_X86_64
+ 	case offsetof(struct user_regs_struct, fs_base): {
+-		/*
+-		 * XXX: This will not behave as expected if called on
+-		 * current or if fsindex != 0.
+-		 */
+-		return task->thread.fsbase;
++		if (task->thread.fsindex == 0)
++			return task->thread.fsbase;
++		else
++			return task_seg_base(task, task->thread.fsindex);
+ 	}
+ 	case offsetof(struct user_regs_struct, gs_base): {
+-		/*
+-		 * XXX: This will not behave as expected if called on
+-		 * current or if fsindex != 0.
+-		 */
+-		return task->thread.gsbase;
++		if (task->thread.gsindex == 0)
++			return task->thread.gsbase;
++		else
++			return task_seg_base(task, task->thread.gsindex);
+ 	}
+ #endif
+ 	}
+diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
+index c05a818224bb..abcb8d00b014 100644
+--- a/arch/x86/mm/dump_pagetables.c
++++ b/arch/x86/mm/dump_pagetables.c
+@@ -19,7 +19,9 @@
+ #include <linux/sched.h>
+ #include <linux/seq_file.h>
+ #include <linux/highmem.h>
++#include <linux/pci.h>
+ 
++#include <asm/e820/types.h>
+ #include <asm/pgtable.h>
+ 
+ /*
+@@ -238,6 +240,29 @@ static unsigned long normalize_addr(unsigned long u)
+ 	return (signed long)(u << shift) >> shift;
+ }
+ 
++static void note_wx(struct pg_state *st)
++{
++	unsigned long npages;
++
++	npages = (st->current_address - st->start_address) / PAGE_SIZE;
++
++#ifdef CONFIG_PCI_BIOS
++	/*
++	 * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
++	 * Inform about it, but avoid the warning.
++	 */
++	if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
++	    st->current_address <= PAGE_OFFSET + BIOS_END) {
++		pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
++		return;
++	}
++#endif
++	/* Account the WX pages */
++	st->wx_pages += npages;
++	WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
++		  (void *)st->start_address);
++}
++
+ /*
+  * This function gets called on a break in a continuous series
+  * of PTE entries; the next one is different so we need to
+@@ -273,14 +298,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
+ 		unsigned long delta;
+ 		int width = sizeof(unsigned long) * 2;
+ 
+-		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) {
+-			WARN_ONCE(1,
+-				  "x86/mm: Found insecure W+X mapping at address %p/%pS\n",
+-				  (void *)st->start_address,
+-				  (void *)st->start_address);
+-			st->wx_pages += (st->current_address -
+-					 st->start_address) / PAGE_SIZE;
+-		}
++		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
++			note_wx(st);
+ 
+ 		/*
+ 		 * Now print the actual finished series
+diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
+index 8f6cc71e0848..24d573bc550d 100644
+--- a/arch/x86/net/bpf_jit_comp32.c
++++ b/arch/x86/net/bpf_jit_comp32.c
+@@ -117,6 +117,8 @@ static bool is_simm32(s64 value)
+ #define IA32_JLE 0x7E
+ #define IA32_JG  0x7F
+ 
++#define COND_JMP_OPCODE_INVALID	(0xFF)
++
+ /*
+  * Map eBPF registers to IA32 32bit registers or stack scratch space.
+  *
+@@ -698,19 +700,12 @@ static inline void emit_ia32_neg64(const u8 dst[], bool dstk, u8 **pprog)
+ 		      STACK_VAR(dst_hi));
+ 	}
+ 
+-	/* xor ecx,ecx */
+-	EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
+-	/* sub dreg_lo,ecx */
+-	EMIT2(0x2B, add_2reg(0xC0, dreg_lo, IA32_ECX));
+-	/* mov dreg_lo,ecx */
+-	EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX));
+-
+-	/* xor ecx,ecx */
+-	EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
+-	/* sbb dreg_hi,ecx */
+-	EMIT2(0x19, add_2reg(0xC0, dreg_hi, IA32_ECX));
+-	/* mov dreg_hi,ecx */
+-	EMIT2(0x89, add_2reg(0xC0, dreg_hi, IA32_ECX));
++	/* neg dreg_lo */
++	EMIT2(0xF7, add_1reg(0xD8, dreg_lo));
++	/* adc dreg_hi,0x0 */
++	EMIT3(0x83, add_1reg(0xD0, dreg_hi), 0x00);
++	/* neg dreg_hi */
++	EMIT2(0xF7, add_1reg(0xD8, dreg_hi));
+ 
+ 	if (dstk) {
+ 		/* mov dword ptr [ebp+off],dreg_lo */
+@@ -729,9 +724,6 @@ static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
+ {
+ 	u8 *prog = *pprog;
+ 	int cnt = 0;
+-	static int jmp_label1 = -1;
+-	static int jmp_label2 = -1;
+-	static int jmp_label3 = -1;
+ 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+ 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ 
+@@ -750,78 +742,22 @@ static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
+ 		/* mov ecx,src_lo */
+ 		EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
+ 
+-	/* cmp ecx,32 */
+-	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+-	/* Jumps when >= 32 */
+-	if (is_imm8(jmp_label(jmp_label1, 2)))
+-		EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
+-	else
+-		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
+-
+-	/* < 32 */
+-	/* shl dreg_hi,cl */
+-	EMIT2(0xD3, add_1reg(0xE0, dreg_hi));
+-	/* mov ebx,dreg_lo */
+-	EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
++	/* shld dreg_hi,dreg_lo,cl */
++	EMIT3(0x0F, 0xA5, add_2reg(0xC0, dreg_hi, dreg_lo));
+ 	/* shl dreg_lo,cl */
+ 	EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
+ 
+-	/* IA32_ECX = -IA32_ECX + 32 */
+-	/* neg ecx */
+-	EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
+-	/* add ecx,32 */
+-	EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
+-
+-	/* shr ebx,cl */
+-	EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
+-	/* or dreg_hi,ebx */
+-	EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
+-
+-	/* goto out; */
+-	if (is_imm8(jmp_label(jmp_label3, 2)))
+-		EMIT2(0xEB, jmp_label(jmp_label3, 2));
+-	else
+-		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
++	/* if ecx >= 32, mov dreg_lo into dreg_hi and clear dreg_lo */
+ 
+-	/* >= 32 */
+-	if (jmp_label1 == -1)
+-		jmp_label1 = cnt;
+-
+-	/* cmp ecx,64 */
+-	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
+-	/* Jumps when >= 64 */
+-	if (is_imm8(jmp_label(jmp_label2, 2)))
+-		EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
+-	else
+-		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
++	/* cmp ecx,32 */
++	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
++	/* skip the next two instructions (4 bytes) when < 32 */
++	EMIT2(IA32_JB, 4);
+ 
+-	/* >= 32 && < 64 */
+-	/* sub ecx,32 */
+-	EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
+-	/* shl dreg_lo,cl */
+-	EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
+ 	/* mov dreg_hi,dreg_lo */
+ 	EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
+-
+-	/* xor dreg_lo,dreg_lo */
+-	EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
+-
+-	/* goto out; */
+-	if (is_imm8(jmp_label(jmp_label3, 2)))
+-		EMIT2(0xEB, jmp_label(jmp_label3, 2));
+-	else
+-		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
+-
+-	/* >= 64 */
+-	if (jmp_label2 == -1)
+-		jmp_label2 = cnt;
+ 	/* xor dreg_lo,dreg_lo */
+ 	EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
+-	/* xor dreg_hi,dreg_hi */
+-	EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
+-
+-	if (jmp_label3 == -1)
+-		jmp_label3 = cnt;
+ 
+ 	if (dstk) {
+ 		/* mov dword ptr [ebp+off],dreg_lo */
+@@ -841,9 +777,6 @@ static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
+ {
+ 	u8 *prog = *pprog;
+ 	int cnt = 0;
+-	static int jmp_label1 = -1;
+-	static int jmp_label2 = -1;
+-	static int jmp_label3 = -1;
+ 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+ 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ 
+@@ -862,78 +795,22 @@ static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
+ 		/* mov ecx,src_lo */
+ 		EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
+ 
+-	/* cmp ecx,32 */
+-	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+-	/* Jumps when >= 32 */
+-	if (is_imm8(jmp_label(jmp_label1, 2)))
+-		EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
+-	else
+-		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
+-
+-	/* < 32 */
+-	/* lshr dreg_lo,cl */
+-	EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
+-	/* mov ebx,dreg_hi */
+-	EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+-	/* ashr dreg_hi,cl */
++	/* shrd dreg_lo,dreg_hi,cl */
++	EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
++	/* sar dreg_hi,cl */
+ 	EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
+ 
+-	/* IA32_ECX = -IA32_ECX + 32 */
+-	/* neg ecx */
+-	EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
+-	/* add ecx,32 */
+-	EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
+-
+-	/* shl ebx,cl */
+-	EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
+-	/* or dreg_lo,ebx */
+-	EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
++	/* if ecx >= 32, mov dreg_hi to dreg_lo and set/clear dreg_hi depending on sign */
+ 
+-	/* goto out; */
+-	if (is_imm8(jmp_label(jmp_label3, 2)))
+-		EMIT2(0xEB, jmp_label(jmp_label3, 2));
+-	else
+-		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
+-
+-	/* >= 32 */
+-	if (jmp_label1 == -1)
+-		jmp_label1 = cnt;
+-
+-	/* cmp ecx,64 */
+-	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
+-	/* Jumps when >= 64 */
+-	if (is_imm8(jmp_label(jmp_label2, 2)))
+-		EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
+-	else
+-		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
++	/* cmp ecx,32 */
++	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
++	/* skip the next two instructions (5 bytes) when < 32 */
++	EMIT2(IA32_JB, 5);
+ 
+-	/* >= 32 && < 64 */
+-	/* sub ecx,32 */
+-	EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
+-	/* ashr dreg_hi,cl */
+-	EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
+ 	/* mov dreg_lo,dreg_hi */
+ 	EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
+-
+-	/* ashr dreg_hi,imm8 */
+-	EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
+-
+-	/* goto out; */
+-	if (is_imm8(jmp_label(jmp_label3, 2)))
+-		EMIT2(0xEB, jmp_label(jmp_label3, 2));
+-	else
+-		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
+-
+-	/* >= 64 */
+-	if (jmp_label2 == -1)
+-		jmp_label2 = cnt;
+-	/* ashr dreg_hi,imm8 */
++	/* sar dreg_hi,31 */
+ 	EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
+-	/* mov dreg_lo,dreg_hi */
+-	EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
+-
+-	if (jmp_label3 == -1)
+-		jmp_label3 = cnt;
+ 
+ 	if (dstk) {
+ 		/* mov dword ptr [ebp+off],dreg_lo */
+@@ -953,9 +830,6 @@ static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
+ {
+ 	u8 *prog = *pprog;
+ 	int cnt = 0;
+-	static int jmp_label1 = -1;
+-	static int jmp_label2 = -1;
+-	static int jmp_label3 = -1;
+ 	u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+ 	u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ 
+@@ -974,77 +848,23 @@ static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
+ 		/* mov ecx,src_lo */
+ 		EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
+ 
+-	/* cmp ecx,32 */
+-	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+-	/* Jumps when >= 32 */
+-	if (is_imm8(jmp_label(jmp_label1, 2)))
+-		EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
+-	else
+-		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
+-
+-	/* < 32 */
+-	/* lshr dreg_lo,cl */
+-	EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
+-	/* mov ebx,dreg_hi */
+-	EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
++	/* shrd dreg_lo,dreg_hi,cl */
++	EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
+ 	/* shr dreg_hi,cl */
+ 	EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
+ 
+-	/* IA32_ECX = -IA32_ECX + 32 */
+-	/* neg ecx */
+-	EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
+-	/* add ecx,32 */
+-	EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
+-
+-	/* shl ebx,cl */
+-	EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
+-	/* or dreg_lo,ebx */
+-	EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
++	/* if ecx >= 32, mov dreg_hi to dreg_lo and clear dreg_hi */
+ 
+-	/* goto out; */
+-	if (is_imm8(jmp_label(jmp_label3, 2)))
+-		EMIT2(0xEB, jmp_label(jmp_label3, 2));
+-	else
+-		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
+-
+-	/* >= 32 */
+-	if (jmp_label1 == -1)
+-		jmp_label1 = cnt;
+-	/* cmp ecx,64 */
+-	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
+-	/* Jumps when >= 64 */
+-	if (is_imm8(jmp_label(jmp_label2, 2)))
+-		EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
+-	else
+-		EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
++	/* cmp ecx,32 */
++	EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
++	/* skip the next two instructions (4 bytes) when < 32 */
++	EMIT2(IA32_JB, 4);
+ 
+-	/* >= 32 && < 64 */
+-	/* sub ecx,32 */
+-	EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
+-	/* shr dreg_hi,cl */
+-	EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
+ 	/* mov dreg_lo,dreg_hi */
+ 	EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
+ 	/* xor dreg_hi,dreg_hi */
+ 	EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
+ 
+-	/* goto out; */
+-	if (is_imm8(jmp_label(jmp_label3, 2)))
+-		EMIT2(0xEB, jmp_label(jmp_label3, 2));
+-	else
+-		EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
+-
+-	/* >= 64 */
+-	if (jmp_label2 == -1)
+-		jmp_label2 = cnt;
+-	/* xor dreg_lo,dreg_lo */
+-	EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
+-	/* xor dreg_hi,dreg_hi */
+-	EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
+-
+-	if (jmp_label3 == -1)
+-		jmp_label3 = cnt;
+-
+ 	if (dstk) {
+ 		/* mov dword ptr [ebp+off],dreg_lo */
+ 		EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
+@@ -1074,27 +894,10 @@ static inline void emit_ia32_lsh_i64(const u8 dst[], const u32 val,
+ 	}
+ 	/* Do LSH operation */
+ 	if (val < 32) {
+-		/* shl dreg_hi,imm8 */
+-		EMIT3(0xC1, add_1reg(0xE0, dreg_hi), val);
+-		/* mov ebx,dreg_lo */
+-		EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
++		/* shld dreg_hi,dreg_lo,imm8 */
++		EMIT4(0x0F, 0xA4, add_2reg(0xC0, dreg_hi, dreg_lo), val);
+ 		/* shl dreg_lo,imm8 */
+ 		EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val);
+-
+-		/* IA32_ECX = 32 - val */
+-		/* mov ecx,val */
+-		EMIT2(0xB1, val);
+-		/* movzx ecx,ecx */
+-		EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
+-		/* neg ecx */
+-		EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
+-		/* add ecx,32 */
+-		EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
+-
+-		/* shr ebx,cl */
+-		EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
+-		/* or dreg_hi,ebx */
+-		EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
+ 	} else if (val >= 32 && val < 64) {
+ 		u32 value = val - 32;
+ 
+@@ -1140,27 +943,10 @@ static inline void emit_ia32_rsh_i64(const u8 dst[], const u32 val,
+ 
+ 	/* Do RSH operation */
+ 	if (val < 32) {
+-		/* shr dreg_lo,imm8 */
+-		EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
+-		/* mov ebx,dreg_hi */
+-		EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
++		/* shrd dreg_lo,dreg_hi,imm8 */
++		EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
+ 		/* shr dreg_hi,imm8 */
+ 		EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val);
+-
+-		/* IA32_ECX = 32 - val */
+-		/* mov ecx,val */
+-		EMIT2(0xB1, val);
+-		/* movzx ecx,ecx */
+-		EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
+-		/* neg ecx */
+-		EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
+-		/* add ecx,32 */
+-		EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
+-
+-		/* shl ebx,cl */
+-		EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
+-		/* or dreg_lo,ebx */
+-		EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
+ 	} else if (val >= 32 && val < 64) {
+ 		u32 value = val - 32;
+ 
+@@ -1205,27 +991,10 @@ static inline void emit_ia32_arsh_i64(const u8 dst[], const u32 val,
+ 	}
+ 	/* Do RSH operation */
+ 	if (val < 32) {
+-		/* shr dreg_lo,imm8 */
+-		EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
+-		/* mov ebx,dreg_hi */
+-		EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
++		/* shrd dreg_lo,dreg_hi,imm8 */
++		EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
+ 		/* ashr dreg_hi,imm8 */
+ 		EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val);
+-
+-		/* IA32_ECX = 32 - val */
+-		/* mov ecx,val */
+-		EMIT2(0xB1, val);
+-		/* movzx ecx,ecx */
+-		EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
+-		/* neg ecx */
+-		EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
+-		/* add ecx,32 */
+-		EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
+-
+-		/* shl ebx,cl */
+-		EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
+-		/* or dreg_lo,ebx */
+-		EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
+ 	} else if (val >= 32 && val < 64) {
+ 		u32 value = val - 32;
+ 
+@@ -1613,6 +1382,75 @@ static inline void emit_push_r64(const u8 src[], u8 **pprog)
+ 	*pprog = prog;
+ }
+ 
++static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo)
++{
++	u8 jmp_cond;
++
++	/* Convert BPF opcode to x86 */
++	switch (op) {
++	case BPF_JEQ:
++		jmp_cond = IA32_JE;
++		break;
++	case BPF_JSET:
++	case BPF_JNE:
++		jmp_cond = IA32_JNE;
++		break;
++	case BPF_JGT:
++		/* GT is unsigned '>', JA in x86 */
++		jmp_cond = IA32_JA;
++		break;
++	case BPF_JLT:
++		/* LT is unsigned '<', JB in x86 */
++		jmp_cond = IA32_JB;
++		break;
++	case BPF_JGE:
++		/* GE is unsigned '>=', JAE in x86 */
++		jmp_cond = IA32_JAE;
++		break;
++	case BPF_JLE:
++		/* LE is unsigned '<=', JBE in x86 */
++		jmp_cond = IA32_JBE;
++		break;
++	case BPF_JSGT:
++		if (!is_cmp_lo)
++			/* Signed '>', GT in x86 */
++			jmp_cond = IA32_JG;
++		else
++			/* GT is unsigned '>', JA in x86 */
++			jmp_cond = IA32_JA;
++		break;
++	case BPF_JSLT:
++		if (!is_cmp_lo)
++			/* Signed '<', LT in x86 */
++			jmp_cond = IA32_JL;
++		else
++			/* LT is unsigned '<', JB in x86 */
++			jmp_cond = IA32_JB;
++		break;
++	case BPF_JSGE:
++		if (!is_cmp_lo)
++			/* Signed '>=', GE in x86 */
++			jmp_cond = IA32_JGE;
++		else
++			/* GE is unsigned '>=', JAE in x86 */
++			jmp_cond = IA32_JAE;
++		break;
++	case BPF_JSLE:
++		if (!is_cmp_lo)
++			/* Signed '<=', LE in x86 */
++			jmp_cond = IA32_JLE;
++		else
++			/* LE is unsigned '<=', JBE in x86 */
++			jmp_cond = IA32_JBE;
++		break;
++	default: /* to silence GCC warning */
++		jmp_cond = COND_JMP_OPCODE_INVALID;
++		break;
++	}
++
++	return jmp_cond;
++}
++
+ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ 		  int oldproglen, struct jit_context *ctx)
+ {
+@@ -2068,11 +1906,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ 		case BPF_JMP | BPF_JGT | BPF_X:
+ 		case BPF_JMP | BPF_JLT | BPF_X:
+ 		case BPF_JMP | BPF_JGE | BPF_X:
+-		case BPF_JMP | BPF_JLE | BPF_X:
+-		case BPF_JMP | BPF_JSGT | BPF_X:
+-		case BPF_JMP | BPF_JSLE | BPF_X:
+-		case BPF_JMP | BPF_JSLT | BPF_X:
+-		case BPF_JMP | BPF_JSGE | BPF_X: {
++		case BPF_JMP | BPF_JLE | BPF_X: {
+ 			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+ 			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ 			u8 sreg_lo = sstk ? IA32_ECX : src_lo;
+@@ -2099,6 +1933,40 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ 			EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
+ 			goto emit_cond_jmp;
+ 		}
++		case BPF_JMP | BPF_JSGT | BPF_X:
++		case BPF_JMP | BPF_JSLE | BPF_X:
++		case BPF_JMP | BPF_JSLT | BPF_X:
++		case BPF_JMP | BPF_JSGE | BPF_X: {
++			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
++			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
++			u8 sreg_lo = sstk ? IA32_ECX : src_lo;
++			u8 sreg_hi = sstk ? IA32_EBX : src_hi;
++
++			if (dstk) {
++				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
++				      STACK_VAR(dst_lo));
++				EMIT3(0x8B,
++				      add_2reg(0x40, IA32_EBP,
++					       IA32_EDX),
++				      STACK_VAR(dst_hi));
++			}
++
++			if (sstk) {
++				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
++				      STACK_VAR(src_lo));
++				EMIT3(0x8B,
++				      add_2reg(0x40, IA32_EBP,
++					       IA32_EBX),
++				      STACK_VAR(src_hi));
++			}
++
++			/* cmp dreg_hi,sreg_hi */
++			EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
++			EMIT2(IA32_JNE, 10);
++			/* cmp dreg_lo,sreg_lo */
++			EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
++			goto emit_cond_jmp_signed;
++		}
+ 		case BPF_JMP | BPF_JSET | BPF_X: {
+ 			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+ 			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+@@ -2159,11 +2027,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ 		case BPF_JMP | BPF_JGT | BPF_K:
+ 		case BPF_JMP | BPF_JLT | BPF_K:
+ 		case BPF_JMP | BPF_JGE | BPF_K:
+-		case BPF_JMP | BPF_JLE | BPF_K:
+-		case BPF_JMP | BPF_JSGT | BPF_K:
+-		case BPF_JMP | BPF_JSLE | BPF_K:
+-		case BPF_JMP | BPF_JSLT | BPF_K:
+-		case BPF_JMP | BPF_JSGE | BPF_K: {
++		case BPF_JMP | BPF_JLE | BPF_K: {
+ 			u32 hi;
+ 			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+ 			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+@@ -2189,50 +2053,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ 			/* cmp dreg_lo,sreg_lo */
+ 			EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
+ 
+-emit_cond_jmp:		/* Convert BPF opcode to x86 */
+-			switch (BPF_OP(code)) {
+-			case BPF_JEQ:
+-				jmp_cond = IA32_JE;
+-				break;
+-			case BPF_JSET:
+-			case BPF_JNE:
+-				jmp_cond = IA32_JNE;
+-				break;
+-			case BPF_JGT:
+-				/* GT is unsigned '>', JA in x86 */
+-				jmp_cond = IA32_JA;
+-				break;
+-			case BPF_JLT:
+-				/* LT is unsigned '<', JB in x86 */
+-				jmp_cond = IA32_JB;
+-				break;
+-			case BPF_JGE:
+-				/* GE is unsigned '>=', JAE in x86 */
+-				jmp_cond = IA32_JAE;
+-				break;
+-			case BPF_JLE:
+-				/* LE is unsigned '<=', JBE in x86 */
+-				jmp_cond = IA32_JBE;
+-				break;
+-			case BPF_JSGT:
+-				/* Signed '>', GT in x86 */
+-				jmp_cond = IA32_JG;
+-				break;
+-			case BPF_JSLT:
+-				/* Signed '<', LT in x86 */
+-				jmp_cond = IA32_JL;
+-				break;
+-			case BPF_JSGE:
+-				/* Signed '>=', GE in x86 */
+-				jmp_cond = IA32_JGE;
+-				break;
+-			case BPF_JSLE:
+-				/* Signed '<=', LE in x86 */
+-				jmp_cond = IA32_JLE;
+-				break;
+-			default: /* to silence GCC warning */
++emit_cond_jmp:		jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
++			if (jmp_cond == COND_JMP_OPCODE_INVALID)
+ 				return -EFAULT;
+-			}
+ 			jmp_offset = addrs[i + insn->off] - addrs[i];
+ 			if (is_imm8(jmp_offset)) {
+ 				EMIT2(jmp_cond, jmp_offset);
+@@ -2242,7 +2065,66 @@ emit_cond_jmp:		/* Convert BPF opcode to x86 */
+ 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+ 				return -EFAULT;
+ 			}
++			break;
++		}
++		case BPF_JMP | BPF_JSGT | BPF_K:
++		case BPF_JMP | BPF_JSLE | BPF_K:
++		case BPF_JMP | BPF_JSLT | BPF_K:
++		case BPF_JMP | BPF_JSGE | BPF_K: {
++			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
++			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
++			u8 sreg_lo = IA32_ECX;
++			u8 sreg_hi = IA32_EBX;
++			u32 hi;
++
++			if (dstk) {
++				EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
++				      STACK_VAR(dst_lo));
++				EMIT3(0x8B,
++				      add_2reg(0x40, IA32_EBP,
++					       IA32_EDX),
++				      STACK_VAR(dst_hi));
++			}
++
++			/* mov ecx,imm32 */
++			EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
++			hi = imm32 & (1 << 31) ? (u32)~0 : 0;
++			/* mov ebx,imm32 */
++			EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
++			/* cmp dreg_hi,sreg_hi */
++			EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
++			EMIT2(IA32_JNE, 10);
++			/* cmp dreg_lo,sreg_lo */
++			EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
++
++			/*
++			 * For simplicity of branch offset computation,
++			 * let's use fixed jump coding here.
++			 */
++emit_cond_jmp_signed:	/* Check the condition for low 32-bit comparison */
++			jmp_cond = get_cond_jmp_opcode(BPF_OP(code), true);
++			if (jmp_cond == COND_JMP_OPCODE_INVALID)
++				return -EFAULT;
++			jmp_offset = addrs[i + insn->off] - addrs[i] + 8;
++			if (is_simm32(jmp_offset)) {
++				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
++			} else {
++				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
++				return -EFAULT;
++			}
++			EMIT2(0xEB, 6);
+ 
++			/* Check the condition for high 32-bit comparison */
++			jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
++			if (jmp_cond == COND_JMP_OPCODE_INVALID)
++				return -EFAULT;
++			jmp_offset = addrs[i + insn->off] - addrs[i];
++			if (is_simm32(jmp_offset)) {
++				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
++			} else {
++				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
++				return -EFAULT;
++			}
+ 			break;
+ 		}
+ 		case BPF_JMP | BPF_JA:
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index bd372e896557..527e69b12002 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -629,17 +629,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
+ static void quirk_no_aersid(struct pci_dev *pdev)
+ {
+ 	/* VMD Domain */
+-	if (is_vmd(pdev->bus))
++	if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
+ 		pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
+ }
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334a, quirk_no_aersid);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
+-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
++DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
++			      PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
+ 
+ static void quirk_intel_th_dnv(struct pci_dev *dev)
+ {
+diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
+index c9986041a5e1..6c3ec193a246 100644
+--- a/arch/x86/power/hibernate_64.c
++++ b/arch/x86/power/hibernate_64.c
+@@ -266,9 +266,9 @@ free_tfm:
+ 	return ret;
+ }
+ 
+-static void hibernation_e820_save(void *buf)
++static int hibernation_e820_save(void *buf)
+ {
+-	get_e820_md5(e820_table_firmware, buf);
++	return get_e820_md5(e820_table_firmware, buf);
+ }
+ 
+ static bool hibernation_e820_mismatch(void *buf)
+@@ -288,8 +288,9 @@ static bool hibernation_e820_mismatch(void *buf)
+ 	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
+ }
+ #else
+-static void hibernation_e820_save(void *buf)
++static int hibernation_e820_save(void *buf)
+ {
++	return 0;
+ }
+ 
+ static bool hibernation_e820_mismatch(void *buf)
+@@ -334,9 +335,7 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
+ 
+ 	rdr->magic = RESTORE_MAGIC;
+ 
+-	hibernation_e820_save(rdr->e820_digest);
+-
+-	return 0;
++	return hibernation_e820_save(rdr->e820_digest);
+ }
+ 
+ /**
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 7eda27d43b48..b21c241aaab9 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -16,6 +16,7 @@
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/mutex.h>
++#include <linux/pci.h>
+ #include <linux/platform_device.h>
+ #include <linux/platform_data/clk-lpss.h>
+ #include <linux/platform_data/x86/pmc_atom.h>
+@@ -83,6 +84,7 @@ struct lpss_device_desc {
+ 	size_t prv_size_override;
+ 	struct property_entry *properties;
+ 	void (*setup)(struct lpss_private_data *pdata);
++	bool resume_from_noirq;
+ };
+ 
+ static const struct lpss_device_desc lpss_dma_desc = {
+@@ -292,12 +294,14 @@ static const struct lpss_device_desc byt_i2c_dev_desc = {
+ 	.flags = LPSS_CLK | LPSS_SAVE_CTX,
+ 	.prv_offset = 0x800,
+ 	.setup = byt_i2c_setup,
++	.resume_from_noirq = true,
+ };
+ 
+ static const struct lpss_device_desc bsw_i2c_dev_desc = {
+ 	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+ 	.prv_offset = 0x800,
+ 	.setup = byt_i2c_setup,
++	.resume_from_noirq = true,
+ };
+ 
+ static const struct lpss_device_desc bsw_spi_dev_desc = {
+@@ -512,12 +516,18 @@ static int match_hid_uid(struct device *dev, void *data)
+ 
+ static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
+ {
++	struct device *dev;
++
+ 	struct hid_uid data = {
+ 		.hid = hid,
+ 		.uid = uid,
+ 	};
+ 
+-	return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
++	dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
++	if (dev)
++		return dev;
++
++	return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
+ }
+ 
+ static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
+@@ -1024,7 +1034,7 @@ static int acpi_lpss_resume(struct device *dev)
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+-static int acpi_lpss_suspend_late(struct device *dev)
++static int acpi_lpss_do_suspend_late(struct device *dev)
+ {
+ 	int ret;
+ 
+@@ -1035,12 +1045,62 @@ static int acpi_lpss_suspend_late(struct device *dev)
+ 	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+ }
+ 
+-static int acpi_lpss_resume_early(struct device *dev)
++static int acpi_lpss_suspend_late(struct device *dev)
++{
++	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
++
++	if (pdata->dev_desc->resume_from_noirq)
++		return 0;
++
++	return acpi_lpss_do_suspend_late(dev);
++}
++
++static int acpi_lpss_suspend_noirq(struct device *dev)
++{
++	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
++	int ret;
++
++	if (pdata->dev_desc->resume_from_noirq) {
++		ret = acpi_lpss_do_suspend_late(dev);
++		if (ret)
++			return ret;
++	}
++
++	return acpi_subsys_suspend_noirq(dev);
++}
++
++static int acpi_lpss_do_resume_early(struct device *dev)
+ {
+ 	int ret = acpi_lpss_resume(dev);
+ 
+ 	return ret ? ret : pm_generic_resume_early(dev);
+ }
++
++static int acpi_lpss_resume_early(struct device *dev)
++{
++	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
++
++	if (pdata->dev_desc->resume_from_noirq)
++		return 0;
++
++	return acpi_lpss_do_resume_early(dev);
++}
++
++static int acpi_lpss_resume_noirq(struct device *dev)
++{
++	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
++	int ret;
++
++	ret = acpi_subsys_resume_noirq(dev);
++	if (ret)
++		return ret;
++
++	if (!dev_pm_may_skip_resume(dev) && pdata->dev_desc->resume_from_noirq)
++		ret = acpi_lpss_do_resume_early(dev);
++
++	return ret;
++}
++
+ #endif /* CONFIG_PM_SLEEP */
+ 
+ static int acpi_lpss_runtime_suspend(struct device *dev)
+@@ -1070,8 +1130,8 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
+ 		.complete = acpi_subsys_complete,
+ 		.suspend = acpi_subsys_suspend,
+ 		.suspend_late = acpi_lpss_suspend_late,
+-		.suspend_noirq = acpi_subsys_suspend_noirq,
+-		.resume_noirq = acpi_subsys_resume_noirq,
++		.suspend_noirq = acpi_lpss_suspend_noirq,
++		.resume_noirq = acpi_lpss_resume_noirq,
+ 		.resume_early = acpi_lpss_resume_early,
+ 		.freeze = acpi_subsys_freeze,
+ 		.freeze_late = acpi_subsys_freeze_late,
+@@ -1079,8 +1139,8 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
+ 		.thaw_noirq = acpi_subsys_thaw_noirq,
+ 		.poweroff = acpi_subsys_suspend,
+ 		.poweroff_late = acpi_lpss_suspend_late,
+-		.poweroff_noirq = acpi_subsys_suspend_noirq,
+-		.restore_noirq = acpi_subsys_resume_noirq,
++		.poweroff_noirq = acpi_lpss_suspend_noirq,
++		.restore_noirq = acpi_lpss_resume_noirq,
+ 		.restore_early = acpi_lpss_resume_early,
+ #endif
+ 		.runtime_suspend = acpi_lpss_runtime_suspend,
+diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
+index 298180bf7e3c..bfcc68b9f708 100644
+--- a/drivers/acpi/acpica/acevents.h
++++ b/drivers/acpi/acpica/acevents.h
+@@ -230,6 +230,8 @@ acpi_ev_default_region_setup(acpi_handle handle,
+ 
+ acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj);
+ 
++u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
++
+ /*
+  * evsci - SCI (System Control Interrupt) handling/dispatch
+  */
+diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
+index 0f28a38a43ea..99b0da899109 100644
+--- a/drivers/acpi/acpica/aclocal.h
++++ b/drivers/acpi/acpica/aclocal.h
+@@ -395,9 +395,9 @@ struct acpi_simple_repair_info {
+ /* Info for running the _REG methods */
+ 
+ struct acpi_reg_walk_info {
+-	acpi_adr_space_type space_id;
+ 	u32 function;
+ 	u32 reg_run_count;
++	acpi_adr_space_type space_id;
+ };
+ 
+ /*****************************************************************************
+diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
+index 70c2bd169f66..49decca4e08f 100644
+--- a/drivers/acpi/acpica/evregion.c
++++ b/drivers/acpi/acpica/evregion.c
+@@ -653,6 +653,19 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+ 
+ 	ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
+ 
++	/*
++	 * These address spaces do not need a call to _REG, since the ACPI
++	 * specification defines them as: "must always be accessible". Since
++	 * they never change state (never become unavailable), no need to ever
++	 * call _REG on them. Also, a data_table is not a "real" address space,
++	 * so do not call _REG. September 2018.
++	 */
++	if ((space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) ||
++	    (space_id == ACPI_ADR_SPACE_SYSTEM_IO) ||
++	    (space_id == ACPI_ADR_SPACE_DATA_TABLE)) {
++		return_VOID;
++	}
++
+ 	info.space_id = space_id;
+ 	info.function = function;
+ 	info.reg_run_count = 0;
+@@ -714,8 +727,8 @@ acpi_ev_reg_run(acpi_handle obj_handle,
+ 	}
+ 
+ 	/*
+-	 * We only care about regions.and objects that are allowed to have address
+-	 * space handlers
++	 * We only care about regions and objects that are allowed to have
++	 * address space handlers
+ 	 */
+ 	if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
+ 		return (AE_OK);
+diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
+index 39284deedd88..17df5dacd43c 100644
+--- a/drivers/acpi/acpica/evrgnini.c
++++ b/drivers/acpi/acpica/evrgnini.c
+@@ -16,9 +16,6 @@
+ #define _COMPONENT          ACPI_EVENTS
+ ACPI_MODULE_NAME("evrgnini")
+ 
+-/* Local prototypes */
+-static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
+-
+ /*******************************************************************************
+  *
+  * FUNCTION:    acpi_ev_system_memory_region_setup
+@@ -33,7 +30,6 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
+  * DESCRIPTION: Setup a system_memory operation region
+  *
+  ******************************************************************************/
+-
+ acpi_status
+ acpi_ev_system_memory_region_setup(acpi_handle handle,
+ 				   u32 function,
+@@ -313,7 +309,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
+  *
+  ******************************************************************************/
+ 
+-static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
++u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
+ {
+ 	acpi_status status;
+ 	struct acpi_pnp_device_id *hid;
+diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
+index 091415b14fbf..3b3a25d9f0e6 100644
+--- a/drivers/acpi/acpica/evxfregn.c
++++ b/drivers/acpi/acpica/evxfregn.c
+@@ -193,7 +193,6 @@ acpi_remove_address_space_handler(acpi_handle device,
+ 				 */
+ 				region_obj =
+ 				    handler_obj->address_space.region_list;
+-
+ 			}
+ 
+ 			/* Remove this Handler object from the list */
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index ed73f6fb0779..b48874b8e1ea 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -1132,6 +1132,7 @@ void acpi_os_wait_events_complete(void)
+ 	flush_workqueue(kacpid_wq);
+ 	flush_workqueue(kacpi_notify_wq);
+ }
++EXPORT_SYMBOL(acpi_os_wait_events_complete);
+ 
+ struct acpi_hp_work {
+ 	struct work_struct work;
+diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
+index 7a3431018e0a..5008ead4609a 100644
+--- a/drivers/acpi/sbshc.c
++++ b/drivers/acpi/sbshc.c
+@@ -196,6 +196,7 @@ int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc)
+ 	hc->callback = NULL;
+ 	hc->context = NULL;
+ 	mutex_unlock(&hc->lock);
++	acpi_os_wait_events_complete();
+ 	return 0;
+ }
+ 
+@@ -292,6 +293,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device)
+ 
+ 	hc = acpi_driver_data(device);
+ 	acpi_ec_remove_query_handler(hc->ec, hc->query_bit);
++	acpi_os_wait_events_complete();
+ 	kfree(hc);
+ 	device->driver_data = NULL;
+ 	return 0;
+diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
+index 39b181d6bd0d..99698d7fe585 100644
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -121,7 +121,8 @@ config SATA_AHCI_PLATFORM
+ 
+ config AHCI_BRCM
+ 	tristate "Broadcom AHCI SATA support"
+-	depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP
++	depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
++		   ARCH_BCM_63XX
+ 	help
+ 	  This option enables support for the AHCI SATA3 controller found on
+ 	  Broadcom SoC's.
+diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
+index 0a550190955a..cc6d06c1b2c7 100644
+--- a/drivers/ata/pata_ep93xx.c
++++ b/drivers/ata/pata_ep93xx.c
+@@ -659,7 +659,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
+ 	 * start of new transfer.
+ 	 */
+ 	drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
+-	drv_data->dma_rx_data.direction = DMA_FROM_DEVICE;
++	drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM;
+ 	drv_data->dma_rx_data.name = "ep93xx-pata-rx";
+ 	drv_data->dma_rx_channel = dma_request_channel(mask,
+ 		ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
+@@ -667,7 +667,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
+ 		return;
+ 
+ 	drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
+-	drv_data->dma_tx_data.direction = DMA_TO_DEVICE;
++	drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV;
+ 	drv_data->dma_tx_data.name = "ep93xx-pata-tx";
+ 	drv_data->dma_tx_channel = dma_request_channel(mask,
+ 		ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
+@@ -678,7 +678,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
+ 
+ 	/* Configure receive channel direction and source address */
+ 	memset(&conf, 0, sizeof(conf));
+-	conf.direction = DMA_FROM_DEVICE;
++	conf.direction = DMA_DEV_TO_MEM;
+ 	conf.src_addr = drv_data->udma_in_phys;
+ 	conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ 	if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
+@@ -689,7 +689,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
+ 
+ 	/* Configure transmit channel direction and destination address */
+ 	memset(&conf, 0, sizeof(conf));
+-	conf.direction = DMA_TO_DEVICE;
++	conf.direction = DMA_MEM_TO_DEV;
+ 	conf.dst_addr = drv_data->udma_out_phys;
+ 	conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ 	if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
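
The pata_ep93xx hunks above replace dma_data_direction values (DMA_TO_DEVICE/DMA_FROM_DEVICE, which belong to the streaming mapping API) with the dma_transfer_direction values the dmaengine slave API expects (DMA_MEM_TO_DEV/DMA_DEV_TO_MEM). A minimal sketch of a receive-channel configuration using the correct enum; the function and variable names are placeholders:

	#include <linux/dmaengine.h>

	/* Illustration only: device-to-memory slave transfer configuration. */
	static int example_setup_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,	/* not DMA_FROM_DEVICE */
			.src_addr	= fifo_addr,
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		};

		return dmaengine_slave_config(chan, &cfg);
	}
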
+diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
+index a84c5573cabe..ed344eb717cc 100644
+--- a/drivers/clk/Makefile
++++ b/drivers/clk/Makefile
+@@ -73,6 +73,7 @@ obj-$(CONFIG_ARCH_HISI)			+= hisilicon/
+ obj-y					+= imgtec/
+ obj-$(CONFIG_ARCH_MXC)			+= imx/
+ obj-$(CONFIG_MACH_INGENIC)		+= ingenic/
++obj-$(CONFIG_ARCH_K3)			+= keystone/
+ obj-$(CONFIG_ARCH_KEYSTONE)		+= keystone/
+ obj-$(CONFIG_MACH_LOONGSON32)		+= loongson1/
+ obj-y					+= mediatek/
+diff --git a/drivers/clk/keystone/Kconfig b/drivers/clk/keystone/Kconfig
+index 7e9f0176578a..b04927d06cd1 100644
+--- a/drivers/clk/keystone/Kconfig
++++ b/drivers/clk/keystone/Kconfig
+@@ -7,7 +7,7 @@ config COMMON_CLK_KEYSTONE
+ 
+ config TI_SCI_CLK
+ 	tristate "TI System Control Interface clock drivers"
+-	depends on (ARCH_KEYSTONE || COMPILE_TEST) && OF
++	depends on (ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST) && OF
+ 	depends on TI_SCI_PROTOCOL
+ 	default ARCH_KEYSTONE
+ 	---help---
+diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
+index d2c99d8916b8..a5fddebbe530 100644
+--- a/drivers/clk/samsung/clk-cpu.c
++++ b/drivers/clk/samsung/clk-cpu.c
+@@ -152,7 +152,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+ 			struct exynos_cpuclk *cpuclk, void __iomem *base)
+ {
+ 	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+-	unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
++	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
+ 	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
+ 	unsigned long div0, div1 = 0, mux_reg;
+ 	unsigned long flags;
+@@ -280,7 +280,7 @@ static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+ 			struct exynos_cpuclk *cpuclk, void __iomem *base)
+ {
+ 	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+-	unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
++	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
+ 	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
+ 	unsigned long div0, div1 = 0, mux_reg;
+ 	unsigned long flags;
+@@ -432,7 +432,7 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
+ 	else
+ 		cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
+ 
+-	cpuclk->alt_parent = __clk_lookup(alt_parent);
++	cpuclk->alt_parent = __clk_get_hw(__clk_lookup(alt_parent));
+ 	if (!cpuclk->alt_parent) {
+ 		pr_err("%s: could not lookup alternate parent %s\n",
+ 				__func__, alt_parent);
+diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h
+index d4b6b517fe1b..bd38c6aa3897 100644
+--- a/drivers/clk/samsung/clk-cpu.h
++++ b/drivers/clk/samsung/clk-cpu.h
+@@ -49,7 +49,7 @@ struct exynos_cpuclk_cfg_data {
+  */
+ struct exynos_cpuclk {
+ 	struct clk_hw				hw;
+-	struct clk				*alt_parent;
++	struct clk_hw				*alt_parent;
+ 	void __iomem				*ctrl_base;
+ 	spinlock_t				*lock;
+ 	const struct exynos_cpuclk_cfg_data	*cfg;
+diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
+index d4f77c4eb277..ce30862617a6 100644
+--- a/drivers/clk/samsung/clk-exynos5420.c
++++ b/drivers/clk/samsung/clk-exynos5420.c
+@@ -634,6 +634,7 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
+ };
+ 
+ static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
++	GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
+ 	GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
+ 			SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+ };
+@@ -1163,8 +1164,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
+ 	GATE(CLK_TMU, "tmu", "aclk66_psgen", GATE_IP_PERIS, 21, 0, 0),
+ 	GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_IP_PERIS, 22, 0, 0),
+ 
+-	GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
+-
+ 	/* GEN Block */
+ 	GATE(CLK_ROTATOR, "rotator", "mout_user_aclk266", GATE_IP_GEN, 1, 0, 0),
+ 	GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0),
+diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
+index 162de44df099..426980514e67 100644
+--- a/drivers/clk/samsung/clk-exynos5433.c
++++ b/drivers/clk/samsung/clk-exynos5433.c
+@@ -5630,7 +5630,7 @@ static const struct of_device_id exynos5433_cmu_of_match[] = {
+ static const struct dev_pm_ops exynos5433_cmu_pm_ops = {
+ 	SET_RUNTIME_PM_OPS(exynos5433_cmu_suspend, exynos5433_cmu_resume,
+ 			   NULL)
+-	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
++	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ 				     pm_runtime_force_resume)
+ };
+ 
+diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
+index bbbf37c471a3..cec90a4c79b3 100644
+--- a/drivers/clocksource/sh_cmt.c
++++ b/drivers/clocksource/sh_cmt.c
+@@ -78,18 +78,17 @@ struct sh_cmt_info {
+ 	unsigned int channels_mask;
+ 
+ 	unsigned long width; /* 16 or 32 bit version of hardware block */
+-	unsigned long overflow_bit;
+-	unsigned long clear_bits;
++	u32 overflow_bit;
++	u32 clear_bits;
+ 
+ 	/* callbacks for CMSTR and CMCSR access */
+-	unsigned long (*read_control)(void __iomem *base, unsigned long offs);
++	u32 (*read_control)(void __iomem *base, unsigned long offs);
+ 	void (*write_control)(void __iomem *base, unsigned long offs,
+-			      unsigned long value);
++			      u32 value);
+ 
+ 	/* callbacks for CMCNT and CMCOR access */
+-	unsigned long (*read_count)(void __iomem *base, unsigned long offs);
+-	void (*write_count)(void __iomem *base, unsigned long offs,
+-			    unsigned long value);
++	u32 (*read_count)(void __iomem *base, unsigned long offs);
++	void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
+ };
+ 
+ struct sh_cmt_channel {
+@@ -103,13 +102,13 @@ struct sh_cmt_channel {
+ 
+ 	unsigned int timer_bit;
+ 	unsigned long flags;
+-	unsigned long match_value;
+-	unsigned long next_match_value;
+-	unsigned long max_match_value;
++	u32 match_value;
++	u32 next_match_value;
++	u32 max_match_value;
+ 	raw_spinlock_t lock;
+ 	struct clock_event_device ced;
+ 	struct clocksource cs;
+-	unsigned long total_cycles;
++	u64 total_cycles;
+ 	bool cs_enabled;
+ };
+ 
+@@ -160,24 +159,22 @@ struct sh_cmt_device {
+ #define SH_CMT32_CMCSR_CKS_RCLK1	(7 << 0)
+ #define SH_CMT32_CMCSR_CKS_MASK		(7 << 0)
+ 
+-static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
++static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
+ {
+ 	return ioread16(base + (offs << 1));
+ }
+ 
+-static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
++static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
+ {
+ 	return ioread32(base + (offs << 2));
+ }
+ 
+-static void sh_cmt_write16(void __iomem *base, unsigned long offs,
+-			   unsigned long value)
++static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
+ {
+ 	iowrite16(value, base + (offs << 1));
+ }
+ 
+-static void sh_cmt_write32(void __iomem *base, unsigned long offs,
+-			   unsigned long value)
++static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
+ {
+ 	iowrite32(value, base + (offs << 2));
+ }
+@@ -242,7 +239,7 @@ static const struct sh_cmt_info sh_cmt_info[] = {
+ #define CMCNT 1 /* channel register */
+ #define CMCOR 2 /* channel register */
+ 
+-static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
++static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
+ {
+ 	if (ch->iostart)
+ 		return ch->cmt->info->read_control(ch->iostart, 0);
+@@ -250,8 +247,7 @@ static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
+ 		return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
+ }
+ 
+-static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
+-				      unsigned long value)
++static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
+ {
+ 	if (ch->iostart)
+ 		ch->cmt->info->write_control(ch->iostart, 0, value);
+@@ -259,39 +255,35 @@ static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
+ 		ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
+ }
+ 
+-static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
++static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
+ {
+ 	return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
+ }
+ 
+-static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
+-				      unsigned long value)
++static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
+ {
+ 	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
+ }
+ 
+-static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
++static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
+ {
+ 	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
+ }
+ 
+-static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
+-				      unsigned long value)
++static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
+ {
+ 	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
+ }
+ 
+-static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
+-				      unsigned long value)
++static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
+ {
+ 	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
+ }
+ 
+-static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
+-					int *has_wrapped)
++static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
+ {
+-	unsigned long v1, v2, v3;
+-	int o1, o2;
++	u32 v1, v2, v3;
++	u32 o1, o2;
+ 
+ 	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
+ 
+@@ -311,7 +303,8 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
+ 
+ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
+ {
+-	unsigned long flags, value;
++	unsigned long flags;
++	u32 value;
+ 
+ 	/* start stop register shared by multiple timer channels */
+ 	raw_spin_lock_irqsave(&ch->cmt->lock, flags);
+@@ -418,11 +411,11 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch)
+ static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
+ 					      int absolute)
+ {
+-	unsigned long new_match;
+-	unsigned long value = ch->next_match_value;
+-	unsigned long delay = 0;
+-	unsigned long now = 0;
+-	int has_wrapped;
++	u32 value = ch->next_match_value;
++	u32 new_match;
++	u32 delay = 0;
++	u32 now = 0;
++	u32 has_wrapped;
+ 
+ 	now = sh_cmt_get_counter(ch, &has_wrapped);
+ 	ch->flags |= FLAG_REPROGRAM; /* force reprogram */
+@@ -619,9 +612,10 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
+ static u64 sh_cmt_clocksource_read(struct clocksource *cs)
+ {
+ 	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+-	unsigned long flags, raw;
+-	unsigned long value;
+-	int has_wrapped;
++	unsigned long flags;
++	u32 has_wrapped;
++	u64 value;
++	u32 raw;
+ 
+ 	raw_spin_lock_irqsave(&ch->lock, flags);
+ 	value = ch->total_cycles;
+@@ -694,7 +688,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
+ 	cs->disable = sh_cmt_clocksource_disable;
+ 	cs->suspend = sh_cmt_clocksource_suspend;
+ 	cs->resume = sh_cmt_clocksource_resume;
+-	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
++	cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
+ 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ 
+ 	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
+diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
+index e26a40971b26..6d7f6b9bb373 100644
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -512,6 +512,16 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+ 		 * duration predictor do a better job next time.
+ 		 */
+ 		measured_us = 9 * MAX_INTERESTING / 10;
++	} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
++		   dev->poll_time_limit) {
++		/*
++		 * The CPU exited the "polling" state due to a time limit, so
++		 * the idle duration prediction leading to the selection of that
++		 * state was inaccurate.  If a better prediction had been made,
++		 * the CPU might have been woken up from idle by the next timer.
++		 * Assume that to be the case.
++		 */
++		measured_us = data->next_timer_us;
+ 	} else {
+ 		/* measured value */
+ 		measured_us = cpuidle_get_last_residency(dev);
+diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
+index 3f86d23c592e..36ff5a1d9422 100644
+--- a/drivers/cpuidle/poll_state.c
++++ b/drivers/cpuidle/poll_state.c
+@@ -17,6 +17,8 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
+ {
+ 	u64 time_start = local_clock();
+ 
++	dev->poll_time_limit = false;
++
+ 	local_irq_enable();
+ 	if (!current_set_polling_and_test()) {
+ 		unsigned int loop_count = 0;
+@@ -27,8 +29,10 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
+ 				continue;
+ 
+ 			loop_count = 0;
+-			if (local_clock() - time_start > POLL_IDLE_TIME_LIMIT)
++			if (local_clock() - time_start > POLL_IDLE_TIME_LIMIT) {
++				dev->poll_time_limit = true;
+ 				break;
++			}
+ 		}
+ 	}
+ 	current_clr_polling();
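
Taken together, the menu.c and poll_state.c hunks above define a small contract: poll_idle() records in dev->poll_time_limit whether the polling loop ended because it hit POLL_IDLE_TIME_LIMIT rather than because a wakeup arrived, and menu_update() then treats such an exit as a misprediction and charges the full next_timer_us. A condensed sketch of that flow (loop-count throttling omitted, for illustration only):

	/* Producer (poll_state.c): remember why the poll loop ended. */
	dev->poll_time_limit = false;
	while (!need_resched()) {
		cpu_relax();
		if (local_clock() - time_start > POLL_IDLE_TIME_LIMIT) {
			dev->poll_time_limit = true;	/* timed out, no real wakeup */
			break;
		}
	}

	/* Consumer (menu.c): a timed-out poll means the idle prediction was too short. */
	if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) && dev->poll_time_limit)
		measured_us = data->next_timer_us;
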
+diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
+index 56bd28174f52..b926098f70ff 100644
+--- a/drivers/crypto/mxs-dcp.c
++++ b/drivers/crypto/mxs-dcp.c
+@@ -28,9 +28,24 @@
+ 
+ #define DCP_MAX_CHANS	4
+ #define DCP_BUF_SZ	PAGE_SIZE
++#define DCP_SHA_PAY_SZ  64
+ 
+ #define DCP_ALIGNMENT	64
+ 
++/*
++ * Null hashes to align with hw behavior on imx6sl and ull
++ * these are flipped for consistency with hw output
++ */
++const uint8_t sha1_null_hash[] =
++	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
++	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
++
++const uint8_t sha256_null_hash[] =
++	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
++	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
++	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
++	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
++
+ /* DCP DMA descriptor. */
+ struct dcp_dma_desc {
+ 	uint32_t	next_cmd_addr;
+@@ -48,6 +63,7 @@ struct dcp_coherent_block {
+ 	uint8_t			aes_in_buf[DCP_BUF_SZ];
+ 	uint8_t			aes_out_buf[DCP_BUF_SZ];
+ 	uint8_t			sha_in_buf[DCP_BUF_SZ];
++	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];
+ 
+ 	uint8_t			aes_key[2 * AES_KEYSIZE_128];
+ 
+@@ -209,6 +225,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
+ 	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
+ 					     DCP_BUF_SZ, DMA_FROM_DEVICE);
+ 
++	if (actx->fill % AES_BLOCK_SIZE) {
++		dev_err(sdcp->dev, "Invalid block size!\n");
++		ret = -EINVAL;
++		goto aes_done_run;
++	}
++
+ 	/* Fill in the DMA descriptor. */
+ 	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
+ 		    MXS_DCP_CONTROL0_INTERRUPT |
+@@ -238,6 +260,7 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
+ 
+ 	ret = mxs_dcp_start_dma(actx);
+ 
++aes_done_run:
+ 	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
+ 			 DMA_TO_DEVICE);
+ 	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+@@ -264,13 +287,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+ 
+ 	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
+ 	uint32_t dst_off = 0;
++	uint32_t last_out_len = 0;
+ 
+ 	uint8_t *key = sdcp->coh->aes_key;
+ 
+ 	int ret = 0;
+ 	int split = 0;
+-	unsigned int i, len, clen, rem = 0;
++	unsigned int i, len, clen, rem = 0, tlen = 0;
+ 	int init = 0;
++	bool limit_hit = false;
+ 
+ 	actx->fill = 0;
+ 
+@@ -289,6 +314,11 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+ 	for_each_sg(req->src, src, nents, i) {
+ 		src_buf = sg_virt(src);
+ 		len = sg_dma_len(src);
++		tlen += len;
++		limit_hit = tlen > req->nbytes;
++
++		if (limit_hit)
++			len = req->nbytes - (tlen - len);
+ 
+ 		do {
+ 			if (actx->fill + len > out_off)
+@@ -305,13 +335,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+ 			 * If we filled the buffer or this is the last SG,
+ 			 * submit the buffer.
+ 			 */
+-			if (actx->fill == out_off || sg_is_last(src)) {
++			if (actx->fill == out_off || sg_is_last(src) ||
++				limit_hit) {
+ 				ret = mxs_dcp_run_aes(actx, req, init);
+ 				if (ret)
+ 					return ret;
+ 				init = 0;
+ 
+ 				out_tmp = out_buf;
++				last_out_len = actx->fill;
+ 				while (dst && actx->fill) {
+ 					if (!split) {
+ 						dst_buf = sg_virt(dst);
+@@ -334,6 +366,19 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
+ 				}
+ 			}
+ 		} while (len);
++
++		if (limit_hit)
++			break;
++	}
++
++	/* Copy the IV for CBC for chaining */
++	if (!rctx->ecb) {
++		if (rctx->enc)
++			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
++				AES_BLOCK_SIZE);
++		else
++			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
++				AES_BLOCK_SIZE);
+ 	}
+ 
+ 	return ret;
+@@ -513,8 +558,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ 	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+ 	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+-	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+-
+ 	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
+ 
+ 	dma_addr_t digest_phys = 0;
+@@ -536,10 +579,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
+ 	desc->payload = 0;
+ 	desc->status = 0;
+ 
++	/*
++	 * Align driver with hw behavior when generating null hashes
++	 */
++	if (rctx->init && rctx->fini && desc->size == 0) {
++		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
++		const uint8_t *sha_buf =
++			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
++			sha1_null_hash : sha256_null_hash;
++		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
++		ret = 0;
++		goto done_run;
++	}
++
+ 	/* Set HASH_TERM bit for last transfer block. */
+ 	if (rctx->fini) {
+-		digest_phys = dma_map_single(sdcp->dev, req->result,
+-					     halg->digestsize, DMA_FROM_DEVICE);
++		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
++					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
+ 		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
+ 		desc->payload = digest_phys;
+ 	}
+@@ -547,9 +603,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
+ 	ret = mxs_dcp_start_dma(actx);
+ 
+ 	if (rctx->fini)
+-		dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
++		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
+ 				 DMA_FROM_DEVICE);
+ 
++done_run:
+ 	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
+ 
+ 	return ret;
+@@ -567,6 +624,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
+ 	const int nents = sg_nents(req->src);
+ 
+ 	uint8_t *in_buf = sdcp->coh->sha_in_buf;
++	uint8_t *out_buf = sdcp->coh->sha_out_buf;
+ 
+ 	uint8_t *src_buf;
+ 
+@@ -621,11 +679,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
+ 
+ 		actx->fill = 0;
+ 
+-		/* For some reason, the result is flipped. */
+-		for (i = 0; i < halg->digestsize / 2; i++) {
+-			swap(req->result[i],
+-			     req->result[halg->digestsize - i - 1]);
+-		}
++		/* For some reason the result is flipped */
++		for (i = 0; i < halg->digestsize; i++)
++			req->result[i] = out_buf[halg->digestsize - i - 1];
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 4c49bb1330b5..e2ab46bfa666 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -11,6 +11,7 @@
+  */
+ 
+ #include <linux/kernel.h>
++#include <linux/kmod.h>
+ #include <linux/sched.h>
+ #include <linux/errno.h>
+ #include <linux/err.h>
+@@ -221,6 +222,49 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
+ 	return ERR_PTR(-ENODEV);
+ }
+ 
++/**
++ * try_then_request_governor() - Try to find the governor and request the
++ *                               module if is not found.
++ * @name:	name of the governor
++ *
++ * Search the list of devfreq governors and request the module and try again
++ * if is not found. This can happen when both drivers (the governor driver
++ * and the driver that call devfreq_add_device) are built as modules.
++ * devfreq_list_lock should be held by the caller. Returns the matched
++ * governor's pointer or an error pointer.
++ */
++static struct devfreq_governor *try_then_request_governor(const char *name)
++{
++	struct devfreq_governor *governor;
++	int err = 0;
++
++	if (IS_ERR_OR_NULL(name)) {
++		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
++		return ERR_PTR(-EINVAL);
++	}
++	WARN(!mutex_is_locked(&devfreq_list_lock),
++	     "devfreq_list_lock must be locked.");
++
++	governor = find_devfreq_governor(name);
++	if (IS_ERR(governor)) {
++		mutex_unlock(&devfreq_list_lock);
++
++		if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
++			     DEVFREQ_NAME_LEN))
++			err = request_module("governor_%s", "simpleondemand");
++		else
++			err = request_module("governor_%s", name);
++		/* Restore previous state before return */
++		mutex_lock(&devfreq_list_lock);
++		if (err)
++			return ERR_PTR(err);
++
++		governor = find_devfreq_governor(name);
++	}
++
++	return governor;
++}
++
+ static int devfreq_notify_transition(struct devfreq *devfreq,
+ 		struct devfreq_freqs *freqs, unsigned int state)
+ {
+@@ -283,11 +327,11 @@ int update_devfreq(struct devfreq *devfreq)
+ 	max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
+ 	min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);
+ 
+-	if (min_freq && freq < min_freq) {
++	if (freq < min_freq) {
+ 		freq = min_freq;
+ 		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
+ 	}
+-	if (max_freq && freq > max_freq) {
++	if (freq > max_freq) {
+ 		freq = max_freq;
+ 		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
+ 	}
+@@ -534,10 +578,6 @@ static void devfreq_dev_release(struct device *dev)
+ 	list_del(&devfreq->node);
+ 	mutex_unlock(&devfreq_list_lock);
+ 
+-	if (devfreq->governor)
+-		devfreq->governor->event_handler(devfreq,
+-						 DEVFREQ_GOV_STOP, NULL);
+-
+ 	if (devfreq->profile->exit)
+ 		devfreq->profile->exit(devfreq->dev.parent);
+ 
+@@ -646,9 +686,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 	mutex_unlock(&devfreq->lock);
+ 
+ 	mutex_lock(&devfreq_list_lock);
+-	list_add(&devfreq->node, &devfreq_list);
+ 
+-	governor = find_devfreq_governor(devfreq->governor_name);
++	governor = try_then_request_governor(devfreq->governor_name);
+ 	if (IS_ERR(governor)) {
+ 		dev_err(dev, "%s: Unable to find governor for the device\n",
+ 			__func__);
+@@ -664,15 +703,17 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 			__func__);
+ 		goto err_init;
+ 	}
++
++	list_add(&devfreq->node, &devfreq_list);
++
+ 	mutex_unlock(&devfreq_list_lock);
+ 
+ 	return devfreq;
+ 
+ err_init:
+-	list_del(&devfreq->node);
+ 	mutex_unlock(&devfreq_list_lock);
+ 
+-	device_unregister(&devfreq->dev);
++	devfreq_remove_device(devfreq);
+ 	devfreq = NULL;
+ err_dev:
+ 	if (devfreq)
+@@ -693,6 +734,9 @@ int devfreq_remove_device(struct devfreq *devfreq)
+ 	if (!devfreq)
+ 		return -EINVAL;
+ 
++	if (devfreq->governor)
++		devfreq->governor->event_handler(devfreq,
++						 DEVFREQ_GOV_STOP, NULL);
+ 	device_unregister(&devfreq->dev);
+ 
+ 	return 0;
+@@ -991,7 +1035,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&devfreq_list_lock);
+-	governor = find_devfreq_governor(str_governor);
++	governor = try_then_request_governor(str_governor);
+ 	if (IS_ERR(governor)) {
+ 		ret = PTR_ERR(governor);
+ 		goto out;
+@@ -1126,17 +1170,26 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
+ 	struct devfreq *df = to_devfreq(dev);
+ 	unsigned long value;
+ 	int ret;
+-	unsigned long max;
+ 
+ 	ret = sscanf(buf, "%lu", &value);
+ 	if (ret != 1)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&df->lock);
+-	max = df->max_freq;
+-	if (value && max && value > max) {
+-		ret = -EINVAL;
+-		goto unlock;
++
++	if (value) {
++		if (value > df->max_freq) {
++			ret = -EINVAL;
++			goto unlock;
++		}
++	} else {
++		unsigned long *freq_table = df->profile->freq_table;
++
++		/* Get minimum frequency according to sorting order */
++		if (freq_table[0] < freq_table[df->profile->max_state - 1])
++			value = freq_table[0];
++		else
++			value = freq_table[df->profile->max_state - 1];
+ 	}
+ 
+ 	df->min_freq = value;
+@@ -1161,17 +1214,26 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
+ 	struct devfreq *df = to_devfreq(dev);
+ 	unsigned long value;
+ 	int ret;
+-	unsigned long min;
+ 
+ 	ret = sscanf(buf, "%lu", &value);
+ 	if (ret != 1)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&df->lock);
+-	min = df->min_freq;
+-	if (value && min && value < min) {
+-		ret = -EINVAL;
+-		goto unlock;
++
++	if (value) {
++		if (value < df->min_freq) {
++			ret = -EINVAL;
++			goto unlock;
++		}
++	} else {
++		unsigned long *freq_table = df->profile->freq_table;
++
++		/* Get maximum frequency according to sorting order */
++		if (freq_table[0] < freq_table[df->profile->max_state - 1])
++			value = freq_table[df->profile->max_state - 1];
++		else
++			value = freq_table[0];
+ 	}
+ 
+ 	df->max_freq = value;
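
The devfreq hunks above introduce try_then_request_governor(), which drops devfreq_list_lock, asks kmod to load the missing governor module, and retries the lookup. A stripped-down sketch of that lookup-then-modprobe pattern (locking and the simple_ondemand alias handling omitted; the function name here is illustrative):

	#include <linux/kmod.h>

	static struct devfreq_governor *example_get_governor(const char *name)
	{
		struct devfreq_governor *g = find_devfreq_governor(name);

		if (IS_ERR(g)) {
			int err = request_module("governor_%s", name);

			if (err)
				return ERR_PTR(err);
			g = find_devfreq_governor(name);	/* retry after modprobe */
		}

		return g;
	}
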
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index 21a5708985bc..0fec3c554fe3 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -129,7 +129,7 @@ static void
+ ioat_init_channel(struct ioatdma_device *ioat_dma,
+ 		  struct ioatdma_chan *ioat_chan, int idx);
+ static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
+-static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
++static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
+ static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
+ 
+ static int ioat_dca_enabled = 1;
+@@ -575,7 +575,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
+  * ioat_enumerate_channels - find and initialize the device's channels
+  * @ioat_dma: the ioat dma device to be enumerated
+  */
+-static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
++static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+ {
+ 	struct ioatdma_chan *ioat_chan;
+ 	struct device *dev = &ioat_dma->pdev->dev;
+@@ -594,7 +594,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+ 	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
+ 	xfercap_log &= 0x1f; /* bits [4:0] valid */
+ 	if (xfercap_log == 0)
+-		return 0;
++		return;
+ 	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
+ 
+ 	for (i = 0; i < dma->chancnt; i++) {
+@@ -611,7 +611,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+ 		}
+ 	}
+ 	dma->chancnt = i;
+-	return i;
+ }
+ 
+ /**
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index 041ce864097e..80ff95f75199 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -198,6 +198,7 @@ struct rcar_dmac {
+ 	struct dma_device engine;
+ 	struct device *dev;
+ 	void __iomem *iomem;
++	struct device_dma_parameters parms;
+ 
+ 	unsigned int n_channels;
+ 	struct rcar_dmac_chan *channels;
+@@ -1814,6 +1815,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
+ 
+ 	dmac->dev = &pdev->dev;
+ 	platform_set_drvdata(pdev, dmac);
++	dmac->dev->dma_parms = &dmac->parms;
++	dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+ 	dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+ 
+ 	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
+diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
+index 395c698edb4d..fc0f9c8766a8 100644
+--- a/drivers/dma/timb_dma.c
++++ b/drivers/dma/timb_dma.c
+@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
+ 	}
+ 
+ 	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
+-		td_desc->desc_list_len, DMA_MEM_TO_DEV);
++		td_desc->desc_list_len, DMA_TO_DEVICE);
+ 
+ 	return &td_desc->txd;
+ }
+diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
+index 87c18a544513..7f3da34c7874 100644
+--- a/drivers/gpio/gpio-syscon.c
++++ b/drivers/gpio/gpio-syscon.c
+@@ -122,7 +122,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
+ 				   BIT(offs % SYSCON_REG_BITS));
+ 	}
+ 
+-	priv->data->set(chip, offset, val);
++	chip->set(chip, offset, val);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index 9acb9dfaf57e..9cde79a7335c 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -1140,7 +1140,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+ 
+ 	gmu->dev = &pdev->dev;
+ 
+-	of_dma_configure(gmu->dev, node, false);
++	of_dma_configure(gmu->dev, node, true);
+ 
+ 	/* Fow now, don't do anything fancy until we get our feet under us */
+ 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
+index e6b49500c52a..8c9555313fc3 100644
+--- a/drivers/hwmon/ina3221.c
++++ b/drivers/hwmon/ina3221.c
+@@ -38,9 +38,9 @@
+ #define INA3221_WARN3			0x0c
+ #define INA3221_MASK_ENABLE		0x0f
+ 
+-#define INA3221_CONFIG_MODE_SHUNT	BIT(1)
+-#define INA3221_CONFIG_MODE_BUS		BIT(2)
+-#define INA3221_CONFIG_MODE_CONTINUOUS	BIT(3)
++#define INA3221_CONFIG_MODE_SHUNT	BIT(0)
++#define INA3221_CONFIG_MODE_BUS		BIT(1)
++#define INA3221_CONFIG_MODE_CONTINUOUS	BIT(2)
+ 
+ #define INA3221_RSHUNT_DEFAULT		10000
+ 
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index bb15d7816a29..2cef0c37ff6f 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -325,8 +325,9 @@ static int k10temp_probe(struct pci_dev *pdev,
+ 
+ 	data->pdev = pdev;
+ 
+-	if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
+-					  boot_cpu_data.x86_model == 0x70)) {
++	if (boot_cpu_data.x86 == 0x15 &&
++	    ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
++	     (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
+ 		data->read_htcreg = read_htcreg_nb_f15;
+ 		data->read_tempreg = read_tempreg_nb_f15;
+ 	} else if (boot_cpu_data.x86 == 0x17) {
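
The k10temp hunk above masks off the low nibble of the model number so the family 15h 60h/70h register accessors cover the whole model ranges rather than only models 0x60 and 0x70 exactly. Two worked examples of the new check, for illustration:

	bool is_f15_60h = (0x65 & 0xf0) == 0x60;	/* model 0x65 now matches */
	bool is_f15_70h = (0x7a & 0xf0) == 0x70;	/* model 0x7a now matches */
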
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index eba692cddbde..559101a1c136 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -704,10 +704,10 @@ static const char *const nct6795_temp_label[] = {
+ 	"PCH_CHIP_TEMP",
+ 	"PCH_CPU_TEMP",
+ 	"PCH_MCH_TEMP",
+-	"PCH_DIM0_TEMP",
+-	"PCH_DIM1_TEMP",
+-	"PCH_DIM2_TEMP",
+-	"PCH_DIM3_TEMP",
++	"Agent0 Dimm0",
++	"Agent0 Dimm1",
++	"Agent1 Dimm0",
++	"Agent1 Dimm1",
+ 	"BYTE_TEMP0",
+ 	"BYTE_TEMP1",
+ 	"PECI Agent 0 Calibration",
+@@ -742,10 +742,10 @@ static const char *const nct6796_temp_label[] = {
+ 	"PCH_CHIP_TEMP",
+ 	"PCH_CPU_TEMP",
+ 	"PCH_MCH_TEMP",
+-	"PCH_DIM0_TEMP",
+-	"PCH_DIM1_TEMP",
+-	"PCH_DIM2_TEMP",
+-	"PCH_DIM3_TEMP",
++	"Agent0 Dimm0",
++	"Agent0 Dimm1",
++	"Agent1 Dimm0",
++	"Agent1 Dimm1",
+ 	"BYTE_TEMP0",
+ 	"BYTE_TEMP1",
+ 	"PECI Agent 0 Calibration",
+diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
+index b998f9fbed41..979b579bc118 100644
+--- a/drivers/hwmon/npcm750-pwm-fan.c
++++ b/drivers/hwmon/npcm750-pwm-fan.c
+@@ -52,7 +52,7 @@
+ 
+ /* Define the Counter Register, value = 100 for match 100% */
+ #define NPCM7XX_PWM_COUNTER_DEFAULT_NUM		255
+-#define NPCM7XX_PWM_CMR_DEFAULT_NUM		127
++#define NPCM7XX_PWM_CMR_DEFAULT_NUM		255
+ #define NPCM7XX_PWM_CMR_MAX			255
+ 
+ /* default all PWM channels PRESCALE2 = 1 */
+diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
+index 7f01fad0d3e3..65de80bd63d8 100644
+--- a/drivers/hwmon/pwm-fan.c
++++ b/drivers/hwmon/pwm-fan.c
+@@ -221,8 +221,12 @@ static int pwm_fan_probe(struct platform_device *pdev)
+ 
+ 	ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL);
+ 	if (IS_ERR(ctx->pwm)) {
+-		dev_err(&pdev->dev, "Could not get PWM\n");
+-		return PTR_ERR(ctx->pwm);
++		ret = PTR_ERR(ctx->pwm);
++
++		if (ret != -EPROBE_DEFER)
++			dev_err(&pdev->dev, "Could not get PWM: %d\n", ret);
++
++		return ret;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, ctx);
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 8f803812ea24..ee6dd1b84fac 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -433,12 +433,13 @@ config I2C_BCM_KONA
+ 	  If you do not need KONA I2C interface, say N.
+ 
+ config I2C_BRCMSTB
+-	tristate "BRCM Settop I2C controller"
+-	depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
++	tristate "BRCM Settop/DSL I2C controller"
++	depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_63XX || \
++		   COMPILE_TEST
+ 	default y
+ 	help
+ 	  If you say yes to this option, support will be included for the
+-	  I2C interface on the Broadcom Settop SoCs.
++	  I2C interface on the Broadcom Settop/DSL SoCs.
+ 
+ 	  If you do not need I2C interface, say N.
+ 
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index a74ef76705e0..2bb4d20ead32 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -503,7 +503,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
+ 		writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
+ 		writel(I2C_DMA_CON_RX, i2c->pdmabase + OFFSET_CON);
+ 
+-		dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 0);
++		dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
+ 		if (!dma_rd_buf)
+ 			return -ENOMEM;
+ 
+@@ -526,7 +526,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
+ 		writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
+ 		writel(I2C_DMA_CON_TX, i2c->pdmabase + OFFSET_CON);
+ 
+-		dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 0);
++		dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
+ 		if (!dma_wr_buf)
+ 			return -ENOMEM;
+ 
+@@ -549,7 +549,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
+ 		writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG);
+ 		writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_CON);
+ 
+-		dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 0);
++		dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
+ 		if (!dma_wr_buf)
+ 			return -ENOMEM;
+ 
+@@ -561,7 +561,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
+ 			return -ENOMEM;
+ 		}
+ 
+-		dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 0);
++		dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 1);
+ 		if (!dma_rd_buf) {
+ 			dma_unmap_single(i2c->dev, wpaddr,
+ 					 msgs->len, DMA_TO_DEVICE);
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 2ac86096ddd9..cd9c65f3d404 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -661,9 +661,6 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
+ 	dev_dbg(omap->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
+ 		msg->addr, msg->len, msg->flags, stop);
+ 
+-	if (msg->len == 0)
+-		return -EINVAL;
+-
+ 	omap->receiver = !!(msg->flags & I2C_M_RD);
+ 	omap_i2c_resize_fifo(omap, msg->len, omap->receiver);
+ 
+@@ -1179,6 +1176,10 @@ static const struct i2c_algorithm omap_i2c_algo = {
+ 	.functionality	= omap_i2c_func,
+ };
+ 
++static const struct i2c_adapter_quirks omap_i2c_quirks = {
++	.flags = I2C_AQ_NO_ZERO_LEN,
++};
++
+ #ifdef CONFIG_OF
+ static struct omap_i2c_bus_platform_data omap2420_pdata = {
+ 	.rev = OMAP_I2C_IP_VERSION_1,
+@@ -1453,6 +1454,7 @@ omap_i2c_probe(struct platform_device *pdev)
+ 	adap->class = I2C_CLASS_DEPRECATED;
+ 	strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
+ 	adap->algo = &omap_i2c_algo;
++	adap->quirks = &omap_i2c_quirks;
+ 	adap->dev.parent = &pdev->dev;
+ 	adap->dev.of_node = pdev->dev.of_node;
+ 	adap->bus_recovery_info = &omap_i2c_bus_recovery_info;
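
This omap hunk and the qup, tegra and zx2967 hunks that follow all make the same change: drop the per-driver msg->len == 0 checks and let the i2c core reject zero-length transfers through adapter quirks. The recurring shape, with placeholder names, for illustration:

	#include <linux/i2c.h>

	static const struct i2c_adapter_quirks example_i2c_quirks = {
		.flags = I2C_AQ_NO_ZERO_LEN,	/* core now filters zero-length messages */
	};

	static void example_adapter_setup(struct i2c_adapter *adap)
	{
		adap->quirks = &example_i2c_quirks;
	}
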
+diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
+index c86c3ae1318f..e09cd0775ae9 100644
+--- a/drivers/i2c/busses/i2c-qup.c
++++ b/drivers/i2c/busses/i2c-qup.c
+@@ -1088,11 +1088,6 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
+ 	writel(I2C_MINI_CORE | I2C_N_VAL, qup->base + QUP_CONFIG);
+ 
+ 	for (idx = 0; idx < num; idx++) {
+-		if (msgs[idx].len == 0) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-
+ 		if (qup_i2c_poll_state_i2c_master(qup)) {
+ 			ret = -EIO;
+ 			goto out;
+@@ -1520,9 +1515,6 @@ qup_i2c_determine_mode_v2(struct qup_i2c_dev *qup,
+ 
+ 	/* All i2c_msgs should be transferred using either dma or cpu */
+ 	for (idx = 0; idx < num; idx++) {
+-		if (msgs[idx].len == 0)
+-			return -EINVAL;
+-
+ 		if (msgs[idx].flags & I2C_M_RD)
+ 			max_rx_len = max_t(unsigned int, max_rx_len,
+ 					   msgs[idx].len);
+@@ -1636,9 +1628,14 @@ static const struct i2c_algorithm qup_i2c_algo_v2 = {
+  * which limits the possible read to 256 (QUP_READ_LIMIT) bytes.
+  */
+ static const struct i2c_adapter_quirks qup_i2c_quirks = {
++	.flags = I2C_AQ_NO_ZERO_LEN,
+ 	.max_read_len = QUP_READ_LIMIT,
+ };
+ 
++static const struct i2c_adapter_quirks qup_i2c_quirks_v2 = {
++	.flags = I2C_AQ_NO_ZERO_LEN,
++};
++
+ static void qup_i2c_enable_clocks(struct qup_i2c_dev *qup)
+ {
+ 	clk_prepare_enable(qup->clk);
+@@ -1701,6 +1698,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
+ 		is_qup_v1 = true;
+ 	} else {
+ 		qup->adap.algo = &qup_i2c_algo_v2;
++		qup->adap.quirks = &qup_i2c_quirks_v2;
+ 		is_qup_v1 = false;
+ 		if (acpi_match_device(qup_i2c_acpi_match, qup->dev))
+ 			goto nodma;
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index ef13b6ce9d8d..47d196c026ba 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -684,9 +684,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ 
+ 	tegra_i2c_flush_fifos(i2c_dev);
+ 
+-	if (msg->len == 0)
+-		return -EINVAL;
+-
+ 	i2c_dev->msg_buf = msg->buf;
+ 	i2c_dev->msg_buf_remaining = msg->len;
+ 	i2c_dev->msg_err = I2C_ERR_NONE;
+@@ -831,6 +828,7 @@ static const struct i2c_algorithm tegra_i2c_algo = {
+ 
+ /* payload size is only 12 bit */
+ static const struct i2c_adapter_quirks tegra_i2c_quirks = {
++	.flags = I2C_AQ_NO_ZERO_LEN,
+ 	.max_read_len = 4096,
+ 	.max_write_len = 4096 - 12,
+ };
+diff --git a/drivers/i2c/busses/i2c-zx2967.c b/drivers/i2c/busses/i2c-zx2967.c
+index 48281c1b30c6..b8f9e020d80e 100644
+--- a/drivers/i2c/busses/i2c-zx2967.c
++++ b/drivers/i2c/busses/i2c-zx2967.c
+@@ -281,9 +281,6 @@ static int zx2967_i2c_xfer_msg(struct zx2967_i2c *i2c,
+ 	int ret;
+ 	int i;
+ 
+-	if (msg->len == 0)
+-		return -EINVAL;
+-
+ 	zx2967_i2c_flush_fifos(i2c);
+ 
+ 	i2c->cur_trans = msg->buf;
+@@ -498,6 +495,10 @@ static const struct i2c_algorithm zx2967_i2c_algo = {
+ 	.functionality = zx2967_i2c_func,
+ };
+ 
++static const struct i2c_adapter_quirks zx2967_i2c_quirks = {
++	.flags = I2C_AQ_NO_ZERO_LEN,
++};
++
+ static const struct of_device_id zx2967_i2c_of_match[] = {
+ 	{ .compatible = "zte,zx296718-i2c", },
+ 	{ },
+@@ -568,6 +569,7 @@ static int zx2967_i2c_probe(struct platform_device *pdev)
+ 	strlcpy(i2c->adap.name, "zx2967 i2c adapter",
+ 		sizeof(i2c->adap.name));
+ 	i2c->adap.algo = &zx2967_i2c_algo;
++	i2c->adap.quirks = &zx2967_i2c_quirks;
+ 	i2c->adap.nr = pdev->id;
+ 	i2c->adap.dev.parent = &pdev->dev;
+ 	i2c->adap.dev.of_node = pdev->dev.of_node;
+diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
+index f208a25d0e4f..1669548e91dc 100644
+--- a/drivers/infiniband/hw/hfi1/mad.c
++++ b/drivers/infiniband/hw/hfi1/mad.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright(c) 2015-2017 Intel Corporation.
++ * Copyright(c) 2015-2018 Intel Corporation.
+  *
+  * This file is provided under a dual BSD/GPLv2 license.  When using or
+  * redistributing this file, you may do so under either license.
+@@ -4829,7 +4829,7 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
+ 	int ret;
+ 	int pkey_idx;
+ 	int local_mad = 0;
+-	u32 resp_len = 0;
++	u32 resp_len = in_wc->byte_len - sizeof(*in_grh);
+ 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ 
+ 	pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 9a24fd0ee3e7..ebfb0998bced 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -665,7 +665,9 @@ struct hns_roce_caps {
+ 	u32		max_sq_sg;	/* 2 */
+ 	u32		max_sq_inline;	/* 32 */
+ 	u32		max_rq_sg;	/* 2 */
++	u32		max_extend_sg;
+ 	int		num_qps;	/* 256k */
++	int             reserved_qps;
+ 	u32		max_wqes;	/* 16k */
+ 	u32		max_sq_desc_sz;	/* 64 */
+ 	u32		max_rq_desc_sz;	/* 64 */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 3f8e13190aa7..9e7923cf8577 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -121,6 +121,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ 		}
+ 
+ 		if (wr->opcode == IB_WR_RDMA_READ) {
++			*bad_wr =  wr;
+ 			dev_err(hr_dev->dev, "Not support inline data!\n");
+ 			return -EINVAL;
+ 		}
+@@ -1193,6 +1194,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
+ 	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
+ 	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
+ 	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
++	caps->max_extend_sg	= HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
+ 	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
+ 	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
+ 	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
+@@ -1222,6 +1224,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
+ 	caps->reserved_mrws	= 1;
+ 	caps->reserved_uars	= 0;
+ 	caps->reserved_cqs	= 0;
++	caps->reserved_qps	= HNS_ROCE_V2_RSV_QPS;
+ 
+ 	caps->qpc_ba_pg_sz	= 0;
+ 	caps->qpc_buf_pg_sz	= 0;
+@@ -2266,6 +2269,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
+ 		wc->src_qp = (u8)roce_get_field(cqe->byte_32,
+ 						V2_CQE_BYTE_32_RMT_QPN_M,
+ 						V2_CQE_BYTE_32_RMT_QPN_S);
++		wc->slid = 0;
+ 		wc->wc_flags |= (roce_get_bit(cqe->byte_32,
+ 					      V2_CQE_BYTE_32_GRH_S) ?
+ 					      IB_WC_GRH : 0);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index 14aa308befef..2c3e600db9ce 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -50,6 +50,7 @@
+ #define HNS_ROCE_V2_MAX_CQE_NUM			0x10000
+ #define HNS_ROCE_V2_MAX_RQ_SGE_NUM		0x100
+ #define HNS_ROCE_V2_MAX_SQ_SGE_NUM		0xff
++#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM		0x200000
+ #define HNS_ROCE_V2_MAX_SQ_INLINE		0x20
+ #define HNS_ROCE_V2_UAR_NUM			256
+ #define HNS_ROCE_V2_PHY_UAR_NUM			1
+@@ -78,6 +79,7 @@
+ #define HNS_ROCE_INVALID_LKEY			0x100
+ #define HNS_ROCE_CMQ_TX_TIMEOUT			30000
+ #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE	2
++#define HNS_ROCE_V2_RSV_QPS			8
+ 
+ #define HNS_ROCE_CONTEXT_HOP_NUM		1
+ #define HNS_ROCE_MTT_HOP_NUM			1
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 2fa4fb17f6d3..54d22868ffba 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -31,6 +31,7 @@
+  * SOFTWARE.
+  */
+ 
++#include <linux/pci.h>
+ #include <linux/platform_device.h>
+ #include <rdma/ib_addr.h>
+ #include <rdma/ib_umem.h>
+@@ -372,6 +373,16 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+ 	if (hr_qp->sq.max_gs > 2)
+ 		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ 							(hr_qp->sq.max_gs - 2));
++
++	if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
++		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
++			dev_err(hr_dev->dev,
++				"The extended sge cnt error! sge_cnt=%d\n",
++				hr_qp->sge.sge_cnt);
++			return -EINVAL;
++		}
++	}
++
+ 	hr_qp->sge.sge_shift = 4;
+ 
+ 	/* Get buf size, SQ and RQ  are aligned to page_szie */
+@@ -465,6 +476,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+ 		hr_qp->sge.sge_shift = 4;
+ 	}
+ 
++	if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
++		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
++			dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
++				hr_qp->sge.sge_cnt);
++			return -EINVAL;
++		}
++	}
++
+ 	/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
+ 	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ 	hr_qp->sq.offset = 0;
+@@ -1106,14 +1125,20 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
+ {
+ 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ 	int reserved_from_top = 0;
++	int reserved_from_bot;
+ 	int ret;
+ 
+ 	spin_lock_init(&qp_table->lock);
+ 	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
+ 
+-	/* A port include two SQP, six port total 12 */
++	/* In hw v1, a port include two SQP, six ports total 12 */
++	if (hr_dev->caps.max_sq_sg <= 2)
++		reserved_from_bot = SQP_NUM;
++	else
++		reserved_from_bot = hr_dev->caps.reserved_qps;
++
+ 	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
+-				   hr_dev->caps.num_qps - 1, SQP_NUM,
++				   hr_dev->caps.num_qps - 1, reserved_from_bot,
+ 				   reserved_from_top);
+ 	if (ret) {
+ 		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
+diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
+index f3e80dec1334..af7f2083d4d1 100644
+--- a/drivers/infiniband/hw/mthca/mthca_main.c
++++ b/drivers/infiniband/hw/mthca/mthca_main.c
+@@ -986,7 +986,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
+ 		goto err_free_dev;
+ 	}
+ 
+-	if (mthca_cmd_init(mdev)) {
++	err = mthca_cmd_init(mdev);
++	if (err) {
+ 		mthca_err(mdev, "Failed to init command interface, aborting.\n");
+ 		goto err_free_dev;
+ 	}
+diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
+index 0d6c04ba7fc3..c41a5fee81f7 100644
+--- a/drivers/infiniband/sw/rxe/rxe_srq.c
++++ b/drivers/infiniband/sw/rxe/rxe_srq.c
+@@ -31,6 +31,7 @@
+  * SOFTWARE.
+  */
+ 
++#include <linux/vmalloc.h>
+ #include "rxe.h"
+ #include "rxe_loc.h"
+ #include "rxe_queue.h"
+@@ -129,13 +130,18 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
+ 
+ 	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
+ 			   q->buf_size, &q->ip);
+-	if (err)
++	if (err) {
++		vfree(q->buf);
++		kfree(q);
+ 		return err;
++	}
+ 
+ 	if (uresp) {
+ 		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
+-				 sizeof(uresp->srq_num)))
++				 sizeof(uresp->srq_num))) {
++			rxe_queue_cleanup(q);
+ 			return -EFAULT;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+index 267da8215e08..31cd361416ac 100644
+--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
++++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+@@ -351,7 +351,8 @@ static uint32_t opa_vnic_get_dlid(struct opa_vnic_adapter *adapter,
+ 			if (unlikely(!dlid))
+ 				v_warn("Null dlid in MAC address\n");
+ 		} else if (def_port != OPA_VNIC_INVALID_PORT) {
+-			dlid = info->vesw.u_ucast_dlid[def_port];
++			if (def_port < OPA_VESW_MAX_NUM_DEF_PORT)
++				dlid = info->vesw.u_ucast_dlid[def_port];
+ 		}
+ 	}
+ 
+diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
+index e5c3b066bd2a..06f0eb04a8fd 100644
+--- a/drivers/input/touchscreen/silead.c
++++ b/drivers/input/touchscreen/silead.c
+@@ -558,20 +558,33 @@ static int __maybe_unused silead_ts_suspend(struct device *dev)
+ static int __maybe_unused silead_ts_resume(struct device *dev)
+ {
+ 	struct i2c_client *client = to_i2c_client(dev);
++	bool second_try = false;
+ 	int error, status;
+ 
+ 	silead_ts_set_power(client, SILEAD_POWER_ON);
+ 
++ retry:
+ 	error = silead_ts_reset(client);
+ 	if (error)
+ 		return error;
+ 
++	if (second_try) {
++		error = silead_ts_load_fw(client);
++		if (error)
++			return error;
++	}
++
+ 	error = silead_ts_startup(client);
+ 	if (error)
+ 		return error;
+ 
+ 	status = silead_ts_get_status(client);
+ 	if (status != SILEAD_STATUS_OK) {
++		if (!second_try) {
++			second_try = true;
++			dev_dbg(dev, "Reloading firmware after unsuccessful resume\n");
++			goto retry;
++		}
+ 		dev_err(dev, "Resume error, status: 0x%02x\n", status);
+ 		return -ENODEV;
+ 	}
+diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
+index d5dfa4053bbf..b71673911aac 100644
+--- a/drivers/input/touchscreen/st1232.c
++++ b/drivers/input/touchscreen/st1232.c
+@@ -195,6 +195,7 @@ static int st1232_ts_probe(struct i2c_client *client,
+ 	input_dev->id.bustype = BUS_I2C;
+ 	input_dev->dev.parent = &client->dev;
+ 
++	__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ 	__set_bit(EV_SYN, input_dev->evbit);
+ 	__set_bit(EV_KEY, input_dev->evbit);
+ 	__set_bit(EV_ABS, input_dev->evbit);
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index 40fbf20d69e5..2ab7100bcff1 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -567,7 +567,7 @@ struct arm_smmu_device {
+ 
+ 	int				gerr_irq;
+ 	int				combined_irq;
+-	atomic_t			sync_nr;
++	u32				sync_nr;
+ 
+ 	unsigned long			ias; /* IPA */
+ 	unsigned long			oas; /* PA */
+@@ -964,14 +964,13 @@ static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
+ 	struct arm_smmu_cmdq_ent ent = {
+ 		.opcode = CMDQ_OP_CMD_SYNC,
+ 		.sync	= {
+-			.msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
+ 			.msiaddr = virt_to_phys(&smmu->sync_count),
+ 		},
+ 	};
+ 
+-	arm_smmu_cmdq_build_cmd(cmd, &ent);
+-
+ 	spin_lock_irqsave(&smmu->cmdq.lock, flags);
++	ent.sync.msidata = ++smmu->sync_nr;
++	arm_smmu_cmdq_build_cmd(cmd, &ent);
+ 	arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ 	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+ 
+@@ -2196,7 +2195,6 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
+ {
+ 	int ret;
+ 
+-	atomic_set(&smmu->sync_nr, 0);
+ 	ret = arm_smmu_init_queues(smmu);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index 88641b4560bc..2f79efd16a05 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -574,13 +574,12 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
+ 			return 0;
+ 
+ 		tablep = iopte_deref(pte, data);
++	} else if (unmap_idx >= 0) {
++		io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
++		return size;
+ 	}
+ 
+-	if (unmap_idx < 0)
+-		return __arm_lpae_unmap(data, iova, size, lvl, tablep);
+-
+-	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+-	return size;
++	return __arm_lpae_unmap(data, iova, size, lvl, tablep);
+ }
+ 
+ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
+index 13063339b416..a2a3acd74491 100644
+--- a/drivers/irqchip/irq-mvebu-icu.c
++++ b/drivers/irqchip/irq-mvebu-icu.c
+@@ -105,7 +105,7 @@ static int
+ mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ 			       unsigned long *hwirq, unsigned int *type)
+ {
+-	struct mvebu_icu *icu = d->host_data;
++	struct mvebu_icu *icu = platform_msi_get_host_data(d);
+ 	unsigned int icu_group;
+ 
+ 	/* Check the count of the parameters in dt */
+diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
+index 80710c62ac29..8dce31dbf2cb 100644
+--- a/drivers/lightnvm/pblk-core.c
++++ b/drivers/lightnvm/pblk-core.c
+@@ -893,10 +893,8 @@ static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
+ 
+ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
+ {
+-	struct nvm_rq rqd;
+-	int ret = 0;
+-
+-	memset(&rqd, 0, sizeof(struct nvm_rq));
++	struct nvm_rq rqd = {NULL};
++	int ret;
+ 
+ 	pblk_setup_e_rq(pblk, &rqd, ppa);
+ 
+@@ -904,19 +902,6 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
+ 	 * with writes. Thus, there is no need to take the LUN semaphore.
+ 	 */
+ 	ret = pblk_submit_io_sync(pblk, &rqd);
+-	if (ret) {
+-		struct nvm_tgt_dev *dev = pblk->dev;
+-		struct nvm_geo *geo = &dev->geo;
+-
+-		pblk_err(pblk, "could not sync erase line:%d,blk:%d\n",
+-					pblk_ppa_to_line(ppa),
+-					pblk_ppa_to_pos(geo, ppa));
+-
+-		rqd.error = ret;
+-		goto out;
+-	}
+-
+-out:
+ 	rqd.private = pblk;
+ 	__pblk_end_io_erase(pblk, &rqd);
+ 
+@@ -1788,6 +1773,17 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
+ 	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
+ 	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
+ 
++	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
++		emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
++		memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
++		emeta_buf->header.id = cpu_to_le32(line->id);
++		emeta_buf->header.type = cpu_to_le16(line->type);
++		emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
++		emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
++		emeta_buf->header.crc = cpu_to_le32(
++			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
++	}
++
+ 	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
+ 	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
+ 
+@@ -1805,8 +1801,6 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
+ 	spin_unlock(&l_mg->close_lock);
+ 
+ 	pblk_line_should_sync_meta(pblk);
+-
+-
+ }
+ 
+ static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
+diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
+index 537e98f2b24a..88b632787abd 100644
+--- a/drivers/lightnvm/pblk-init.c
++++ b/drivers/lightnvm/pblk-init.c
+@@ -181,7 +181,8 @@ static int pblk_rwb_init(struct pblk *pblk)
+ 	unsigned int power_size, power_seg_sz;
+ 	int pgs_in_buffer;
+ 
+-	pgs_in_buffer = max(geo->mw_cunits, geo->ws_opt) * geo->all_luns;
++	pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
++								* geo->all_luns;
+ 
+ 	if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
+ 		buffer_size = write_buffer_size;
+@@ -371,9 +372,11 @@ static int pblk_core_init(struct pblk *pblk)
+ 	atomic64_set(&pblk->nr_flush, 0);
+ 	pblk->nr_flush_rst = 0;
+ 
+-	pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
++	pblk->min_write_pgs = geo->ws_opt;
+ 	max_write_ppas = pblk->min_write_pgs * geo->all_luns;
+ 	pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
++	pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
++		queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
+ 	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
+ 
+ 	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
+@@ -1083,7 +1086,8 @@ static int pblk_lines_init(struct pblk *pblk)
+ 
+ 	if (!nr_free_chks) {
+ 		pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
+-		return -EINTR;
++		ret = -EINTR;
++		goto fail_free_lines;
+ 	}
+ 
+ 	pblk_set_provision(pblk, nr_free_chks);
+diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
+index 8d2ed510c04b..bdc86ee4c779 100644
+--- a/drivers/lightnvm/pblk-sysfs.c
++++ b/drivers/lightnvm/pblk-sysfs.c
+@@ -343,7 +343,6 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
+ {
+ 	int sz;
+ 
+-
+ 	sz = snprintf(page, PAGE_SIZE,
+ 			"user:%lld gc:%lld pad:%lld WA:",
+ 			user, gc, pad);
+@@ -355,7 +354,7 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
+ 		u32 wa_frac;
+ 
+ 		wa_int = (user + gc + pad) * 100000;
+-		wa_int = div_u64(wa_int, user);
++		wa_int = div64_u64(wa_int, user);
+ 		wa_int = div_u64_rem(wa_int, 100000, &wa_frac);
+ 
+ 		sz += snprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 2321643974da..4998b4cae9c1 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -418,6 +418,7 @@ static int __uuid_write(struct cache_set *c)
+ {
+ 	BKEY_PADDED(key) k;
+ 	struct closure cl;
++	struct cache *ca;
+ 
+ 	closure_init_stack(&cl);
+ 	lockdep_assert_held(&bch_register_lock);
+@@ -429,6 +430,10 @@ static int __uuid_write(struct cache_set *c)
+ 	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
+ 	closure_sync(&cl);
+ 
++	/* Only one bucket used for uuid write */
++	ca = PTR_CACHE(c, &k.key, 0);
++	atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
++
+ 	bkey_copy(&c->uuid_bucket, &k.key);
+ 	bkey_put(c, &k.key);
+ 	return 0;
+@@ -1004,6 +1009,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
+ 	bch_write_bdev_super(dc, &cl);
+ 	closure_sync(&cl);
+ 
++	calc_cached_dev_sectors(dc->disk.c);
+ 	bcache_device_detach(&dc->disk);
+ 	list_move(&dc->list, &uncached_devices);
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index a8fbaa384e9a..2fe4f93d1d7d 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8778,6 +8778,18 @@ static void md_start_sync(struct work_struct *ws)
+  */
+ void md_check_recovery(struct mddev *mddev)
+ {
++	if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
++		/* Write superblock - thread that called mddev_suspend()
++		 * holds reconfig_mutex for us.
++		 */
++		set_bit(MD_UPDATING_SB, &mddev->flags);
++		smp_mb__after_atomic();
++		if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
++			md_update_sb(mddev, 0);
++		clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
++		wake_up(&mddev->sb_wait);
++	}
++
+ 	if (mddev->suspended)
+ 		return;
+ 
+@@ -8938,16 +8950,6 @@ void md_check_recovery(struct mddev *mddev)
+ 	unlock:
+ 		wake_up(&mddev->sb_wait);
+ 		mddev_unlock(mddev);
+-	} else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
+-		/* Write superblock - thread that called mddev_suspend()
+-		 * holds reconfig_mutex for us.
+-		 */
+-		set_bit(MD_UPDATING_SB, &mddev->flags);
+-		smp_mb__after_atomic();
+-		if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
+-			md_update_sb(mddev, 0);
+-		clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
+-		wake_up(&mddev->sb_wait);
+ 	}
+ }
+ EXPORT_SYMBOL(md_check_recovery);
+diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
+index 0496d93b2b8f..8f987bc0dd88 100644
+--- a/drivers/media/cec/cec-pin.c
++++ b/drivers/media/cec/cec-pin.c
+@@ -936,6 +936,17 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
+ 			/* Start bit, switch to receive state */
+ 			pin->ts = ts;
+ 			pin->state = CEC_ST_RX_START_BIT_LOW;
++			/*
++			 * If a transmit is pending, then that transmit should
++			 * use a signal free time of no more than
++			 * CEC_SIGNAL_FREE_TIME_NEW_INITIATOR since it will
++			 * have a new initiator due to the receive that is now
++			 * starting.
++			 */
++			if (pin->tx_msg.len && pin->tx_signal_free_time >
++			    CEC_SIGNAL_FREE_TIME_NEW_INITIATOR)
++				pin->tx_signal_free_time =
++					CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
+ 			break;
+ 		}
+ 		if (ktime_to_ns(pin->ts) == 0)
+@@ -1158,6 +1169,15 @@ static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ {
+ 	struct cec_pin *pin = adap->pin;
+ 
++	/*
++	 * If a receive is in progress, then this transmit should use
++	 * a signal free time of max CEC_SIGNAL_FREE_TIME_NEW_INITIATOR
++	 * since when it starts transmitting it will have a new initiator.
++	 */
++	if (pin->state != CEC_ST_IDLE &&
++	    signal_free_time > CEC_SIGNAL_FREE_TIME_NEW_INITIATOR)
++		signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
++
+ 	pin->tx_signal_free_time = signal_free_time;
+ 	pin->tx_extra_bytes = 0;
+ 	pin->tx_msg = *msg;
+diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c
+index 6ca88daa0ecd..65c3024c5f76 100644
+--- a/drivers/media/i2c/adv748x/adv748x-core.c
++++ b/drivers/media/i2c/adv748x/adv748x-core.c
+@@ -569,7 +569,8 @@ static int adv748x_parse_dt(struct adv748x_state *state)
+ {
+ 	struct device_node *ep_np = NULL;
+ 	struct of_endpoint ep;
+-	bool found = false;
++	bool out_found = false;
++	bool in_found = false;
+ 
+ 	for_each_endpoint_of_node(state->dev->of_node, ep_np) {
+ 		of_graph_parse_endpoint(ep_np, &ep);
+@@ -592,10 +593,17 @@ static int adv748x_parse_dt(struct adv748x_state *state)
+ 		of_node_get(ep_np);
+ 		state->endpoints[ep.port] = ep_np;
+ 
+-		found = true;
++		/*
++		 * At least one input endpoint and one output endpoint shall
++		 * be defined.
++		 */
++		if (ep.port < ADV748X_PORT_TXA)
++			in_found = true;
++		else
++			out_found = true;
+ 	}
+ 
+-	return found ? 0 : -ENODEV;
++	return in_found && out_found ? 0 : -ENODEV;
+ }
+ 
+ static void adv748x_dt_cleanup(struct adv748x_state *state)
+@@ -627,6 +635,17 @@ static int adv748x_probe(struct i2c_client *client,
+ 	state->i2c_clients[ADV748X_PAGE_IO] = client;
+ 	i2c_set_clientdata(client, state);
+ 
++	/*
++	 * We can not use container_of to get back to the state with two TXs;
++	 * Initialize the TXs's fields unconditionally on the endpoint
++	 * presence to access them later.
++	 */
++	state->txa.state = state->txb.state = state;
++	state->txa.page = ADV748X_PAGE_TXA;
++	state->txb.page = ADV748X_PAGE_TXB;
++	state->txa.port = ADV748X_PORT_TXA;
++	state->txb.port = ADV748X_PORT_TXB;
++
+ 	/* Discover and process ports declared by the Device tree endpoints */
+ 	ret = adv748x_parse_dt(state);
+ 	if (ret) {
+diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
+index 469be87a3761..556e13c911a6 100644
+--- a/drivers/media/i2c/adv748x/adv748x-csi2.c
++++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
+@@ -266,19 +266,10 @@ static int adv748x_csi2_init_controls(struct adv748x_csi2 *tx)
+ 
+ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
+ {
+-	struct device_node *ep;
+ 	int ret;
+ 
+-	/* We can not use container_of to get back to the state with two TXs */
+-	tx->state = state;
+-	tx->page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB;
+-
+-	ep = state->endpoints[is_txa(tx) ? ADV748X_PORT_TXA : ADV748X_PORT_TXB];
+-	if (!ep) {
+-		adv_err(state, "No endpoint found for %s\n",
+-				is_txa(tx) ? "txa" : "txb");
+-		return -ENODEV;
+-	}
++	if (!is_tx_enabled(tx))
++		return 0;
+ 
+ 	/* Initialise the virtual channel */
+ 	adv748x_csi2_set_virtual_channel(tx, 0);
+@@ -288,7 +279,7 @@ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
+ 			    is_txa(tx) ? "txa" : "txb");
+ 
+ 	/* Ensure that matching is based upon the endpoint fwnodes */
+-	tx->sd.fwnode = of_fwnode_handle(ep);
++	tx->sd.fwnode = of_fwnode_handle(state->endpoints[tx->port]);
+ 
+ 	/* Register internal ops for incremental subdev registration */
+ 	tx->sd.internal_ops = &adv748x_csi2_internal_ops;
+@@ -321,6 +312,9 @@ err_free_media:
+ 
+ void adv748x_csi2_cleanup(struct adv748x_csi2 *tx)
+ {
++	if (!is_tx_enabled(tx))
++		return;
++
+ 	v4l2_async_unregister_subdev(&tx->sd);
+ 	media_entity_cleanup(&tx->sd.entity);
+ 	v4l2_ctrl_handler_free(&tx->ctrl_hdl);
+diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
+index 65f83741277e..1cf46c401664 100644
+--- a/drivers/media/i2c/adv748x/adv748x.h
++++ b/drivers/media/i2c/adv748x/adv748x.h
+@@ -82,6 +82,7 @@ struct adv748x_csi2 {
+ 	struct adv748x_state *state;
+ 	struct v4l2_mbus_framefmt format;
+ 	unsigned int page;
++	unsigned int port;
+ 
+ 	struct media_pad pads[ADV748X_CSI2_NR_PADS];
+ 	struct v4l2_ctrl_handler ctrl_hdl;
+@@ -91,6 +92,7 @@ struct adv748x_csi2 {
+ 
+ #define notifier_to_csi2(n) container_of(n, struct adv748x_csi2, notifier)
+ #define adv748x_sd_to_csi2(sd) container_of(sd, struct adv748x_csi2, sd)
++#define is_tx_enabled(_tx) ((_tx)->state->endpoints[(_tx)->port] != NULL)
+ 
+ enum adv748x_hdmi_pads {
+ 	ADV748X_HDMI_SINK,
+diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
+index 91fae01d052b..3dc2100470a1 100644
+--- a/drivers/media/i2c/dw9714.c
++++ b/drivers/media/i2c/dw9714.c
+@@ -169,7 +169,8 @@ static int dw9714_probe(struct i2c_client *client)
+ 	return 0;
+ 
+ err_cleanup:
+-	dw9714_subdev_cleanup(dw9714_dev);
++	v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm);
++	media_entity_cleanup(&dw9714_dev->sd.entity);
+ 	dev_err(&client->dev, "Probe failed: %d\n", rval);
+ 	return rval;
+ }
+diff --git a/drivers/media/i2c/dw9807-vcm.c b/drivers/media/i2c/dw9807-vcm.c
+index 8ba3920b6e2f..5477ba326d68 100644
+--- a/drivers/media/i2c/dw9807-vcm.c
++++ b/drivers/media/i2c/dw9807-vcm.c
+@@ -218,7 +218,8 @@ static int dw9807_probe(struct i2c_client *client)
+ 	return 0;
+ 
+ err_cleanup:
+-	dw9807_subdev_cleanup(dw9807_dev);
++	v4l2_ctrl_handler_free(&dw9807_dev->ctrls_vcm);
++	media_entity_cleanup(&dw9807_dev->sd.entity);
+ 
+ 	return rval;
+ }
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index a3bbef682fb8..2023df14f828 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -2572,8 +2572,6 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
+ 	if (frame_rate < 0)
+ 		frame_rate = OV5640_15_FPS;
+ 
+-	sensor->current_fr = frame_rate;
+-	sensor->frame_interval = fi->interval;
+ 	mode = ov5640_find_mode(sensor, frame_rate, mode->hact,
+ 				mode->vact, true);
+ 	if (!mode) {
+@@ -2581,7 +2579,10 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
+ 		goto out;
+ 	}
+ 
+-	if (mode != sensor->current_mode) {
++	if (mode != sensor->current_mode ||
++	    frame_rate != sensor->current_fr) {
++		sensor->current_fr = frame_rate;
++		sensor->frame_interval = fi->interval;
+ 		sensor->current_mode = mode;
+ 		sensor->pending_mode_change = true;
+ 	}
+diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
+index 0c389a3fb4e5..e64f9093cd6d 100644
+--- a/drivers/media/pci/cx18/cx18-driver.c
++++ b/drivers/media/pci/cx18/cx18-driver.c
+@@ -1252,7 +1252,7 @@ static void cx18_cancel_out_work_orders(struct cx18 *cx)
+ {
+ 	int i;
+ 	for (i = 0; i < CX18_MAX_STREAMS; i++)
+-		if (&cx->streams[i].video_dev)
++		if (cx->streams[i].video_dev.v4l2_dev)
+ 			cancel_work_sync(&cx->streams[i].out_work_order);
+ }
+ 
+diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
+index f924e76e2fbf..340f8218f54d 100644
+--- a/drivers/media/platform/davinci/isif.c
++++ b/drivers/media/platform/davinci/isif.c
+@@ -1100,7 +1100,8 @@ fail_nobase_res:
+ 
+ 	while (i >= 0) {
+ 		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+-		release_mem_region(res->start, resource_size(res));
++		if (res)
++			release_mem_region(res->start, resource_size(res));
+ 		i--;
+ 	}
+ 	vpfe_unregister_ccdc_device(&isif_hw_dev);
+diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
+index b6e9e93bde7a..406ac673ad84 100644
+--- a/drivers/media/platform/pxa_camera.c
++++ b/drivers/media/platform/pxa_camera.c
+@@ -2397,7 +2397,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
+ 	pcdev->res = res;
+ 
+ 	pcdev->pdata = pdev->dev.platform_data;
+-	if (&pdev->dev.of_node && !pcdev->pdata) {
++	if (pdev->dev.of_node && !pcdev->pdata) {
+ 		err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd);
+ 	} else {
+ 		pcdev->platform_flags = pcdev->pdata->flags;
+diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
+index dfbbbf0f746f..e40fdf97b0f0 100644
+--- a/drivers/media/platform/qcom/venus/vdec.c
++++ b/drivers/media/platform/qcom/venus/vdec.c
+@@ -888,8 +888,7 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
+ 		unsigned int opb_sz = venus_helper_get_opb_size(inst);
+ 
+ 		vb = &vbuf->vb2_buf;
+-		vb->planes[0].bytesused =
+-			max_t(unsigned int, opb_sz, bytesused);
++		vb2_set_plane_payload(vb, 0, bytesused ? : opb_sz);
+ 		vb->planes[0].data_offset = data_offset;
+ 		vb->timestamp = timestamp_us * NSEC_PER_USEC;
+ 		vbuf->sequence = inst->sequence_cap++;
+diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
+index e1085e3ab3cc..485fa3fa8b49 100644
+--- a/drivers/media/platform/rcar-vin/rcar-core.c
++++ b/drivers/media/platform/rcar-vin/rcar-core.c
+@@ -174,7 +174,6 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
+ 
+ 	if (csi_id == -ENODEV) {
+ 		struct v4l2_subdev *sd;
+-		unsigned int i;
+ 
+ 		/*
+ 		 * Make sure the source entity subdevice is registered as
+diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
+index 68487ce9f79b..d96aed1343e4 100644
+--- a/drivers/media/rc/ir-rc6-decoder.c
++++ b/drivers/media/rc/ir-rc6-decoder.c
+@@ -40,6 +40,7 @@
+ #define RC6_6A_MCE_TOGGLE_MASK	0x8000	/* for the body bits */
+ #define RC6_6A_LCC_MASK		0xffff0000 /* RC6-6A-32 long customer code mask */
+ #define RC6_6A_MCE_CC		0x800f0000 /* MCE customer code */
++#define RC6_6A_KATHREIN_CC	0x80460000 /* Kathrein RCU-676 customer code */
+ #ifndef CHAR_BIT
+ #define CHAR_BIT 8	/* Normally in <limits.h> */
+ #endif
+@@ -242,13 +243,17 @@ again:
+ 				toggle = 0;
+ 				break;
+ 			case 32:
+-				if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
++				switch (scancode & RC6_6A_LCC_MASK) {
++				case RC6_6A_MCE_CC:
++				case RC6_6A_KATHREIN_CC:
+ 					protocol = RC_PROTO_RC6_MCE;
+ 					toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
+ 					scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
+-				} else {
++					break;
++				default:
+ 					protocol = RC_PROTO_RC6_6A_32;
+ 					toggle = 0;
++					break;
+ 				}
+ 				break;
+ 			default:
+diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
+index f7fcd733a2ca..963739fa8671 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-video.c
++++ b/drivers/media/usb/cx231xx/cx231xx-video.c
+@@ -1389,7 +1389,7 @@ int cx231xx_g_register(struct file *file, void *priv,
+ 		ret = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER,
+ 				(u16)reg->reg, value, 4);
+ 		reg->val = value[0] | value[1] << 8 |
+-			value[2] << 16 | value[3] << 24;
++			value[2] << 16 | (u32)value[3] << 24;
+ 		reg->size = 4;
+ 		break;
+ 	case 1:	/* AFE - read byte */
+diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
+index fe8d335a4d74..4660ad90ef55 100644
+--- a/drivers/mfd/ti_am335x_tscadc.c
++++ b/drivers/mfd/ti_am335x_tscadc.c
+@@ -295,11 +295,24 @@ static int ti_tscadc_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static int __maybe_unused ti_tscadc_can_wakeup(struct device *dev, void *data)
++{
++	return device_may_wakeup(dev);
++}
++
+ static int __maybe_unused tscadc_suspend(struct device *dev)
+ {
+ 	struct ti_tscadc_dev	*tscadc = dev_get_drvdata(dev);
+ 
+ 	regmap_write(tscadc->regmap, REG_SE, 0x00);
++	if (device_for_each_child(dev, NULL, ti_tscadc_can_wakeup)) {
++		u32 ctrl;
++
++		regmap_read(tscadc->regmap, REG_CTRL, &ctrl);
++		ctrl &= ~(CNTRLREG_POWERDOWN);
++		ctrl |= CNTRLREG_TSCSSENB;
++		regmap_write(tscadc->regmap, REG_CTRL, ctrl);
++	}
+ 	pm_runtime_put_sync(dev);
+ 
+ 	return 0;
+diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
+index b83a373e3a8d..08f4a512afad 100644
+--- a/drivers/misc/cxl/guest.c
++++ b/drivers/misc/cxl/guest.c
+@@ -1020,8 +1020,6 @@ err1:
+ 
+ void cxl_guest_remove_afu(struct cxl_afu *afu)
+ {
+-	pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);
+-
+ 	if (!afu)
+ 		return;
+ 
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index eb1a65cb878f..fa6268c0f123 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -895,14 +895,18 @@ static void
+ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
+ 	      unsigned int status)
+ {
++	unsigned int status_err;
++
+ 	/* Make sure we have data to handle */
+ 	if (!data)
+ 		return;
+ 
+ 	/* First check for errors */
+-	if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+-		      host->variant->start_err |
+-		      MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
++	status_err = status & (host->variant->start_err |
++			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
++			       MCI_TXUNDERRUN | MCI_RXOVERRUN);
++
++	if (status_err) {
+ 		u32 remain, success;
+ 
+ 		/* Terminate the DMA transfer */
+@@ -922,18 +926,18 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
+ 		success = data->blksz * data->blocks - remain;
+ 
+ 		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
+-			status, success);
+-		if (status & MCI_DATACRCFAIL) {
++			status_err, success);
++		if (status_err & MCI_DATACRCFAIL) {
+ 			/* Last block was not successful */
+ 			success -= 1;
+ 			data->error = -EILSEQ;
+-		} else if (status & MCI_DATATIMEOUT) {
++		} else if (status_err & MCI_DATATIMEOUT) {
+ 			data->error = -ETIMEDOUT;
+-		} else if (status & MCI_STARTBITERR) {
++		} else if (status_err & MCI_STARTBITERR) {
+ 			data->error = -ECOMM;
+-		} else if (status & MCI_TXUNDERRUN) {
++		} else if (status_err & MCI_TXUNDERRUN) {
+ 			data->error = -EIO;
+-		} else if (status & MCI_RXOVERRUN) {
++		} else if (status_err & MCI_RXOVERRUN) {
+ 			if (success > host->variant->fifosize)
+ 				success -= host->variant->fifosize;
+ 			else
+@@ -1790,7 +1794,7 @@ static int mmci_probe(struct amba_device *dev,
+ 			goto clk_disable;
+ 	}
+ 
+-	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
++	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
+ 
+ 	amba_set_drvdata(dev, mmc);
+ 
+@@ -1877,7 +1881,8 @@ static void mmci_restore(struct mmci_host *host)
+ 		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
+ 		writel(host->pwr_reg, host->base + MMCIPOWER);
+ 	}
+-	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
++	writel(MCI_IRQENABLE | host->variant->start_err,
++	       host->base + MMCIMASK0);
+ 	mmci_reg_delay(host);
+ 
+ 	spin_unlock_irqrestore(&host->lock, flags);
+diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
+index 517591d219e9..613d37ab08d2 100644
+--- a/drivers/mmc/host/mmci.h
++++ b/drivers/mmc/host/mmci.h
+@@ -181,9 +181,9 @@
+ #define MMCIFIFO		0x080 /* to 0x0bc */
+ 
+ #define MCI_IRQENABLE	\
+-	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\
+-	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
+-	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK)
++	(MCI_CMDCRCFAILMASK | MCI_DATACRCFAILMASK | MCI_CMDTIMEOUTMASK | \
++	MCI_DATATIMEOUTMASK | MCI_TXUNDERRUNMASK | MCI_RXOVERRUNMASK |	\
++	MCI_CMDRESPENDMASK | MCI_CMDSENTMASK)
+ 
+ /* These interrupts are directed to IRQ1 when two IRQ lines are available */
+ #define MCI_IRQ1MASK \
+diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+index ca0b43973769..382172fb3da8 100644
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+@@ -298,6 +298,7 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
+ 	{ .soc_id = "r8a7796", .revision = "ES1.0",
+ 	  .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
+ 	/* generic ones */
++	{ .soc_id = "r8a774a1" },
+ 	{ .soc_id = "r8a7795" },
+ 	{ .soc_id = "r8a7796" },
+ 	{ .soc_id = "r8a77965" },
+@@ -309,12 +310,20 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
+ static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
+ {
+ 	const struct soc_device_attribute *soc = soc_device_match(gen3_soc_whitelist);
++	struct device *dev = &pdev->dev;
+ 
+ 	if (!soc)
+ 		return -ENODEV;
+ 
+ 	global_flags |= (unsigned long)soc->data;
+ 
++	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
++	if (!dev->dma_parms)
++		return -ENOMEM;
++
++	/* value is max of SD_SECCNT. Confirmed by HW engineers */
++	dma_set_max_seg_size(dev, 0xffffffff);
++
+ 	return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops);
+ }
+ 
+diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
+index 7d13ca9ea534..35630ccbe9e5 100644
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -926,8 +926,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
+ 	if (mrq->cmd->error || (mrq->data && mrq->data->error))
+ 		tmio_mmc_abort_dma(host);
+ 
+-	if (host->check_scc_error)
+-		host->check_scc_error(host);
++	/* SCC error means retune, but executed command was still successful */
++	if (host->check_scc_error && host->check_scc_error(host))
++		mmc_retune_needed(host->mmc);
+ 
+ 	/* If SET_BLOCK_COUNT, continue with main command */
+ 	if (host->mrq && !mrq->cmd->error) {
+diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
+index 270d3c9580c5..c4a1d04b8c80 100644
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -90,7 +90,6 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
+ 				   SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
+ 				   SPI_MEM_OP_NO_DUMMY,
+ 				   SPI_MEM_OP_DATA_OUT(len, buf, 1));
+-	size_t remaining = len;
+ 	int ret;
+ 
+ 	/* get transfer protocols. */
+@@ -101,22 +100,16 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
+ 	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ 		op.addr.nbytes = 0;
+ 
+-	while (remaining) {
+-		op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
+-		ret = spi_mem_adjust_op_size(flash->spimem, &op);
+-		if (ret)
+-			return ret;
+-
+-		ret = spi_mem_exec_op(flash->spimem, &op);
+-		if (ret)
+-			return ret;
++	ret = spi_mem_adjust_op_size(flash->spimem, &op);
++	if (ret)
++		return ret;
++	op.data.nbytes = len < op.data.nbytes ? len : op.data.nbytes;
+ 
+-		op.addr.val += op.data.nbytes;
+-		remaining -= op.data.nbytes;
+-		op.data.buf.out += op.data.nbytes;
+-	}
++	ret = spi_mem_exec_op(flash->spimem, &op);
++	if (ret)
++		return ret;
+ 
+-	return len;
++	return op.data.nbytes;
+ }
+ 
+ /*
+diff --git a/drivers/mtd/maps/physmap_of_core.c b/drivers/mtd/maps/physmap_of_core.c
+index 4129535b8e46..ece605d78c21 100644
+--- a/drivers/mtd/maps/physmap_of_core.c
++++ b/drivers/mtd/maps/physmap_of_core.c
+@@ -31,7 +31,6 @@
+ struct of_flash_list {
+ 	struct mtd_info *mtd;
+ 	struct map_info map;
+-	struct resource *res;
+ };
+ 
+ struct of_flash {
+@@ -56,18 +55,10 @@ static int of_flash_remove(struct platform_device *dev)
+ 			mtd_concat_destroy(info->cmtd);
+ 	}
+ 
+-	for (i = 0; i < info->list_size; i++) {
++	for (i = 0; i < info->list_size; i++)
+ 		if (info->list[i].mtd)
+ 			map_destroy(info->list[i].mtd);
+ 
+-		if (info->list[i].map.virt)
+-			iounmap(info->list[i].map.virt);
+-
+-		if (info->list[i].res) {
+-			release_resource(info->list[i].res);
+-			kfree(info->list[i].res);
+-		}
+-	}
+ 	return 0;
+ }
+ 
+@@ -215,10 +206,11 @@ static int of_flash_probe(struct platform_device *dev)
+ 
+ 		err = -EBUSY;
+ 		res_size = resource_size(&res);
+-		info->list[i].res = request_mem_region(res.start, res_size,
+-						       dev_name(&dev->dev));
+-		if (!info->list[i].res)
++		info->list[i].map.virt = devm_ioremap_resource(&dev->dev, &res);
++		if (IS_ERR(info->list[i].map.virt)) {
++			err = PTR_ERR(info->list[i].map.virt);
+ 			goto err_out;
++		}
+ 
+ 		err = -ENXIO;
+ 		width = of_get_property(dp, "bank-width", NULL);
+@@ -246,15 +238,6 @@ static int of_flash_probe(struct platform_device *dev)
+ 		if (err)
+ 			goto err_out;
+ 
+-		err = -ENOMEM;
+-		info->list[i].map.virt = ioremap(info->list[i].map.phys,
+-						 info->list[i].map.size);
+-		if (!info->list[i].map.virt) {
+-			dev_err(&dev->dev, "Failed to ioremap() flash"
+-				" region\n");
+-			goto err_out;
+-		}
+-
+ 		simple_map_init(&info->list[i].map);
+ 
+ 		/*
+diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
+index bb8866e05ff7..1e7273263c4b 100644
+--- a/drivers/mtd/nand/raw/sh_flctl.c
++++ b/drivers/mtd/nand/raw/sh_flctl.c
+@@ -480,7 +480,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+ 
+ 	/* initiate DMA transfer */
+ 	if (flctl->chan_fifo0_rx && rlen >= 32 &&
+-		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
++		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
+ 			goto convert;	/* DMA success */
+ 
+ 	/* do polling transfer */
+@@ -539,7 +539,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
+ 
+ 	/* initiate DMA transfer */
+ 	if (flctl->chan_fifo0_tx && rlen >= 32 &&
+-		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
++		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
+ 			return;	/* DMA success */
+ 
+ 	/* do polling transfer */
+diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
+index 0806c7a81c0f..04cedd3a2bf6 100644
+--- a/drivers/mtd/spi-nor/cadence-quadspi.c
++++ b/drivers/mtd/spi-nor/cadence-quadspi.c
+@@ -972,7 +972,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
+ 		return 0;
+ 	}
+ 
+-	dma_dst = dma_map_single(nor->dev, buf, len, DMA_DEV_TO_MEM);
++	dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE);
+ 	if (dma_mapping_error(nor->dev, dma_dst)) {
+ 		dev_err(nor->dev, "dma mapping failed\n");
+ 		return -ENOMEM;
+@@ -1007,7 +1007,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
+ 	}
+ 
+ err_unmap:
+-	dma_unmap_single(nor->dev, dma_dst, len, DMA_DEV_TO_MEM);
++	dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 790c684f08ab..b178c2e9dc23 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -78,8 +78,12 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ 		memcpy(buf, data_addr, bytesize);
+ 
+ 	dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+-	if (rc)
++	if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
++		netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
++		return -EACCES;
++	} else if (rc) {
+ 		return -EIO;
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+index b34f0f077a31..838692948c0b 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+@@ -273,8 +273,8 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
+ 		enum cxgb4_dcb_state_input input =
+ 			((pcmd->u.dcb.control.all_syncd_pkd &
+ 			  FW_PORT_CMD_ALL_SYNCD_F)
+-			 ? CXGB4_DCB_STATE_FW_ALLSYNCED
+-			 : CXGB4_DCB_STATE_FW_INCOMPLETE);
++			 ? CXGB4_DCB_INPUT_FW_ALLSYNCED
++			 : CXGB4_DCB_INPUT_FW_INCOMPLETE);
+ 
+ 		if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
+ 			dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+index 02040b99c78a..484ee8290090 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+@@ -67,7 +67,7 @@
+ 	do { \
+ 		if ((__dcb)->dcb_version == FW_PORT_DCB_VER_IEEE) \
+ 			cxgb4_dcb_state_fsm((__dev), \
+-					    CXGB4_DCB_STATE_FW_ALLSYNCED); \
++					    CXGB4_DCB_INPUT_FW_ALLSYNCED); \
+ 	} while (0)
+ 
+ /* States we can be in for a port's Data Center Bridging.
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 0ccfa6a84535..e11a7de20b8f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -195,8 +195,6 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
+ 				   struct hns3_nic_priv *priv)
+ {
+-	struct hnae3_handle *h = priv->ae_handle;
+-
+ 	/* initialize the configuration for interrupt coalescing.
+ 	 * 1. GL (Interrupt Gap Limiter)
+ 	 * 2. RL (Interrupt Rate Limiter)
+@@ -209,9 +207,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
+ 	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
+ 	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
+ 
+-	/* Default: disable RL */
+-	h->kinfo.int_rl_setting = 0;
+-
+ 	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
+ 	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
+ 	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
+@@ -1447,13 +1442,11 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
+ 	}
+ 
+ 	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
+-	if (ret) {
++	if (ret)
+ 		netdev_err(netdev, "failed to change MTU in hardware %d\n",
+ 			   ret);
+-		return ret;
+-	}
+-
+-	netdev->mtu = new_mtu;
++	else
++		netdev->mtu = new_mtu;
+ 
+ 	/* if the netdev was running earlier, bring it up again */
+ 	if (if_running && hns3_nic_net_open(netdev))
+@@ -2131,18 +2124,18 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
+ 	napi_gro_receive(&ring->tqp_vector->napi, skb);
+ }
+ 
+-static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+-			       struct hns3_desc *desc, u32 l234info)
++static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
++				struct hns3_desc *desc, u32 l234info,
++				u16 *vlan_tag)
+ {
+ 	struct pci_dev *pdev = ring->tqp->handle->pdev;
+-	u16 vlan_tag;
+ 
+ 	if (pdev->revision == 0x20) {
+-		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+-		if (!(vlan_tag & VLAN_VID_MASK))
+-			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
++		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
++		if (!(*vlan_tag & VLAN_VID_MASK))
++			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ 
+-		return vlan_tag;
++		return (*vlan_tag != 0);
+ 	}
+ 
+ #define HNS3_STRP_OUTER_VLAN	0x1
+@@ -2151,17 +2144,14 @@ static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+ 	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ 				HNS3_RXD_STRP_TAGP_S)) {
+ 	case HNS3_STRP_OUTER_VLAN:
+-		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+-		break;
++		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
++		return true;
+ 	case HNS3_STRP_INNER_VLAN:
+-		vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+-		break;
++		*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
++		return true;
+ 	default:
+-		vlan_tag = 0;
+-		break;
++		return false;
+ 	}
+-
+-	return vlan_tag;
+ }
+ 
+ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+@@ -2263,8 +2253,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+ 	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ 		u16 vlan_tag;
+ 
+-		vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
+-		if (vlan_tag & VLAN_VID_MASK)
++		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+ 			__vlan_hwaccel_put_tag(skb,
+ 					       htons(ETH_P_8021Q),
+ 					       vlan_tag);
+@@ -3425,6 +3414,31 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
+ 	return 0;
+ }
+ 
++static void hns3_store_coal(struct hns3_nic_priv *priv)
++{
++	/* ethtool only support setting and querying one coal
++	 * configuation for now, so save the vector 0' coal
++	 * configuation here in order to restore it.
++	 */
++	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
++	       sizeof(struct hns3_enet_coalesce));
++	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
++	       sizeof(struct hns3_enet_coalesce));
++}
++
++static void hns3_restore_coal(struct hns3_nic_priv *priv)
++{
++	u16 vector_num = priv->vector_num;
++	int i;
++
++	for (i = 0; i < vector_num; i++) {
++		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
++		       sizeof(struct hns3_enet_coalesce));
++		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
++		       sizeof(struct hns3_enet_coalesce));
++	}
++}
++
+ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
+ {
+ 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+@@ -3471,6 +3485,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
+ 	/* Carrier off reporting is important to ethtool even BEFORE open */
+ 	netif_carrier_off(netdev);
+ 
++	hns3_restore_coal(priv);
++
+ 	ret = hns3_nic_init_vector_data(priv);
+ 	if (ret)
+ 		return ret;
+@@ -3498,6 +3514,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
+ 		return ret;
+ 	}
+ 
++	hns3_store_coal(priv);
++
+ 	ret = hns3_uninit_all_ring(priv);
+ 	if (ret)
+ 		netdev_err(netdev, "uninit ring error\n");
+@@ -3532,24 +3550,7 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
+ 	return ret;
+ }
+ 
+-static void hns3_restore_coal(struct hns3_nic_priv *priv,
+-			      struct hns3_enet_coalesce *tx,
+-			      struct hns3_enet_coalesce *rx)
+-{
+-	u16 vector_num = priv->vector_num;
+-	int i;
+-
+-	for (i = 0; i < vector_num; i++) {
+-		memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
+-		       sizeof(struct hns3_enet_coalesce));
+-		memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
+-		       sizeof(struct hns3_enet_coalesce));
+-	}
+-}
+-
+-static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
+-			       struct hns3_enet_coalesce *tx,
+-			       struct hns3_enet_coalesce *rx)
++static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+ {
+ 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+ 	struct hnae3_handle *h = hns3_get_handle(netdev);
+@@ -3567,7 +3568,7 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
+ 	if (ret)
+ 		goto err_alloc_vector;
+ 
+-	hns3_restore_coal(priv, tx, rx);
++	hns3_restore_coal(priv);
+ 
+ 	ret = hns3_nic_init_vector_data(priv);
+ 	if (ret)
+@@ -3599,7 +3600,6 @@ int hns3_set_channels(struct net_device *netdev,
+ 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+ 	struct hnae3_handle *h = hns3_get_handle(netdev);
+ 	struct hnae3_knic_private_info *kinfo = &h->kinfo;
+-	struct hns3_enet_coalesce tx_coal, rx_coal;
+ 	bool if_running = netif_running(netdev);
+ 	u32 new_tqp_num = ch->combined_count;
+ 	u16 org_tqp_num;
+@@ -3631,15 +3631,7 @@ int hns3_set_channels(struct net_device *netdev,
+ 		goto open_netdev;
+ 	}
+ 
+-	/* Changing the tqp num may also change the vector num,
+-	 * ethtool only support setting and querying one coal
+-	 * configuation for now, so save the vector 0' coal
+-	 * configuation here in order to restore it.
+-	 */
+-	memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
+-	       sizeof(struct hns3_enet_coalesce));
+-	memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
+-	       sizeof(struct hns3_enet_coalesce));
++	hns3_store_coal(priv);
+ 
+ 	hns3_nic_dealloc_vector_data(priv);
+ 
+@@ -3647,10 +3639,9 @@ int hns3_set_channels(struct net_device *netdev,
+ 	hns3_put_ring_config(priv);
+ 
+ 	org_tqp_num = h->kinfo.num_tqps;
+-	ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
++	ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+ 	if (ret) {
+-		ret = hns3_modify_tqp_num(netdev, org_tqp_num,
+-					  &tx_coal, &rx_coal);
++		ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+ 		if (ret) {
+ 			/* If revert to old tqp failed, fatal error occurred */
+ 			dev_err(&netdev->dev,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+index cb450d7ec8c1..94d7446811d5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -541,6 +541,8 @@ struct hns3_nic_priv {
+ 	/* Vxlan/Geneve information */
+ 	struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX];
+ 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
++	struct hns3_enet_coalesce tx_coal;
++	struct hns3_enet_coalesce rx_coal;
+ };
+ 
+ union l3_hdr_info {
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 46a71d289bca..6a677fd540d6 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -4211,7 +4211,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+ 		vf->link_forced = true;
+ 		vf->link_up = true;
+ 		pfe.event_data.link_event.link_status = true;
+-		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
++		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
+ 		break;
+ 	case IFLA_VF_LINK_STATE_DISABLE:
+ 		vf->link_forced = true;
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index 6b7ec2ae5ad6..4012adbab011 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -468,6 +468,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
+ 	void *daddr = NULL;
+ 	u32 act = 0;
+ 	__be16 *off;
++	u8 q_rgn;
+ 
+ 	if (opc == ice_aqc_opc_remove_sw_rules) {
+ 		s_rule->pdata.lkup_tx_rx.act = 0;
+@@ -503,14 +504,19 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
+ 		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ 			ICE_SINGLE_ACT_Q_INDEX_M;
+ 		break;
++	case ICE_DROP_PACKET:
++		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
++			ICE_SINGLE_ACT_VALID_BIT;
++		break;
+ 	case ICE_FWD_TO_QGRP:
++		q_rgn = f_info->qgrp_size > 0 ?
++			(u8)ilog2(f_info->qgrp_size) : 0;
+ 		act |= ICE_SINGLE_ACT_TO_Q;
+-		act |= (f_info->qgrp_size << ICE_SINGLE_ACT_Q_REGION_S) &
++		act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
++			ICE_SINGLE_ACT_Q_INDEX_M;
++		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
+ 			ICE_SINGLE_ACT_Q_REGION_M;
+ 		break;
+-	case ICE_DROP_PACKET:
+-		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP;
+-		break;
+ 	default:
+ 		return;
+ 	}
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 85280765d793..b45a6e2ed8d1 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -3582,12 +3582,18 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
+ 		else
+ 			mtqc |= IXGBE_MTQC_64VF;
+ 	} else {
+-		if (tcs > 4)
++		if (tcs > 4) {
+ 			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+-		else if (tcs > 1)
++		} else if (tcs > 1) {
+ 			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+-		else
+-			mtqc = IXGBE_MTQC_64Q_1PB;
++		} else {
++			u8 max_txq = adapter->num_tx_queues +
++				adapter->num_xdp_queues;
++			if (max_txq > 63)
++				mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
++			else
++				mtqc = IXGBE_MTQC_64Q_1PB;
++		}
+ 	}
+ 
+ 	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+@@ -5181,6 +5187,7 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
+ 	struct ixgbe_hw *hw = &adapter->hw;
+ 	struct hlist_node *node2;
+ 	struct ixgbe_fdir_filter *filter;
++	u64 action;
+ 
+ 	spin_lock(&adapter->fdir_perfect_lock);
+ 
+@@ -5189,12 +5196,17 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
+ 
+ 	hlist_for_each_entry_safe(filter, node2,
+ 				  &adapter->fdir_filter_list, fdir_node) {
++		action = filter->action;
++		if (action != IXGBE_FDIR_DROP_QUEUE && action != 0)
++			action =
++			(action >> ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) - 1;
++
+ 		ixgbe_fdir_write_perfect_filter_82599(hw,
+ 				&filter->filter,
+ 				filter->sw_idx,
+-				(filter->action == IXGBE_FDIR_DROP_QUEUE) ?
++				(action == IXGBE_FDIR_DROP_QUEUE) ?
+ 				IXGBE_FDIR_DROP_QUEUE :
+-				adapter->rx_ring[filter->action]->reg_idx);
++				adapter->rx_ring[action]->reg_idx);
+ 	}
+ 
+ 	spin_unlock(&adapter->fdir_perfect_lock);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index a4f237f815d1..8d556eb37b7a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -2324,8 +2324,15 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
+ 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ 	struct mlxsw_sp_switchdev_event_work *switchdev_work;
+ 	struct switchdev_notifier_fdb_info *fdb_info = ptr;
++	struct net_device *br_dev;
+ 
+-	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
++	/* Tunnel devices are not our uppers, so check their master instead */
++	br_dev = netdev_master_upper_dev_get_rcu(dev);
++	if (!br_dev)
++		return NOTIFY_DONE;
++	if (!netif_is_bridge_master(br_dev))
++		return NOTIFY_DONE;
++	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
+ 		return NOTIFY_DONE;
+ 
+ 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index 015de1e0addd..2847509a183d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -796,7 +796,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
+ 		tx_pkt.vlan = p_buffer->vlan;
+ 		tx_pkt.bd_flags = bd_flags;
+ 		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
+-		tx_pkt.tx_dest = p_ll2_conn->tx_dest;
++		switch (p_ll2_conn->tx_dest) {
++		case CORE_TX_DEST_NW:
++			tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
++			break;
++		case CORE_TX_DEST_LB:
++			tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
++			break;
++		case CORE_TX_DEST_DROP:
++		default:
++			tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
++			break;
++		}
+ 		tx_pkt.first_frag = first_frag;
+ 		tx_pkt.first_frag_len = p_buffer->packet_length;
+ 		tx_pkt.cookie = p_buffer;
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index a57d82ef0f81..1f57a6a2b8a2 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -579,7 +579,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
+ 	err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ 			      USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
+ 			      0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
+-	if (err < sizeof(max_datagram_size)) {
++	if (err != sizeof(max_datagram_size)) {
+ 		dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
+ 		goto out;
+ 	}
+diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
+index 9feea02e7d37..5c9fc4070fd2 100644
+--- a/drivers/net/wireless/ath/ath10k/core.h
++++ b/drivers/net/wireless/ath/ath10k/core.h
+@@ -1003,6 +1003,7 @@ struct ath10k {
+ 
+ 	struct completion install_key_done;
+ 
++	int last_wmi_vdev_start_status;
+ 	struct completion vdev_setup_done;
+ 
+ 	struct workqueue_struct *workqueue;
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 9d033da46ec2..d3d33cc2adfd 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -968,7 +968,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
+ 	if (time_left == 0)
+ 		return -ETIMEDOUT;
+ 
+-	return 0;
++	return ar->last_wmi_vdev_start_status;
+ }
+ 
+ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 40b36e73bb48..aefc92d2c09b 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -3248,18 +3248,31 @@ void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
+ {
+ 	struct wmi_vdev_start_ev_arg arg = {};
+ 	int ret;
++	u32 status;
+ 
+ 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+ 
++	ar->last_wmi_vdev_start_status = 0;
++
+ 	ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
+ 	if (ret) {
+ 		ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
+-		return;
++		ar->last_wmi_vdev_start_status = ret;
++		goto out;
+ 	}
+ 
+-	if (WARN_ON(__le32_to_cpu(arg.status)))
+-		return;
++	status = __le32_to_cpu(arg.status);
++	if (WARN_ON_ONCE(status)) {
++		ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
++			    status, (status == WMI_VDEV_START_CHAN_INVALID) ?
++			    "chan-invalid" : "unknown");
++		/* Setup is done one way or another though, so we should still
++		 * do the completion, so don't return here.
++		 */
++		ar->last_wmi_vdev_start_status = -EINVAL;
++	}
+ 
++out:
+ 	complete(&ar->vdev_setup_done);
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
+index 36220258e3c7..e341cfb3fcc2 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.h
++++ b/drivers/net/wireless/ath/ath10k/wmi.h
+@@ -6642,11 +6642,17 @@ struct wmi_ch_info_ev_arg {
+ 	__le32 rx_frame_count;
+ };
+ 
++/* From 10.4 firmware, not sure all have the same values. */
++enum wmi_vdev_start_status {
++	WMI_VDEV_START_OK = 0,
++	WMI_VDEV_START_CHAN_INVALID,
++};
++
+ struct wmi_vdev_start_ev_arg {
+ 	__le32 vdev_id;
+ 	__le32 req_id;
+ 	__le32 resp_type; /* %WMI_VDEV_RESP_ */
+-	__le32 status;
++	__le32 status; /* See wmi_vdev_start_status enum above */
+ };
+ 
+ struct wmi_peer_kick_ev_arg {
+diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
+index 440e16e641e4..f75eb068e6cf 100644
+--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
++++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
+@@ -411,7 +411,7 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
+ 
+ 		ath_dbg(common, SPECTRAL_SCAN,
+ 			"Calculated new upper max 0x%X at %i\n",
+-			tmp_mag, i);
++			tmp_mag, fft_sample_40.upper_max_index);
+ 	} else
+ 	for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
+ 		if (fft_sample_40.data[i] == (upper_mag >> max_exp))
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+index 3e9c4f2f5dd1..456a1bf008b3 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+@@ -74,7 +74,7 @@
+ #define P2P_AF_MAX_WAIT_TIME		msecs_to_jiffies(2000)
+ #define P2P_INVALID_CHANNEL		-1
+ #define P2P_CHANNEL_SYNC_RETRY		5
+-#define P2P_AF_FRM_SCAN_MAX_WAIT	msecs_to_jiffies(1500)
++#define P2P_AF_FRM_SCAN_MAX_WAIT	msecs_to_jiffies(450)
+ #define P2P_DEFAULT_SLEEP_TIME_VSDB	200
+ 
+ /* WiFi P2P Public Action Frame OUI Subtypes */
+@@ -1134,7 +1134,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
+ {
+ 	struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+ 	struct brcmf_cfg80211_vif *pri_vif;
+-	unsigned long duration;
+ 	s32 retry;
+ 
+ 	brcmf_dbg(TRACE, "Enter\n");
+@@ -1150,7 +1149,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
+ 	 * pending action frame tx is cancelled.
+ 	 */
+ 	retry = 0;
+-	duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
+ 	while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
+ 	       (afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
+ 		afx_hdl->is_listen = false;
+@@ -1158,7 +1156,8 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
+ 			  retry);
+ 		/* search peer on peer's listen channel */
+ 		schedule_work(&afx_hdl->afx_work);
+-		wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
++		wait_for_completion_timeout(&afx_hdl->act_frm_scan,
++					    P2P_AF_FRM_SCAN_MAX_WAIT);
+ 		if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+ 		    (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ 			       &p2p->status)))
+@@ -1171,7 +1170,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
+ 			afx_hdl->is_listen = true;
+ 			schedule_work(&afx_hdl->afx_work);
+ 			wait_for_completion_timeout(&afx_hdl->act_frm_scan,
+-						    duration);
++						    P2P_AF_FRM_SCAN_MAX_WAIT);
+ 		}
+ 		if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
+ 		    (!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+@@ -1458,10 +1457,12 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
+ 		return 0;
+ 
+ 	if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
+-		if (e->status == BRCMF_E_STATUS_SUCCESS)
++		if (e->status == BRCMF_E_STATUS_SUCCESS) {
+ 			set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
+ 				&p2p->status);
+-		else {
++			if (!p2p->wait_for_offchan_complete)
++				complete(&p2p->send_af_done);
++		} else {
+ 			set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
+ 			/* If there is no ack, we don't need to wait for
+ 			 * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
+@@ -1512,6 +1513,17 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
+ 	p2p->af_sent_channel = le32_to_cpu(af_params->channel);
+ 	p2p->af_tx_sent_jiffies = jiffies;
+ 
++	if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) &&
++	    p2p->af_sent_channel ==
++	    ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq))
++		p2p->wait_for_offchan_complete = false;
++	else
++		p2p->wait_for_offchan_complete = true;
++
++	brcmf_dbg(TRACE, "Waiting for %s tx completion event\n",
++		  (p2p->wait_for_offchan_complete) ?
++		   "off-channel" : "on-channel");
++
+ 	timeout = wait_for_completion_timeout(&p2p->send_af_done,
+ 					      P2P_AF_MAX_WAIT_TIME);
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+index 0e8b34d2d85c..39f0d0218088 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+@@ -124,6 +124,7 @@ struct afx_hdl {
+  * @gon_req_action: about to send go negotiation requets frame.
+  * @block_gon_req_tx: drop tx go negotiation requets frame.
+  * @p2pdev_dynamically: is p2p device if created by module param or supplicant.
++ * @wait_for_offchan_complete: wait for off-channel tx completion event.
+  */
+ struct brcmf_p2p_info {
+ 	struct brcmf_cfg80211_info *cfg;
+@@ -144,6 +145,7 @@ struct brcmf_p2p_info {
+ 	bool gon_req_action;
+ 	bool block_gon_req_tx;
+ 	bool p2pdev_dynamically;
++	bool wait_for_offchan_complete;
+ };
+ 
+ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 79bdae994822..868cb1195a74 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -731,8 +731,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
+ {
+ 	struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+ 	struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
++	bool unified = fw_has_capa(&mvm->fw->ucode_capa,
++				   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+ 	struct wowlan_key_data key_data = {
+-		.configure_keys = !d0i3,
++		.configure_keys = !d0i3 && !unified,
+ 		.use_rsc_tsc = false,
+ 		.tkip = &tkip_cmd,
+ 		.use_tkip = false,
+diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
+index ade4a2029a24..1b5abd4816ed 100644
+--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
+@@ -548,6 +548,12 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
+ 	struct mt76_wcid *wcid = status->wcid;
+ 	bool ps;
+ 
++	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
++		sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
++		if (sta)
++			wcid = status->wcid = (struct mt76_wcid *) sta->drv_priv;
++	}
++
+ 	if (!wcid || !wcid->sta)
+ 		return;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
+index 324b2a4b8b67..54a9e1dfaf7a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init_common.c
+@@ -72,6 +72,9 @@ void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+ {
+ 	u32 val;
+ 
++	if (!enable)
++		goto out;
++
+ 	val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+ 
+ 	val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+@@ -87,6 +90,7 @@ void mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+ 	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+ 	udelay(20);
+ 
++out:
+ 	mt76x2_set_wlan_state(dev, enable);
+ }
+ EXPORT_SYMBOL_GPL(mt76x2_reset_wlan);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
+index e66f047ea448..26cfda24ce08 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
+@@ -53,6 +53,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		return -ENOMEM;
+ 
+ 	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
++	mt76x2_reset_wlan(dev, false);
+ 
+ 	dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+ 	dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
+index 9fd6ab4cbb94..ca68dd184489 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy_common.c
+@@ -232,9 +232,9 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+ 	mt76_wr(dev, MT_TX_PWR_CFG_7,
+ 		mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+ 	mt76_wr(dev, MT_TX_PWR_CFG_8,
+-		mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
++		mt76x2_tx_power_mask(t.ht[14], 0, t.vht[8], t.vht[8]));
+ 	mt76_wr(dev, MT_TX_PWR_CFG_9,
+-		mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
++		mt76x2_tx_power_mask(t.ht[6], 0, t.vht[8], t.vht[8]));
+ }
+ EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
+ 
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+index 4aa332f4646b..ff8a46c9595e 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+@@ -521,9 +521,16 @@ static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
+ 	int ret;
+ 
+ 	ret = qtnf_cmd_send_del_key(vif, key_index, pairwise, mac_addr);
+-	if (ret)
+-		pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n",
+-		       vif->mac->macid, vif->vifid, key_index, pairwise);
++	if (ret) {
++		if (ret == -ENOENT) {
++			pr_debug("VIF%u.%u: key index %d out of bounds\n",
++				 vif->mac->macid, vif->vifid, key_index);
++		} else {
++			pr_err("VIF%u.%u: failed to delete key: idx=%u pw=%u\n",
++			       vif->mac->macid, vif->vifid,
++			       key_index, pairwise);
++		}
++	}
+ 
+ 	return ret;
+ }
+@@ -1109,6 +1116,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
+ 	if (hw_info->hw_capab & QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR)
+ 		wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ 
++	if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN))
++		wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN;
++
+ #ifdef CONFIG_PM
+ 	if (macinfo->wowlan)
+ 		wiphy->wowlan = macinfo->wowlan;
+@@ -1123,6 +1133,15 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
+ 		wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+ 	}
+ 
++	if (mac->macinfo.extended_capabilities_len) {
++		wiphy->extended_capabilities =
++			mac->macinfo.extended_capabilities;
++		wiphy->extended_capabilities_mask =
++			mac->macinfo.extended_capabilities_mask;
++		wiphy->extended_capabilities_len =
++			mac->macinfo.extended_capabilities_len;
++	}
++
+ 	strlcpy(wiphy->fw_version, hw_info->fw_version,
+ 		sizeof(wiphy->fw_version));
+ 	wiphy->hw_version = hw_info->hw_version;
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
+index ae9e77300533..734844b34c26 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
+@@ -544,6 +544,9 @@ qtnf_sta_info_parse_rate(struct rate_info *rate_dst,
+ 		rate_dst->flags |= RATE_INFO_FLAGS_MCS;
+ 	else if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_VHT_MCS)
+ 		rate_dst->flags |= RATE_INFO_FLAGS_VHT_MCS;
++
++	if (rate_src->flags & QLINK_STA_INFO_RATE_FLAG_SHORT_GI)
++		rate_dst->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ }
+ 
+ static void
+@@ -1353,8 +1356,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
+ 		ext_capa_mask = NULL;
+ 	}
+ 
+-	kfree(mac->macinfo.extended_capabilities);
+-	kfree(mac->macinfo.extended_capabilities_mask);
++	qtnf_mac_ext_caps_free(mac);
+ 	mac->macinfo.extended_capabilities = ext_capa;
+ 	mac->macinfo.extended_capabilities_mask = ext_capa_mask;
+ 	mac->macinfo.extended_capabilities_len = ext_capa_len;
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
+index 19abbc4e23e0..08928d5e252d 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
++++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
+@@ -304,6 +304,19 @@ void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac)
+ 	}
+ }
+ 
++void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac)
++{
++	if (mac->macinfo.extended_capabilities_len) {
++		kfree(mac->macinfo.extended_capabilities);
++		mac->macinfo.extended_capabilities = NULL;
++
++		kfree(mac->macinfo.extended_capabilities_mask);
++		mac->macinfo.extended_capabilities_mask = NULL;
++
++		mac->macinfo.extended_capabilities_len = 0;
++	}
++}
++
+ static void qtnf_vif_reset_handler(struct work_struct *work)
+ {
+ 	struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work);
+@@ -493,8 +506,7 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
+ 	}
+ 
+ 	qtnf_mac_iface_comb_free(mac);
+-	kfree(mac->macinfo.extended_capabilities);
+-	kfree(mac->macinfo.extended_capabilities_mask);
++	qtnf_mac_ext_caps_free(mac);
+ 	kfree(mac->macinfo.wowlan);
+ 	wiphy_free(wiphy);
+ 	bus->mac[macid] = NULL;
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
+index a1e338a1f055..ecb5c41c8ed7 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
++++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
+@@ -151,6 +151,7 @@ struct qtnf_hw_info {
+ struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
+ struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
+ void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
++void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
+ struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
+ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
+ 			 const char *name, unsigned char name_assign_type);
+diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+index 99d37e3efba6..c5ae4ea9a47a 100644
+--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
++++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+@@ -71,6 +71,7 @@ struct qlink_msg_header {
+  * @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality
+  * @QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR: device supports MAC Address
+  *	Randomization in probe requests.
++ * @QLINK_HW_CAPAB_OBSS_SCAN: device can perform OBSS scanning.
+  */
+ enum qlink_hw_capab {
+ 	QLINK_HW_CAPAB_REG_UPDATE		= BIT(0),
+@@ -78,6 +79,7 @@ enum qlink_hw_capab {
+ 	QLINK_HW_CAPAB_DFS_OFFLOAD		= BIT(2),
+ 	QLINK_HW_CAPAB_SCAN_RANDOM_MAC_ADDR	= BIT(3),
+ 	QLINK_HW_CAPAB_PWR_MGMT			= BIT(4),
++	QLINK_HW_CAPAB_OBSS_SCAN		= BIT(5),
+ };
+ 
+ enum qlink_iface_type {
+diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+index b026e80940a4..6fbf8845a2ab 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
++++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+@@ -1324,13 +1324,13 @@ bool exhalbtc_initlize_variables_wifi_only(struct rtl_priv *rtlpriv)
+ 
+ 	switch (rtlpriv->rtlhal.interface) {
+ 	case INTF_PCI:
+-		wifionly_cfg->chip_interface = BTC_INTF_PCI;
++		wifionly_cfg->chip_interface = WIFIONLY_INTF_PCI;
+ 		break;
+ 	case INTF_USB:
+-		wifionly_cfg->chip_interface = BTC_INTF_USB;
++		wifionly_cfg->chip_interface = WIFIONLY_INTF_USB;
+ 		break;
+ 	default:
+-		wifionly_cfg->chip_interface = BTC_INTF_UNKNOWN;
++		wifionly_cfg->chip_interface = WIFIONLY_INTF_UNKNOWN;
+ 		break;
+ 	}
+ 
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 27b6b141cb71..4cafc31b98b7 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -173,7 +173,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+ 				[skb_get_hash_raw(skb) % size];
+ }
+ 
+-static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static netdev_tx_t
++xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct xenvif *vif = netdev_priv(dev);
+ 	struct xenvif_queue *queue = NULL;
+diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
+index 6fe5923c95d4..a69553e75f38 100644
+--- a/drivers/nvme/host/lightnvm.c
++++ b/drivers/nvme/host/lightnvm.c
+@@ -968,6 +968,9 @@ void nvme_nvm_update_nvm_info(struct nvme_ns *ns)
+ 	struct nvm_dev *ndev = ns->ndev;
+ 	struct nvm_geo *geo = &ndev->geo;
+ 
++	if (geo->version == NVM_OCSSD_SPEC_12)
++		return;
++
+ 	geo->csecs = 1 << ns->lba_shift;
+ 	geo->sos = ns->ms;
+ }
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 14d4ef594374..1e80f9ec1aa6 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -48,14 +48,9 @@ static struct opp_device *_find_opp_dev(const struct device *dev,
+ static struct opp_table *_find_opp_table_unlocked(struct device *dev)
+ {
+ 	struct opp_table *opp_table;
+-	bool found;
+ 
+ 	list_for_each_entry(opp_table, &opp_tables, node) {
+-		mutex_lock(&opp_table->lock);
+-		found = !!_find_opp_dev(dev, opp_table);
+-		mutex_unlock(&opp_table->lock);
+-
+-		if (found) {
++		if (_find_opp_dev(dev, opp_table)) {
+ 			_get_opp_table_kref(opp_table);
+ 
+ 			return opp_table;
+@@ -318,7 +313,7 @@ int dev_pm_opp_get_opp_count(struct device *dev)
+ 		count = PTR_ERR(opp_table);
+ 		dev_dbg(dev, "%s: OPP table not found (%d)\n",
+ 			__func__, count);
+-		return 0;
++		return count;
+ 	}
+ 
+ 	count = _get_opp_count(opp_table);
+@@ -771,8 +766,6 @@ struct opp_device *_add_opp_dev(const struct device *dev,
+ 
+ 	/* Initialize opp-dev */
+ 	opp_dev->dev = dev;
+-
+-	mutex_lock(&opp_table->lock);
+ 	list_add(&opp_dev->node, &opp_table->dev_list);
+ 
+ 	/* Create debugfs entries for the opp_table */
+@@ -780,7 +773,6 @@ struct opp_device *_add_opp_dev(const struct device *dev,
+ 	if (ret)
+ 		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
+ 			__func__, ret);
+-	mutex_unlock(&opp_table->lock);
+ 
+ 	return opp_dev;
+ }
+@@ -799,7 +791,6 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
+ 	if (!opp_table)
+ 		return NULL;
+ 
+-	mutex_init(&opp_table->lock);
+ 	INIT_LIST_HEAD(&opp_table->dev_list);
+ 
+ 	opp_dev = _add_opp_dev(dev, opp_table);
+@@ -821,6 +812,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
+ 
+ 	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
+ 	INIT_LIST_HEAD(&opp_table->opp_list);
++	mutex_init(&opp_table->lock);
+ 	kref_init(&opp_table->kref);
+ 
+ 	/* Secure the device table modification */
+@@ -862,10 +854,6 @@ static void _opp_table_kref_release(struct kref *kref)
+ 	if (!IS_ERR(opp_table->clk))
+ 		clk_put(opp_table->clk);
+ 
+-	/*
+-	 * No need to take opp_table->lock here as we are guaranteed that no
+-	 * references to the OPP table are taken at this point.
+-	 */
+ 	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
+ 				   node);
+ 
+@@ -1731,9 +1719,6 @@ void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
+ {
+ 	struct dev_pm_opp *opp, *tmp;
+ 
+-	/* Protect dev_list */
+-	mutex_lock(&opp_table->lock);
+-
+ 	/* Find if opp_table manages a single device */
+ 	if (list_is_singular(&opp_table->dev_list)) {
+ 		/* Free static OPPs */
+@@ -1751,8 +1736,6 @@ void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
+ 	} else {
+ 		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
+ 	}
+-
+-	mutex_unlock(&opp_table->lock);
+ }
+ 
+ void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all)
+diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
+index 2868a022a040..0c0910709435 100644
+--- a/drivers/opp/cpu.c
++++ b/drivers/opp/cpu.c
+@@ -222,10 +222,8 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
+ 	cpumask_clear(cpumask);
+ 
+ 	if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
+-		mutex_lock(&opp_table->lock);
+ 		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+ 			cpumask_set_cpu(opp_dev->dev->id, cpumask);
+-		mutex_unlock(&opp_table->lock);
+ 	} else {
+ 		cpumask_set_cpu(cpu_dev->id, cpumask);
+ 	}
+diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
+index e0866b1c1f1b..7c540fd063b2 100644
+--- a/drivers/opp/opp.h
++++ b/drivers/opp/opp.h
+@@ -126,7 +126,7 @@ enum opp_table_access {
+  * @dev_list:	list of devices that share these OPPs
+  * @opp_list:	table of opps
+  * @kref:	for reference count of the table.
+- * @lock:	mutex protecting the opp_list and dev_list.
++ * @lock:	mutex protecting the opp_list.
+  * @np:		struct device_node pointer for opp's DT node.
+  * @clock_latency_ns_max: Max clock latency in nanoseconds.
+  * @shared_opp: OPP is shared between multiple devices.
+diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
+index fa7d998e1d5a..3535f9841861 100644
+--- a/drivers/pinctrl/pinctrl-gemini.c
++++ b/drivers/pinctrl/pinctrl-gemini.c
+@@ -591,13 +591,16 @@ static const unsigned int tvc_3512_pins[] = {
+ 	319, /* TVC_DATA[1] */
+ 	301, /* TVC_DATA[2] */
+ 	283, /* TVC_DATA[3] */
+-	265, /* TVC_CLK */
+ 	320, /* TVC_DATA[4] */
+ 	302, /* TVC_DATA[5] */
+ 	284, /* TVC_DATA[6] */
+ 	266, /* TVC_DATA[7] */
+ };
+ 
++static const unsigned int tvc_clk_3512_pins[] = {
++	265, /* TVC_CLK */
++};
++
+ /* NAND flash pins */
+ static const unsigned int nflash_3512_pins[] = {
+ 	199, 200, 201, 202, 216, 217, 218, 219, 220, 234, 235, 236, 237, 252,
+@@ -629,7 +632,7 @@ static const unsigned int pflash_3512_pins_extended[] = {
+ /* Serial flash pins CE0, CE1, DI, DO, CK */
+ static const unsigned int sflash_3512_pins[] = { 230, 231, 232, 233, 211 };
+ 
+-/* The GPIO0A (0) pin overlap with TVC and extended parallel flash */
++/* The GPIO0A (0) pin overlap with TVC CLK and extended parallel flash */
+ static const unsigned int gpio0a_3512_pins[] = { 265 };
+ 
+ /* The GPIO0B (1-4) pins overlap with TVC and ICE */
+@@ -823,7 +826,13 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = {
+ 		.num_pins = ARRAY_SIZE(tvc_3512_pins),
+ 		/* Conflict with character LCD and ICE */
+ 		.mask = LCD_PADS_ENABLE,
+-		.value = TVC_PADS_ENABLE | TVC_CLK_PAD_ENABLE,
++		.value = TVC_PADS_ENABLE,
++	},
++	{
++		.name = "tvcclkgrp",
++		.pins = tvc_clk_3512_pins,
++		.num_pins = ARRAY_SIZE(tvc_clk_3512_pins),
++		.value = TVC_CLK_PAD_ENABLE,
+ 	},
+ 	/*
+ 	 * The construction is done such that it is possible to use a serial
+@@ -860,8 +869,8 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = {
+ 		.name = "gpio0agrp",
+ 		.pins = gpio0a_3512_pins,
+ 		.num_pins = ARRAY_SIZE(gpio0a_3512_pins),
+-		/* Conflict with TVC */
+-		.mask = TVC_PADS_ENABLE,
++		/* Conflict with TVC CLK */
++		.mask = TVC_CLK_PAD_ENABLE,
+ 	},
+ 	{
+ 		.name = "gpio0bgrp",
+@@ -1531,13 +1540,16 @@ static const unsigned int tvc_3516_pins[] = {
+ 	311, /* TVC_DATA[1] */
+ 	394, /* TVC_DATA[2] */
+ 	374, /* TVC_DATA[3] */
+-	333, /* TVC_CLK */
+ 	354, /* TVC_DATA[4] */
+ 	395, /* TVC_DATA[5] */
+ 	312, /* TVC_DATA[6] */
+ 	334, /* TVC_DATA[7] */
+ };
+ 
++static const unsigned int tvc_clk_3516_pins[] = {
++	333, /* TVC_CLK */
++};
++
+ /* NAND flash pins */
+ static const unsigned int nflash_3516_pins[] = {
+ 	243, 260, 261, 224, 280, 262, 281, 264, 300, 263, 282, 301, 320, 283,
+@@ -1570,7 +1582,7 @@ static const unsigned int pflash_3516_pins_extended[] = {
+ static const unsigned int sflash_3516_pins[] = { 296, 338, 295, 359, 339 };
+ 
+ /* The GPIO0A (0-4) pins overlap with TVC and extended parallel flash */
+-static const unsigned int gpio0a_3516_pins[] = { 333, 354, 395, 312, 334 };
++static const unsigned int gpio0a_3516_pins[] = { 354, 395, 312, 334 };
+ 
+ /* The GPIO0B (5-7) pins overlap with ICE */
+ static const unsigned int gpio0b_3516_pins[] = { 375, 396, 376 };
+@@ -1602,6 +1614,9 @@ static const unsigned int gpio0j_3516_pins[] = { 359, 339 };
+ /* The GPIO0K (30,31) pins overlap with NAND flash */
+ static const unsigned int gpio0k_3516_pins[] = { 275, 298 };
+ 
++/* The GPIO0L (0) pins overlap with TVC_CLK */
++static const unsigned int gpio0l_3516_pins[] = { 333 };
++
+ /* The GPIO1A (0-4) pins that overlap with IDE and parallel flash */
+ static const unsigned int gpio1a_3516_pins[] = { 221, 200, 222, 201, 220 };
+ 
+@@ -1761,7 +1776,13 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
+ 		.num_pins = ARRAY_SIZE(tvc_3516_pins),
+ 		/* Conflict with character LCD */
+ 		.mask = LCD_PADS_ENABLE,
+-		.value = TVC_PADS_ENABLE | TVC_CLK_PAD_ENABLE,
++		.value = TVC_PADS_ENABLE,
++	},
++	{
++		.name = "tvcclkgrp",
++		.pins = tvc_clk_3516_pins,
++		.num_pins = ARRAY_SIZE(tvc_clk_3516_pins),
++		.value = TVC_CLK_PAD_ENABLE,
+ 	},
+ 	/*
+ 	 * The construction is done such that it is possible to use a serial
+@@ -1872,6 +1893,13 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
+ 		/* Conflict with parallel and NAND flash */
+ 		.value = PFLASH_PADS_DISABLE | NAND_PADS_DISABLE,
+ 	},
++	{
++		.name = "gpio0lgrp",
++		.pins = gpio0l_3516_pins,
++		.num_pins = ARRAY_SIZE(gpio0l_3516_pins),
++		/* Conflict with TVE CLK */
++		.mask = TVC_CLK_PAD_ENABLE,
++	},
+ 	{
+ 		.name = "gpio1agrp",
+ 		.pins = gpio1a_3516_pins,
+@@ -2184,7 +2212,8 @@ static int gemini_pmx_set_mux(struct pinctrl_dev *pctldev,
+ 		 func->name, grp->name);
+ 
+ 	regmap_read(pmx->map, GLOBAL_MISC_CTRL, &before);
+-	regmap_update_bits(pmx->map, GLOBAL_MISC_CTRL, grp->mask,
++	regmap_update_bits(pmx->map, GLOBAL_MISC_CTRL,
++			   grp->mask | grp->value,
+ 			   grp->value);
+ 	regmap_read(pmx->map, GLOBAL_MISC_CTRL, &after);
+ 
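The last gemini hunk widens the regmap_update_bits() mask to grp->mask | grp->value, so the bits a group must set are part of the update rather than only the bits it must clear. A minimal sketch of why the old mask dropped the enable bits (illustrative bit values, not the real GLOBAL_MISC_CTRL layout):

#include <stdio.h>

/* Conceptual regmap_update_bits(): only bits inside `mask` are written. */
static unsigned int update_bits(unsigned int reg, unsigned int mask,
				unsigned int val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int misc = 0;
	unsigned int grp_mask  = 0x04;	/* pads the group must force off */
	unsigned int grp_value = 0x10;	/* pads the group must force on  */

	/* Old call: value bit is outside the mask, so it never reaches HW. */
	printf("mask only:    %#x\n", update_bits(misc, grp_mask, grp_value));

	/* Fixed call: mask | value lets both the clears and the sets land. */
	printf("mask | value: %#x\n",
	       update_bits(misc, grp_mask | grp_value, grp_value));
	return 0;
}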
+diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
+index 742a0c217925..d17db140cb1f 100644
+--- a/drivers/platform/x86/mlx-platform.c
++++ b/drivers/platform/x86/mlx-platform.c
+@@ -575,7 +575,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_msn201x_items[] = {
+ 
+ static
+ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn201x_data = {
+-	.items = mlxplat_mlxcpld_msn21xx_items,
++	.items = mlxplat_mlxcpld_msn201x_items,
+ 	.counter = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_items),
+ 	.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ 	.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
+diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
+index 602af839421d..0d33e3079f0d 100644
+--- a/drivers/remoteproc/qcom_q6v5.c
++++ b/drivers/remoteproc/qcom_q6v5.c
+@@ -84,6 +84,7 @@ static irqreturn_t q6v5_fatal_interrupt(int irq, void *data)
+ 	else
+ 		dev_err(q6v5->dev, "fatal error without message\n");
+ 
++	q6v5->running = false;
+ 	rproc_report_crash(q6v5->rproc, RPROC_FATAL_ERROR);
+ 
+ 	return IRQ_HANDLED;
+@@ -150,8 +151,6 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
+ {
+ 	int ret;
+ 
+-	q6v5->running = false;
+-
+ 	qcom_smem_state_update_bits(q6v5->state,
+ 				    BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
+ 
+diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
+index 47be411400e5..3a4c3d7cafca 100644
+--- a/drivers/remoteproc/remoteproc_sysfs.c
++++ b/drivers/remoteproc/remoteproc_sysfs.c
+@@ -48,6 +48,11 @@ static ssize_t firmware_store(struct device *dev,
+ 	}
+ 
+ 	len = strcspn(buf, "\n");
++	if (!len) {
++		dev_err(dev, "can't provide a NULL firmware\n");
++		err = -EINVAL;
++		goto out;
++	}
+ 
+ 	p = kstrndup(buf, len, GFP_KERNEL);
+ 	if (!p) {
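The sysfs handler above now measures the name with strcspn(buf, "\n") and rejects an empty result, so writing a bare newline no longer tries to load a nameless firmware. A small userspace sketch of the same parsing (hypothetical helper, not the remoteproc API):

#include <stdio.h>
#include <string.h>

/* Take a firmware name written through sysfs: cut at the newline and
 * refuse an empty result. */
static int parse_fw_name(const char *buf, char *out, size_t outsz)
{
	size_t len = strcspn(buf, "\n");	/* length up to the newline */

	if (!len)
		return -1;			/* bare "\n": nothing to load */
	if (len >= outsz)
		return -1;

	memcpy(out, buf, len);
	out[len] = '\0';
	return 0;
}

int main(void)
{
	char name[64];

	printf("%d\n", parse_fw_name("fw.bin\n", name, sizeof(name)));	/* 0  */
	printf("%d\n", parse_fw_name("\n", name, sizeof(name)));	/* -1 */
	return 0;
}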
+diff --git a/drivers/reset/core.c b/drivers/reset/core.c
+index 225e34c56b94..d1887c0ed5d3 100644
+--- a/drivers/reset/core.c
++++ b/drivers/reset/core.c
+@@ -496,28 +496,29 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
+ 			break;
+ 		}
+ 	}
+-	of_node_put(args.np);
+ 
+ 	if (!rcdev) {
+-		mutex_unlock(&reset_list_mutex);
+-		return ERR_PTR(-EPROBE_DEFER);
++		rstc = ERR_PTR(-EPROBE_DEFER);
++		goto out;
+ 	}
+ 
+ 	if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
+-		mutex_unlock(&reset_list_mutex);
+-		return ERR_PTR(-EINVAL);
++		rstc = ERR_PTR(-EINVAL);
++		goto out;
+ 	}
+ 
+ 	rstc_id = rcdev->of_xlate(rcdev, &args);
+ 	if (rstc_id < 0) {
+-		mutex_unlock(&reset_list_mutex);
+-		return ERR_PTR(rstc_id);
++		rstc = ERR_PTR(rstc_id);
++		goto out;
+ 	}
+ 
+ 	/* reset_list_mutex also protects the rcdev's reset_control list */
+ 	rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
+ 
++out:
+ 	mutex_unlock(&reset_list_mutex);
++	of_node_put(args.np);
+ 
+ 	return rstc;
+ }
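The reset core change funnels every error return through a single out: label so the mutex is unlocked and of_node_put() is called exactly once instead of being repeated at each failure site. A tiny sketch of the same single-exit pattern, with a pthread mutex standing in for reset_list_mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static int lookup(int fail)
{
	int ret = 0;

	pthread_mutex_lock(&list_lock);

	if (fail) {
		ret = -1;	/* previously: unlock + return at every site */
		goto out;
	}

	/* ... normal work done under the lock ... */

out:
	pthread_mutex_unlock(&list_lock);	/* one place releases everything */
	return ret;
}

int main(void)
{
	printf("%d %d\n", lookup(0), lookup(1));
	return 0;
}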
+diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c
+index 2b5cf2790954..7b6544348a3e 100644
+--- a/drivers/rpmsg/qcom_glink_smem.c
++++ b/drivers/rpmsg/qcom_glink_smem.c
+@@ -89,15 +89,11 @@ static void glink_smem_rx_peak(struct qcom_glink_pipe *np,
+ 		tail -= pipe->native.length;
+ 
+ 	len = min_t(size_t, count, pipe->native.length - tail);
+-	if (len) {
+-		__ioread32_copy(data, pipe->fifo + tail,
+-				len / sizeof(u32));
+-	}
++	if (len)
++		memcpy_fromio(data, pipe->fifo + tail, len);
+ 
+-	if (len != count) {
+-		__ioread32_copy(data + len, pipe->fifo,
+-				(count - len) / sizeof(u32));
+-	}
++	if (len != count)
++		memcpy_fromio(data + len, pipe->fifo, (count - len));
+ }
+ 
+ static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
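__ioread32_copy() copies whole 32-bit words, so a peek whose length was not a multiple of four silently dropped the trailing bytes; memcpy_fromio() copies byte-exact. A plain-C sketch of the wrap-around read the function performs (ordinary memcpy standing in for the MMIO copy):

#include <stdio.h>
#include <string.h>

#define FIFO_LEN 8

/* Peek `count` bytes starting at `tail`, wrapping at FIFO_LEN. */
static void fifo_peek(const char *fifo, size_t tail, char *out, size_t count)
{
	size_t len = count < FIFO_LEN - tail ? count : FIFO_LEN - tail;

	memcpy(out, fifo + tail, len);		/* up to the end of the ring */
	if (len != count)
		memcpy(out + len, fifo, count - len);	/* wrapped remainder */
}

int main(void)
{
	const char fifo[FIFO_LEN + 1] = "ABCDEFGH";
	char out[6] = { 0 };

	fifo_peek(fifo, 5, out, 5);	/* reads "FGH", wraps, then "AB" */
	printf("%s\n", out);		/* FGHAB */
	return 0;
}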
+diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
+index c6ab34f94b1b..3072b89785dd 100644
+--- a/drivers/s390/char/Makefile
++++ b/drivers/s390/char/Makefile
+@@ -11,6 +11,7 @@ endif
+ GCOV_PROFILE_sclp_early_core.o		:= n
+ KCOV_INSTRUMENT_sclp_early_core.o	:= n
+ UBSAN_SANITIZE_sclp_early_core.o	:= n
++KASAN_SANITIZE_sclp_early_core.o	:= n
+ 
+ CFLAGS_sclp_early_core.o		+= -D__NO_FORTIFY
+ 
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 12316ef4c893..c75d4695f982 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -4135,9 +4135,9 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
+ 		pci_read_config_byte(acb->pdev, i, &value[i]);
+ 	}
+ 	/* hardware reset signal */
+-	if ((acb->dev_id == 0x1680)) {
++	if (acb->dev_id == 0x1680) {
+ 		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
+-	} else if ((acb->dev_id == 0x1880)) {
++	} else if (acb->dev_id == 0x1880) {
+ 		do {
+ 			count++;
+ 			writel(0xF, &pmuC->write_sequence);
+@@ -4161,7 +4161,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
+ 		} while (((readl(&pmuE->host_diagnostic_3xxx) &
+ 			ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
+ 		writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
+-	} else if ((acb->dev_id == 0x1214)) {
++	} else if (acb->dev_id == 0x1214) {
+ 		writel(0x20, pmuD->reset_request);
+ 	} else {
+ 		pci_write_config_byte(acb->pdev, 0x84, 0x20);
+diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
+index 2f71f7df3465..f9edd28894fd 100644
+--- a/drivers/soc/fsl/qbman/bman_portal.c
++++ b/drivers/soc/fsl/qbman/bman_portal.c
+@@ -91,7 +91,15 @@ static int bman_portal_probe(struct platform_device *pdev)
+ 	struct device_node *node = dev->of_node;
+ 	struct bm_portal_config *pcfg;
+ 	struct resource *addr_phys[2];
+-	int irq, cpu;
++	int irq, cpu, err;
++
++	err = bman_is_probed();
++	if (!err)
++		return -EPROBE_DEFER;
++	if (err < 0) {
++		dev_err(&pdev->dev, "failing probe due to bman probe error\n");
++		return -ENODEV;
++	}
+ 
+ 	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ 	if (!pcfg)
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index e6d5cc6ab108..51670976faa3 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -276,7 +276,7 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
+ 
+ 	fsl_lpspi_set_watermark(fsl_lpspi);
+ 
+-	temp = CFGR1_PCSCFG | CFGR1_MASTER | CFGR1_NOSTALL;
++	temp = CFGR1_PCSCFG | CFGR1_MASTER;
+ 	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
+ 		temp |= CFGR1_PCSPOL;
+ 	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index 3dc31627c655..0c2867deb36f 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -522,11 +522,11 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+ 		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
+ 		mtk_spi_setup_packet(master);
+ 
+-		cnt = len / 4;
++		cnt = mdata->xfer_len / 4;
+ 		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
+ 				trans->tx_buf + mdata->num_xfered, cnt);
+ 
+-		remainder = len % 4;
++		remainder = mdata->xfer_len % 4;
+ 		if (remainder > 0) {
+ 			reg_val = 0;
+ 			memcpy(&reg_val,
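The mt65xx interrupt handler clamps the transfer to the FIFO size first, so the word count and tail-byte count have to be derived from the clamped mdata->xfer_len rather than from the full remaining len. A sketch with made-up numbers:

#include <stdio.h>

#define FIFO_SIZE 32u

int main(void)
{
	unsigned int len = 100;		/* bytes still left in the transfer */
	unsigned int xfer_len = len < FIFO_SIZE ? len : FIFO_SIZE;

	/* Word count and tail bytes must come from the clamped length,
	 * otherwise up to `len` bytes would be pushed into a 32-byte FIFO. */
	unsigned int cnt = xfer_len / 4;
	unsigned int remainder = xfer_len % 4;

	printf("words=%u tail=%u\n", cnt, remainder);	/* words=8 tail=0 */
	return 0;
}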
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index fdcf3076681b..185bbdce62b1 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -445,6 +445,9 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
+ 	struct dma_slave_config rxconf, txconf;
+ 	struct dma_async_tx_descriptor *rxdesc, *txdesc;
+ 
++	memset(&rxconf, 0, sizeof(rxconf));
++	memset(&txconf, 0, sizeof(txconf));
++
+ 	spin_lock_irqsave(&rs->lock, flags);
+ 	rs->state &= ~RXBUSY;
+ 	rs->state &= ~TXBUSY;
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index cda10719d1d1..c5fe08bc34a0 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -724,11 +724,9 @@ static int spidev_probe(struct spi_device *spi)
+ 	 * compatible string, it is a Linux implementation thing
+ 	 * rather than a description of the hardware.
+ 	 */
+-	if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
+-		dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
+-		WARN_ON(spi->dev.of_node &&
+-			!of_match_device(spidev_dt_ids, &spi->dev));
+-	}
++	WARN(spi->dev.of_node &&
++	     of_device_is_compatible(spi->dev.of_node, "spidev"),
++	     "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);
+ 
+ 	spidev_probe_acpi(spi);
+ 
+diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
+index 34dce850067b..2f254f957b0a 100644
+--- a/drivers/tee/optee/core.c
++++ b/drivers/tee/optee/core.c
+@@ -696,8 +696,10 @@ static int __init optee_driver_init(void)
+ 		return -ENODEV;
+ 
+ 	np = of_find_matching_node(fw_np, optee_match);
+-	if (!np || !of_device_is_available(np))
++	if (!np || !of_device_is_available(np)) {
++		of_node_put(np);
+ 		return -ENODEV;
++	}
+ 
+ 	optee = optee_probe(np);
+ 	of_node_put(np);
+diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
+index dff2c6e8d797..a93415f33bf3 100644
+--- a/drivers/usb/dwc2/params.c
++++ b/drivers/usb/dwc2/params.c
+@@ -88,6 +88,7 @@ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
+ 	p->host_perio_tx_fifo_size = 256;
+ 	p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ 		GAHBCFG_HBSTLEN_SHIFT;
++	p->power_down = 0;
+ }
+ 
+ static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg)
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 8398c33d08e7..3e04004b4f1b 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -271,27 +271,36 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+ 	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
+ 	struct dwc3		*dwc = dep->dwc;
+ 	u32			timeout = 1000;
++	u32			saved_config = 0;
+ 	u32			reg;
+ 
+ 	int			cmd_status = 0;
+-	int			susphy = false;
+ 	int			ret = -EINVAL;
+ 
+ 	/*
+-	 * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
+-	 * we're issuing an endpoint command, we must check if
+-	 * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
++	 * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
++	 * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
++	 * endpoint command.
+ 	 *
+-	 * We will also set SUSPHY bit to what it was before returning as stated
+-	 * by the same section on Synopsys databook.
++	 * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
++	 * settings. Restore them after the command is completed.
++	 *
++	 * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
+ 	 */
+ 	if (dwc->gadget.speed <= USB_SPEED_HIGH) {
+ 		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ 		if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+-			susphy = true;
++			saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
+ 			reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+-			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ 		}
++
++		if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
++			saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
++			reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
++		}
++
++		if (saved_config)
++			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ 	}
+ 
+ 	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
+@@ -380,9 +389,9 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+ 		dwc3_gadget_ep_get_transfer_index(dep);
+ 	}
+ 
+-	if (unlikely(susphy)) {
++	if (saved_config) {
+ 		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+-		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
++		reg |= saved_config;
+ 		dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ 	}
+ 
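The dwc3 hunk replaces the single susphy flag with a saved_config bitmask so both GUSB2PHYCFG.SUSPHY and GUSB2PHYCFG.ENBLSLPM are cleared around the endpoint command and only the bits that were actually set get restored afterwards. A standalone sketch of that save-and-restore pattern (fake register and bit positions, not the real GUSB2PHYCFG layout):

#include <stdio.h>

#define CFG_SUSPHY	(1u << 6)	/* bit positions are made up */
#define CFG_ENBLSLPM	(1u << 8)

static unsigned int phy_cfg = CFG_SUSPHY | CFG_ENBLSLPM;	/* fake register */

static unsigned int reg_read(void)		{ return phy_cfg; }
static void	    reg_write(unsigned int v)	{ phy_cfg = v; }

static void send_ep_command(void)
{
	unsigned int saved_config = 0;
	unsigned int reg = reg_read();

	/* Clear every low-power bit that happens to be set, remembering which. */
	if (reg & CFG_SUSPHY) {
		saved_config |= CFG_SUSPHY;
		reg &= ~CFG_SUSPHY;
	}
	if (reg & CFG_ENBLSLPM) {
		saved_config |= CFG_ENBLSLPM;
		reg &= ~CFG_ENBLSLPM;
	}
	if (saved_config)
		reg_write(reg);

	/* ... issue the command while the bits are clear ... */

	/* Restore only the bits that were cleared above. */
	if (saved_config)
		reg_write(reg_read() | saved_config);
}

int main(void)
{
	send_ep_command();
	printf("cfg after: %#x\n", phy_cfg);	/* both bits back in place */
	return 0;
}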
+diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
+index 587c5037ff07..bc6abaea907d 100644
+--- a/drivers/usb/gadget/udc/fotg210-udc.c
++++ b/drivers/usb/gadget/udc/fotg210-udc.c
+@@ -741,7 +741,7 @@ static void fotg210_get_status(struct fotg210_udc *fotg210,
+ 	fotg210->ep0_req->length = 2;
+ 
+ 	spin_unlock(&fotg210->lock);
+-	fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_KERNEL);
++	fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC);
+ 	spin_lock(&fotg210->lock);
+ }
+ 
+diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
+index e0035c023120..2c58649fd47a 100644
+--- a/drivers/usb/serial/cypress_m8.c
++++ b/drivers/usb/serial/cypress_m8.c
+@@ -769,7 +769,7 @@ send:
+ 
+ 	usb_fill_int_urb(port->interrupt_out_urb, port->serial->dev,
+ 		usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress),
+-		port->interrupt_out_buffer, port->interrupt_out_size,
++		port->interrupt_out_buffer, actual_size,
+ 		cypress_write_int_callback, port, priv->write_urb_interval);
+ 	result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
+ 	if (result) {
+diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
+index cd50df5807ea..086611c7bc03 100644
+--- a/drivers/video/backlight/lm3639_bl.c
++++ b/drivers/video/backlight/lm3639_bl.c
+@@ -400,10 +400,8 @@ static int lm3639_remove(struct i2c_client *client)
+ 
+ 	regmap_write(pchip->regmap, REG_ENABLE, 0x00);
+ 
+-	if (&pchip->cdev_torch)
+-		led_classdev_unregister(&pchip->cdev_torch);
+-	if (&pchip->cdev_flash)
+-		led_classdev_unregister(&pchip->cdev_flash);
++	led_classdev_unregister(&pchip->cdev_torch);
++	led_classdev_unregister(&pchip->cdev_flash);
+ 	if (pchip->bled)
+ 		device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
+ 	return 0;
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 591a13a59787..f99558d006bf 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -2,6 +2,18 @@
+ # fbdev configuration
+ #
+ 
++config FB_CMDLINE
++	bool
++
++config FB_NOTIFY
++	bool
++
++config FB_CLPS711X_OLD
++	tristate
++	select FB_CFB_FILLRECT
++	select FB_CFB_COPYAREA
++	select FB_CFB_IMAGEBLIT
++
+ menuconfig FB
+ 	tristate "Support for frame buffer devices"
+ 	select FB_CMDLINE
+@@ -54,12 +66,6 @@ config FIRMWARE_EDID
+ 	 combination with certain motherboards and monitors are known to
+ 	 suffer from this problem.
+ 
+-config FB_CMDLINE
+-	bool
+-
+-config FB_NOTIFY
+-	bool
+-
+ config FB_DDC
+        tristate
+        depends on FB
+@@ -329,12 +335,6 @@ config FB_ACORN
+ 	  hardware found in Acorn RISC PCs and other ARM-based machines.  If
+ 	  unsure, say N.
+ 
+-config FB_CLPS711X_OLD
+-	tristate
+-	select FB_CFB_FILLRECT
+-	select FB_CFB_COPYAREA
+-	select FB_CFB_IMAGEBLIT
+-
+ config FB_CLPS711X
+ 	tristate "CLPS711X LCD support"
+ 	depends on FB && (ARCH_CLPS711X || COMPILE_TEST)
+@@ -1456,7 +1456,6 @@ if FB_VIA
+ 
+ config FB_VIA_DIRECT_PROCFS
+ 	bool "direct hardware access via procfs (DEPRECATED)(DANGEROUS)"
+-	depends on FB_VIA
+ 	default n
+ 	help
+ 	  Allow direct hardware access to some output registers via procfs.
+@@ -1466,7 +1465,6 @@ config FB_VIA_DIRECT_PROCFS
+ 
+ config FB_VIA_X_COMPATIBILITY
+ 	bool "X server compatibility"
+-	depends on FB_VIA
+ 	default n
+ 	help
+ 	  This option reduces the functionality (power saving, ...) of the
+@@ -2308,10 +2306,6 @@ config FB_SIMPLE
+ 	  Configuration re: surface address, size, and format must be provided
+ 	  through device tree, or plain old platform data.
+ 
+-source "drivers/video/fbdev/omap/Kconfig"
+-source "drivers/video/fbdev/omap2/Kconfig"
+-source "drivers/video/fbdev/mmp/Kconfig"
+-
+ config FB_SSD1307
+ 	tristate "Solomon SSD1307 framebuffer support"
+ 	depends on FB && I2C
+@@ -2341,3 +2335,7 @@ config FB_SM712
+ 	  This driver is also available as a module. The module will be
+ 	  called sm712fb. If you want to compile it as a module, say M
+ 	  here and read <file:Documentation/kbuild/modules.txt>.
++
++source "drivers/video/fbdev/omap/Kconfig"
++source "drivers/video/fbdev/omap2/Kconfig"
++source "drivers/video/fbdev/mmp/Kconfig"
+diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
+index 076d24afbd72..4ed55e6bbb84 100644
+--- a/drivers/video/fbdev/atmel_lcdfb.c
++++ b/drivers/video/fbdev/atmel_lcdfb.c
+@@ -22,6 +22,7 @@
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
++#include <video/of_videomode.h>
+ #include <video/of_display_timing.h>
+ #include <linux/regulator/consumer.h>
+ #include <video/videomode.h>
+@@ -1028,11 +1029,11 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+ 	struct device *dev = &sinfo->pdev->dev;
+ 	struct device_node *np =dev->of_node;
+ 	struct device_node *display_np;
+-	struct device_node *timings_np;
+-	struct display_timings *timings;
+ 	struct atmel_lcdfb_power_ctrl_gpio *og;
+ 	bool is_gpio_power = false;
++	struct fb_videomode fb_vm;
+ 	struct gpio_desc *gpiod;
++	struct videomode vm;
+ 	int ret = -ENOENT;
+ 	int i;
+ 
+@@ -1105,44 +1106,18 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+ 	pdata->lcdcon_is_backlight = of_property_read_bool(display_np, "atmel,lcdcon-backlight");
+ 	pdata->lcdcon_pol_negative = of_property_read_bool(display_np, "atmel,lcdcon-backlight-inverted");
+ 
+-	timings = of_get_display_timings(display_np);
+-	if (!timings) {
+-		dev_err(dev, "failed to get display timings\n");
+-		ret = -EINVAL;
++	ret = of_get_videomode(display_np, &vm, OF_USE_NATIVE_MODE);
++	if (ret) {
++		dev_err(dev, "failed to get videomode from DT\n");
+ 		goto put_display_node;
+ 	}
+ 
+-	timings_np = of_get_child_by_name(display_np, "display-timings");
+-	if (!timings_np) {
+-		dev_err(dev, "failed to find display-timings node\n");
+-		ret = -ENODEV;
++	ret = fb_videomode_from_videomode(&vm, &fb_vm);
++	if (ret < 0)
+ 		goto put_display_node;
+-	}
+ 
+-	for (i = 0; i < of_get_child_count(timings_np); i++) {
+-		struct videomode vm;
+-		struct fb_videomode fb_vm;
+-
+-		ret = videomode_from_timings(timings, &vm, i);
+-		if (ret < 0)
+-			goto put_timings_node;
+-		ret = fb_videomode_from_videomode(&vm, &fb_vm);
+-		if (ret < 0)
+-			goto put_timings_node;
+-
+-		fb_add_videomode(&fb_vm, &info->modelist);
+-	}
+-
+-	/*
+-	 * FIXME: Make sure we are not referencing any fields in display_np
+-	 * and timings_np and drop our references to them before returning to
+-	 * avoid leaking the nodes on probe deferral and driver unbind.
+-	 */
+-
+-	return 0;
++	fb_add_videomode(&fb_vm, &info->modelist);
+ 
+-put_timings_node:
+-	of_node_put(timings_np);
+ put_display_node:
+ 	of_node_put(display_np);
+ 	return ret;
+diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
+index 852d86c1c527..8607439d6932 100644
+--- a/drivers/video/fbdev/core/fbmon.c
++++ b/drivers/video/fbdev/core/fbmon.c
+@@ -997,98 +997,6 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
+ 	DPRINTK("========================================\n");
+ }
+ 
+-/**
+- * fb_edid_add_monspecs() - add monitor video modes from E-EDID data
+- * @edid:	128 byte array with an E-EDID block
+- * @spacs:	monitor specs to be extended
+- */
+-void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
+-{
+-	unsigned char *block;
+-	struct fb_videomode *m;
+-	int num = 0, i;
+-	u8 svd[64], edt[(128 - 4) / DETAILED_TIMING_DESCRIPTION_SIZE];
+-	u8 pos = 4, svd_n = 0;
+-
+-	if (!edid)
+-		return;
+-
+-	if (!edid_checksum(edid))
+-		return;
+-
+-	if (edid[0] != 0x2 ||
+-	    edid[2] < 4 || edid[2] > 128 - DETAILED_TIMING_DESCRIPTION_SIZE)
+-		return;
+-
+-	DPRINTK("  Short Video Descriptors\n");
+-
+-	while (pos < edid[2]) {
+-		u8 len = edid[pos] & 0x1f, type = (edid[pos] >> 5) & 7;
+-		pr_debug("Data block %u of %u bytes\n", type, len);
+-		if (type == 2) {
+-			for (i = pos; i < pos + len; i++) {
+-				u8 idx = edid[pos + i] & 0x7f;
+-				svd[svd_n++] = idx;
+-				pr_debug("N%sative mode #%d\n",
+-					 edid[pos + i] & 0x80 ? "" : "on-n", idx);
+-			}
+-		} else if (type == 3 && len >= 3) {
+-			/* Check Vendor Specific Data Block.  For HDMI,
+-			   it is always 00-0C-03 for HDMI Licensing, LLC. */
+-			if (edid[pos + 1] == 3 && edid[pos + 2] == 0xc &&
+-			    edid[pos + 3] == 0)
+-				specs->misc |= FB_MISC_HDMI;
+-		}
+-		pos += len + 1;
+-	}
+-
+-	block = edid + edid[2];
+-
+-	DPRINTK("  Extended Detailed Timings\n");
+-
+-	for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
+-	     i++, block += DETAILED_TIMING_DESCRIPTION_SIZE)
+-		if (PIXEL_CLOCK != 0)
+-			edt[num++] = block - edid;
+-
+-	/* Yikes, EDID data is totally useless */
+-	if (!(num + svd_n))
+-		return;
+-
+-	m = kcalloc(specs->modedb_len + num + svd_n,
+-		    sizeof(struct fb_videomode),
+-		    GFP_KERNEL);
+-
+-	if (!m)
+-		return;
+-
+-	memcpy(m, specs->modedb, specs->modedb_len * sizeof(struct fb_videomode));
+-
+-	for (i = specs->modedb_len; i < specs->modedb_len + num; i++) {
+-		get_detailed_timing(edid + edt[i - specs->modedb_len], &m[i]);
+-		if (i == specs->modedb_len)
+-			m[i].flag |= FB_MODE_IS_FIRST;
+-		pr_debug("Adding %ux%u@%u\n", m[i].xres, m[i].yres, m[i].refresh);
+-	}
+-
+-	for (i = specs->modedb_len + num; i < specs->modedb_len + num + svd_n; i++) {
+-		int idx = svd[i - specs->modedb_len - num];
+-		if (!idx || idx >= ARRAY_SIZE(cea_modes)) {
+-			pr_warn("Reserved SVD code %d\n", idx);
+-		} else if (!cea_modes[idx].xres) {
+-			pr_warn("Unimplemented SVD code %d\n", idx);
+-		} else {
+-			memcpy(&m[i], cea_modes + idx, sizeof(m[i]));
+-			pr_debug("Adding SVD #%d: %ux%u@%u\n", idx,
+-				 m[i].xres, m[i].yres, m[i].refresh);
+-		}
+-	}
+-
+-	kfree(specs->modedb);
+-	specs->modedb = m;
+-	specs->modedb_len = specs->modedb_len + num + svd_n;
+-}
+-
+ /*
+  * VESA Generalized Timing Formula (GTF)
+  */
+@@ -1498,9 +1406,6 @@ int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
+ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
+ {
+ }
+-void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
+-{
+-}
+ void fb_destroy_modedb(struct fb_videomode *modedb)
+ {
+ }
+@@ -1608,7 +1513,6 @@ EXPORT_SYMBOL(fb_firmware_edid);
+ 
+ EXPORT_SYMBOL(fb_parse_edid);
+ EXPORT_SYMBOL(fb_edid_to_monspecs);
+-EXPORT_SYMBOL(fb_edid_add_monspecs);
+ EXPORT_SYMBOL(fb_get_mode);
+ EXPORT_SYMBOL(fb_validate_mode);
+ EXPORT_SYMBOL(fb_destroy_modedb);
+diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
+index ac049871704d..6473e0dfe146 100644
+--- a/drivers/video/fbdev/core/modedb.c
++++ b/drivers/video/fbdev/core/modedb.c
+@@ -289,63 +289,6 @@ static const struct fb_videomode modedb[] = {
+ };
+ 
+ #ifdef CONFIG_FB_MODE_HELPERS
+-const struct fb_videomode cea_modes[65] = {
+-	/* #1: 640x480p@59.94/60Hz */
+-	[1] = {
+-		NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
+-		FB_VMODE_NONINTERLACED, 0,
+-	},
+-	/* #3: 720x480p@59.94/60Hz */
+-	[3] = {
+-		NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
+-		FB_VMODE_NONINTERLACED, 0,
+-	},
+-	/* #5: 1920x1080i@59.94/60Hz */
+-	[5] = {
+-		NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
+-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+-		FB_VMODE_INTERLACED, 0,
+-	},
+-	/* #7: 720(1440)x480iH@59.94/60Hz */
+-	[7] = {
+-		NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
+-		FB_VMODE_INTERLACED, 0,
+-	},
+-	/* #9: 720(1440)x240pH@59.94/60Hz */
+-	[9] = {
+-		NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0,
+-		FB_VMODE_NONINTERLACED, 0,
+-	},
+-	/* #18: 720x576pH@50Hz */
+-	[18] = {
+-		NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
+-		FB_VMODE_NONINTERLACED, 0,
+-	},
+-	/* #19: 1280x720p@50Hz */
+-	[19] = {
+-		NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
+-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+-		FB_VMODE_NONINTERLACED, 0,
+-	},
+-	/* #20: 1920x1080i@50Hz */
+-	[20] = {
+-		NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
+-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+-		FB_VMODE_INTERLACED, 0,
+-	},
+-	/* #32: 1920x1080p@23.98/24Hz */
+-	[32] = {
+-		NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
+-		FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+-		FB_VMODE_NONINTERLACED, 0,
+-	},
+-	/* #35: (2880)x480p4x@59.94/60Hz */
+-	[35] = {
+-		NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0,
+-		FB_VMODE_NONINTERLACED, 0,
+-	},
+-};
+-
+ const struct fb_videomode vesa_modes[] = {
+ 	/* 0 640x350-85 VESA */
+ 	{ NULL, 85, 640, 350, 31746,  96, 32, 60, 32, 64, 3,
+diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
+index a436d44f1b7f..01a7110e61a7 100644
+--- a/drivers/video/fbdev/sbuslib.c
++++ b/drivers/video/fbdev/sbuslib.c
+@@ -106,11 +106,11 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
+ 		struct fbtype __user *f = (struct fbtype __user *) arg;
+ 
+ 		if (put_user(type, &f->fb_type) ||
+-		    __put_user(info->var.yres, &f->fb_height) ||
+-		    __put_user(info->var.xres, &f->fb_width) ||
+-		    __put_user(fb_depth, &f->fb_depth) ||
+-		    __put_user(0, &f->fb_cmsize) ||
+-		    __put_user(fb_size, &f->fb_cmsize))
++		    put_user(info->var.yres, &f->fb_height) ||
++		    put_user(info->var.xres, &f->fb_width) ||
++		    put_user(fb_depth, &f->fb_depth) ||
++		    put_user(0, &f->fb_cmsize) ||
++		    put_user(fb_size, &f->fb_cmsize))
+ 			return -EFAULT;
+ 		return 0;
+ 	}
+@@ -125,10 +125,10 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
+ 		unsigned int index, count, i;
+ 
+ 		if (get_user(index, &c->index) ||
+-		    __get_user(count, &c->count) ||
+-		    __get_user(ured, &c->red) ||
+-		    __get_user(ugreen, &c->green) ||
+-		    __get_user(ublue, &c->blue))
++		    get_user(count, &c->count) ||
++		    get_user(ured, &c->red) ||
++		    get_user(ugreen, &c->green) ||
++		    get_user(ublue, &c->blue))
+ 			return -EFAULT;
+ 
+ 		cmap.len = 1;
+@@ -165,13 +165,13 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
+ 		u8 red, green, blue;
+ 
+ 		if (get_user(index, &c->index) ||
+-		    __get_user(count, &c->count) ||
+-		    __get_user(ured, &c->red) ||
+-		    __get_user(ugreen, &c->green) ||
+-		    __get_user(ublue, &c->blue))
++		    get_user(count, &c->count) ||
++		    get_user(ured, &c->red) ||
++		    get_user(ugreen, &c->green) ||
++		    get_user(ublue, &c->blue))
+ 			return -EFAULT;
+ 
+-		if (index + count > cmap->len)
++		if (index > cmap->len || count > cmap->len - index)
+ 			return -EINVAL;
+ 
+ 		for (i = 0; i < count; i++) {
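Besides switching to plain get_user()/put_user(), the last sbuslib hunk rewrites the colormap bounds check so an oversized count cannot wrap the addition and slip past the test. A small demonstration of the difference between the two forms:

#include <stdbool.h>
#include <stdio.h>

/* Overflow-prone form: index + count can wrap and pass the test. */
static bool check_naive(unsigned int index, unsigned int count, unsigned int len)
{
	return index + count <= len;
}

/* Overflow-safe form used by the fix: subtract instead of add. */
static bool check_safe(unsigned int index, unsigned int count, unsigned int len)
{
	return index <= len && count <= len - index;
}

int main(void)
{
	unsigned int len = 256;

	/* A huge count wraps the naive sum back into range. */
	printf("naive: %d\n", check_naive(16, 0xfffffff0u, len));	/* 1: wrongly accepted */
	printf("safe:  %d\n", check_safe(16, 0xfffffff0u, len));	/* 0: rejected */
	return 0;
}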
+diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
+index d01efd342dc0..62d9d3edcdf2 100644
+--- a/drivers/watchdog/renesas_wdt.c
++++ b/drivers/watchdog/renesas_wdt.c
+@@ -239,6 +239,7 @@ static int rwdt_probe(struct platform_device *pdev)
+ 	watchdog_set_drvdata(&priv->wdev, priv);
+ 	watchdog_set_nowayout(&priv->wdev, nowayout);
+ 	watchdog_set_restart_priority(&priv->wdev, 0);
++	watchdog_stop_on_unregister(&priv->wdev);
+ 
+ 	/* This overrides the default timeout only if DT configuration was found */
+ 	ret = watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
+diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
+index 255169916dbb..1e93c1b0e3cf 100644
+--- a/drivers/watchdog/sama5d4_wdt.c
++++ b/drivers/watchdog/sama5d4_wdt.c
+@@ -247,11 +247,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	ret = watchdog_init_timeout(wdd, wdt_timeout, &pdev->dev);
+-	if (ret) {
+-		dev_err(&pdev->dev, "unable to set timeout value\n");
+-		return ret;
+-	}
++	watchdog_init_timeout(wdd, wdt_timeout, &pdev->dev);
+ 
+ 	timeout = WDT_SEC2TICKS(wdd->timeout);
+ 
+diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
+index 7817836bff55..4b9365d4de7a 100644
+--- a/drivers/watchdog/w83627hf_wdt.c
++++ b/drivers/watchdog/w83627hf_wdt.c
+@@ -50,7 +50,7 @@ static int cr_wdt_csr;		/* WDT control & status register */
+ enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
+ 	     w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
+ 	     w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
+-	     nct6795, nct6102 };
++	     nct6795, nct6796, nct6102 };
+ 
+ static int timeout;			/* in seconds */
+ module_param(timeout, int, 0);
+@@ -100,6 +100,7 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
+ #define NCT6792_ID		0xc9
+ #define NCT6793_ID		0xd1
+ #define NCT6795_ID		0xd3
++#define NCT6796_ID		0xd4	/* also NCT9697D, NCT9698D */
+ 
+ #define W83627HF_WDT_TIMEOUT	0xf6
+ #define W83697HF_WDT_TIMEOUT	0xf4
+@@ -209,6 +210,7 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
+ 	case nct6792:
+ 	case nct6793:
+ 	case nct6795:
++	case nct6796:
+ 	case nct6102:
+ 		/*
+ 		 * These chips have a fixed WDTO# output pin (W83627UHG),
+@@ -407,6 +409,9 @@ static int wdt_find(int addr)
+ 	case NCT6795_ID:
+ 		ret = nct6795;
+ 		break;
++	case NCT6796_ID:
++		ret = nct6796;
++		break;
+ 	case NCT6102_ID:
+ 		ret = nct6102;
+ 		cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
+@@ -450,6 +455,7 @@ static int __init wdt_init(void)
+ 		"NCT6792",
+ 		"NCT6793",
+ 		"NCT6795",
++		"NCT6796",
+ 		"NCT6102",
+ 	};
+ 
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index ffbdc4642ea5..f6c24b22b37c 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -1019,16 +1019,16 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
+ 		old_wd_data = NULL;
+ 	}
+ 
+-	mutex_lock(&wd_data->lock);
+-	wd_data->wdd = NULL;
+-	wdd->wd_data = NULL;
+-	mutex_unlock(&wd_data->lock);
+-
+ 	if (watchdog_active(wdd) &&
+ 	    test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
+ 		watchdog_stop(wdd);
+ 	}
+ 
++	mutex_lock(&wd_data->lock);
++	wd_data->wdd = NULL;
++	wdd->wd_data = NULL;
++	mutex_unlock(&wd_data->lock);
++
+ 	hrtimer_cancel(&wd_data->timer);
+ 	kthread_cancel_work_sync(&wd_data->work);
+ 
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 61dc1b0e4465..badbb8b4f0f1 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2284,7 +2284,7 @@ again:
+ 			dxroot->info.indirect_levels += 1;
+ 			dxtrace(printk(KERN_DEBUG
+ 				       "Creating %d level index...\n",
+-				       info->indirect_levels));
++				       dxroot->info.indirect_levels));
+ 			err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
+ 			if (err)
+ 				goto journal_error;
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index dd29a49143f5..8c4cb1eee10a 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1244,7 +1244,7 @@ stop:
+ 
+ 	put_gc_inode(&gc_list);
+ 
+-	if (sync)
++	if (sync && !ret)
+ 		ret = sec_freed ? 0 : -EAGAIN;
+ 	return ret;
+ }
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index b05e10c332b7..15779123d089 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1556,6 +1556,7 @@ skip:
+ 		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
+ 
+ 	limit_reserve_root(sbi);
++	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
+ 	return 0;
+ restore_gc:
+ 	if (need_restart_gc) {
+diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
+index b96d39c28e17..5d72e8b66a26 100644
+--- a/fs/gfs2/incore.h
++++ b/fs/gfs2/incore.h
+@@ -623,6 +623,7 @@ enum {
+ 	SDF_RORECOVERY		= 7, /* read only recovery */
+ 	SDF_SKIP_DLM_UNLOCK	= 8,
+ 	SDF_FORCE_AIL_FLUSH     = 9,
++	SDF_AIL1_IO_ERROR	= 10,
+ };
+ 
+ enum gfs2_freeze_state {
+diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
+index cd85092723de..90b5c8d0c56a 100644
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -108,7 +108,9 @@ __acquires(&sdp->sd_ail_lock)
+ 		gfs2_assert(sdp, bd->bd_tr == tr);
+ 
+ 		if (!buffer_busy(bh)) {
+-			if (!buffer_uptodate(bh)) {
++			if (!buffer_uptodate(bh) &&
++			    !test_and_set_bit(SDF_AIL1_IO_ERROR,
++					      &sdp->sd_flags)) {
+ 				gfs2_io_error_bh(sdp, bh);
+ 				*withdraw = true;
+ 			}
+@@ -206,7 +208,8 @@ static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+ 		gfs2_assert(sdp, bd->bd_tr == tr);
+ 		if (buffer_busy(bh))
+ 			continue;
+-		if (!buffer_uptodate(bh)) {
++		if (!buffer_uptodate(bh) &&
++		    !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) {
+ 			gfs2_io_error_bh(sdp, bh);
+ 			*withdraw = true;
+ 		}
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index c212893534ed..a971862b186e 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -854,10 +854,10 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+ 	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
+ 		return error;
+ 
++	flush_workqueue(gfs2_delete_workqueue);
+ 	kthread_stop(sdp->sd_quotad_process);
+ 	kthread_stop(sdp->sd_logd_process);
+ 
+-	flush_workqueue(gfs2_delete_workqueue);
+ 	gfs2_quota_sync(sdp->sd_vfs, 0);
+ 	gfs2_statfs_sync(sdp->sd_vfs, 0);
+ 
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index 59c811de0dc7..6a02cc890daf 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -256,12 +256,13 @@ void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
+ 			const char *function, char *file, unsigned int line,
+ 			bool withdraw)
+ {
+-	fs_err(sdp,
+-	       "fatal: I/O error\n"
+-	       "  block = %llu\n"
+-	       "  function = %s, file = %s, line = %u\n",
+-	       (unsigned long long)bh->b_blocknr,
+-	       function, file, line);
++	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
++		fs_err(sdp,
++		       "fatal: I/O error\n"
++		       "  block = %llu\n"
++		       "  function = %s, file = %s, line = %u\n",
++		       (unsigned long long)bh->b_blocknr,
++		       function, file, line);
+ 	if (withdraw)
+ 		gfs2_lm_withdraw(sdp, NULL);
+ }
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index c5c3394148f7..74ff459b75ef 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -103,7 +103,7 @@ int nfs4_check_delegation(struct inode *inode, fmode_t flags)
+ 	return nfs4_do_check_delegation(inode, flags, false);
+ }
+ 
+-static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
++static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_stateid *stateid)
+ {
+ 	struct inode *inode = state->inode;
+ 	struct file_lock *fl;
+@@ -118,7 +118,7 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
+ 	spin_lock(&flctx->flc_lock);
+ restart:
+ 	list_for_each_entry(fl, list, fl_list) {
+-		if (nfs_file_open_context(fl->fl_file) != ctx)
++		if (nfs_file_open_context(fl->fl_file)->state != state)
+ 			continue;
+ 		spin_unlock(&flctx->flc_lock);
+ 		status = nfs4_lock_delegation_recall(fl, state, stateid);
+@@ -165,7 +165,7 @@ again:
+ 		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+ 		err = nfs4_open_delegation_recall(ctx, state, stateid);
+ 		if (!err)
+-			err = nfs_delegation_claim_locks(ctx, state, stateid);
++			err = nfs_delegation_claim_locks(state, stateid);
+ 		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
+ 			err = -EAGAIN;
+ 		mutex_unlock(&sp->so_delegreturn_mutex);
+diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
+index dd28079f518c..19739aaee675 100644
+--- a/fs/orangefs/orangefs-sysfs.c
++++ b/fs/orangefs/orangefs-sysfs.c
+@@ -323,7 +323,7 @@ static ssize_t sysfs_service_op_show(struct kobject *kobj,
+ 	/* Can't do a service_operation if the client is not running... */
+ 	rc = is_daemon_in_service();
+ 	if (rc) {
+-		pr_info("%s: Client not running :%d:\n",
++		pr_info_ratelimited("%s: Client not running :%d:\n",
+ 			__func__,
+ 			is_daemon_in_service());
+ 		goto out;
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index cbde728f8ac6..5c5f161763c8 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -176,6 +176,16 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+ 	return remap_pfn_range(vma, from, pfn, size, prot);
+ }
+ 
++/*
++ * Architectures which support memory encryption override this.
++ */
++ssize_t __weak
++copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
++			   unsigned long offset, int userbuf)
++{
++	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
++}
++
+ /*
+  * Copy to either kernel or user space
+  */
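copy_oldmem_page_encrypted() is added as a __weak default that simply falls back to copy_oldmem_page(); an architecture with memory encryption can provide its own strong definition and the linker will prefer it. A minimal illustration of the weak-symbol override mechanism (GCC/Clang attribute, simplified prototype, not the vmcore signature):

#include <stdio.h>

/* Generic default, marked weak: used only if nothing overrides it. */
__attribute__((weak)) long copy_page_encrypted(unsigned long pfn)
{
	printf("generic copy of pfn %lu\n", pfn);
	return 0;
}

/*
 * If another object file in the link defines a non-weak
 * copy_page_encrypted(), the linker picks that one and this
 * default silently drops out -- the caller never changes.
 */
int main(void)
{
	return (int)copy_page_encrypted(42);
}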
+diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
+index 4325d6fdde9b..317aecaed897 100644
+--- a/include/linux/cpuidle.h
++++ b/include/linux/cpuidle.h
+@@ -81,6 +81,7 @@ struct cpuidle_device {
+ 	unsigned int		registered:1;
+ 	unsigned int		enabled:1;
+ 	unsigned int		use_deepest_state:1;
++	unsigned int		poll_time_limit:1;
+ 	unsigned int		cpu;
+ 
+ 	int			last_residency;
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index 3e7e75383d32..7bfed8460c78 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -736,8 +736,6 @@ extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var);
+ extern const unsigned char *fb_firmware_edid(struct device *device);
+ extern void fb_edid_to_monspecs(unsigned char *edid,
+ 				struct fb_monspecs *specs);
+-extern void fb_edid_add_monspecs(unsigned char *edid,
+-				 struct fb_monspecs *specs);
+ extern void fb_destroy_modedb(struct fb_videomode *modedb);
+ extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
+ extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
+@@ -811,7 +809,6 @@ struct dmt_videomode {
+ 
+ extern const char *fb_mode_option;
+ extern const struct fb_videomode vesa_modes[];
+-extern const struct fb_videomode cea_modes[65];
+ extern const struct dmt_videomode dmt_modes[];
+ 
+ struct fb_modelist {
+diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
+index f8f1f6b952a6..eb9805bb3fe8 100644
+--- a/include/linux/platform_data/dma-ep93xx.h
++++ b/include/linux/platform_data/dma-ep93xx.h
+@@ -85,7 +85,7 @@ static inline enum dma_transfer_direction
+ ep93xx_dma_chan_direction(struct dma_chan *chan)
+ {
+ 	if (!ep93xx_dma_chan_is_m2p(chan))
+-		return DMA_NONE;
++		return DMA_TRANS_NONE;
+ 
+ 	/* even channels are for TX, odd for RX */
+ 	return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
+index 592653becd91..ad2e243f3f03 100644
+--- a/include/linux/sunrpc/sched.h
++++ b/include/linux/sunrpc/sched.h
+@@ -188,7 +188,6 @@ struct rpc_timer {
+ struct rpc_wait_queue {
+ 	spinlock_t		lock;
+ 	struct list_head	tasks[RPC_NR_PRIORITY];	/* task queue for each priority level */
+-	pid_t			owner;			/* process id of last task serviced */
+ 	unsigned char		maxpriority;		/* maximum priority (0 if queue is not a priority queue) */
+ 	unsigned char		priority;		/* current priority */
+ 	unsigned char		nr;			/* # tasks remaining for cookie */
+@@ -204,7 +203,6 @@ struct rpc_wait_queue {
+  * from a single cookie.  The aim is to improve
+  * performance of NFS operations such as read/write.
+  */
+-#define RPC_BATCH_COUNT			16
+ #define RPC_IS_PRIORITY(q)		((q)->maxpriority > 0)
+ 
+ /*
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index f3d475024d37..54e4d1fd21f8 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -1147,7 +1147,7 @@ struct ib_qp_init_attr {
+ 	struct ib_qp_cap	cap;
+ 	enum ib_sig_type	sq_sig_type;
+ 	enum ib_qp_type		qp_type;
+-	enum ib_qp_create_flags	create_flags;
++	u32			create_flags;
+ 
+ 	/*
+ 	 * Only needed for special QP types, or when using the RW API.
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 138f0302692e..378cef70341c 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -2114,6 +2114,9 @@ static int btf_parse_hdr(struct btf_verifier_env *env, void __user *btf_data,
+ 
+ 	hdr = &btf->hdr;
+ 
++	if (hdr->hdr_len != hdr_len)
++		return -EINVAL;
++
+ 	btf_verifier_log_hdr(env, btf_data_size);
+ 
+ 	if (hdr->magic != BTF_MAGIC) {
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 9bb57ce57d98..8d6b8b5493f9 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -375,6 +375,7 @@ void __init cpu_smt_disable(bool force)
+ 		pr_info("SMT: Force disabled\n");
+ 		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
+ 	} else {
++		pr_info("SMT: disabled\n");
+ 		cpu_smt_control = CPU_SMT_DISABLED;
+ 	}
+ }
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index f50b90d0d1c2..faeec8255e7e 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -473,6 +473,10 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
+ 		}
+ 	}
+ 
++	/* Ensure that these pages are decrypted if SME is enabled. */
++	if (pages)
++		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
++
+ 	return pages;
+ }
+ 
+@@ -869,6 +873,7 @@ static int kimage_load_crash_segment(struct kimage *image,
+ 			result  = -ENOMEM;
+ 			goto out;
+ 		}
++		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
+ 		ptr = kmap(page);
+ 		ptr += maddr & ~PAGE_MASK;
+ 		mchunk = min_t(size_t, mbytes,
+@@ -886,6 +891,7 @@ static int kimage_load_crash_segment(struct kimage *image,
+ 			result = copy_from_user(ptr, buf, uchunk);
+ 		kexec_flush_icache_page(page);
+ 		kunmap(page);
++		arch_kexec_pre_free_pages(page_address(page), 1);
+ 		if (result) {
+ 			result = -EFAULT;
+ 			goto out;
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index d0d03223b45b..c7b3d5489937 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -423,6 +423,7 @@ static u32 log_next_idx;
+ /* the next printk record to write to the console */
+ static u64 console_seq;
+ static u32 console_idx;
++static u64 exclusive_console_stop_seq;
+ 
+ /* the next printk record to read after the last 'clear' command */
+ static u64 clear_seq;
+@@ -437,6 +438,7 @@ static u32 clear_idx;
+ /* record buffer */
+ #define LOG_ALIGN __alignof__(struct printk_log)
+ #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
++#define LOG_BUF_LEN_MAX (u32)(1 << 31)
+ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
+ static char *log_buf = __log_buf;
+ static u32 log_buf_len = __LOG_BUF_LEN;
+@@ -1037,18 +1039,23 @@ void log_buf_vmcoreinfo_setup(void)
+ static unsigned long __initdata new_log_buf_len;
+ 
+ /* we practice scaling the ring buffer by powers of 2 */
+-static void __init log_buf_len_update(unsigned size)
++static void __init log_buf_len_update(u64 size)
+ {
++	if (size > (u64)LOG_BUF_LEN_MAX) {
++		size = (u64)LOG_BUF_LEN_MAX;
++		pr_err("log_buf over 2G is not supported.\n");
++	}
++
+ 	if (size)
+ 		size = roundup_pow_of_two(size);
+ 	if (size > log_buf_len)
+-		new_log_buf_len = size;
++		new_log_buf_len = (unsigned long)size;
+ }
+ 
+ /* save requested log_buf_len since it's too early to process it */
+ static int __init log_buf_len_setup(char *str)
+ {
+-	unsigned int size;
++	u64 size;
+ 
+ 	if (!str)
+ 		return -EINVAL;
+@@ -1118,7 +1125,7 @@ void __init setup_log_buf(int early)
+ 	}
+ 
+ 	if (unlikely(!new_log_buf)) {
+-		pr_err("log_buf_len: %ld bytes not available\n",
++		pr_err("log_buf_len: %lu bytes not available\n",
+ 			new_log_buf_len);
+ 		return;
+ 	}
+@@ -1131,8 +1138,8 @@ void __init setup_log_buf(int early)
+ 	memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
+ 	logbuf_unlock_irqrestore(flags);
+ 
+-	pr_info("log_buf_len: %d bytes\n", log_buf_len);
+-	pr_info("early log buf free: %d(%d%%)\n",
++	pr_info("log_buf_len: %u bytes\n", log_buf_len);
++	pr_info("early log buf free: %u(%u%%)\n",
+ 		free, (free * 100) / __LOG_BUF_LEN);
+ }
+ 
+@@ -2014,6 +2021,7 @@ static u64 syslog_seq;
+ static u32 syslog_idx;
+ static u64 console_seq;
+ static u32 console_idx;
++static u64 exclusive_console_stop_seq;
+ static u64 log_first_seq;
+ static u32 log_first_idx;
+ static u64 log_next_seq;
+@@ -2356,8 +2364,9 @@ again:
+ 		printk_safe_enter_irqsave(flags);
+ 		raw_spin_lock(&logbuf_lock);
+ 		if (console_seq < log_first_seq) {
+-			len = sprintf(text, "** %u printk messages dropped **\n",
+-				      (unsigned)(log_first_seq - console_seq));
++			len = sprintf(text,
++				      "** %llu printk messages dropped **\n",
++				      log_first_seq - console_seq);
+ 
+ 			/* messages are gone, move to first one */
+ 			console_seq = log_first_seq;
+@@ -2381,6 +2390,12 @@ skip:
+ 			goto skip;
+ 		}
+ 
++		/* Output to all consoles once old messages replayed. */
++		if (unlikely(exclusive_console &&
++			     console_seq >= exclusive_console_stop_seq)) {
++			exclusive_console = NULL;
++		}
++
+ 		len += msg_print_text(msg,
+ 				console_msg_format & MSG_FORMAT_SYSLOG,
+ 				text + len,
+@@ -2423,10 +2438,6 @@ skip:
+ 
+ 	console_locked = 0;
+ 
+-	/* Release the exclusive_console once it is used */
+-	if (unlikely(exclusive_console))
+-		exclusive_console = NULL;
+-
+ 	raw_spin_unlock(&logbuf_lock);
+ 
+ 	up_console_sem();
+@@ -2704,13 +2715,18 @@ void register_console(struct console *newcon)
+ 		logbuf_lock_irqsave(flags);
+ 		console_seq = syslog_seq;
+ 		console_idx = syslog_idx;
+-		logbuf_unlock_irqrestore(flags);
+ 		/*
+ 		 * We're about to replay the log buffer.  Only do this to the
+ 		 * just-registered console to avoid excessive message spam to
+ 		 * the already-registered consoles.
++		 *
++		 * Set exclusive_console with disabled interrupts to reduce
++		 * race window with eventual console_flush_on_panic() that
++		 * ignores console_lock.
+ 		 */
+ 		exclusive_console = newcon;
++		exclusive_console_stop_seq = console_seq;
++		logbuf_unlock_irqrestore(flags);
+ 	}
+ 	console_unlock();
+ 	console_sysfs_notify();
+diff --git a/lib/idr.c b/lib/idr.c
+index fab2fd5bc326..61383564a6c5 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -231,11 +231,22 @@ void *idr_get_next(struct idr *idr, int *nextid)
+ {
+ 	struct radix_tree_iter iter;
+ 	void __rcu **slot;
++	void *entry = NULL;
+ 	unsigned long base = idr->idr_base;
+ 	unsigned long id = *nextid;
+ 
+ 	id = (id < base) ? 0 : id - base;
+-	slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
++	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
++		entry = rcu_dereference_raw(*slot);
++		if (!entry)
++			continue;
++		if (!radix_tree_deref_retry(entry))
++			break;
++		if (slot != (void *)&idr->idr_rt.rnode &&
++				entry != (void *)RADIX_TREE_INTERNAL_NODE)
++			break;
++		slot = radix_tree_iter_retry(&iter);
++	}
+ 	if (!slot)
+ 		return NULL;
+ 	id = iter.index + base;
+@@ -244,7 +255,7 @@ void *idr_get_next(struct idr *idr, int *nextid)
+ 		return NULL;
+ 
+ 	*nextid = id;
+-	return rcu_dereference_raw(*slot);
++	return entry;
+ }
+ EXPORT_SYMBOL(idr_get_next);
+ 
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index af6735562215..7965112eb063 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -437,70 +437,33 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
+ 	zone_span_writeunlock(zone);
+ }
+ 
+-static void shrink_pgdat_span(struct pglist_data *pgdat,
+-			      unsigned long start_pfn, unsigned long end_pfn)
++static void update_pgdat_span(struct pglist_data *pgdat)
+ {
+-	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
+-	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
+-	unsigned long pgdat_end_pfn = p;
+-	unsigned long pfn;
+-	struct mem_section *ms;
+-	int nid = pgdat->node_id;
+-
+-	if (pgdat_start_pfn == start_pfn) {
+-		/*
+-		 * If the section is smallest section in the pgdat, it need
+-		 * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
+-		 * In this case, we find second smallest valid mem_section
+-		 * for shrinking zone.
+-		 */
+-		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
+-						pgdat_end_pfn);
+-		if (pfn) {
+-			pgdat->node_start_pfn = pfn;
+-			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
+-		}
+-	} else if (pgdat_end_pfn == end_pfn) {
+-		/*
+-		 * If the section is biggest section in the pgdat, it need
+-		 * shrink pgdat->node_spanned_pages.
+-		 * In this case, we find second biggest valid mem_section for
+-		 * shrinking zone.
+-		 */
+-		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
+-					       start_pfn);
+-		if (pfn)
+-			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
+-	}
++	unsigned long node_start_pfn = 0, node_end_pfn = 0;
++	struct zone *zone;
+ 
+-	/*
+-	 * If the section is not biggest or smallest mem_section in the pgdat,
+-	 * it only creates a hole in the pgdat. So in this case, we need not
+-	 * change the pgdat.
+-	 * But perhaps, the pgdat has only hole data. Thus it check the pgdat
+-	 * has only hole or not.
+-	 */
+-	pfn = pgdat_start_pfn;
+-	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
+-		ms = __pfn_to_section(pfn);
++	for (zone = pgdat->node_zones;
++	     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
++		unsigned long zone_end_pfn = zone->zone_start_pfn +
++					     zone->spanned_pages;
+ 
+-		if (unlikely(!valid_section(ms)))
++		/* No need to lock the zones, they can't change. */
++		if (!zone->spanned_pages)
+ 			continue;
+-
+-		if (pfn_to_nid(pfn) != nid)
+-			continue;
+-
+-		 /* If the section is current section, it continues the loop */
+-		if (start_pfn == pfn)
++		if (!node_end_pfn) {
++			node_start_pfn = zone->zone_start_pfn;
++			node_end_pfn = zone_end_pfn;
+ 			continue;
++		}
+ 
+-		/* If we find valid section, we have nothing to do */
+-		return;
++		if (zone_end_pfn > node_end_pfn)
++			node_end_pfn = zone_end_pfn;
++		if (zone->zone_start_pfn < node_start_pfn)
++			node_start_pfn = zone->zone_start_pfn;
+ 	}
+ 
+-	/* The pgdat has no valid section */
+-	pgdat->node_start_pfn = 0;
+-	pgdat->node_spanned_pages = 0;
++	pgdat->node_start_pfn = node_start_pfn;
++	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
+ }
+ 
+ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
+@@ -511,7 +474,7 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
+ 
+ 	pgdat_resize_lock(zone->zone_pgdat, &flags);
+ 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
+-	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
++	update_pgdat_span(pgdat);
+ 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
+ }
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4a2ee1ce6c02..e96c88b1465d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4296,6 +4296,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+ 	struct netdev_rx_queue *rxqueue;
+ 	void *orig_data, *orig_data_end;
+ 	u32 metalen, act = XDP_DROP;
++	__be16 orig_eth_type;
++	struct ethhdr *eth;
++	bool orig_bcast;
+ 	int hlen, off;
+ 	u32 mac_len;
+ 
+@@ -4336,6 +4339,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+ 	xdp->data_hard_start = skb->data - skb_headroom(skb);
+ 	orig_data_end = xdp->data_end;
+ 	orig_data = xdp->data;
++	eth = (struct ethhdr *)xdp->data;
++	orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
++	orig_eth_type = eth->h_proto;
+ 
+ 	rxqueue = netif_get_rxqueue(skb);
+ 	xdp->rxq = &rxqueue->xdp_rxq;
+@@ -4359,6 +4365,14 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+ 
+ 	}
+ 
++	/* check if XDP changed eth hdr such SKB needs update */
++	eth = (struct ethhdr *)xdp->data;
++	if ((orig_eth_type != eth->h_proto) ||
++	    (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
++		__skb_push(skb, ETH_HLEN);
++		skb->protocol = eth_type_trans(skb, skb->dev);
++	}
++
+ 	switch (act) {
+ 	case XDP_REDIRECT:
+ 	case XDP_TX:
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 647ba447bf1a..a7a804bece7a 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3910,8 +3910,8 @@ void __init tcp_init(void)
+ 	init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
+ 
+ 	init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
+-	init_net.ipv4.sysctl_tcp_rmem[1] = 87380;
+-	init_net.ipv4.sysctl_tcp_rmem[2] = max(87380, max_rshare);
++	init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
++	init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
+ 
+ 	pr_info("Hash tables configured (established %u bind %u)\n",
+ 		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 14a6a489937c..57e8dad956ec 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -426,26 +426,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
+ 	}
+ }
+ 
+-/* 3. Tuning rcvbuf, when connection enters established state. */
+-static void tcp_fixup_rcvbuf(struct sock *sk)
+-{
+-	u32 mss = tcp_sk(sk)->advmss;
+-	int rcvmem;
+-
+-	rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
+-		 tcp_default_init_rwnd(mss);
+-
+-	/* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency
+-	 * Allow enough cushion so that sender is not limited by our window
+-	 */
+-	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)
+-		rcvmem <<= 2;
+-
+-	if (sk->sk_rcvbuf < rcvmem)
+-		sk->sk_rcvbuf = min(rcvmem, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+-}
+-
+-/* 4. Try to fixup all. It is made immediately after connection enters
++/* 3. Try to fixup all. It is made immediately after connection enters
+  *    established state.
+  */
+ void tcp_init_buffer_space(struct sock *sk)
+@@ -454,12 +435,10 @@ void tcp_init_buffer_space(struct sock *sk)
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	int maxwin;
+ 
+-	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
+-		tcp_fixup_rcvbuf(sk);
+ 	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
+ 		tcp_sndbuf_expand(sk);
+ 
+-	tp->rcvq_space.space = tp->rcv_wnd;
++	tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
+ 	tcp_mstamp_refresh(tp);
+ 	tp->rcvq_space.time = tp->tcp_mstamp;
+ 	tp->rcvq_space.seq = tp->copied_seq;
+@@ -485,7 +464,7 @@ void tcp_init_buffer_space(struct sock *sk)
+ 	tp->snd_cwnd_stamp = tcp_jiffies32;
+ }
+ 
+-/* 5. Recalculate window clamp after socket hit its memory bounds. */
++/* 4. Recalculate window clamp after socket hit its memory bounds. */
+ static void tcp_clamp_window(struct sock *sk)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 2697e4397e46..53f910bb5508 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -179,21 +179,6 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+ 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
+ }
+ 
+-
+-u32 tcp_default_init_rwnd(u32 mss)
+-{
+-	/* Initial receive window should be twice of TCP_INIT_CWND to
+-	 * enable proper sending of new unsent data during fast recovery
+-	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
+-	 * limit when mss is larger than 1460.
+-	 */
+-	u32 init_rwnd = TCP_INIT_CWND * 2;
+-
+-	if (mss > 1460)
+-		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
+-	return init_rwnd;
+-}
+-
+ /* Determine a window scaling and initial window to offer.
+  * Based on the assumption that the given amount of space
+  * will be offered. Store the results in the tp structure.
+@@ -228,7 +213,10 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
+ 	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+ 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
+ 	else
+-		(*rcv_wnd) = space;
++		(*rcv_wnd) = min_t(u32, space, U16_MAX);
++
++	if (init_rcv_wnd)
++		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
+ 
+ 	(*rcv_wscale) = 0;
+ 	if (wscale_ok) {
+@@ -241,11 +229,6 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
+ 			(*rcv_wscale)++;
+ 		}
+ 	}
+-
+-	if (!init_rcv_wnd) /* Use default unless specified otherwise */
+-		init_rcv_wnd = tcp_default_init_rwnd(mss);
+-	*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
+-
+ 	/* Set the clamp no higher than max representable value */
+ 	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
+ }
+diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
+index 67ebdeaffbbc..3d5520776655 100644
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -129,7 +129,7 @@
+ 
+ #define CCK_GROUP					\
+ 	[MINSTREL_CCK_GROUP] = {			\
+-		.streams = 0,				\
++		.streams = 1,				\
+ 		.flags = 0,				\
+ 		.duration = {				\
+ 			CCK_DURATION_LIST(false),	\
+@@ -282,7 +282,8 @@ minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+ 				break;
+ 
+ 		/* short preamble */
+-		if (!(mi->supported[group] & BIT(idx)))
++		if ((mi->supported[group] & BIT(idx + 4)) &&
++		    (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
+ 			idx += 4;
+ 	}
+ 	return &mi->groups[group].rates[idx];
+@@ -1077,18 +1078,23 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
+ 		return;
+ 
+ 	sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
++	sample_idx %= MCS_GROUP_RATES;
++
++	if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] &&
++	    (sample_idx >= 4) != txrc->short_preamble)
++		return;
++
+ 	info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+ 	rate->count = 1;
+ 
+-	if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
++	if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP]) {
+ 		int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
+ 		rate->idx = mp->cck_rates[idx];
+ 	} else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) {
+ 		ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES,
+ 				       sample_group->streams);
+ 	} else {
+-		rate->idx = sample_idx % MCS_GROUP_RATES +
+-			    (sample_group->streams - 1) * 8;
++		rate->idx = sample_idx + (sample_group->streams - 1) * 8;
+ 	}
+ 
+ 	rate->flags = sample_group->flags;
+@@ -1132,7 +1138,6 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
+ 	struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
+ 	u16 sta_cap = sta->ht_cap.cap;
+ 	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+-	struct sta_info *sinfo = container_of(sta, struct sta_info, sta);
+ 	int use_vht;
+ 	int n_supported = 0;
+ 	int ack_dur;
+@@ -1258,8 +1263,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
+ 	if (!n_supported)
+ 		goto use_legacy;
+ 
+-	if (test_sta_flag(sinfo, WLAN_STA_SHORT_PREAMBLE))
+-		mi->cck_supported_short |= mi->cck_supported_short << 4;
++	mi->supported[MINSTREL_CCK_GROUP] |= mi->cck_supported_short << 4;
+ 
+ 	/* create an initial rate table with the lowest supported rates */
+ 	minstrel_ht_update_stats(mp, mi);
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 1245e02239d9..469f9da5073b 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -269,6 +269,24 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
+ 	kfree(expr->ops);
+ }
+ 
++static int nft_extension_dump_info(struct sk_buff *skb, int attr,
++				   const void *info,
++				   unsigned int size, unsigned int user_size)
++{
++	unsigned int info_size, aligned_size = XT_ALIGN(size);
++	struct nlattr *nla;
++
++	nla = nla_reserve(skb, attr, aligned_size);
++	if (!nla)
++		return -1;
++
++	info_size = user_size ? : size;
++	memcpy(nla_data(nla), info, info_size);
++	memset(nla_data(nla) + info_size, 0, aligned_size - info_size);
++
++	return 0;
++}
++
+ static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
+ {
+ 	const struct xt_target *target = expr->ops->data;
+@@ -276,7 +294,8 @@ static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
+ 
+ 	if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
+ 	    nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
+-	    nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(target->targetsize), info))
++	    nft_extension_dump_info(skb, NFTA_TARGET_INFO, info,
++				    target->targetsize, target->usersize))
+ 		goto nla_put_failure;
+ 
+ 	return 0;
+@@ -504,7 +523,8 @@ static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
+ 
+ 	if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
+ 	    nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
+-	    nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(match->matchsize), info))
++	    nft_extension_dump_info(skb, NFTA_MATCH_INFO, info,
++				    match->matchsize, match->usersize))
+ 		goto nla_put_failure;
+ 
+ 	return 0;
+diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
+index d2356a284646..3ebf8ba7c389 100644
+--- a/net/openvswitch/vport-internal_dev.c
++++ b/net/openvswitch/vport-internal_dev.c
+@@ -43,7 +43,8 @@ static struct internal_dev *internal_dev_priv(struct net_device *netdev)
+ }
+ 
+ /* Called with rcu_read_lock_bh. */
+-static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t
++internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+ 	int len, err;
+ 
+@@ -62,7 +63,7 @@ static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	} else {
+ 		netdev->stats.tx_errors++;
+ 	}
+-	return 0;
++	return NETDEV_TX_OK;
+ }
+ 
+ static int internal_dev_open(struct net_device *netdev)
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 30e32df5f84a..8a4d01e427a2 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -577,6 +577,18 @@ struct Qdisc noop_qdisc = {
+ 	.dev_queue	=	&noop_netdev_queue,
+ 	.running	=	SEQCNT_ZERO(noop_qdisc.running),
+ 	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
++	.gso_skb = {
++		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
++		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
++		.qlen = 0,
++		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
++	},
++	.skb_bad_txq = {
++		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
++		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
++		.qlen = 0,
++		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
++	},
+ };
+ EXPORT_SYMBOL(noop_qdisc);
+ 
+@@ -1253,8 +1265,6 @@ static void dev_init_scheduler_queue(struct net_device *dev,
+ 
+ 	rcu_assign_pointer(dev_queue->qdisc, qdisc);
+ 	dev_queue->qdisc_sleeping = qdisc;
+-	__skb_queue_head_init(&qdisc->gso_skb);
+-	__skb_queue_head_init(&qdisc->skb_bad_txq);
+ }
+ 
+ void dev_init_scheduler(struct net_device *dev)
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 3fe5d60ab0e2..e2808586c9e6 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -99,64 +99,78 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
+ 	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
+ }
+ 
+-static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
+-{
+-	struct list_head *q = &queue->tasks[queue->priority];
+-	struct rpc_task *task;
+-
+-	if (!list_empty(q)) {
+-		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
+-		if (task->tk_owner == queue->owner)
+-			list_move_tail(&task->u.tk_wait.list, q);
+-	}
+-}
+-
+ static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
+ {
+ 	if (queue->priority != priority) {
+-		/* Fairness: rotate the list when changing priority */
+-		rpc_rotate_queue_owner(queue);
+ 		queue->priority = priority;
++		queue->nr = 1U << priority;
+ 	}
+ }
+ 
+-static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
+-{
+-	queue->owner = pid;
+-	queue->nr = RPC_BATCH_COUNT;
+-}
+-
+ static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
+ {
+ 	rpc_set_waitqueue_priority(queue, queue->maxpriority);
+-	rpc_set_waitqueue_owner(queue, 0);
+ }
+ 
+ /*
+- * Add new request to a priority queue.
++ * Add a request to a queue list
+  */
+-static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
+-		struct rpc_task *task,
+-		unsigned char queue_priority)
++static void
++__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
+ {
+-	struct list_head *q;
+ 	struct rpc_task *t;
+ 
+-	INIT_LIST_HEAD(&task->u.tk_wait.links);
+-	if (unlikely(queue_priority > queue->maxpriority))
+-		queue_priority = queue->maxpriority;
+-	if (queue_priority > queue->priority)
+-		rpc_set_waitqueue_priority(queue, queue_priority);
+-	q = &queue->tasks[queue_priority];
+ 	list_for_each_entry(t, q, u.tk_wait.list) {
+ 		if (t->tk_owner == task->tk_owner) {
+-			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
++			list_add_tail(&task->u.tk_wait.links,
++					&t->u.tk_wait.links);
++			/* Cache the queue head in task->u.tk_wait.list */
++			task->u.tk_wait.list.next = q;
++			task->u.tk_wait.list.prev = NULL;
+ 			return;
+ 		}
+ 	}
++	INIT_LIST_HEAD(&task->u.tk_wait.links);
+ 	list_add_tail(&task->u.tk_wait.list, q);
+ }
+ 
++/*
++ * Remove request from a queue list
++ */
++static void
++__rpc_list_dequeue_task(struct rpc_task *task)
++{
++	struct list_head *q;
++	struct rpc_task *t;
++
++	if (task->u.tk_wait.list.prev == NULL) {
++		list_del(&task->u.tk_wait.links);
++		return;
++	}
++	if (!list_empty(&task->u.tk_wait.links)) {
++		t = list_first_entry(&task->u.tk_wait.links,
++				struct rpc_task,
++				u.tk_wait.links);
++		/* Assume __rpc_list_enqueue_task() cached the queue head */
++		q = t->u.tk_wait.list.next;
++		list_add_tail(&t->u.tk_wait.list, q);
++		list_del(&task->u.tk_wait.links);
++	}
++	list_del(&task->u.tk_wait.list);
++}
++
++/*
++ * Add new request to a priority queue.
++ */
++static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
++		struct rpc_task *task,
++		unsigned char queue_priority)
++{
++	if (unlikely(queue_priority > queue->maxpriority))
++		queue_priority = queue->maxpriority;
++	__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
++}
++
+ /*
+  * Add new request to wait queue.
+  *
+@@ -194,13 +208,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
+  */
+ static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
+ {
+-	struct rpc_task *t;
+-
+-	if (!list_empty(&task->u.tk_wait.links)) {
+-		t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
+-		list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
+-		list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
+-	}
++	__rpc_list_dequeue_task(task);
+ }
+ 
+ /*
+@@ -212,7 +220,8 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas
+ 	__rpc_disable_timer(queue, task);
+ 	if (RPC_IS_PRIORITY(queue))
+ 		__rpc_remove_wait_queue_priority(task);
+-	list_del(&task->u.tk_wait.list);
++	else
++		list_del(&task->u.tk_wait.list);
+ 	queue->qlen--;
+ 	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
+ 			task->tk_pid, queue, rpc_qname(queue));
+@@ -493,17 +502,9 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
+ 	 * Service a batch of tasks from a single owner.
+ 	 */
+ 	q = &queue->tasks[queue->priority];
+-	if (!list_empty(q)) {
+-		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
+-		if (queue->owner == task->tk_owner) {
+-			if (--queue->nr)
+-				goto out;
+-			list_move_tail(&task->u.tk_wait.list, q);
+-		}
+-		/*
+-		 * Check if we need to switch queues.
+-		 */
+-		goto new_owner;
++	if (!list_empty(q) && --queue->nr) {
++		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
++		goto out;
+ 	}
+ 
+ 	/*
+@@ -515,7 +516,7 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
+ 		else
+ 			q = q - 1;
+ 		if (!list_empty(q)) {
+-			task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
++			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
+ 			goto new_queue;
+ 		}
+ 	} while (q != &queue->tasks[queue->priority]);
+@@ -525,8 +526,6 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
+ 
+ new_queue:
+ 	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
+-new_owner:
+-	rpc_set_waitqueue_owner(queue, task->tk_owner);
+ out:
+ 	return task;
+ }
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 3581168e6b99..5e7c13aa66d0 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -796,17 +796,11 @@ void xprt_connect(struct rpc_task *task)
+ 
+ static void xprt_connect_status(struct rpc_task *task)
+ {
+-	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
+-
+-	if (task->tk_status == 0) {
+-		xprt->stat.connect_count++;
+-		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
++	switch (task->tk_status) {
++	case 0:
+ 		dprintk("RPC: %5u xprt_connect_status: connection established\n",
+ 				task->tk_pid);
+-		return;
+-	}
+-
+-	switch (task->tk_status) {
++		break;
+ 	case -ECONNREFUSED:
+ 	case -ECONNRESET:
+ 	case -ECONNABORTED:
+@@ -823,7 +817,7 @@ static void xprt_connect_status(struct rpc_task *task)
+ 	default:
+ 		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
+ 				"server %s\n", task->tk_pid, -task->tk_status,
+-				xprt->servername);
++				task->tk_rqstp->rq_xprt->servername);
+ 		task->tk_status = -EIO;
+ 	}
+ }
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index 98cbc7b060ba..f56f36b4d742 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -242,8 +242,12 @@ rpcrdma_connect_worker(struct work_struct *work)
+ 
+ 	spin_lock_bh(&xprt->transport_lock);
+ 	if (ep->rep_connected > 0) {
+-		if (!xprt_test_and_set_connected(xprt))
++		if (!xprt_test_and_set_connected(xprt)) {
++			xprt->stat.connect_count++;
++			xprt->stat.connect_time += (long)jiffies -
++						   xprt->stat.connect_start;
+ 			xprt_wake_pending_tasks(xprt, 0);
++		}
+ 	} else {
+ 		if (xprt_test_and_clear_connected(xprt))
+ 			xprt_wake_pending_tasks(xprt, -ENOTCONN);
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 7d8cce1dfcad..c0d7875a64ff 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1611,6 +1611,9 @@ static void xs_tcp_state_change(struct sock *sk)
+ 			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
+ 			xprt_clear_connecting(xprt);
+ 
++			xprt->stat.connect_count++;
++			xprt->stat.connect_time += (long)jiffies -
++						   xprt->stat.connect_start;
+ 			xprt_wake_pending_tasks(xprt, -EAGAIN);
+ 		}
+ 		spin_unlock(&xprt->transport_lock);
+@@ -2029,8 +2032,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
+ 	}
+ 
+ 	/* Tell the socket layer to start connecting... */
+-	xprt->stat.connect_count++;
+-	xprt->stat.connect_start = jiffies;
+ 	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
+ }
+ 
+@@ -2062,6 +2063,9 @@ static int xs_local_setup_socket(struct sock_xprt *transport)
+ 	case 0:
+ 		dprintk("RPC:       xprt %p connected to %s\n",
+ 				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
++		xprt->stat.connect_count++;
++		xprt->stat.connect_time += (long)jiffies -
++					   xprt->stat.connect_start;
+ 		xprt_set_connected(xprt);
+ 	case -ENOBUFS:
+ 		break;
+@@ -2387,8 +2391,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ 	xs_set_memalloc(xprt);
+ 
+ 	/* Tell the socket layer to start connecting... */
+-	xprt->stat.connect_count++;
+-	xprt->stat.connect_start = jiffies;
+ 	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
+ 	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
+ 	switch (ret) {
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 2ef1f56504cb..5075fd293feb 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3396,7 +3396,7 @@ static void get_key_callback(void *c, struct key_params *params)
+ 			 params->cipher)))
+ 		goto nla_put_failure;
+ 
+-	if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx))
++	if (nla_put_u8(cookie->msg, NL80211_KEY_IDX, cookie->idx))
+ 		goto nla_put_failure;
+ 
+ 	nla_nest_end(cookie->msg, key);
+diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
+index 8cab91c482ff..d9117ab035f7 100644
+--- a/net/xdp/xdp_umem.c
++++ b/net/xdp/xdp_umem.c
+@@ -32,14 +32,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
+ {
+ 	unsigned long flags;
+ 
+-	if (xs->dev) {
+-		spin_lock_irqsave(&umem->xsk_list_lock, flags);
+-		list_del_rcu(&xs->list);
+-		spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
+-
+-		if (umem->zc)
+-			synchronize_net();
+-	}
++	spin_lock_irqsave(&umem->xsk_list_lock, flags);
++	list_del_rcu(&xs->list);
++	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
+ }
+ 
+ int xdp_umem_query(struct net_device *dev, u16 queue_id)
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 661504042d30..ff15207036dc 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -343,12 +343,18 @@ static int xsk_release(struct socket *sock)
+ 	local_bh_enable();
+ 
+ 	if (xs->dev) {
++		struct net_device *dev = xs->dev;
++
+ 		/* Wait for driver to stop using the xdp socket. */
+-		synchronize_net();
+-		dev_put(xs->dev);
++		xdp_del_sk_umem(xs->umem, xs);
+ 		xs->dev = NULL;
++		synchronize_net();
++		dev_put(dev);
+ 	}
+ 
++	xskq_destroy(xs->rx);
++	xskq_destroy(xs->tx);
++
+ 	sock_orphan(sk);
+ 	sock->sk = NULL;
+ 
+@@ -707,9 +713,6 @@ static void xsk_destruct(struct sock *sk)
+ 	if (!sock_flag(sk, SOCK_DEAD))
+ 		return;
+ 
+-	xskq_destroy(xs->rx);
+-	xskq_destroy(xs->tx);
+-	xdp_del_sk_umem(xs->umem, xs);
+ 	xdp_put_umem(xs->umem);
+ 
+ 	sk_refcnt_debug_dec(sk);
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 790b514f86b6..d5635908587f 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -131,7 +131,7 @@ struct sec_path *secpath_dup(struct sec_path *src)
+ 	sp->len = 0;
+ 	sp->olen = 0;
+ 
+-	memset(sp->ovec, 0, sizeof(sp->ovec[XFRM_MAX_OFFLOAD_DEPTH]));
++	memset(sp->ovec, 0, sizeof(sp->ovec));
+ 
+ 	if (src) {
+ 		int i;
+diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
+index bb9988914a56..32234481ad7d 100644
+--- a/samples/mei/mei-amt-version.c
++++ b/samples/mei/mei-amt-version.c
+@@ -370,7 +370,7 @@ static uint32_t amt_host_if_call(struct amt_host_if *acmd,
+ 			unsigned int expected_sz)
+ {
+ 	uint32_t in_buf_sz;
+-	uint32_t out_buf_sz;
++	ssize_t out_buf_sz;
+ 	ssize_t written;
+ 	uint32_t status;
+ 	struct amt_host_if_resp_header *msg_hdr;
+diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
+index 5bc4a1d587d4..60cb00fd0c69 100644
+--- a/sound/hda/ext/hdac_ext_controller.c
++++ b/sound/hda/ext/hdac_ext_controller.c
+@@ -48,9 +48,11 @@ void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *bus, bool enable)
+ 	}
+ 
+ 	if (enable)
+-		snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, 0, AZX_PPCTL_GPROCEN);
++		snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
++				 AZX_PPCTL_GPROCEN, AZX_PPCTL_GPROCEN);
+ 	else
+-		snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_GPROCEN, 0);
++		snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
++				 AZX_PPCTL_GPROCEN, 0);
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_enable);
+ 
+@@ -68,9 +70,11 @@ void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *bus, bool enable)
+ 	}
+ 
+ 	if (enable)
+-		snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, 0, AZX_PPCTL_PIE);
++		snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
++				 AZX_PPCTL_PIE, AZX_PPCTL_PIE);
+ 	else
+-		snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_PIE, 0);
++		snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
++				 AZX_PPCTL_PIE, 0);
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_int_enable);
+ 
+@@ -194,7 +198,8 @@ static int check_hdac_link_power_active(struct hdac_ext_link *link, bool enable)
+  */
+ int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link)
+ {
+-	snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
++	snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL,
++			 AZX_MLCTL_SPA, AZX_MLCTL_SPA);
+ 
+ 	return check_hdac_link_power_active(link, true);
+ }
+@@ -222,8 +227,8 @@ int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus)
+ 	int ret;
+ 
+ 	list_for_each_entry(hlink, &bus->hlink_list, list) {
+-		snd_hdac_updatel(hlink->ml_addr,
+-				AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
++		snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL,
++				 AZX_MLCTL_SPA, AZX_MLCTL_SPA);
+ 		ret = check_hdac_link_power_active(hlink, true);
+ 		if (ret < 0)
+ 			return ret;
+@@ -243,7 +248,8 @@ int snd_hdac_ext_bus_link_power_down_all(struct hdac_bus *bus)
+ 	int ret;
+ 
+ 	list_for_each_entry(hlink, &bus->hlink_list, list) {
+-		snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
++		snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL,
++				 AZX_MLCTL_SPA, 0);
+ 		ret = check_hdac_link_power_active(hlink, false);
+ 		if (ret < 0)
+ 			return ret;
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index f2cabfdced05..6a9b89e05dae 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4521,7 +4521,7 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
+ 			val = 0;
+ 
+ 		/* If Voice Focus on SBZ, set to two channel. */
+-		if ((nid == VOICE_FOCUS) && (spec->quirk == QUIRK_SBZ)
++		if ((nid == VOICE_FOCUS) && (spec->use_pci_mmio)
+ 				&& (spec->cur_mic_type != REAR_LINE_IN)) {
+ 			if (spec->effects_switch[CRYSTAL_VOICE -
+ 						 EFFECT_START_NID]) {
+@@ -4540,7 +4540,7 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
+ 		 * For SBZ noise reduction, there's an extra command
+ 		 * to module ID 0x47. No clue why.
+ 		 */
+-		if ((nid == NOISE_REDUCTION) && (spec->quirk == QUIRK_SBZ)
++		if ((nid == NOISE_REDUCTION) && (spec->use_pci_mmio)
+ 				&& (spec->cur_mic_type != REAR_LINE_IN)) {
+ 			if (spec->effects_switch[CRYSTAL_VOICE -
+ 						 EFFECT_START_NID]) {
+@@ -5856,8 +5856,8 @@ static int ca0132_build_controls(struct hda_codec *codec)
+ 	 */
+ 	num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT;
+ 	for (i = 0; i < num_fx; i++) {
+-		/* SBZ and R3D break if Echo Cancellation is used. */
+-		if (spec->quirk == QUIRK_SBZ || spec->quirk == QUIRK_R3D) {
++		/* Desktop cards break if Echo Cancellation is used. */
++		if (spec->use_pci_mmio) {
+ 			if (i == (ECHO_CANCELLATION - IN_EFFECT_START_NID +
+ 						OUT_EFFECTS_COUNT))
+ 				continue;
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 046705b4691a..d8168aa2cef3 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -77,6 +77,7 @@ enum {
+ 	STAC_DELL_M6_BOTH,
+ 	STAC_DELL_EQ,
+ 	STAC_ALIENWARE_M17X,
++	STAC_ELO_VUPOINT_15MX,
+ 	STAC_92HD89XX_HP_FRONT_JACK,
+ 	STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
+ 	STAC_92HD73XX_ASUS_MOBO,
+@@ -1879,6 +1880,18 @@ static void stac92hd73xx_fixup_no_jd(struct hda_codec *codec,
+ 		codec->no_jack_detect = 1;
+ }
+ 
++
++static void stac92hd73xx_disable_automute(struct hda_codec *codec,
++				     const struct hda_fixup *fix, int action)
++{
++	struct sigmatel_spec *spec = codec->spec;
++
++	if (action != HDA_FIXUP_ACT_PRE_PROBE)
++		return;
++
++	spec->gen.suppress_auto_mute = 1;
++}
++
+ static const struct hda_fixup stac92hd73xx_fixups[] = {
+ 	[STAC_92HD73XX_REF] = {
+ 		.type = HDA_FIXUP_FUNC,
+@@ -1904,6 +1917,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = stac92hd73xx_fixup_alienware_m17x,
+ 	},
++	[STAC_ELO_VUPOINT_15MX] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = stac92hd73xx_disable_automute,
++	},
+ 	[STAC_92HD73XX_INTEL] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = intel_dg45id_pin_configs,
+@@ -1942,6 +1959,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
+ 	{ .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
+ 	{ .id = STAC_DELL_EQ, .name = "dell-eq" },
+ 	{ .id = STAC_ALIENWARE_M17X, .name = "alienware" },
++	{ .id = STAC_ELO_VUPOINT_15MX, .name = "elo-vupoint-15mx" },
+ 	{ .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
+ 	{}
+ };
+@@ -1991,6 +2009,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
+ 		      "Alienware M17x", STAC_ALIENWARE_M17X),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
+ 		      "Alienware M17x R3", STAC_DELL_EQ),
++	SND_PCI_QUIRK(0x1059, 0x1011,
++		      "ELO VuPoint 15MX", STAC_ELO_VUPOINT_15MX),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927,
+ 				"HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
+diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
+index 9db9a2944ef2..c1a7d376a3fe 100644
+--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
+@@ -319,10 +319,11 @@ static int q6asm_dai_open(struct snd_pcm_substream *substream)
+ 	prtd->audio_client = q6asm_audio_client_alloc(dev,
+ 				(q6asm_cb)event_handler, prtd, stream_id,
+ 				LEGACY_PCM_MODE);
+-	if (!prtd->audio_client) {
++	if (IS_ERR(prtd->audio_client)) {
+ 		pr_info("%s: Could not allocate memory\n", __func__);
++		ret = PTR_ERR(prtd->audio_client);
+ 		kfree(prtd);
+-		return -ENOMEM;
++		return ret;
+ 	}
+ 
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
+index af146bb03b4d..4c5be77c211f 100644
+--- a/tools/pci/pcitest.c
++++ b/tools/pci/pcitest.c
+@@ -23,7 +23,6 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <sys/ioctl.h>
+-#include <time.h>
+ #include <unistd.h>
+ 
+ #include <linux/pcitest.h>
+@@ -50,15 +49,13 @@ struct pci_test {
+ 
+ static int run_test(struct pci_test *test)
+ {
+-	long ret;
++	int ret = -EINVAL;
+ 	int fd;
+-	struct timespec start, end;
+-	double time;
+ 
+ 	fd = open(test->device, O_RDWR);
+ 	if (fd < 0) {
+ 		perror("can't open PCI Endpoint Test device");
+-		return fd;
++		return -ENODEV;
+ 	}
+ 
+ 	if (test->barnum >= 0 && test->barnum <= 5) {
+diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
+index 321ba92c70d2..235eef71f3d1 100644
+--- a/tools/testing/radix-tree/idr-test.c
++++ b/tools/testing/radix-tree/idr-test.c
+@@ -227,6 +227,57 @@ void idr_u32_test(int base)
+ 	idr_u32_test1(&idr, 0xffffffff);
+ }
+ 
++static inline void *idr_mk_value(unsigned long v)
++{
++	BUG_ON((long)v < 0);
++	return (void *)((v & 1) | 2 | (v << 1));
++}
++
++DEFINE_IDR(find_idr);
++
++static void *idr_throbber(void *arg)
++{
++	time_t start = time(NULL);
++	int id = *(int *)arg;
++
++	rcu_register_thread();
++	do {
++		idr_alloc(&find_idr, idr_mk_value(id), id, id + 1, GFP_KERNEL);
++		idr_remove(&find_idr, id);
++	} while (time(NULL) < start + 10);
++	rcu_unregister_thread();
++
++	return NULL;
++}
++
++void idr_find_test_1(int anchor_id, int throbber_id)
++{
++	pthread_t throbber;
++	time_t start = time(NULL);
++
++	pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
++
++	BUG_ON(idr_alloc(&find_idr, idr_mk_value(anchor_id), anchor_id,
++				anchor_id + 1, GFP_KERNEL) != anchor_id);
++
++	do {
++		int id = 0;
++		void *entry = idr_get_next(&find_idr, &id);
++		BUG_ON(entry != idr_mk_value(id));
++	} while (time(NULL) < start + 11);
++
++	pthread_join(throbber, NULL);
++
++	idr_remove(&find_idr, anchor_id);
++	BUG_ON(!idr_is_empty(&find_idr));
++}
++
++void idr_find_test(void)
++{
++	idr_find_test_1(100000, 0);
++	idr_find_test_1(0, 100000);
++}
++
+ void idr_checks(void)
+ {
+ 	unsigned long i;
+@@ -307,6 +358,7 @@ void idr_checks(void)
+ 	idr_u32_test(4);
+ 	idr_u32_test(1);
+ 	idr_u32_test(0);
++	idr_find_test();
+ }
+ 
+ #define module_init(x)
+diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
+index ca53b539aa2d..08bac6cf1bb3 100644
+--- a/tools/testing/selftests/net/forwarding/lib.sh
++++ b/tools/testing/selftests/net/forwarding/lib.sh
+@@ -251,7 +251,7 @@ lldpad_app_wait_set()
+ {
+ 	local dev=$1; shift
+ 
+-	while lldptool -t -i $dev -V APP -c app | grep -q pending; do
++	while lldptool -t -i $dev -V APP -c app | grep -Eq "pending|unknown"; do
+ 		echo "$dev: waiting for lldpad to push pending APP updates"
+ 		sleep 5
+ 	done
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index 8fdfeafaf8c0..7549d39ccaff 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -288,7 +288,7 @@ TEST_F(tls, splice_from_pipe)
+ 	ASSERT_GE(pipe(p), 0);
+ 	EXPECT_GE(write(p[1], mem_send, send_len), 0);
+ 	EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), 0);
+-	EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
++	EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
+ 	EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+ }
+ 
+@@ -322,13 +322,13 @@ TEST_F(tls, send_and_splice)
+ 
+ 	ASSERT_GE(pipe(p), 0);
+ 	EXPECT_EQ(send(self->fd, test_str, send_len2, 0), send_len2);
+-	EXPECT_NE(recv(self->cfd, buf, send_len2, 0), -1);
++	EXPECT_EQ(recv(self->cfd, buf, send_len2, MSG_WAITALL), send_len2);
+ 	EXPECT_EQ(memcmp(test_str, buf, send_len2), 0);
+ 
+ 	EXPECT_GE(write(p[1], mem_send, send_len), send_len);
+ 	EXPECT_GE(splice(p[0], NULL, self->fd, NULL, send_len, 0), send_len);
+ 
+-	EXPECT_GE(recv(self->cfd, mem_recv, send_len, 0), 0);
++	EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
+ 	EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+ }
+ 
+@@ -516,17 +516,17 @@ TEST_F(tls, recv_peek_multiple_records)
+ 	len = strlen(test_str_second) + 1;
+ 	EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+ 
+-	len = sizeof(buf);
++	len = strlen(test_str_first);
+ 	memset(buf, 0, len);
+-	EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
++	EXPECT_EQ(recv(self->cfd, buf, len, MSG_PEEK | MSG_WAITALL), len);
+ 
+ 	/* MSG_PEEK can only peek into the current record. */
+-	len = strlen(test_str_first) + 1;
++	len = strlen(test_str_first);
+ 	EXPECT_EQ(memcmp(test_str_first, buf, len), 0);
+ 
+-	len = sizeof(buf);
++	len = strlen(test_str) + 1;
+ 	memset(buf, 0, len);
+-	EXPECT_NE(recv(self->cfd, buf, len, 0), -1);
++	EXPECT_EQ(recv(self->cfd, buf, len, MSG_WAITALL), len);
+ 
+ 	/* Non-MSG_PEEK will advance strparser (and therefore record)
+ 	 * however.
+@@ -543,9 +543,9 @@ TEST_F(tls, recv_peek_multiple_records)
+ 	len = strlen(test_str_second) + 1;
+ 	EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+ 
+-	len = sizeof(buf);
++	len = strlen(test_str) + 1;
+ 	memset(buf, 0, len);
+-	EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
++	EXPECT_EQ(recv(self->cfd, buf, len, MSG_PEEK | MSG_WAITALL), len);
+ 
+ 	len = strlen(test_str) + 1;
+ 	EXPECT_EQ(memcmp(test_str, buf, len), 0);
+diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/bpf/Makefile
+new file mode 100644
+index 000000000000..dc92eb271d9a
+--- /dev/null
++++ b/tools/testing/selftests/tc-testing/bpf/Makefile
+@@ -0,0 +1,29 @@
++# SPDX-License-Identifier: GPL-2.0
++
++APIDIR := ../../../../include/uapi
++TEST_GEN_FILES = action.o
++
++top_srcdir = ../../../../..
++include ../../lib.mk
++
++CLANG ?= clang
++LLC   ?= llc
++PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
++
++ifeq ($(PROBE),)
++  CPU ?= probe
++else
++  CPU ?= generic
++endif
++
++CLANG_SYS_INCLUDES := $(shell $(CLANG) -v -E - </dev/null 2>&1 \
++	| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }')
++
++CLANG_FLAGS = -I. -I$(APIDIR) \
++	      $(CLANG_SYS_INCLUDES) \
++	      -Wno-compare-distinct-pointer-types
++
++$(OUTPUT)/%.o: %.c
++	$(CLANG) $(CLANG_FLAGS) \
++		 -O2 -target bpf -emit-llvm -c $< -o - |      \
++	$(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
+diff --git a/tools/testing/selftests/tc-testing/bpf/action.c b/tools/testing/selftests/tc-testing/bpf/action.c
+new file mode 100644
+index 000000000000..c32b99b80e19
+--- /dev/null
++++ b/tools/testing/selftests/tc-testing/bpf/action.c
+@@ -0,0 +1,23 @@
++/* SPDX-License-Identifier: GPL-2.0
++ * Copyright (c) 2018 Davide Caratti, Red Hat inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of version 2 of the GNU General Public
++ * License as published by the Free Software Foundation.
++ */
++
++#include <linux/bpf.h>
++#include <linux/pkt_cls.h>
++
++__attribute__((section("action-ok"),used)) int action_ok(struct __sk_buff *s)
++{
++	return TC_ACT_OK;
++}
++
++__attribute__((section("action-ko"),used)) int action_ko(struct __sk_buff *s)
++{
++	s->data = 0x0;
++	return TC_ACT_OK;
++}
++
++char _license[] __attribute__((section("license"),used)) = "GPL";
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
+index 6f289a49e5ec..1a9b282dd0be 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
+@@ -55,7 +55,7 @@
+             "bpf"
+         ],
+         "setup": [
+-            "printf '#include <linux/bpf.h>\nchar l[] __attribute__((section(\"license\"),used))=\"GPL\"; __attribute__((section(\"action\"),used)) int m(struct __sk_buff *s) { return 2; }' | clang -O2 -x c -c - -target bpf -o _b.o",
++            "make -C bpf",
+             [
+                 "$TC action flush action bpf",
+                 0,
+@@ -63,14 +63,14 @@
+                 255
+             ]
+         ],
+-        "cmdUnderTest": "$TC action add action bpf object-file _b.o index 667",
++        "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667",
+         "expExitCode": "0",
+         "verifyCmd": "$TC action get action bpf index 667",
+-        "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c( jited)? default-action pipe.*index 667 ref",
++        "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9]* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
+         "matchCount": "1",
+         "teardown": [
+             "$TC action flush action bpf",
+-            "rm -f _b.o"
++            "make -C bpf clean"
+         ]
+     },
+     {
+@@ -81,7 +81,7 @@
+             "bpf"
+         ],
+         "setup": [
+-            "printf '#include <linux/bpf.h>\nchar l[] __attribute__((section(\"license\"),used))=\"GPL\"; __attribute__((section(\"action\"),used)) int m(struct __sk_buff *s) { s->data = 0x0; return 2; }' | clang -O2 -x c -c - -target bpf -o _c.o",
++            "make -C bpf",
+             [
+                 "$TC action flush action bpf",
+                 0,
+@@ -89,10 +89,10 @@
+                 255
+             ]
+         ],
+-        "cmdUnderTest": "$TC action add action bpf object-file _c.o index 667",
++        "cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ko index 667",
+         "expExitCode": "255",
+         "verifyCmd": "$TC action get action bpf index 667",
+-        "matchPattern": "action order [0-9]*: bpf _c.o:\\[action\\] id [0-9].*index 667 ref",
++        "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ko\\] id [0-9].*index 667 ref",
+         "matchCount": "0",
+         "teardown": [
+             [
+@@ -101,7 +101,7 @@
+                 1,
+                 255
+             ],
+-            "rm -f _c.o"
++            "make -C bpf clean"
+         ]
+     },
+     {
+diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
+index a023d0d62b25..d651bc1501bd 100644
+--- a/tools/testing/selftests/tc-testing/tdc_config.py
++++ b/tools/testing/selftests/tc-testing/tdc_config.py
+@@ -16,7 +16,9 @@ NAMES = {
+           'DEV2': '',
+           'BATCH_FILE': './batch.txt',
+           # Name of the namespace to use
+-          'NS': 'tcut'
++          'NS': 'tcut',
++          # Directory containing eBPF test programs
++          'EBPFDIR': './bpf'
+         }
+ 
+ 
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 1344557a7085..bf330b493c1e 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -412,7 +412,8 @@ static void stage2_flush_memslot(struct kvm *kvm,
+ 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ 	do {
+ 		next = stage2_pgd_addr_end(addr, end);
+-		stage2_flush_puds(kvm, pgd, addr, next);
++		if (!stage2_pgd_none(*pgd))
++			stage2_flush_puds(kvm, pgd, addr, next);
+ 	} while (pgd++, addr = next, addr != end);
+ }
+ 


2019-11-10 16:20 Mike Pagano
2019-11-06 14:26 Mike Pagano
2019-10-29 12:04 Mike Pagano
2019-10-17 22:27 Mike Pagano
2019-10-11 17:04 Mike Pagano
2019-10-07 17:42 Mike Pagano
2019-10-05 11:42 Mike Pagano
2019-10-01 10:10 Mike Pagano
2019-09-21 17:11 Mike Pagano
2019-09-19 12:34 Mike Pagano
2019-09-19 10:04 Mike Pagano
2019-09-16 12:26 Mike Pagano
2019-09-10 11:12 Mike Pagano
2019-09-06 17:25 Mike Pagano
2019-08-29 14:15 Mike Pagano
2019-08-25 17:37 Mike Pagano
2019-08-23 22:18 Mike Pagano
2019-08-16 12:26 Mike Pagano
2019-08-16 12:13 Mike Pagano
2019-08-09 17:45 Mike Pagano
2019-08-06 19:19 Mike Pagano
2019-08-04 16:15 Mike Pagano
2019-07-31 15:09 Mike Pagano
2019-07-31 10:22 Mike Pagano
2019-07-28 16:27 Mike Pagano
2019-07-26 11:35 Mike Pagano
2019-07-21 14:41 Mike Pagano
2019-07-14 15:44 Mike Pagano
2019-07-10 11:05 Mike Pagano
2019-07-03 11:34 Mike Pagano
2019-06-25 10:53 Mike Pagano
2019-06-22 19:06 Mike Pagano
2019-06-19 17:17 Thomas Deutschmann
2019-06-17 19:22 Mike Pagano
2019-06-15 15:07 Mike Pagano
2019-06-11 12:42 Mike Pagano
2019-06-10 19:43 Mike Pagano
2019-06-09 16:19 Mike Pagano
2019-06-04 11:11 Mike Pagano
2019-05-31 15:02 Mike Pagano
2019-05-26 17:10 Mike Pagano
2019-05-22 11:02 Mike Pagano
2019-05-16 23:03 Mike Pagano
2019-05-14 21:00 Mike Pagano
2019-05-10 19:40 Mike Pagano
2019-05-08 10:06 Mike Pagano
2019-05-05 13:42 Mike Pagano
2019-05-04 18:28 Mike Pagano
2019-05-02 10:13 Mike Pagano
2019-04-27 17:36 Mike Pagano
2019-04-20 11:09 Mike Pagano
2019-04-19 19:51 Mike Pagano
2019-04-05 21:46 Mike Pagano
2019-04-03 10:59 Mike Pagano
2019-03-27 10:22 Mike Pagano
2019-03-23 20:23 Mike Pagano
2019-03-19 16:58 Mike Pagano
2019-03-13 22:08 Mike Pagano
2019-03-10 14:15 Mike Pagano
2019-03-06 19:06 Mike Pagano
2019-03-05 18:04 Mike Pagano
2019-02-27 11:23 Mike Pagano
2019-02-23 11:35 Mike Pagano
2019-02-23  0:46 Mike Pagano
2019-02-20 11:19 Mike Pagano
2019-02-16  0:42 Mike Pagano
2019-02-15 12:39 Mike Pagano
2019-02-12 20:53 Mike Pagano
2019-02-06 17:08 Mike Pagano
2019-01-31 11:28 Mike Pagano
2019-01-26 15:09 Mike Pagano
2019-01-22 23:06 Mike Pagano
2019-01-16 23:32 Mike Pagano
2019-01-13 19:29 Mike Pagano
2019-01-09 17:54 Mike Pagano
2018-12-29 18:55 Mike Pagano
2018-12-29  1:08 Mike Pagano
2018-12-21 14:58 Mike Pagano
2018-12-19 19:09 Mike Pagano
2018-12-17 11:42 Mike Pagano
2018-12-13 11:40 Mike Pagano
2018-12-08 13:17 Mike Pagano
2018-12-08 13:17 Mike Pagano
2018-12-05 20:16 Mike Pagano
2018-12-01 15:08 Mike Pagano
2018-11-27 16:16 Mike Pagano
2018-11-23 12:42 Mike Pagano
2018-11-21 12:30 Mike Pagano
2018-11-14  0:47 Mike Pagano
2018-11-14  0:47 Mike Pagano
2018-11-13 20:44 Mike Pagano
2018-11-04 16:22 Alice Ferrazzi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
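
  As a rough sketch (assuming a command-line workflow; the URL below is a
  placeholder rather than the real archive address, so substitute the
  "mbox" link above), the download-and-reply flow could look like:

  # Download the raw message as an mbox file (placeholder URL).
  curl -o reply.mbox 'https://archive.example.org/gentoo-commits/<message-id>/raw'

  # Open the mbox in mutt, press 'g' for group-reply (reply-to-all),
  # and quote the relevant parts inline rather than top-posting.
  mutt -f reply.mbox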

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1574610220.c80b40d79098833e09f11405179603a350680c8f.mpagano@gentoo \
    --to=mpagano@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
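
  As an illustrative sketch only (client support for extra mailto header
  fields varies, and the angle brackets of the Message-ID must be
  percent-encoded as %3C and %3E), such a link could have roughly this shape:

  mailto:mpagano@gentoo.org?cc=gentoo-commits@lists.gentoo.org&In-Reply-To=%3C1574610220.c80b40d79098833e09f11405179603a350680c8f.mpagano@gentoo%3E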

Be sure your reply has a Subject: header at the top and a blank line
before the message body.

This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox.