public inbox for gentoo-commits@lists.gentoo.org
From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Thu,  1 Oct 2020 12:45:36 +0000 (UTC)
Message-ID: <1601556324.310f5c1a8c792bf9601dccaa67621ff87d95d8a8.mpagano@gentoo>

commit:     310f5c1a8c792bf9601dccaa67621ff87d95d8a8
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Oct  1 12:45:24 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Oct  1 12:45:24 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=310f5c1a

Linux patch 4.19.149

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1148_linux-4.19.149.patch | 9972 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9976 insertions(+)

diff --git a/0000_README b/0000_README
index 9707ae7..e7a8587 100644
--- a/0000_README
+++ b/0000_README
@@ -631,6 +631,10 @@ Patch:  1147_linux-4.19.148.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.148
 
+Patch:  1148_linux-4.19.149.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.149
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1148_linux-4.19.149.patch b/1148_linux-4.19.149.patch
new file mode 100644
index 0000000..75c6340
--- /dev/null
+++ b/1148_linux-4.19.149.patch
@@ -0,0 +1,9972 @@
+diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
+index 68cccc4653ba3..367b58ce1bb92 100644
+--- a/Documentation/devicetree/bindings/sound/wm8994.txt
++++ b/Documentation/devicetree/bindings/sound/wm8994.txt
+@@ -14,9 +14,15 @@ Required properties:
+   - #gpio-cells : Must be 2. The first cell is the pin number and the
+     second cell is used to specify optional parameters (currently unused).
+ 
+-  - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply,
+-    SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered
+-    in Documentation/devicetree/bindings/regulator/regulator.txt
++  - power supplies for the device, as covered in
++    Documentation/devicetree/bindings/regulator/regulator.txt, depending
++    on compatible:
++    - for wlf,wm1811 and wlf,wm8958:
++      AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply,
++      DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply
++    - for wlf,wm8994:
++      AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply,
++      SPKVDD1-supply, SPKVDD2-supply
+ 
+ Optional properties:
+ 
+@@ -73,11 +79,11 @@ wm8994: codec@1a {
+ 
+ 	lineout1-se;
+ 
++	AVDD1-supply = <&regulator>;
+ 	AVDD2-supply = <&regulator>;
+ 	CPVDD-supply = <&regulator>;
+-	DBVDD1-supply = <&regulator>;
+-	DBVDD2-supply = <&regulator>;
+-	DBVDD3-supply = <&regulator>;
++	DBVDD-supply = <&regulator>;
++	DCVDD-supply = <&regulator>;
+ 	SPKVDD1-supply = <&regulator>;
+ 	SPKVDD2-supply = <&regulator>;
+ };
+diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
+index 70e180e6b93dc..9f3e5dc311840 100644
+--- a/Documentation/driver-api/libata.rst
++++ b/Documentation/driver-api/libata.rst
+@@ -250,7 +250,7 @@ High-level taskfile hooks
+ 
+ ::
+ 
+-    void (*qc_prep) (struct ata_queued_cmd *qc);
++    enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
+     int (*qc_issue) (struct ata_queued_cmd *qc);
+ 
+ 
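
Sketch, not part of the patch: the signature change lets a driver's qc_prep
hook report a setup failure instead of having to BUG(); the core folds the
return value into qc->err_mask (see the ata_qc_issue() hunk further down).
A hypothetical driver hook under the new contract, with foo_fill_sg() as an
invented helper:

	static enum ata_completion_errors foo_qc_prep(struct ata_queued_cmd *qc)
	{
		/* Nothing to build for commands without a DMA mapping. */
		if (!(qc->flags & ATA_QCFLAG_DMAMAP))
			return AC_ERR_OK;

		if (!foo_fill_sg(qc))		/* invented helper */
			return AC_ERR_INVALID;	/* aborts via the normal error path */

		return AC_ERR_OK;
	}
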
+diff --git a/Makefile b/Makefile
+index 3ffd5b03e6ddf..3ff5cf33ef55c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 148
++SUBLEVEL = 149
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
+index 7d2ca035d6c8f..11d4ff9f3e4df 100644
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -216,7 +216,7 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+ 	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+ }
+ 
+-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+ }
+@@ -248,16 +248,21 @@ static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+ 	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+ }
+ 
+-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
++static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+ }
+ 
+-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+ }
+ 
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index a4d4a28fe07df..d23ab9ec130a3 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -115,6 +115,8 @@ static int save_trace(struct stackframe *frame, void *d)
+ 		return 0;
+ 
+ 	regs = (struct pt_regs *)frame->sp;
++	if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
++		return 0;
+ 
+ 	trace->entries[trace->nr_entries++] = regs->ARM_pc;
+ 
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index badf02ca36938..aec533168f046 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -67,14 +67,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+ 
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
++	unsigned long end = frame + 4 + sizeof(struct pt_regs);
++
+ #ifdef CONFIG_KALLSYMS
+ 	printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+ #else
+ 	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+ 
+-	if (in_entry_text(from))
+-		dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
++	if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
++		dump_mem("", "Exception stack", frame + 4, end);
+ }
+ 
+ void dump_backtrace_stm(u32 *stack, u32 instruction)
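
Sketch, not part of the patch: both ARM hunks enforce the same bound before
touching a struct pt_regs found on the stack, namely that the record must
end at or below the top of the current THREAD_SIZE-aligned kernel stack.
Condensed, the check is:

	struct pt_regs *regs = (struct pt_regs *)frame->sp;
	unsigned long top = ALIGN(frame->sp, THREAD_SIZE);	/* top of this stack */

	if ((unsigned long)(regs + 1) > top)
		return 0;	/* pt_regs would run past the stack; don't read it */
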
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index 778cb4f868d9b..669c960dd069c 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -303,7 +303,7 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+ 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+ }
+ 
+-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
++static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
+ {
+ 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+ }
+@@ -311,7 +311,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+ static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+ {
+ 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+-		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
++		kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
+ }
+ 
+ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+@@ -340,6 +340,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
+ 	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
+ }
+ 
++static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
++{
++	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
++}
++
+ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index ac3126aba0368..de6fa9b4abfa0 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -155,11 +155,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+ 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+ 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+ 	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+-	/* Linux doesn't care about the EL3 */
+ 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
++	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
++	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
++	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ 	ARM64_FTR_END,
+ };
+ 
+@@ -301,7 +300,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = {
+ };
+ 
+ static const struct arm64_ftr_bits ftr_id_dfr0[] = {
+-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
++	/* [31:28] TraceFilt */
+ 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
+ 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
+ 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
+@@ -671,9 +670,6 @@ void update_cpu_features(int cpu,
+ 	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+ 				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
+ 
+-	/*
+-	 * EL3 is not our concern.
+-	 */
+ 	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
+ 				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
+ 	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index f146bff53edf9..15312e429b7d1 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -430,7 +430,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ 			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+ 			kvm_vcpu_dabt_isvalid(vcpu) &&
+ 			!kvm_vcpu_dabt_isextabt(vcpu) &&
+-			!kvm_vcpu_dabt_iss1tw(vcpu);
++			!kvm_vcpu_abt_iss1tw(vcpu);
+ 
+ 		if (valid) {
+ 			int ret = __vgic_v2_perform_cpuif_access(vcpu);
+diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
+index 96810d91da2bd..4a25ce6a1823d 100644
+--- a/arch/m68k/q40/config.c
++++ b/arch/m68k/q40/config.c
+@@ -273,6 +273,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll)
+ {
+ 	int tmp = Q40_RTC_CTRL;
+ 
++	pll->pll_ctrl = 0;
+ 	pll->pll_value = tmp & Q40_RTC_PLL_MASK;
+ 	if (tmp & Q40_RTC_PLL_SIGN)
+ 		pll->pll_value = -pll->pll_value;
+diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
+index a45af3de075d9..d43e4ab20b238 100644
+--- a/arch/mips/include/asm/cpu-type.h
++++ b/arch/mips/include/asm/cpu-type.h
+@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
+ 	case CPU_34K:
+ 	case CPU_1004K:
+ 	case CPU_74K:
++	case CPU_1074K:
+ 	case CPU_M14KC:
+ 	case CPU_M14KEC:
+ 	case CPU_INTERAPTIV:
+diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
+index a790d5cf6ea37..684e8ae00d160 100644
+--- a/arch/powerpc/include/asm/kvm_asm.h
++++ b/arch/powerpc/include/asm/kvm_asm.h
+@@ -163,4 +163,7 @@
+ 
+ #define KVM_INST_FETCH_FAILED	-1
+ 
++/* Extract PO and XOP opcode fields */
++#define PO_XOP_OPCODE_MASK 0xfc0007fe
++
+ #endif /* __POWERPC_KVM_ASM_H__ */
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index fe3c6f3bd3b62..d123cba0992d0 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -502,7 +502,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
+ 	rc = 1;
+ 	if (pe->state & EEH_PE_ISOLATED) {
+ 		pe->check_count++;
+-		if (pe->check_count % EEH_MAX_FAILS == 0) {
++		if (pe->check_count == EEH_MAX_FAILS) {
+ 			dn = pci_device_to_OF_node(dev);
+ 			if (dn)
+ 				location = of_get_property(dn, "ibm,loc-code",
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index d5f351f02c153..7781f0168ce8c 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -430,11 +430,11 @@ out:
+ #ifdef CONFIG_PPC_BOOK3S_64
+ 	BUG_ON(get_paca()->in_nmi == 0);
+ 	if (get_paca()->in_nmi > 1)
+-		nmi_panic(regs, "Unrecoverable nested System Reset");
++		die("Unrecoverable nested System Reset", regs, SIGABRT);
+ #endif
+ 	/* Must die if the interrupt is not recoverable */
+ 	if (!(regs->msr & MSR_RI))
+-		nmi_panic(regs, "Unrecoverable System Reset");
++		die("Unrecoverable System Reset", regs, SIGABRT);
+ 
+ 	if (!nested)
+ 		nmi_exit();
+@@ -775,7 +775,7 @@ void machine_check_exception(struct pt_regs *regs)
+ 
+ 	/* Must die if the interrupt is not recoverable */
+ 	if (!(regs->msr & MSR_RI))
+-		nmi_panic(regs, "Unrecoverable Machine check");
++		die("Unrecoverable Machine check", regs, SIGBUS);
+ 
+ 	return;
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
+index 31cd0f327c8a2..e7fd60cf97804 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm.c
++++ b/arch/powerpc/kvm/book3s_hv_tm.c
+@@ -6,6 +6,8 @@
+  * published by the Free Software Foundation.
+  */
+ 
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
+ #include <linux/kvm_host.h>
+ 
+ #include <asm/kvm_ppc.h>
+@@ -47,7 +49,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ 	u64 newmsr, bescr;
+ 	int ra, rs;
+ 
+-	switch (instr & 0xfc0007ff) {
++	/*
++	 * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
++	 * in these instructions, so masking bit 31 out doesn't change these
++	 * instructions. For treclaim., tsr., and trechkpt. instructions if bit
++	 * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
++	 * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
++	 * 31 is an acceptable way to handle these invalid forms that have
++	 * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
++	 * bit 31 set) can generate a softpatch interrupt. Hence both forms
++	 * are handled below for these instructions so they behave the same way.
++	 */
++	switch (instr & PO_XOP_OPCODE_MASK) {
+ 	case PPC_INST_RFID:
+ 		/* XXX do we need to check for PR=0 here? */
+ 		newmsr = vcpu->arch.shregs.srr1;
+@@ -108,7 +121,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ 		vcpu->arch.shregs.msr = newmsr;
+ 		return RESUME_GUEST;
+ 
+-	case PPC_INST_TSR:
++	/* ignore bit 31, see comment above */
++	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
+ 		/* check for PR=1 and arch 2.06 bit set in PCR */
+ 		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
+ 			/* generate an illegal instruction interrupt */
+@@ -143,7 +157,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ 		vcpu->arch.shregs.msr = msr;
+ 		return RESUME_GUEST;
+ 
+-	case PPC_INST_TRECLAIM:
++	/* ignore bit 31, see comment above */
++	case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
+ 		/* check for TM disabled in the HFSCR or MSR */
+ 		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
+ 			/* generate an illegal instruction interrupt */
+@@ -179,7 +194,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ 		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
+ 		return RESUME_GUEST;
+ 
+-	case PPC_INST_TRECHKPT:
++	/* ignore bit 31, see comment above */
++	case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
+ 		/* XXX do we need to check for PR=0 here? */
+ 		/* check for TM disabled in the HFSCR or MSR */
+ 		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
+@@ -211,6 +227,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ 	}
+ 
+ 	/* What should we do here? We didn't recognize the instruction */
+-	WARN_ON_ONCE(1);
++	kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
++	pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);
++
+ 	return RESUME_GUEST;
+ }
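
Sketch, not part of the patch: with the old mask 0xfc0007ff, bit 31 (the
least significant bit in IBM numbering) took part in the comparison, so the
per-ISA invalid forms with bit 31 = 0 fell through to the default case.
PO_XOP_OPCODE_MASK (0xfc0007fe) clears that bit, so both encodings reduce
to the same case label, which a compile-time check can illustrate:

	/* Holds by construction: the mask has bit 31 clear, so the two
	 * forms of tsr. collapse onto one switch case. */
	BUILD_BUG_ON((PPC_INST_TSR & PO_XOP_OPCODE_MASK) !=
		     ((PPC_INST_TSR & ~1UL) & PO_XOP_OPCODE_MASK));
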
+diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+index 3cf5863bc06e8..3c7ca2fa19597 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
++++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+@@ -26,7 +26,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ 	u64 newmsr, msr, bescr;
+ 	int rs;
+ 
+-	switch (instr & 0xfc0007ff) {
++	/*
++	 * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
++	 * in these instructions, so masking bit 31 out doesn't change these
++	 * instructions. For the tsr. instruction if bit 31 = 0 then it is per
++	 * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid
++	 * Forms, informs specifically that ignoring bit 31 is an acceptable way
++	 * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
++	 * for emulation purposes both forms (w/ and wo/ bit 31 set) can
++	 * generate a softpatch interrupt. Hence both forms are handled below
++	 * for tsr. to make them behave the same way.
++	 */
++	switch (instr & PO_XOP_OPCODE_MASK) {
+ 	case PPC_INST_RFID:
+ 		/* XXX do we need to check for PR=0 here? */
+ 		newmsr = vcpu->arch.shregs.srr1;
+@@ -76,7 +87,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ 		vcpu->arch.shregs.msr = newmsr;
+ 		return 1;
+ 
+-	case PPC_INST_TSR:
++	/* ignore bit 31, see comment above */
++	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
+ 		/* we know the MSR has the TS field = S (0b01) here */
+ 		msr = vcpu->arch.shregs.msr;
+ 		/* check for PR=1 and arch 2.06 bit set in PCR */
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index c6dcc5291f972..02fbc175142e2 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -63,4 +63,11 @@ do {									\
+  * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+  */
+ #define MCOUNT_INSN_SIZE 8
++
++#ifndef __ASSEMBLY__
++struct dyn_ftrace;
++int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
++#define ftrace_init_nop ftrace_init_nop
++#endif
++
+ #endif
+diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
+index 6d39f64e4dce4..fa8530f05ed4f 100644
+--- a/arch/riscv/kernel/ftrace.c
++++ b/arch/riscv/kernel/ftrace.c
+@@ -88,6 +88,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ 	return __ftrace_modify_call(rec->ip, addr, false);
+ }
+ 
++
++/*
++ * This is called early on, and isn't wrapped by
++ * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
++ * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
++ * just directly poke the text, but it's simpler to just take the lock
++ * ourselves.
++ */
++int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
++{
++	int out;
++
++	ftrace_arch_code_modify_prepare();
++	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++	ftrace_arch_code_modify_post_process();
++
++	return out;
++}
++
+ int ftrace_update_ftrace_func(ftrace_func_t func)
+ {
+ 	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 74a296cea21cc..0e6d01225a670 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -1377,8 +1377,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
+ 		idx = aux->empty_mark + 1;
+ 		for (i = 0; i < range_scan; i++, idx++) {
+ 			te = aux_sdb_trailer(aux, idx);
+-			te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
+-			te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
++			te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
++				       SDB_TE_ALERT_REQ_MASK);
+ 			te->overflow = 0;
+ 		}
+ 		/* Save the position of empty SDBs */
+@@ -1425,8 +1425,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
+ 	te = aux_sdb_trailer(aux, alert_index);
+ 	do {
+ 		orig_flags = te->flags;
+-		orig_overflow = te->overflow;
+-		*overflow = orig_overflow;
++		*overflow = orig_overflow = te->overflow;
+ 		if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
+ 			/*
+ 			 * SDB is already set by hardware.
+@@ -1660,7 +1659,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
+ 	}
+ 
+ 	/* Allocate aux_buffer struct for the event */
+-	aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
++	aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL);
+ 	if (!aux)
+ 		goto no_aux;
+ 	sfb = &aux->sfb;
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 5f85e0dfa66d1..4bda9055daefa 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -537,7 +537,7 @@ static struct notifier_block kdump_mem_nb = {
+ /*
+  * Make sure that the area behind memory_end is protected
+  */
+-static void reserve_memory_end(void)
++static void __init reserve_memory_end(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ 	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
+@@ -555,7 +555,7 @@ static void reserve_memory_end(void)
+ /*
+  * Make sure that oldmem, where the dump is stored, is protected
+  */
+-static void reserve_oldmem(void)
++static void __init reserve_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ 	if (OLDMEM_BASE)
+@@ -567,7 +567,7 @@ static void reserve_oldmem(void)
+ /*
+  * Make sure that oldmem, where the dump is stored, is protected
+  */
+-static void remove_oldmem(void)
++static void __init remove_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ 	if (OLDMEM_BASE)
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index e3f70c60e8ccd..62f9903544b59 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -330,7 +330,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+  * combination with microcode which triggers a CPU buffer flush when the
+  * instruction is executed.
+  */
+-static inline void mds_clear_cpu_buffers(void)
++static __always_inline void mds_clear_cpu_buffers(void)
+ {
+ 	static const u16 ds = __KERNEL_DS;
+ 
+@@ -351,7 +351,7 @@ static inline void mds_clear_cpu_buffers(void)
+  *
+  * Clear CPU buffers if the corresponding static key is enabled
+  */
+-static inline void mds_user_clear_cpu_buffers(void)
++static __always_inline void mds_user_clear_cpu_buffers(void)
+ {
+ 	if (static_branch_likely(&mds_user_clear))
+ 		mds_clear_cpu_buffers();
+diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
+index 19b137f1b3beb..2ff9b98812b76 100644
+--- a/arch/x86/include/asm/pkeys.h
++++ b/arch/x86/include/asm/pkeys.h
+@@ -4,6 +4,11 @@
+ 
+ #define ARCH_DEFAULT_PKEY	0
+ 
++/*
++ * If more than 16 keys are ever supported, a thorough audit
++ * will be necessary to ensure that the types that store key
++ * numbers and masks have sufficient capacity.
++ */
+ #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
+ 
+ extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 95e21c4380124..15234885e60bc 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2250,6 +2250,7 @@ static inline void __init check_timer(void)
+ 	legacy_pic->init(0);
+ 	legacy_pic->make_irq(0);
+ 	apic_write(APIC_LVT0, APIC_DM_EXTINT);
++	legacy_pic->unmask(0);
+ 
+ 	unlock_ExtINT_logic();
+ 
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 4b900035f2202..601a5da1d196a 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -907,8 +907,6 @@ const void *get_xsave_field_ptr(int xsave_state)
+ 
+ #ifdef CONFIG_ARCH_HAS_PKEYS
+ 
+-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
+-#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
+ /*
+  * This will go out and modify PKRU register to set the access
+  * rights for @pkey to @init_val.
+@@ -927,6 +925,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+ 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+ 		return -EINVAL;
+ 
++	/*
++	 * This code should only be called with valid 'pkey'
++	 * values originating from in-kernel users.  Complain
++	 * if a bad value is observed.
++	 */
++	WARN_ON_ONCE(pkey >= arch_max_pkey());
++
+ 	/* Set the bits we need in PKRU:  */
+ 	if (init_val & PKEY_DISABLE_ACCESS)
+ 		new_pkru_bits |= PKRU_AD_BIT;
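
Sketch, not part of the patch: the WARN_ON_ONCE() pairs with the new
comment in pkeys.h. PKRU packs two bits (access-disable, write-disable)
per key, so a 32-bit register covers exactly arch_max_pkey() == 16 keys;
the function goes on to shift the bits into the key's slot, roughly:

	if (init_val & PKEY_DISABLE_WRITE)
		new_pkru_bits |= PKRU_WD_BIT;

	/* 2 bits per key: pkey 15 lands in bits 30-31 of PKRU. */
	new_pkru_bits <<= pkey * PKRU_BITS_PER_PKEY;
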
+diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
+index cb41b036eb264..7e0dc8c7da2c0 100644
+--- a/arch/x86/kvm/mmutrace.h
++++ b/arch/x86/kvm/mmutrace.h
+@@ -339,7 +339,7 @@ TRACE_EVENT(
+ 		/* These depend on page entry type, so compute them now.  */
+ 		__field(bool, r)
+ 		__field(bool, x)
+-		__field(u8, u)
++		__field(signed char, u)
+ 	),
+ 
+ 	TP_fast_assign(
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2aafb6c791345..cb09a0ec87500 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3942,6 +3942,12 @@ static int iret_interception(struct vcpu_svm *svm)
+ 	return 1;
+ }
+ 
++static int invd_interception(struct vcpu_svm *svm)
++{
++	/* Treat an INVD instruction as a NOP and just skip it. */
++	return kvm_skip_emulated_instruction(&svm->vcpu);
++}
++
+ static int invlpg_interception(struct vcpu_svm *svm)
+ {
+ 	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+@@ -4831,7 +4837,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
+ 	[SVM_EXIT_RDPMC]			= rdpmc_interception,
+ 	[SVM_EXIT_CPUID]			= cpuid_interception,
+ 	[SVM_EXIT_IRET]                         = iret_interception,
+-	[SVM_EXIT_INVD]                         = emulate_on_interception,
++	[SVM_EXIT_INVD]                         = invd_interception,
+ 	[SVM_EXIT_PAUSE]			= pause_interception,
+ 	[SVM_EXIT_HLT]				= halt_interception,
+ 	[SVM_EXIT_INVLPG]			= invlpg_interception,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 430a4bc66f604..dd182228be714 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -858,6 +858,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
+ 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+ 				   X86_CR4_SMEP;
++	unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
+ 
+ 	if (kvm_valid_cr4(vcpu, cr4))
+ 		return 1;
+@@ -885,7 +886,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ 	if (kvm_x86_ops->set_cr4(vcpu, cr4))
+ 		return 1;
+ 
+-	if (((cr4 ^ old_cr4) & pdptr_bits) ||
++	if (((cr4 ^ old_cr4) & mmu_role_bits) ||
+ 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
+ 		kvm_mmu_reset_context(vcpu);
+ 
+@@ -4668,10 +4669,13 @@ set_identity_unlock:
+ 		r = -EFAULT;
+ 		if (copy_from_user(&u.ps, argp, sizeof u.ps))
+ 			goto out;
++		mutex_lock(&kvm->lock);
+ 		r = -ENXIO;
+ 		if (!kvm->arch.vpit)
+-			goto out;
++			goto set_pit_out;
+ 		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
++set_pit_out:
++		mutex_unlock(&kvm->lock);
+ 		break;
+ 	}
+ 	case KVM_GET_PIT2: {
+@@ -4691,10 +4695,13 @@ set_identity_unlock:
+ 		r = -EFAULT;
+ 		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
+ 			goto out;
++		mutex_lock(&kvm->lock);
+ 		r = -ENXIO;
+ 		if (!kvm->arch.vpit)
+-			goto out;
++			goto set_pit2_out;
+ 		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
++set_pit2_out:
++		mutex_unlock(&kvm->lock);
+ 		break;
+ 	}
+ 	case KVM_REINJECT_CONTROL: {
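
Sketch, not part of the patch: both PIT ioctls now take kvm->lock before
testing kvm->arch.vpit, so the PIT cannot be destroyed between the check
and the kvm_vm_ioctl_set_pit*() call. The shape of the fix:

	mutex_lock(&kvm->lock);
	r = -ENXIO;
	if (!kvm->arch.vpit)
		goto unlock;			/* checked under the lock */
	r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
unlock:
	mutex_unlock(&kvm->lock);
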
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index 7077b3e282414..40dbbd8f1fe41 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -139,7 +139,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
+ 	 */
+ 	if (size < 8) {
+ 		if (!IS_ALIGNED(dest, 4) || size != 4)
+-			clean_cache_range(dst, 1);
++			clean_cache_range(dst, size);
+ 	} else {
+ 		if (!IS_ALIGNED(dest, 8)) {
+ 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 49e16f0090957..9415a0041aaf7 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1080,29 +1080,21 @@ void acpi_ec_dispatch_gpe(void)
+ /* --------------------------------------------------------------------------
+                                 Event Management
+    -------------------------------------------------------------------------- */
+-static struct acpi_ec_query_handler *
+-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
+-{
+-	if (handler)
+-		kref_get(&handler->kref);
+-	return handler;
+-}
+-
+ static struct acpi_ec_query_handler *
+ acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
+ {
+ 	struct acpi_ec_query_handler *handler;
+-	bool found = false;
+ 
+ 	mutex_lock(&ec->mutex);
+ 	list_for_each_entry(handler, &ec->list, node) {
+ 		if (value == handler->query_bit) {
+-			found = true;
+-			break;
++			kref_get(&handler->kref);
++			mutex_unlock(&ec->mutex);
++			return handler;
+ 		}
+ 	}
+ 	mutex_unlock(&ec->mutex);
+-	return found ? acpi_ec_get_query_handler(handler) : NULL;
++	return NULL;
+ }
+ 
+ static void acpi_ec_query_handler_release(struct kref *kref)
+diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
+index 583e366be7e23..505f8c3168188 100644
+--- a/drivers/ata/acard-ahci.c
++++ b/drivers/ata/acard-ahci.c
+@@ -72,7 +72,7 @@ struct acard_sg {
+ 	__le32			size;	 /* bit 31 (EOT) max==0x10000 (64k) */
+ };
+ 
+-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+ static int acard_ahci_port_start(struct ata_port *ap);
+ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+@@ -257,7 +257,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
+ 	return si;
+ }
+ 
+-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct ahci_port_priv *pp = ap->private_data;
+@@ -295,6 +295,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+ 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+ 
+ 	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
++
++	return AC_ERR_OK;
+ }
+ 
+ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 2bdb250a2142c..f1153e7ba3b3a 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -73,7 +73,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+ static int ahci_port_start(struct ata_port *ap);
+ static void ahci_port_stop(struct ata_port *ap);
+-static void ahci_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
+ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
+ static void ahci_freeze(struct ata_port *ap);
+ static void ahci_thaw(struct ata_port *ap);
+@@ -1640,7 +1640,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
+ 		return sata_pmp_qc_defer_cmd_switch(qc);
+ }
+ 
+-static void ahci_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct ahci_port_priv *pp = ap->private_data;
+@@ -1676,6 +1676,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
+ 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
+ 
+ 	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
++
++	return AC_ERR_OK;
+ }
+ 
+ static void ahci_fbs_dec_intr(struct ata_port *ap)
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index fead7243930c0..db1d86af21b4d 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4996,7 +4996,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
+ 	return ATA_DEFER_LINK;
+ }
+ 
+-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
++enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
++{
++	return AC_ERR_OK;
++}
+ 
+ /**
+  *	ata_sg_init - Associate command with scatter-gather table.
+@@ -5483,7 +5486,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
+ 		return;
+ 	}
+ 
+-	ap->ops->qc_prep(qc);
++	qc->err_mask |= ap->ops->qc_prep(qc);
++	if (unlikely(qc->err_mask))
++		goto err;
+ 	trace_ata_qc_issue(qc);
+ 	qc->err_mask |= ap->ops->qc_issue(qc);
+ 	if (unlikely(qc->err_mask))
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 873cc09060551..7484ffdabd543 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -2695,12 +2695,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
+  *	LOCKING:
+  *	spin_lock_irqsave(host lock)
+  */
+-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
++enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+-		return;
++		return AC_ERR_OK;
+ 
+ 	ata_bmdma_fill_sg(qc);
++
++	return AC_ERR_OK;
+ }
+ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+ 
+@@ -2713,12 +2715,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
+  *	LOCKING:
+  *	spin_lock_irqsave(host lock)
+  */
+-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
++enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+-		return;
++		return AC_ERR_OK;
+ 
+ 	ata_bmdma_fill_sg_dumb(qc);
++
++	return AC_ERR_OK;
+ }
+ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
+ 
+diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
+index 9588e685d994c..765b99319d3cd 100644
+--- a/drivers/ata/pata_macio.c
++++ b/drivers/ata/pata_macio.c
+@@ -507,7 +507,7 @@ static int pata_macio_cable_detect(struct ata_port *ap)
+ 	return ATA_CBL_PATA40;
+ }
+ 
+-static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
+ 	struct ata_port *ap = qc->ap;
+@@ -520,7 +520,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ 		   __func__, qc, qc->flags, write, qc->dev->devno);
+ 
+ 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+-		return;
++		return AC_ERR_OK;
+ 
+ 	table = (struct dbdma_cmd *) priv->dma_table_cpu;
+ 
+@@ -565,6 +565,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ 	table->command = cpu_to_le16(DBDMA_STOP);
+ 
+ 	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
++
++	return AC_ERR_OK;
+ }
+ 
+ 
+diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
+index e8b6a2e464c98..5b1458ca986b6 100644
+--- a/drivers/ata/pata_pxa.c
++++ b/drivers/ata/pata_pxa.c
+@@ -58,25 +58,27 @@ static void pxa_ata_dma_irq(void *d)
+ /*
+  * Prepare taskfile for submission.
+  */
+-static void pxa_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct pata_pxa_data *pd = qc->ap->private_data;
+ 	struct dma_async_tx_descriptor *tx;
+ 	enum dma_transfer_direction dir;
+ 
+ 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+-		return;
++		return AC_ERR_OK;
+ 
+ 	dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
+ 	tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
+ 				     DMA_PREP_INTERRUPT);
+ 	if (!tx) {
+ 		ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
+-		return;
++		return AC_ERR_OK;
+ 	}
+ 	tx->callback = pxa_ata_dma_irq;
+ 	tx->callback_param = pd;
+ 	pd->dma_cookie = dmaengine_submit(tx);
++
++	return AC_ERR_OK;
+ }
+ 
+ /*
+diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
+index f1e873a37465e..096b4771b19da 100644
+--- a/drivers/ata/pdc_adma.c
++++ b/drivers/ata/pdc_adma.c
+@@ -132,7 +132,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
+ 				const struct pci_device_id *ent);
+ static int adma_port_start(struct ata_port *ap);
+ static void adma_port_stop(struct ata_port *ap);
+-static void adma_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
+ static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
+ static void adma_freeze(struct ata_port *ap);
+@@ -311,7 +311,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
+ 	return i;
+ }
+ 
+-static void adma_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct adma_port_priv *pp = qc->ap->private_data;
+ 	u8  *buf = pp->pkt;
+@@ -322,7 +322,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
+ 
+ 	adma_enter_reg_mode(qc->ap);
+ 	if (qc->tf.protocol != ATA_PROT_DMA)
+-		return;
++		return AC_ERR_OK;
+ 
+ 	buf[i++] = 0;	/* Response flags */
+ 	buf[i++] = 0;	/* reserved */
+@@ -387,6 +387,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
+ 			printk("%s\n", obuf);
+ 	}
+ #endif
++	return AC_ERR_OK;
+ }
+ 
+ static inline void adma_packet_start(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
+index ae52a45fab5f7..8b3be0ff91cb4 100644
+--- a/drivers/ata/sata_fsl.c
++++ b/drivers/ata/sata_fsl.c
+@@ -507,7 +507,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
+ 	return num_prde;
+ }
+ 
+-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct sata_fsl_port_priv *pp = ap->private_data;
+@@ -553,6 +553,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+ 
+ 	VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
+ 		desc_info, ttl_dwords, num_prde);
++
++	return AC_ERR_OK;
+ }
+ 
+ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
+index 9b6d7930d1c79..6c7ddc037fce9 100644
+--- a/drivers/ata/sata_inic162x.c
++++ b/drivers/ata/sata_inic162x.c
+@@ -472,7 +472,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
+ 	prd[-1].flags |= PRD_END;
+ }
+ 
+-static void inic_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct inic_port_priv *pp = qc->ap->private_data;
+ 	struct inic_pkt *pkt = pp->pkt;
+@@ -532,6 +532,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
+ 		inic_fill_sg(prd, qc);
+ 
+ 	pp->cpb_tbl[0] = pp->pkt_dma;
++
++	return AC_ERR_OK;
+ }
+ 
+ static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index ab2e9f62ddc1a..2910b22fac117 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -605,8 +605,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
+ static int mv_port_start(struct ata_port *ap);
+ static void mv_port_stop(struct ata_port *ap);
+ static int mv_qc_defer(struct ata_queued_cmd *qc);
+-static void mv_qc_prep(struct ata_queued_cmd *qc);
+-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
++static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
+ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
+ static int mv_hardreset(struct ata_link *link, unsigned int *class,
+ 			unsigned long deadline);
+@@ -2044,7 +2044,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
+  *      LOCKING:
+  *      Inherited from caller.
+  */
+-static void mv_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct mv_port_priv *pp = ap->private_data;
+@@ -2056,15 +2056,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ 	switch (tf->protocol) {
+ 	case ATA_PROT_DMA:
+ 		if (tf->command == ATA_CMD_DSM)
+-			return;
++			return AC_ERR_OK;
+ 		/* fall-thru */
+ 	case ATA_PROT_NCQ:
+ 		break;	/* continue below */
+ 	case ATA_PROT_PIO:
+ 		mv_rw_multi_errata_sata24(qc);
+-		return;
++		return AC_ERR_OK;
+ 	default:
+-		return;
++		return AC_ERR_OK;
+ 	}
+ 
+ 	/* Fill in command request block
+@@ -2111,12 +2111,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
+ 		 * of which are defined/used by Linux.  If we get here, this
+ 		 * driver needs work.
+-		 *
+-		 * FIXME: modify libata to give qc_prep a return value and
+-		 * return error here.
+ 		 */
+-		BUG_ON(tf->command);
+-		break;
++		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
++				tf->command);
++		return AC_ERR_INVALID;
+ 	}
+ 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
+ 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
+@@ -2129,8 +2127,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
+ 
+ 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+-		return;
++		return AC_ERR_OK;
+ 	mv_fill_sg(qc);
++
++	return AC_ERR_OK;
+ }
+ 
+ /**
+@@ -2145,7 +2145,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+  *      LOCKING:
+  *      Inherited from caller.
+  */
+-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
++static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct mv_port_priv *pp = ap->private_data;
+@@ -2156,9 +2156,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+ 
+ 	if ((tf->protocol != ATA_PROT_DMA) &&
+ 	    (tf->protocol != ATA_PROT_NCQ))
+-		return;
++		return AC_ERR_OK;
+ 	if (tf->command == ATA_CMD_DSM)
+-		return;  /* use bmdma for this */
++		return AC_ERR_OK;  /* use bmdma for this */
+ 
+ 	/* Fill in Gen IIE command request block */
+ 	if (!(tf->flags & ATA_TFLAG_WRITE))
+@@ -2199,8 +2199,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+ 		);
+ 
+ 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+-		return;
++		return AC_ERR_OK;
+ 	mv_fill_sg(qc);
++
++	return AC_ERR_OK;
+ }
+ 
+ /**
+diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
+index 761577d57ff37..798d549435cc1 100644
+--- a/drivers/ata/sata_nv.c
++++ b/drivers/ata/sata_nv.c
+@@ -313,7 +313,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
+ static void nv_ck804_thaw(struct ata_port *ap);
+ static int nv_adma_slave_config(struct scsi_device *sdev);
+ static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
+-static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
+ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
+ static void nv_adma_irq_clear(struct ata_port *ap);
+@@ -335,7 +335,7 @@ static void nv_mcp55_freeze(struct ata_port *ap);
+ static void nv_swncq_error_handler(struct ata_port *ap);
+ static int nv_swncq_slave_config(struct scsi_device *sdev);
+ static int nv_swncq_port_start(struct ata_port *ap);
+-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
+ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
+ static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
+@@ -1365,7 +1365,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
+ 	return 1;
+ }
+ 
+-static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct nv_adma_port_priv *pp = qc->ap->private_data;
+ 	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
+@@ -1377,7 +1377,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+ 			(qc->flags & ATA_QCFLAG_DMAMAP));
+ 		nv_adma_register_mode(qc->ap);
+ 		ata_bmdma_qc_prep(qc);
+-		return;
++		return AC_ERR_OK;
+ 	}
+ 
+ 	cpb->resp_flags = NV_CPB_RESP_DONE;
+@@ -1409,6 +1409,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+ 	cpb->ctl_flags = ctl_flags;
+ 	wmb();
+ 	cpb->resp_flags = 0;
++
++	return AC_ERR_OK;
+ }
+ 
+ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
+@@ -1972,17 +1974,19 @@ static int nv_swncq_port_start(struct ata_port *ap)
+ 	return 0;
+ }
+ 
+-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	if (qc->tf.protocol != ATA_PROT_NCQ) {
+ 		ata_bmdma_qc_prep(qc);
+-		return;
++		return AC_ERR_OK;
+ 	}
+ 
+ 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+-		return;
++		return AC_ERR_OK;
+ 
+ 	nv_swncq_fill_sg(qc);
++
++	return AC_ERR_OK;
+ }
+ 
+ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
+index d032bf657f709..29d2bb465f60d 100644
+--- a/drivers/ata/sata_promise.c
++++ b/drivers/ata/sata_promise.c
+@@ -155,7 +155,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va
+ static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static int pdc_common_port_start(struct ata_port *ap);
+ static int pdc_sata_port_start(struct ata_port *ap);
+-static void pdc_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
+ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+ static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
+@@ -649,7 +649,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
+ 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ }
+ 
+-static void pdc_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct pdc_port_priv *pp = qc->ap->private_data;
+ 	unsigned int i;
+@@ -681,6 +681,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
+ 	default:
+ 		break;
+ 	}
++
++	return AC_ERR_OK;
+ }
+ 
+ static int pdc_is_sataii_tx4(unsigned long flags)
+diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
+index 1fe941688e95d..a66d10628c183 100644
+--- a/drivers/ata/sata_qstor.c
++++ b/drivers/ata/sata_qstor.c
+@@ -116,7 +116,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static int qs_port_start(struct ata_port *ap);
+ static void qs_host_stop(struct ata_host *host);
+-static void qs_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
+ static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
+ static void qs_freeze(struct ata_port *ap);
+@@ -276,7 +276,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
+ 	return si;
+ }
+ 
+-static void qs_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct qs_port_priv *pp = qc->ap->private_data;
+ 	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
+@@ -288,7 +288,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
+ 
+ 	qs_enter_reg_mode(qc->ap);
+ 	if (qc->tf.protocol != ATA_PROT_DMA)
+-		return;
++		return AC_ERR_OK;
+ 
+ 	nelem = qs_fill_sg(qc);
+ 
+@@ -311,6 +311,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
+ 
+ 	/* frame information structure (FIS) */
+ 	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
++
++	return AC_ERR_OK;
+ }
+ 
+ static inline void qs_packet_start(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
+index 50ebd779d975f..8323f88d17a53 100644
+--- a/drivers/ata/sata_rcar.c
++++ b/drivers/ata/sata_rcar.c
+@@ -554,12 +554,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
+ 	prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
+ }
+ 
+-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+-		return;
++		return AC_ERR_OK;
+ 
+ 	sata_rcar_bmdma_fill_sg(qc);
++
++	return AC_ERR_OK;
+ }
+ 
+ static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
+index ed76f070d21e4..82adaf02887fb 100644
+--- a/drivers/ata/sata_sil.c
++++ b/drivers/ata/sata_sil.c
+@@ -119,7 +119,7 @@ static void sil_dev_config(struct ata_device *dev);
+ static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+ static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
+-static void sil_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
+ static void sil_bmdma_setup(struct ata_queued_cmd *qc);
+ static void sil_bmdma_start(struct ata_queued_cmd *qc);
+ static void sil_bmdma_stop(struct ata_queued_cmd *qc);
+@@ -333,12 +333,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
+ 		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ }
+ 
+-static void sil_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+-		return;
++		return AC_ERR_OK;
+ 
+ 	sil_fill_sg(qc);
++
++	return AC_ERR_OK;
+ }
+ 
+ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
+diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
+index 319f517137cd5..7a8ca81e52bfc 100644
+--- a/drivers/ata/sata_sil24.c
++++ b/drivers/ata/sata_sil24.c
+@@ -336,7 +336,7 @@ static void sil24_dev_config(struct ata_device *dev);
+ static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
+ static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
+ static int sil24_qc_defer(struct ata_queued_cmd *qc);
+-static void sil24_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
+ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
+ static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
+ static void sil24_pmp_attach(struct ata_port *ap);
+@@ -840,7 +840,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
+ 	return ata_std_qc_defer(qc);
+ }
+ 
+-static void sil24_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	struct ata_port *ap = qc->ap;
+ 	struct sil24_port_priv *pp = ap->private_data;
+@@ -884,6 +884,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
+ 
+ 	if (qc->flags & ATA_QCFLAG_DMAMAP)
+ 		sil24_fill_sg(qc, sge);
++
++	return AC_ERR_OK;
+ }
+ 
+ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
+diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
+index 405e606a234d1..0d742457925ec 100644
+--- a/drivers/ata/sata_sx4.c
++++ b/drivers/ata/sata_sx4.c
+@@ -218,7 +218,7 @@ static void pdc_error_handler(struct ata_port *ap);
+ static void pdc_freeze(struct ata_port *ap);
+ static void pdc_thaw(struct ata_port *ap);
+ static int pdc_port_start(struct ata_port *ap);
+-static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
++static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
+ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
+ static unsigned int pdc20621_dimm_init(struct ata_host *host);
+@@ -546,7 +546,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
+ 	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
+ }
+ 
+-static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
++static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
+ {
+ 	switch (qc->tf.protocol) {
+ 	case ATA_PROT_DMA:
+@@ -558,6 +558,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+ 	default:
+ 		break;
+ 	}
++
++	return AC_ERR_OK;
+ }
+ 
+ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
+index 7323e9210f4b1..38fec976e62d4 100644
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -2243,7 +2243,7 @@ static int eni_init_one(struct pci_dev *pci_dev,
+ 
+ 	rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ 	if (rc < 0)
+-		goto out;
++		goto err_disable;
+ 
+ 	rc = -ENOMEM;
+ 	eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index d26b485ccc7d0..e8b3353c18eb8 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -2367,7 +2367,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ EXPORT_SYMBOL_GPL(regmap_raw_write_async);
+ 
+ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+-			    unsigned int val_len)
++			    unsigned int val_len, bool noinc)
+ {
+ 	struct regmap_range_node *range;
+ 	int ret;
+@@ -2380,7 +2380,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ 	range = _regmap_range_lookup(map, reg);
+ 	if (range) {
+ 		ret = _regmap_select_page(map, &reg, range,
+-					  val_len / map->format.val_bytes);
++					  noinc ? 1 : val_len / map->format.val_bytes);
+ 		if (ret != 0)
+ 			return ret;
+ 	}
+@@ -2418,7 +2418,7 @@ static int _regmap_bus_read(void *context, unsigned int reg,
+ 	if (!map->format.parse_val)
+ 		return -EINVAL;
+ 
+-	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
++	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
+ 	if (ret == 0)
+ 		*val = map->format.parse_val(work_val);
+ 
+@@ -2536,7 +2536,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ 
+ 		/* Read bytes that fit into whole chunks */
+ 		for (i = 0; i < chunk_count; i++) {
+-			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
++			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
+ 			if (ret != 0)
+ 				goto out;
+ 
+@@ -2547,7 +2547,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ 
+ 		/* Read remaining bytes */
+ 		if (val_len) {
+-			ret = _regmap_raw_read(map, reg, val, val_len);
++			ret = _regmap_raw_read(map, reg, val, val_len, false);
+ 			if (ret != 0)
+ 				goto out;
+ 		}
+@@ -2622,7 +2622,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ 			read_len = map->max_raw_read;
+ 		else
+ 			read_len = val_len;
+-		ret = _regmap_raw_read(map, reg, val, read_len);
++		ret = _regmap_raw_read(map, reg, val, read_len, true);
+ 		if (ret)
+ 			goto out_unlock;
+ 		val = ((u8 *)val) + read_len;
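
Sketch, not part of the patch: the new noinc flag only matters on paged
maps. A bulk read spans val_len / val_bytes consecutive registers and the
page window must cover all of them; a noinc read hammers one register, so
the window is sized for a single register regardless of transfer length.
Typical caller shape, with REG_FIFO_DATA as a hypothetical register:

	u8 fifo[64];

	/* Drain 64 bytes from a non-incrementing FIFO data register. */
	err = regmap_noinc_read(map, REG_FIFO_DATA, fifo, sizeof(fifo));
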
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index 8d1cd2479e36f..cc51395d8b0e5 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -343,11 +343,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
+ 	 * the end.
+ 	 */
+ 	len = patch_length;
+-	buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
+-		      GFP_KERNEL);
++	buf = kvmalloc(patch_length, GFP_KERNEL);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
++	memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4);
+ 	memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
+ 
+ 	*_buf = buf;
+@@ -415,8 +415,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
+ 	if (ret < 0)
+ 		return ret;
+ 	ret = fw->size;
+-	*buff = kmemdup(fw->data, ret, GFP_KERNEL);
+-	if (!*buff)
++	*buff = kvmalloc(fw->size, GFP_KERNEL);
++	if (*buff)
++		memcpy(*buff, fw->data, ret);
++	else
+ 		ret = -ENOMEM;
+ 
+ 	release_firmware(fw);
+@@ -454,14 +456,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
+ 		goto out;
+ 
+ 	if (btrtl_dev->cfg_len > 0) {
+-		tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
++		tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
+ 		if (!tbuff) {
+ 			ret = -ENOMEM;
+ 			goto out;
+ 		}
+ 
+ 		memcpy(tbuff, fw_data, ret);
+-		kfree(fw_data);
++		kvfree(fw_data);
+ 
+ 		memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
+ 		ret += btrtl_dev->cfg_len;
+@@ -474,7 +476,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
+ 	ret = rtl_download_firmware(hdev, fw_data, ret);
+ 
+ out:
+-	kfree(fw_data);
++	kvfree(fw_data);
+ 	return ret;
+ }
+ 
+@@ -501,8 +503,8 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
+ 
+ void btrtl_free(struct btrtl_device_info *btrtl_dev)
+ {
+-	kfree(btrtl_dev->fw_data);
+-	kfree(btrtl_dev->cfg_data);
++	kvfree(btrtl_dev->fw_data);
++	kvfree(btrtl_dev->cfg_data);
+ 	kfree(btrtl_dev);
+ }
+ EXPORT_SYMBOL_GPL(btrtl_free);
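+
+Realtek firmware patches can be large enough that a physically
+contiguous kmalloc() may fail on a fragmented system; kvmalloc()
+falls back to vmalloc() in that case. Buffers from this family must
+be released with kvfree(), which is why btrtl_free() and the error
+paths switch as well. A hedged sketch of the pairing, with an
+illustrative helper name:
+
+	#include <linux/mm.h>
+	#include <linux/string.h>
+
+	/* kvmalloc() tries kmalloc() first, then vmalloc(); the result
+	 * is only safe to free with kvfree(), never plain kfree(). */
+	static void *blob_dup(const void *src, size_t len)
+	{
+		void *buf = kvmalloc(len, GFP_KERNEL);
+
+		if (buf)
+			memcpy(buf, src, len);
+		return buf;	/* caller pairs this with kvfree(buf) */
+	}
+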
+diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
+index e31c02dc77709..cbd970fb02f18 100644
+--- a/drivers/bus/hisi_lpc.c
++++ b/drivers/bus/hisi_lpc.c
+@@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
+ 	return 0;
+ }
+ 
++/*
++ * Released firmware describes the IO port max address as 0x3fff, which is
++ * the max host bus address. Fix this up to a proper range. This will probably
++ * never be fixed in firmware.
++ */
++static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
++					       struct resource *r)
++{
++	if (r->end != 0x3fff)
++		return;
++
++	if (r->start == 0xe4)
++		r->end = 0xe4 + 0x04 - 1;
++	else if (r->start == 0x2f8)
++		r->end = 0x2f8 + 0x08 - 1;
++	else
++		dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
++			 r);
++}
++
+ /*
+  * hisi_lpc_acpi_set_io_res - set the resources for a child
+  * @child: the device node to be updated the I/O resource
+@@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
+ 		return -ENOMEM;
+ 	}
+ 	count = 0;
+-	list_for_each_entry(rentry, &resource_list, node)
+-		resources[count++] = *rentry->res;
++	list_for_each_entry(rentry, &resource_list, node) {
++		resources[count] = *rentry->res;
++		hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
++		count++;
++	}
+ 
+ 	acpi_dev_free_resource_list(&resource_list);
+ 
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 6a5d4dfafc474..80dedecfe15c5 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1150,14 +1150,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ 	 * We take into account the first, second and third-order deltas
+ 	 * in order to make our estimate.
+ 	 */
+-	delta = sample.jiffies - state->last_time;
+-	state->last_time = sample.jiffies;
++	delta = sample.jiffies - READ_ONCE(state->last_time);
++	WRITE_ONCE(state->last_time, sample.jiffies);
+ 
+-	delta2 = delta - state->last_delta;
+-	state->last_delta = delta;
++	delta2 = delta - READ_ONCE(state->last_delta);
++	WRITE_ONCE(state->last_delta, delta);
+ 
+-	delta3 = delta2 - state->last_delta2;
+-	state->last_delta2 = delta2;
++	delta3 = delta2 - READ_ONCE(state->last_delta2);
++	WRITE_ONCE(state->last_delta2, delta2);
+ 
+ 	if (delta < 0)
+ 		delta = -delta;
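+
+add_timer_randomness() can run concurrently for the same
+timer_rand_state, so the plain loads and stores of last_time,
+last_delta and last_delta2 were open to compiler tearing and
+refetching. READ_ONCE()/WRITE_ONCE() pin each access to a single
+operation. The idiom on a hypothetical shared field:
+
+	/* Each field is read once and written once per call, so a
+	 * racing updater may interleave but never yields a torn or
+	 * twice-read value. */
+	static long shared_last;
+
+	static long delta_from_last(long now)
+	{
+		long prev = READ_ONCE(shared_last);
+
+		WRITE_ONCE(shared_last, now);
+		return now - prev;
+	}
+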
+diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
+index 8eeb4190207d1..dce22b7fc5449 100644
+--- a/drivers/char/tlclk.c
++++ b/drivers/char/tlclk.c
+@@ -776,17 +776,21 @@ static int __init tlclk_init(void)
+ {
+ 	int ret;
+ 
++	telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
++
++	alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
++	if (!alarm_events) {
++		ret = -ENOMEM;
++		goto out1;
++	}
++
+ 	ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
+ 	if (ret < 0) {
+ 		printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
++		kfree(alarm_events);
+ 		return ret;
+ 	}
+ 	tlclk_major = ret;
+-	alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
+-	if (!alarm_events) {
+-		ret = -ENOMEM;
+-		goto out1;
+-	}
+ 
+ 	/* Read telecom clock IRQ number (Set by BIOS) */
+ 	if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
+@@ -795,7 +799,6 @@ static int __init tlclk_init(void)
+ 		ret = -EBUSY;
+ 		goto out2;
+ 	}
+-	telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+ 
+ 	if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
+ 		printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
+@@ -836,8 +839,8 @@ out3:
+ 	release_region(TLCLK_BASE, 8);
+ out2:
+ 	kfree(alarm_events);
+-out1:
+ 	unregister_chrdev(tlclk_major, "telco_clock");
++out1:
+ 	return ret;
+ }
+ 
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 763fc7e6c0058..20f27100708bd 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -26,6 +26,7 @@
+ #include "tpm.h"
+ 
+ #define ACPI_SIG_TPM2 "TPM2"
++#define TPM_CRB_MAX_RESOURCES 3
+ 
+ static const guid_t crb_acpi_start_guid =
+ 	GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
+@@ -95,7 +96,6 @@ enum crb_status {
+ struct crb_priv {
+ 	u32 sm;
+ 	const char *hid;
+-	void __iomem *iobase;
+ 	struct crb_regs_head __iomem *regs_h;
+ 	struct crb_regs_tail __iomem *regs_t;
+ 	u8 __iomem *cmd;
+@@ -438,21 +438,27 @@ static const struct tpm_class_ops tpm_crb = {
+ 
+ static int crb_check_resource(struct acpi_resource *ares, void *data)
+ {
+-	struct resource *io_res = data;
++	struct resource *iores_array = data;
+ 	struct resource_win win;
+ 	struct resource *res = &(win.res);
++	int i;
+ 
+ 	if (acpi_dev_resource_memory(ares, res) ||
+ 	    acpi_dev_resource_address_space(ares, &win)) {
+-		*io_res = *res;
+-		io_res->name = NULL;
++		for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
++			if (resource_type(iores_array + i) != IORESOURCE_MEM) {
++				iores_array[i] = *res;
++				iores_array[i].name = NULL;
++				break;
++			}
++		}
+ 	}
+ 
+ 	return 1;
+ }
+ 
+-static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+-				 struct resource *io_res, u64 start, u32 size)
++static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
++				 void __iomem **iobase_ptr, u64 start, u32 size)
+ {
+ 	struct resource new_res = {
+ 		.start	= start,
+@@ -464,10 +470,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+ 	if (start != new_res.start)
+ 		return (void __iomem *) ERR_PTR(-EINVAL);
+ 
+-	if (!resource_contains(io_res, &new_res))
++	if (!iores)
+ 		return devm_ioremap_resource(dev, &new_res);
+ 
+-	return priv->iobase + (new_res.start - io_res->start);
++	if (!*iobase_ptr) {
++		*iobase_ptr = devm_ioremap_resource(dev, iores);
++		if (IS_ERR(*iobase_ptr))
++			return *iobase_ptr;
++	}
++
++	return *iobase_ptr + (new_res.start - iores->start);
+ }
+ 
+ /*
+@@ -494,9 +506,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
+ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 		      struct acpi_table_tpm2 *buf)
+ {
+-	struct list_head resources;
+-	struct resource io_res;
++	struct list_head acpi_resource_list;
++	struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
++	void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
+ 	struct device *dev = &device->dev;
++	struct resource *iores;
++	void __iomem **iobase_ptr;
++	int i;
+ 	u32 pa_high, pa_low;
+ 	u64 cmd_pa;
+ 	u32 cmd_size;
+@@ -505,21 +521,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 	u32 rsp_size;
+ 	int ret;
+ 
+-	INIT_LIST_HEAD(&resources);
+-	ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
+-				     &io_res);
++	INIT_LIST_HEAD(&acpi_resource_list);
++	ret = acpi_dev_get_resources(device, &acpi_resource_list,
++				     crb_check_resource, iores_array);
+ 	if (ret < 0)
+ 		return ret;
+-	acpi_dev_free_resource_list(&resources);
++	acpi_dev_free_resource_list(&acpi_resource_list);
+ 
+-	if (resource_type(&io_res) != IORESOURCE_MEM) {
++	if (resource_type(iores_array) != IORESOURCE_MEM) {
+ 		dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ 		return -EINVAL;
++	} else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
++		IORESOURCE_MEM) {
++		dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
++		memset(iores_array + TPM_CRB_MAX_RESOURCES,
++		       0, sizeof(*iores_array));
++		iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
+ 	}
+ 
+-	priv->iobase = devm_ioremap_resource(dev, &io_res);
+-	if (IS_ERR(priv->iobase))
+-		return PTR_ERR(priv->iobase);
++	iores = NULL;
++	iobase_ptr = NULL;
++	for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
++		if (buf->control_address >= iores_array[i].start &&
++		    buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
++		    iores_array[i].end) {
++			iores = iores_array + i;
++			iobase_ptr = iobase_array + i;
++			break;
++		}
++	}
++
++	priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
++				   sizeof(struct crb_regs_tail));
++
++	if (IS_ERR(priv->regs_t))
++		return PTR_ERR(priv->regs_t);
+ 
+ 	/* The ACPI IO region starts at the head area and continues to include
+ 	 * the control area, as one nice sane region except for some older
+@@ -527,9 +563,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 	 */
+ 	if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ 	    (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+-		if (buf->control_address == io_res.start +
++		if (iores &&
++		    buf->control_address == iores->start +
+ 		    sizeof(*priv->regs_h))
+-			priv->regs_h = priv->iobase;
++			priv->regs_h = *iobase_ptr;
+ 		else
+ 			dev_warn(dev, FW_BUG "Bad ACPI memory layout");
+ 	}
+@@ -538,13 +575,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 	if (ret)
+ 		return ret;
+ 
+-	priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
+-				   sizeof(struct crb_regs_tail));
+-	if (IS_ERR(priv->regs_t)) {
+-		ret = PTR_ERR(priv->regs_t);
+-		goto out_relinquish_locality;
+-	}
+-
+ 	/*
+ 	 * PTT HW bug w/a: wake up the device to access
+ 	 * possibly not retained registers.
+@@ -556,13 +586,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 	pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
+ 	pa_low  = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
+ 	cmd_pa = ((u64)pa_high << 32) | pa_low;
+-	cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
+-				      ioread32(&priv->regs_t->ctrl_cmd_size));
++	cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
++
++	iores = NULL;
++	iobase_ptr = NULL;
++	for (i = 0; iores_array[i].end; ++i) {
++		if (cmd_pa >= iores_array[i].start &&
++		    cmd_pa <= iores_array[i].end) {
++			iores = iores_array + i;
++			iobase_ptr = iobase_array + i;
++			break;
++		}
++	}
++
++	if (iores)
++		cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
+ 
+ 	dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+ 		pa_high, pa_low, cmd_size);
+ 
+-	priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
++	priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
+ 	if (IS_ERR(priv->cmd)) {
+ 		ret = PTR_ERR(priv->cmd);
+ 		goto out;
+@@ -570,11 +613,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 
+ 	memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
+ 	rsp_pa = le64_to_cpu(__rsp_pa);
+-	rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
+-				      ioread32(&priv->regs_t->ctrl_rsp_size));
++	rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
++
++	iores = NULL;
++	iobase_ptr = NULL;
++	for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
++		if (rsp_pa >= iores_array[i].start &&
++		    rsp_pa <= iores_array[i].end) {
++			iores = iores_array + i;
++			iobase_ptr = iobase_array + i;
++			break;
++		}
++	}
++
++	if (iores)
++		rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
+ 
+ 	if (cmd_pa != rsp_pa) {
+-		priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
++		priv->rsp = crb_map_res(dev, iores, iobase_ptr,
++					rsp_pa, rsp_size);
+ 		ret = PTR_ERR_OR_ZERO(priv->rsp);
+ 		goto out;
+ 	}
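+
+The CRB rework stops assuming the command and response buffers live
+inside a single ACPI memory resource: it collects up to
+TPM_CRB_MAX_RESOURCES regions, finds the region containing each
+address, and maps a region at most once, caching the __iomem base.
+The lazy map-and-cache step, reduced to an illustrative helper:
+
+	static void __iomem *map_cached(struct device *dev,
+					struct resource *res,
+					void __iomem **cache, u64 addr)
+	{
+		if (!*cache) {
+			*cache = devm_ioremap_resource(dev, res);
+			if (IS_ERR(*cache))
+				return *cache;
+		}
+		return *cache + (addr - res->start);
+	}
+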
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 569e93e1f06cc..3ba67bc6baba0 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -588,6 +588,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
+ 	 */
+ 	while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
+ 		ibmvtpm_crq_process(crq, ibmvtpm);
++		wake_up_interruptible(&ibmvtpm->crq_queue.wq);
+ 		crq->valid = 0;
+ 		smp_wmb();
+ 	}
+@@ -635,6 +636,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ 	}
+ 
+ 	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
++	init_waitqueue_head(&crq_q->wq);
+ 	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
+ 						 CRQ_RES_BUF_SIZE,
+ 						 DMA_BIDIRECTIONAL);
+@@ -687,6 +689,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ 	if (rc)
+ 		goto init_irq_cleanup;
+ 
++	if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
++				ibmvtpm->rtce_buf != NULL,
++				HZ)) {
++		dev_err(dev, "CRQ response timed out\n");
++		goto init_irq_cleanup;
++	}
++
+ 	return tpm_chip_register(chip);
+ init_irq_cleanup:
+ 	do {
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
+index 91dfe766d0800..4f6a124601db4 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.h
++++ b/drivers/char/tpm/tpm_ibmvtpm.h
+@@ -31,6 +31,7 @@ struct ibmvtpm_crq_queue {
+ 	struct ibmvtpm_crq *crq_addr;
+ 	u32 index;
+ 	u32 num_entry;
++	wait_queue_head_t wq;
+ };
+ 
+ struct ibmvtpm_dev {
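+
+The probe fix pairs the new wait queue with the CRQ interrupt: the
+response handler publishes rtce_buf and wakes the queue, and probe
+now blocks for up to a second before tpm_chip_register(), instead of
+registering a chip whose transfer buffer may not exist yet. The
+handshake, sketched with an illustrative queue variable q:
+
+	init_waitqueue_head(&q->wq);		/* during setup */
+
+	/* response/interrupt path: publish, then wake */
+	q->rtce_buf = buf;
+	wake_up_interruptible(&q->wq);
+
+	/* probe path: bounded wait for the response */
+	if (!wait_event_timeout(q->wq, q->rtce_buf != NULL, HZ))
+		return -ETIMEDOUT;	/* illustrative error choice */
+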
+diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
+index c4d0b6f6abf2e..fc2e2839fe570 100644
+--- a/drivers/clk/socfpga/clk-pll-s10.c
++++ b/drivers/clk/socfpga/clk-pll-s10.c
+@@ -38,7 +38,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
+ 	/* read VCO1 reg for numerator and denominator */
+ 	reg = readl(socfpgaclk->hw.reg);
+ 	refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
+-	vco_freq = (unsigned long long)parent_rate / refdiv;
++
++	vco_freq = parent_rate;
++	do_div(vco_freq, refdiv);
+ 
+ 	/* Read mdiv and fdiv from the fdbck register */
+ 	reg = readl(socfpgaclk->hw.reg + 0x4);
+diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
+index 688e403333b91..14926e07d09ae 100644
+--- a/drivers/clk/ti/adpll.c
++++ b/drivers/clk/ti/adpll.c
+@@ -193,15 +193,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
+ 		if (err)
+ 			return NULL;
+ 	} else {
+-		const char *base_name = "adpll";
+-		char *buf;
+-
+-		buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
+-				    strlen(postfix), GFP_KERNEL);
+-		if (!buf)
+-			return NULL;
+-		sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
+-		name = buf;
++		name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
++				      d->pa, postfix);
+ 	}
+ 
+ 	return name;
+diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
+index 1d740a8c42ab3..47114c2a7cb54 100644
+--- a/drivers/clocksource/h8300_timer8.c
++++ b/drivers/clocksource/h8300_timer8.c
+@@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node)
+ 		return PTR_ERR(clk);
+ 	}
+ 
+-	ret = ENXIO;
++	ret = -ENXIO;
+ 	base = of_iomap(node, 0);
+ 	if (!base) {
+ 		pr_err("failed to map registers for clockevent\n");
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index 687c92ef76440..79942f7057576 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -903,6 +903,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
+ void powernv_cpufreq_work_fn(struct work_struct *work)
+ {
+ 	struct chip *chip = container_of(work, struct chip, throttle);
++	struct cpufreq_policy *policy;
+ 	unsigned int cpu;
+ 	cpumask_t mask;
+ 
+@@ -917,12 +918,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
+ 	chip->restore = false;
+ 	for_each_cpu(cpu, &mask) {
+ 		int index;
+-		struct cpufreq_policy policy;
+ 
+-		cpufreq_get_policy(&policy, cpu);
+-		index = cpufreq_table_find_index_c(&policy, policy.cur);
+-		powernv_cpufreq_target_index(&policy, index);
+-		cpumask_andnot(&mask, &mask, policy.cpus);
++		policy = cpufreq_cpu_get(cpu);
++		if (!policy)
++			continue;
++		index = cpufreq_table_find_index_c(policy, policy->cur);
++		powernv_cpufreq_target_index(policy, index);
++		cpumask_andnot(&mask, &mask, policy->cpus);
++		cpufreq_cpu_put(policy);
+ 	}
+ out:
+ 	put_online_cpus();
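+
+cpufreq_get_policy() copies the whole struct cpufreq_policy into a
+stack variable, which bloats the work function's frame and can copy
+a policy that is mid-teardown. cpufreq_cpu_get() instead returns a
+pointer to the live policy with a reference held, to be dropped with
+cpufreq_cpu_put(). The discipline, sketched:
+
+	static void touch_policy(unsigned int cpu)
+	{
+		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+		if (!policy)
+			return;	/* CPU offline or policy being freed */
+		/* ... use policy->cur, policy->cpus ... */
+		cpufreq_cpu_put(policy);
+	}
+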
+diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
+index 9b3c259f081d3..ee508bbbb7504 100644
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -2418,8 +2418,9 @@ int chcr_aead_dma_map(struct device *dev,
+ 	else
+ 		reqctx->b0_dma = 0;
+ 	if (req->src == req->dst) {
+-		error = dma_map_sg(dev, req->src, sg_nents(req->src),
+-				   DMA_BIDIRECTIONAL);
++		error = dma_map_sg(dev, req->src,
++				sg_nents_for_len(req->src, dst_size),
++					DMA_BIDIRECTIONAL);
+ 		if (!error)
+ 			goto err;
+ 	} else {
+diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
+index 1e0cc96306dd7..2c1f3ddb0cc79 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_io.c
++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
+@@ -1449,7 +1449,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 				      csk->wr_max_credits))
+ 			sk->sk_write_space(sk);
+ 
+-		if (copied >= target && !sk->sk_backlog.tail)
++		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ 			break;
+ 
+ 		if (copied) {
+@@ -1482,7 +1482,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 				break;
+ 			}
+ 		}
+-		if (sk->sk_backlog.tail) {
++		if (READ_ONCE(sk->sk_backlog.tail)) {
+ 			release_sock(sk);
+ 			lock_sock(sk);
+ 			chtls_cleanup_rbuf(sk, copied);
+@@ -1627,7 +1627,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
+ 			break;
+ 		}
+ 
+-		if (sk->sk_backlog.tail) {
++		if (READ_ONCE(sk->sk_backlog.tail)) {
+ 			/* Do not sleep, just process backlog. */
+ 			release_sock(sk);
+ 			lock_sock(sk);
+@@ -1759,7 +1759,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 				      csk->wr_max_credits))
+ 			sk->sk_write_space(sk);
+ 
+-		if (copied >= target && !sk->sk_backlog.tail)
++		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ 			break;
+ 
+ 		if (copied) {
+@@ -1790,7 +1790,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 			}
+ 		}
+ 
+-		if (sk->sk_backlog.tail) {
++		if (READ_ONCE(sk->sk_backlog.tail)) {
+ 			release_sock(sk);
+ 			lock_sock(sk);
+ 			chtls_cleanup_rbuf(sk, copied);
+diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
+index 06768074d2d82..479d9575e1245 100644
+--- a/drivers/devfreq/tegra-devfreq.c
++++ b/drivers/devfreq/tegra-devfreq.c
+@@ -80,6 +80,8 @@
+ 
+ #define KHZ							1000
+ 
++#define KHZ_MAX						(ULONG_MAX / KHZ)
++
+ /* Assume that the bus is saturated if the utilization is 25% */
+ #define BUS_SATURATION_RATIO					25
+ 
+@@ -180,7 +182,7 @@ struct tegra_actmon_emc_ratio {
+ };
+ 
+ static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+-	{ 1400000, ULONG_MAX },
++	{ 1400000,    KHZ_MAX },
+ 	{ 1200000,    750000 },
+ 	{ 1100000,    600000 },
+ 	{ 1000000,    500000 },
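+
+The ratio table previously stored ULONG_MAX as an EMC frequency in
+kHz, so a later multiplication by KHZ to convert to Hz wrapped
+around. Capping table entries at ULONG_MAX / KHZ makes the
+conversion overflow-free by construction:
+
+	#define KHZ	1000UL
+	#define KHZ_MAX	(ULONG_MAX / KHZ)
+
+	/* for any freq_khz <= KHZ_MAX, freq_khz * KHZ <= ULONG_MAX */
+	static unsigned long khz_to_hz(unsigned long freq_khz)
+	{
+		return freq_khz * KHZ;
+	}
+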
+diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
+index 1551ca7df3941..8586cc05def17 100644
+--- a/drivers/dma-buf/dma-fence.c
++++ b/drivers/dma-buf/dma-fence.c
+@@ -244,6 +244,30 @@ void dma_fence_free(struct dma_fence *fence)
+ }
+ EXPORT_SYMBOL(dma_fence_free);
+ 
++static bool __dma_fence_enable_signaling(struct dma_fence *fence)
++{
++	bool was_set;
++
++	lockdep_assert_held(fence->lock);
++
++	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
++				   &fence->flags);
++
++	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++		return false;
++
++	if (!was_set && fence->ops->enable_signaling) {
++		trace_dma_fence_enable_signal(fence);
++
++		if (!fence->ops->enable_signaling(fence)) {
++			dma_fence_signal_locked(fence);
++			return false;
++		}
++	}
++
++	return true;
++}
++
+ /**
+  * dma_fence_enable_sw_signaling - enable signaling on fence
+  * @fence: the fence to enable
+@@ -256,19 +280,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
+ {
+ 	unsigned long flags;
+ 
+-	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+-			      &fence->flags) &&
+-	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+-	    fence->ops->enable_signaling) {
+-		trace_dma_fence_enable_signal(fence);
+-
+-		spin_lock_irqsave(fence->lock, flags);
+-
+-		if (!fence->ops->enable_signaling(fence))
+-			dma_fence_signal_locked(fence);
++	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++		return;
+ 
+-		spin_unlock_irqrestore(fence->lock, flags);
+-	}
++	spin_lock_irqsave(fence->lock, flags);
++	__dma_fence_enable_signaling(fence);
++	spin_unlock_irqrestore(fence->lock, flags);
+ }
+ EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
+ 
+@@ -302,7 +319,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+ {
+ 	unsigned long flags;
+ 	int ret = 0;
+-	bool was_set;
+ 
+ 	if (WARN_ON(!fence || !func))
+ 		return -EINVAL;
+@@ -314,25 +330,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+ 
+ 	spin_lock_irqsave(fence->lock, flags);
+ 
+-	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+-				   &fence->flags);
+-
+-	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+-		ret = -ENOENT;
+-	else if (!was_set && fence->ops->enable_signaling) {
+-		trace_dma_fence_enable_signal(fence);
+-
+-		if (!fence->ops->enable_signaling(fence)) {
+-			dma_fence_signal_locked(fence);
+-			ret = -ENOENT;
+-		}
+-	}
+-
+-	if (!ret) {
++	if (__dma_fence_enable_signaling(fence)) {
+ 		cb->func = func;
+ 		list_add_tail(&cb->node, &fence->cb_list);
+-	} else
++	} else {
+ 		INIT_LIST_HEAD(&cb->node);
++		ret = -ENOENT;
++	}
++
+ 	spin_unlock_irqrestore(fence->lock, flags);
+ 
+ 	return ret;
+@@ -432,7 +437,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
+ 	struct default_wait_cb cb;
+ 	unsigned long flags;
+ 	signed long ret = timeout ? timeout : 1;
+-	bool was_set;
+ 
+ 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ 		return ret;
+@@ -444,21 +448,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
+ 		goto out;
+ 	}
+ 
+-	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+-				   &fence->flags);
+-
+-	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++	if (!__dma_fence_enable_signaling(fence))
+ 		goto out;
+ 
+-	if (!was_set && fence->ops->enable_signaling) {
+-		trace_dma_fence_enable_signal(fence);
+-
+-		if (!fence->ops->enable_signaling(fence)) {
+-			dma_fence_signal_locked(fence);
+-			goto out;
+-		}
+-	}
+-
+ 	if (!timeout) {
+ 		ret = 0;
+ 		goto out;
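+
+dma_fence_add_callback(), dma_fence_enable_sw_signaling() and
+dma_fence_default_wait() each open-coded the same test-and-set of
+DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT; the patch hoists it into
+__dma_fence_enable_signaling(), which must run under fence->lock and
+returns whether the fence is still pending. The locked-helper
+convention, sketched on a hypothetical type:
+
+	struct thing {
+		spinlock_t lock;
+		bool armed, done;
+	};
+
+	/* double-underscore helper: the caller holds t->lock; the
+	 * assertion turns a forgotten lock into a lockdep splat. */
+	static bool __thing_arm(struct thing *t)
+	{
+		lockdep_assert_held(&t->lock);
+		if (t->done)
+			return false;	/* nothing left to arm */
+		t->armed = true;	/* shared one-shot logic */
+		return true;
+	}
+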
+diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
+index b7ec56ae02a6e..fca232b1d4a64 100644
+--- a/drivers/dma/mediatek/mtk-hsdma.c
++++ b/drivers/dma/mediatek/mtk-hsdma.c
+@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
+ 	if (err) {
+ 		dev_err(&pdev->dev,
+ 			"request_irq failed with err %d\n", err);
+-		goto err_unregister;
++		goto err_free;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, hsdma);
+@@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ 
++err_free:
++	of_dma_controller_free(pdev->dev.of_node);
+ err_unregister:
+ 	dma_async_device_unregister(dd);
+ 
+diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
+index 4903a408fc146..ac7af440f8658 100644
+--- a/drivers/dma/stm32-dma.c
++++ b/drivers/dma/stm32-dma.c
+@@ -494,8 +494,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
+ 
+ 	spin_lock_irqsave(&chan->vchan.lock, flags);
+ 
+-	if (chan->busy) {
+-		stm32_dma_stop(chan);
++	if (chan->desc) {
++		vchan_terminate_vdesc(&chan->desc->vdesc);
++		if (chan->busy)
++			stm32_dma_stop(chan);
+ 		chan->desc = NULL;
+ 	}
+ 
+@@ -551,6 +553,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
+ 		if (!vdesc)
+ 			return;
+ 
++		list_del(&vdesc->node);
++
+ 		chan->desc = to_stm32_dma_desc(vdesc);
+ 		chan->next_sg = 0;
+ 	}
+@@ -628,7 +632,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
+ 		} else {
+ 			chan->busy = false;
+ 			if (chan->next_sg == chan->desc->num_sgs) {
+-				list_del(&chan->desc->vdesc.node);
+ 				vchan_cookie_complete(&chan->desc->vdesc);
+ 				chan->desc = NULL;
+ 			}
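+
+Both STM32 hunks (this one and the stm32-mdma one below) fix the
+same descriptor lifetime problem: the in-flight descriptor stayed on
+the issued list until completion, so terminate_all could free it
+while the hardware was still using it. Deleting it from the list the
+moment it becomes current makes ownership unambiguous, and
+vchan_terminate_vdesc() defers the actual free to
+vchan_synchronize(). The handoff, with illustrative types:
+
+	struct vdesc { struct list_head node; };
+	struct chan  { struct list_head issued; struct vdesc *desc; };
+
+	/* called under the channel lock */
+	static void start_next(struct chan *c)
+	{
+		struct vdesc *vd = list_first_entry_or_null(&c->issued,
+							struct vdesc, node);
+		if (!vd)
+			return;
+		list_del(&vd->node);	/* now owned by c->desc alone */
+		c->desc = vd;
+	}
+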
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index 8c3c3e5b812a8..9c6867916e890 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -1137,6 +1137,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
+ 		return;
+ 	}
+ 
++	list_del(&vdesc->node);
++
+ 	chan->desc = to_stm32_mdma_desc(vdesc);
+ 	hwdesc = chan->desc->node[0].hwdesc;
+ 	chan->curr_hwdesc = 0;
+@@ -1252,8 +1254,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
+ 	LIST_HEAD(head);
+ 
+ 	spin_lock_irqsave(&chan->vchan.lock, flags);
+-	if (chan->busy) {
+-		stm32_mdma_stop(chan);
++	if (chan->desc) {
++		vchan_terminate_vdesc(&chan->desc->vdesc);
++		if (chan->busy)
++			stm32_mdma_stop(chan);
+ 		chan->desc = NULL;
+ 	}
+ 	vchan_get_all_descriptors(&chan->vchan, &head);
+@@ -1341,7 +1345,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
+ 
+ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
+ {
+-	list_del(&chan->desc->vdesc.node);
+ 	vchan_cookie_complete(&chan->desc->vdesc);
+ 	chan->desc = NULL;
+ 	chan->busy = false;
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index 15481aeaeecd1..5ccd24a46e381 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -1225,8 +1225,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
+ 
+ 	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
+ 
+-	if (tdc->busy)
+-		tegra_dma_terminate_all(dc);
++	tegra_dma_terminate_all(dc);
+ 
+ 	spin_lock_irqsave(&tdc->lock, flags);
+ 	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
+diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
+index 73de6a6179fcd..e002ff8413e2a 100644
+--- a/drivers/dma/xilinx/zynqmp_dma.c
++++ b/drivers/dma/xilinx/zynqmp_dma.c
+@@ -127,10 +127,12 @@
+ /* Max transfer size per descriptor */
+ #define ZYNQMP_DMA_MAX_TRANS_LEN	0x40000000
+ 
++/* Max burst lengths */
++#define ZYNQMP_DMA_MAX_DST_BURST_LEN    32768U
++#define ZYNQMP_DMA_MAX_SRC_BURST_LEN    32768U
++
+ /* Reset values for data attributes */
+ #define ZYNQMP_DMA_AXCACHE_VAL		0xF
+-#define ZYNQMP_DMA_ARLEN_RST_VAL	0xF
+-#define ZYNQMP_DMA_AWLEN_RST_VAL	0xF
+ 
+ #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL	0x1F
+ 
+@@ -536,17 +538,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
+ 
+ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
+ {
+-	u32 val;
++	u32 val, burst_val;
+ 
+ 	val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
+ 	val |= ZYNQMP_DMA_POINT_TYPE_SG;
+ 	writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
+ 
+ 	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
++	burst_val = __ilog2_u32(chan->src_burst_len);
+ 	val = (val & ~ZYNQMP_DMA_ARLEN) |
+-		(chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
++		((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
++	burst_val = __ilog2_u32(chan->dst_burst_len);
+ 	val = (val & ~ZYNQMP_DMA_AWLEN) |
+-		(chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
++		((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
+ 	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ }
+ 
+@@ -562,8 +566,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
+ {
+ 	struct zynqmp_dma_chan *chan = to_chan(dchan);
+ 
+-	chan->src_burst_len = config->src_maxburst;
+-	chan->dst_burst_len = config->dst_maxburst;
++	chan->src_burst_len = clamp(config->src_maxburst, 1U,
++		ZYNQMP_DMA_MAX_SRC_BURST_LEN);
++	chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
++		ZYNQMP_DMA_MAX_DST_BURST_LEN);
+ 
+ 	return 0;
+ }
+@@ -884,8 +890,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
+ 		return PTR_ERR(chan->regs);
+ 
+ 	chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
+-	chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
+-	chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
++	chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
++	chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
+ 	err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
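+
+The ZynqMP ARLEN/AWLEN register fields take the burst length as a
+power-of-two exponent, not as a raw count, and the hardware supports
+bursts up to 32768; storing the raw maxburst only worked by accident
+for small values. A runnable sketch of the clamp-then-encode step:
+
+	#include <stdio.h>
+
+	static unsigned int burst_encode(unsigned int maxburst)
+	{
+		unsigned int b = maxburst, exp = 0;
+
+		if (b < 1)
+			b = 1;
+		if (b > 32768U)
+			b = 32768U;
+		while (b >>= 1)		/* floor(log2(b)) */
+			exp++;
+		return exp;
+	}
+
+	int main(void)
+	{
+		/* prints "0 6 15" */
+		printf("%u %u %u\n", burst_encode(0), burst_encode(64),
+		       burst_encode(100000));
+		return 0;
+	}
+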
+diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
+index 05b528c7ed8fd..e809f4d9a9e93 100644
+--- a/drivers/firmware/arm_sdei.c
++++ b/drivers/firmware/arm_sdei.c
+@@ -410,14 +410,19 @@ int sdei_event_enable(u32 event_num)
+ 		return -ENOENT;
+ 	}
+ 
+-	spin_lock(&sdei_list_lock);
+-	event->reenable = true;
+-	spin_unlock(&sdei_list_lock);
+ 
++	cpus_read_lock();
+ 	if (event->type == SDEI_EVENT_TYPE_SHARED)
+ 		err = sdei_api_event_enable(event->event_num);
+ 	else
+ 		err = sdei_do_cross_call(_local_event_enable, event);
++
++	if (!err) {
++		spin_lock(&sdei_list_lock);
++		event->reenable = true;
++		spin_unlock(&sdei_list_lock);
++	}
++	cpus_read_unlock();
+ 	mutex_unlock(&sdei_events_lock);
+ 
+ 	return err;
+@@ -619,21 +624,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
+ 			break;
+ 		}
+ 
+-		spin_lock(&sdei_list_lock);
+-		event->reregister = true;
+-		spin_unlock(&sdei_list_lock);
+-
++		cpus_read_lock();
+ 		err = _sdei_event_register(event);
+ 		if (err) {
+-			spin_lock(&sdei_list_lock);
+-			event->reregister = false;
+-			event->reenable = false;
+-			spin_unlock(&sdei_list_lock);
+-
+ 			sdei_event_destroy(event);
+ 			pr_warn("Failed to register event %u: %d\n", event_num,
+ 				err);
++		} else {
++			spin_lock(&sdei_list_lock);
++			event->reregister = true;
++			spin_unlock(&sdei_list_lock);
+ 		}
++		cpus_read_unlock();
+ 	} while (0);
+ 	mutex_unlock(&sdei_events_lock);
+ 
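+
+Both SDEI hunks apply the same rule: hold cpus_read_lock() across
+the firmware call so a CPU cannot come online mid-operation and miss
+its per-CPU enable or registration, and set the reenable/reregister
+bookkeeping only after the call succeeds, so the resume path never
+replays an operation that never took effect. The ordering, with
+illustrative names:
+
+	static DEFINE_SPINLOCK(state_lock);
+	struct event_state { bool replay_on_resume; };
+	static int firmware_call(struct event_state *ev);	/* stub */
+
+	static int apply_and_record(struct event_state *ev)
+	{
+		int err;
+
+		cpus_read_lock();	 /* no CPU appears mid-call */
+		err = firmware_call(ev);
+		if (!err) {
+			spin_lock(&state_lock);
+			ev->replay_on_resume = true;
+			spin_unlock(&state_lock);
+		}
+		cpus_read_unlock();
+		return err;
+	}
+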
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index a5df80d50d447..6cf3dd5edffda 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -191,30 +191,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
+ 
+ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
+ {
+-	uint8_t __iomem *bios;
+-	size_t size;
++	phys_addr_t rom = adev->pdev->rom;
++	size_t romlen = adev->pdev->romlen;
++	void __iomem *bios;
+ 
+ 	adev->bios = NULL;
+ 
+-	bios = pci_platform_rom(adev->pdev, &size);
+-	if (!bios) {
++	if (!rom || romlen == 0)
+ 		return false;
+-	}
+ 
+-	adev->bios = kzalloc(size, GFP_KERNEL);
+-	if (adev->bios == NULL)
++	adev->bios = kzalloc(romlen, GFP_KERNEL);
++	if (!adev->bios)
+ 		return false;
+ 
+-	memcpy_fromio(adev->bios, bios, size);
++	bios = ioremap(rom, romlen);
++	if (!bios)
++		goto free_bios;
+ 
+-	if (!check_atom_bios(adev->bios, size)) {
+-		kfree(adev->bios);
+-		return false;
+-	}
++	memcpy_fromio(adev->bios, bios, romlen);
++	iounmap(bios);
+ 
+-	adev->bios_size = size;
++	if (!check_atom_bios(adev->bios, romlen))
++		goto free_bios;
++
++	adev->bios_size = romlen;
+ 
+ 	return true;
++free_bios:
++	kfree(adev->bios);
++	return false;
+ }
+ 
+ #ifdef CONFIG_ACPI
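+
+pci_platform_rom() handed back a __iomem pointer whose mapping could
+go away underneath the caller; the replacement maps pdev->rom just
+long enough to copy it out with memcpy_fromio() (never plain
+memcpy() on __iomem memory). The same shape recurs in the radeon and
+nouveau hunks below. A hedged sketch of the copy-out:
+
+	#include <linux/io.h>
+	#include <linux/slab.h>
+
+	static void *rom_dup(phys_addr_t rom, size_t romlen)
+	{
+		void *copy;
+		void __iomem *bios;
+
+		if (!rom || !romlen)
+			return NULL;
+		copy = kzalloc(romlen, GFP_KERNEL);
+		if (!copy)
+			return NULL;
+		bios = ioremap(rom, romlen);
+		if (!bios) {
+			kfree(copy);
+			return NULL;
+		}
+		memcpy_fromio(copy, bios, romlen);
+		iounmap(bios);
+		return copy;
+	}
+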
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
+index e9934de1b9cf8..0222bb7ea49b4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.c
++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
+@@ -742,8 +742,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+ 			cjiffies = jiffies;
+ 			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+ 				cjiffies -= ctx->last_jump_jiffies;
+-				if ((jiffies_to_msecs(cjiffies) > 5000)) {
+-					DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
++				if ((jiffies_to_msecs(cjiffies) > 10000)) {
++					DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
+ 					ctx->abort = true;
+ 				}
+ 			} else {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 189212cb35475..bff39f561264e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1101,6 +1101,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
+ 	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ 	dqm_unlock(dqm);
+ 
++	pm_release_ib(&dqm->packets);
++
+ 	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
+ 	pm_uninit(&dqm->packets);
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 3abc0294c05f5..2fb2c683ad54b 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1576,8 +1576,7 @@ static void write_i2c_retimer_setting(
+ 						buffer, sizeof(buffer));
+ 
+ 			if (!i2c_success)
+-				/* Write failure */
+-				ASSERT(i2c_success);
++				goto i2c_write_fail;
+ 
+ 			/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ 			 * needs to be set to 1 on every 0xA-0xC write.
+@@ -1595,8 +1594,7 @@ static void write_i2c_retimer_setting(
+ 						pipe_ctx->stream->sink->link->ddc,
+ 						slave_address, &offset, 1, &value, 1);
+ 					if (!i2c_success)
+-						/* Write failure */
+-						ASSERT(i2c_success);
++						goto i2c_write_fail;
+ 				}
+ 
+ 				buffer[0] = offset;
+@@ -1605,8 +1603,7 @@ static void write_i2c_retimer_setting(
+ 				i2c_success = i2c_write(pipe_ctx, slave_address,
+ 						buffer, sizeof(buffer));
+ 				if (!i2c_success)
+-					/* Write failure */
+-					ASSERT(i2c_success);
++					goto i2c_write_fail;
+ 			}
+ 		}
+ 	}
+@@ -1623,8 +1620,7 @@ static void write_i2c_retimer_setting(
+ 							buffer, sizeof(buffer));
+ 
+ 				if (!i2c_success)
+-					/* Write failure */
+-					ASSERT(i2c_success);
++					goto i2c_write_fail;
+ 
+ 				/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ 				 * needs to be set to 1 on every 0xA-0xC write.
+@@ -1642,8 +1638,7 @@ static void write_i2c_retimer_setting(
+ 								pipe_ctx->stream->sink->link->ddc,
+ 								slave_address, &offset, 1, &value, 1);
+ 						if (!i2c_success)
+-							/* Write failure */
+-							ASSERT(i2c_success);
++							goto i2c_write_fail;
+ 					}
+ 
+ 					buffer[0] = offset;
+@@ -1652,8 +1647,7 @@ static void write_i2c_retimer_setting(
+ 					i2c_success = i2c_write(pipe_ctx, slave_address,
+ 							buffer, sizeof(buffer));
+ 					if (!i2c_success)
+-						/* Write failure */
+-						ASSERT(i2c_success);
++						goto i2c_write_fail;
+ 				}
+ 			}
+ 		}
+@@ -1668,8 +1662,7 @@ static void write_i2c_retimer_setting(
+ 		i2c_success = i2c_write(pipe_ctx, slave_address,
+ 				buffer, sizeof(buffer));
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 		/* Write offset 0x00 to 0x23 */
+ 		buffer[0] = 0x00;
+@@ -1677,8 +1670,7 @@ static void write_i2c_retimer_setting(
+ 		i2c_success = i2c_write(pipe_ctx, slave_address,
+ 				buffer, sizeof(buffer));
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 		/* Write offset 0xff to 0x00 */
+ 		buffer[0] = 0xff;
+@@ -1686,10 +1678,14 @@ static void write_i2c_retimer_setting(
+ 		i2c_success = i2c_write(pipe_ctx, slave_address,
+ 				buffer, sizeof(buffer));
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 	}
++
++	return;
++
++i2c_write_fail:
++	DC_LOG_DEBUG("Set retimer failed");
+ }
+ 
+ static void write_i2c_default_retimer_setting(
+@@ -1710,8 +1706,7 @@ static void write_i2c_default_retimer_setting(
+ 	i2c_success = i2c_write(pipe_ctx, slave_address,
+ 			buffer, sizeof(buffer));
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 	/* Write offset 0x0A to 0x17 */
+ 	buffer[0] = 0x0A;
+@@ -1719,8 +1714,7 @@ static void write_i2c_default_retimer_setting(
+ 	i2c_success = i2c_write(pipe_ctx, slave_address,
+ 			buffer, sizeof(buffer));
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 	/* Write offset 0x0B to 0xDA or 0xD8 */
+ 	buffer[0] = 0x0B;
+@@ -1728,8 +1722,7 @@ static void write_i2c_default_retimer_setting(
+ 	i2c_success = i2c_write(pipe_ctx, slave_address,
+ 			buffer, sizeof(buffer));
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 	/* Write offset 0x0A to 0x17 */
+ 	buffer[0] = 0x0A;
+@@ -1737,8 +1730,7 @@ static void write_i2c_default_retimer_setting(
+ 	i2c_success = i2c_write(pipe_ctx, slave_address,
+ 			buffer, sizeof(buffer));
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 	/* Write offset 0x0C to 0x1D or 0x91 */
+ 	buffer[0] = 0x0C;
+@@ -1746,8 +1738,7 @@ static void write_i2c_default_retimer_setting(
+ 	i2c_success = i2c_write(pipe_ctx, slave_address,
+ 			buffer, sizeof(buffer));
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 	/* Write offset 0x0A to 0x17 */
+ 	buffer[0] = 0x0A;
+@@ -1755,8 +1746,7 @@ static void write_i2c_default_retimer_setting(
+ 	i2c_success = i2c_write(pipe_ctx, slave_address,
+ 			buffer, sizeof(buffer));
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		goto i2c_write_fail;
+ 
+ 
+ 	if (is_vga_mode) {
+@@ -1768,8 +1758,7 @@ static void write_i2c_default_retimer_setting(
+ 		i2c_success = i2c_write(pipe_ctx, slave_address,
+ 				buffer, sizeof(buffer));
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 		/* Write offset 0x00 to 0x23 */
+ 		buffer[0] = 0x00;
+@@ -1777,8 +1766,7 @@ static void write_i2c_default_retimer_setting(
+ 		i2c_success = i2c_write(pipe_ctx, slave_address,
+ 				buffer, sizeof(buffer));
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 
+ 		/* Write offset 0xff to 0x00 */
+ 		buffer[0] = 0xff;
+@@ -1786,9 +1774,13 @@ static void write_i2c_default_retimer_setting(
+ 		i2c_success = i2c_write(pipe_ctx, slave_address,
+ 				buffer, sizeof(buffer));
+ 		if (!i2c_success)
+-			/* Write failure */
+-			ASSERT(i2c_success);
++			goto i2c_write_fail;
+ 	}
++
++	return;
++
++i2c_write_fail:
++	DC_LOG_DEBUG("Set default retimer failed");
+ }
+ 
+ static void write_i2c_redriver_setting(
+@@ -1811,8 +1803,7 @@ static void write_i2c_redriver_setting(
+ 					buffer, sizeof(buffer));
+ 
+ 	if (!i2c_success)
+-		/* Write failure */
+-		ASSERT(i2c_success);
++		DC_LOG_DEBUG("Set redriver failed");
+ }
+ 
+ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index 46c9cb47a96e5..145af3bb2dfcb 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -127,22 +127,16 @@ struct aux_payloads {
+ 	struct vector payloads;
+ };
+ 
+-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
++static bool dal_ddc_i2c_payloads_create(
++		struct dc_context *ctx,
++		struct i2c_payloads *payloads,
++		uint32_t count)
+ {
+-	struct i2c_payloads *payloads;
+-
+-	payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
+-
+-	if (!payloads)
+-		return NULL;
+-
+ 	if (dal_vector_construct(
+ 		&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
+-		return payloads;
+-
+-	kfree(payloads);
+-	return NULL;
++		return true;
+ 
++	return false;
+ }
+ 
+ static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+@@ -155,14 +149,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+ 	return p->payloads.count;
+ }
+ 
+-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
++static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
+ {
+-	if (!p || !*p)
++	if (!p)
+ 		return;
+-	dal_vector_destruct(&(*p)->payloads);
+-	kfree(*p);
+-	*p = NULL;
+ 
++	dal_vector_destruct(&p->payloads);
+ }
+ 
+ static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
+@@ -580,9 +572,13 @@ bool dal_ddc_service_query_ddc_data(
+ 
+ 	uint32_t payloads_num = write_payloads + read_payloads;
+ 
++
+ 	if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
+ 		return false;
+ 
++	if (!payloads_num)
++		return false;
++
+ 	/*TODO: len of payload data for i2c and aux is uint8!!!!,
+ 	 *  but we want to read 256 over i2c!!!!*/
+ 	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+@@ -613,23 +609,25 @@ bool dal_ddc_service_query_ddc_data(
+ 		dal_ddc_aux_payloads_destroy(&payloads);
+ 
+ 	} else {
+-		struct i2c_payloads *payloads =
+-			dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
++		struct i2c_command command = {0};
++		struct i2c_payloads payloads;
+ 
+-		struct i2c_command command = {
+-			.payloads = dal_ddc_i2c_payloads_get(payloads),
+-			.number_of_payloads = 0,
+-			.engine = DDC_I2C_COMMAND_ENGINE,
+-			.speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
++		if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
++			return false;
++
++		command.payloads = dal_ddc_i2c_payloads_get(&payloads);
++		command.number_of_payloads = 0;
++		command.engine = DDC_I2C_COMMAND_ENGINE;
++		command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
+ 
+ 		dal_ddc_i2c_payloads_add(
+-			payloads, address, write_size, write_buf, true);
++			&payloads, address, write_size, write_buf, true);
+ 
+ 		dal_ddc_i2c_payloads_add(
+-			payloads, address, read_size, read_buf, false);
++			&payloads, address, read_size, read_buf, false);
+ 
+ 		command.number_of_payloads =
+-			dal_ddc_i2c_payloads_get_count(payloads);
++			dal_ddc_i2c_payloads_get_count(&payloads);
+ 
+ 		ret = dm_helpers_submit_i2c(
+ 				ddc->ctx,
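+
+Moving struct i2c_payloads onto the stack removes a heap allocation
+(and its failure path) for an object that never outlives the
+function; only the embedded vector still needs construct/destruct.
+The shape, with hypothetical names:
+
+	struct wrap { struct vec v; };	/* small, short-lived */
+
+	static bool run_transaction(void)
+	{
+		struct wrap w;		/* automatic storage */
+
+		if (!vec_init(&w.v))	/* the only allocation */
+			return false;
+		/* ... build payloads, submit ... */
+		vec_free(&w.v);		/* the only cleanup */
+		return true;
+	}
+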
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 72c0a2ae2dd4f..058898b321b8a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3970,6 +3970,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+ 			"Failed to populate and upload SCLK MCLK DPM levels!",
+ 			result = tmp_result);
+ 
++	/*
++	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
++	 * That effectively disables the AVFS feature.
++	 */
++	if (hwmgr->hardcode_pp_table != NULL)
++		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
++
+ 	tmp_result = smu7_update_avfs(hwmgr);
+ 	PP_ASSERT_WITH_CODE((0 == tmp_result),
+ 			"Failed to update avfs voltages!",
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index ce459ea4ec3ad..da9e6923fa659 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3591,6 +3591,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
+ 	PP_ASSERT_WITH_CODE(!result,
+ 			"Failed to upload PPtable!", return result);
+ 
++	/*
++	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
++	 * That effectively disables the AVFS feature.
++	 */
++	if (hwmgr->hardcode_pp_table != NULL)
++		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
++
+ 	vega10_update_avfs(hwmgr);
+ 
+ 	data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
+index 17db4b4749d5a..2e8479744ca4a 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
+@@ -415,6 +415,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
+ 	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ 	struct gma_clock_t clock;
+ 
++	memset(&clock, 0, sizeof(clock));
++
+ 	switch (refclk) {
+ 	case 27000:
+ 		if (target < 200000) {
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 1fc9a7fa37b45..d29a58bd2f7a3 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -1474,18 +1474,31 @@ static const struct adreno_gpu_funcs funcs = {
+ static void check_speed_bin(struct device *dev)
+ {
+ 	struct nvmem_cell *cell;
+-	u32 bin, val;
++	u32 val;
++
++	/*
++	 * If the OPP table specifies an opp-supported-hw property then we have
++	 * to set something with dev_pm_opp_set_supported_hw() or the table
++	 * doesn't get populated, so pick an arbitrary value that should
++	 * ensure the default frequencies are selected while not conflicting
++	 * with any actual bins.
++	 */
++	val = 0x80;
+ 
+ 	cell = nvmem_cell_get(dev, "speed_bin");
+ 
+-	/* If a nvmem cell isn't defined, nothing to do */
+-	if (IS_ERR(cell))
+-		return;
++	if (!IS_ERR(cell)) {
++		void *buf = nvmem_cell_read(cell, NULL);
++
++		if (!IS_ERR(buf)) {
++			u8 bin = *((u8 *) buf);
+ 
+-	bin = *((u32 *) nvmem_cell_read(cell, NULL));
+-	nvmem_cell_put(cell);
++			val = (1 << bin);
++			kfree(buf);
++		}
+ 
+-	val = (1 << bin);
++		nvmem_cell_put(cell);
++	}
+ 
+ 	dev_pm_opp_set_supported_hw(dev, &val, 1);
+ }
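+
+nvmem_cell_read() returns a freshly kmalloc'd buffer (or an
+ERR_PTR); the old code dereferenced it without an error check,
+leaked it, and read a u32 from what is a one-byte fuse. The rework
+also always calls dev_pm_opp_set_supported_hw(), since an OPP table
+with opp-supported-hw entries yields no frequencies unless some mask
+is set. Safe cell consumption, as an illustrative helper:
+
+	static u8 nvmem_read_u8(struct device *dev, const char *name,
+				u8 def)
+	{
+		struct nvmem_cell *cell = nvmem_cell_get(dev, name);
+		u8 val = def;
+		void *buf;
+
+		if (IS_ERR(cell))
+			return def;
+		buf = nvmem_cell_read(cell, NULL);
+		if (!IS_ERR(buf)) {
+			val = *(u8 *)buf;
+			kfree(buf);	/* caller owns the buffer */
+		}
+		nvmem_cell_put(cell);
+		return val;
+	}
+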
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 7f45486b6650b..3ba3ae9749bec 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -495,8 +495,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
+ 	if (!dev->dma_parms) {
+ 		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ 					      GFP_KERNEL);
+-		if (!dev->dma_parms)
+-			return -ENOMEM;
++		if (!dev->dma_parms) {
++			ret = -ENOMEM;
++			goto err_msm_uninit;
++		}
+ 	}
+ 	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ 
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index e06ea8c8184cb..1bb0a9f6fa730 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -909,8 +909,10 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
+ 		return connector_status_disconnected;
+ 
+ 	ret = pm_runtime_get_sync(connector->dev->dev);
+-	if (ret < 0 && ret != -EACCES)
++	if (ret < 0 && ret != -EACCES) {
++		pm_runtime_put_autosuspend(connector->dev->dev);
+ 		return connector_status_disconnected;
++	}
+ 
+ 	conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
+ 					     mstc->port);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+index 9635704a1d864..4561a786fab07 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+@@ -161,8 +161,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
+ 	}
+ 
+ 	ret = pm_runtime_get_sync(drm->dev);
+-	if (ret < 0 && ret != -EACCES)
++	if (ret < 0 && ret != -EACCES) {
++		pm_runtime_put_autosuspend(drm->dev);
+ 		return ret;
++	}
++
+ 	ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
+ 	pm_runtime_put_autosuspend(drm->dev);
+ 	if (ret < 0)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 791f970714ed6..a98fccb0d32f9 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -82,8 +82,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
+ 		return ret;
+ 
+ 	ret = pm_runtime_get_sync(dev);
+-	if (ret < 0 && ret != -EACCES)
++	if (ret < 0 && ret != -EACCES) {
++		pm_runtime_put_autosuspend(dev);
+ 		goto out;
++	}
+ 
+ 	ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
+ 	pm_runtime_mark_last_busy(dev);
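+
+All three nouveau hunks fix the same reference leak:
+pm_runtime_get_sync() raises the usage count even when it fails, so
+returning on error without a matching put leaves the device pinned
+awake. The shape of the fix (later kernels add
+pm_runtime_resume_and_get() for this exact pitfall):
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0 && ret != -EACCES) {
+		pm_runtime_put_autosuspend(dev); /* drop even on error */
+		return ret;
+	}
+	/* ... do work ... */
+	pm_runtime_put_autosuspend(dev);
+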
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+index 9b91da09dc5f8..8d9812a51ef63 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+@@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name)
+ 	else
+ 		return ERR_PTR(-ENODEV);
+ 
++	if (!pdev->rom || pdev->romlen == 0)
++		return ERR_PTR(-ENODEV);
++
+ 	if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
++		priv->size = pdev->romlen;
+ 		if (ret = -ENODEV,
+-		    (priv->rom = pci_platform_rom(pdev, &priv->size)))
++		    (priv->rom = ioremap(pdev->rom, pdev->romlen)))
+ 			return priv;
+ 		kfree(priv);
+ 	}
+@@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name)
+ 	return ERR_PTR(ret);
+ }
+ 
++static void
++platform_fini(void *data)
++{
++	struct priv *priv = data;
++
++	iounmap(priv->rom);
++	kfree(priv);
++}
++
+ const struct nvbios_source
+ nvbios_platform = {
+ 	.name = "PLATFORM",
+ 	.init = platform_init,
+-	.fini = (void(*)(void *))kfree,
++	.fini = platform_fini,
+ 	.read = pcirom_read,
+ 	.rw = true,
+ };
+diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+index 3bfb95d230e0e..d8fb686c1fda9 100644
+--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
++++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+@@ -193,7 +193,7 @@ static int __init omapdss_boot_init(void)
+ 	dss = of_find_matching_node(NULL, omapdss_of_match);
+ 
+ 	if (dss == NULL || !of_device_is_available(dss))
+-		return 0;
++		goto put_node;
+ 
+ 	omapdss_walk_device(dss, true);
+ 
+@@ -218,6 +218,8 @@ static int __init omapdss_boot_init(void)
+ 		kfree(n);
+ 	}
+ 
++put_node:
++	of_node_put(dss);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 04c0ed41374f1..dd0528cf98183 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -104,25 +104,33 @@ static bool radeon_read_bios(struct radeon_device *rdev)
+ 
+ static bool radeon_read_platform_bios(struct radeon_device *rdev)
+ {
+-	uint8_t __iomem *bios;
+-	size_t size;
++	phys_addr_t rom = rdev->pdev->rom;
++	size_t romlen = rdev->pdev->romlen;
++	void __iomem *bios;
+ 
+ 	rdev->bios = NULL;
+ 
+-	bios = pci_platform_rom(rdev->pdev, &size);
+-	if (!bios) {
++	if (!rom || romlen == 0)
+ 		return false;
+-	}
+ 
+-	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
++	rdev->bios = kzalloc(romlen, GFP_KERNEL);
++	if (!rdev->bios)
+ 		return false;
+-	}
+-	rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+-	if (rdev->bios == NULL) {
+-		return false;
+-	}
++
++	bios = ioremap(rom, romlen);
++	if (!bios)
++		goto free_bios;
++
++	memcpy_fromio(rdev->bios, bios, romlen);
++	iounmap(bios);
++
++	if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa)
++		goto free_bios;
+ 
+ 	return true;
++free_bios:
++	kfree(rdev->bios);
++	return false;
+ }
+ 
+ #ifdef CONFIG_ACPI
+diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
+index 880e8fbb08556..242752b2d328c 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_csc.h
++++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
+@@ -14,7 +14,7 @@ struct sun8i_mixer;
+ 
+ /* VI channel CSC units offsets */
+ #define CCSC00_OFFSET 0xAA050
+-#define CCSC01_OFFSET 0xFA000
++#define CCSC01_OFFSET 0xFA050
+ #define CCSC10_OFFSET 0xA0000
+ #define CCSC11_OFFSET 0xF0000
+ 
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 86b98856756d9..1161662664577 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -1134,6 +1134,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi)
+ 	card->num_links = 1;
+ 	card->name = "vc4-hdmi";
+ 	card->dev = dev;
++	card->owner = THIS_MODULE;
+ 
+ 	/*
+ 	 * Be careful, snd_soc_register_card() calls dev_set_drvdata() and
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index f225bef1e043c..41dd0a08a625c 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -1292,8 +1292,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
+ 
+ 	/* create pre-declared device nodes */
+ 	of_i2c_register_devices(adap);
+-	i2c_acpi_register_devices(adap);
+ 	i2c_acpi_install_space_handler(adap);
++	i2c_acpi_register_devices(adap);
+ 
+ 	if (adap->nr < __i2c_first_dynamic_bus_num)
+ 		i2c_scan_static_board_info(adap);
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 64f206e11d497..4ebf63360a697 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1100,14 +1100,22 @@ retest:
+ 		break;
+ 	}
+ 
+-	spin_lock_irq(&cm.lock);
++	spin_lock_irq(&cm_id_priv->lock);
++	spin_lock(&cm.lock);
++	/* Required for cleanup paths related cm_req_handler() */
++	if (cm_id_priv->timewait_info) {
++		cm_cleanup_timewait(cm_id_priv->timewait_info);
++		kfree(cm_id_priv->timewait_info);
++		cm_id_priv->timewait_info = NULL;
++	}
+ 	if (!list_empty(&cm_id_priv->altr_list) &&
+ 	    (!cm_id_priv->altr_send_port_not_ready))
+ 		list_del(&cm_id_priv->altr_list);
+ 	if (!list_empty(&cm_id_priv->prim_list) &&
+ 	    (!cm_id_priv->prim_send_port_not_ready))
+ 		list_del(&cm_id_priv->prim_list);
+-	spin_unlock_irq(&cm.lock);
++	spin_unlock(&cm.lock);
++	spin_unlock_irq(&cm_id_priv->lock);
+ 
+ 	cm_free_id(cm_id->local_id);
+ 	cm_deref_id(cm_id_priv);
+@@ -1424,7 +1432,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ 	/* Verify that we're not in timewait. */
+ 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ 	spin_lock_irqsave(&cm_id_priv->lock, flags);
+-	if (cm_id->state != IB_CM_IDLE) {
++	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
+ 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ 		ret = -EINVAL;
+ 		goto out;
+@@ -1442,12 +1450,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ 				 param->ppath_sgid_attr, &cm_id_priv->av,
+ 				 cm_id_priv);
+ 	if (ret)
+-		goto error1;
++		goto out;
+ 	if (param->alternate_path) {
+ 		ret = cm_init_av_by_path(param->alternate_path, NULL,
+ 					 &cm_id_priv->alt_av, cm_id_priv);
+ 		if (ret)
+-			goto error1;
++			goto out;
+ 	}
+ 	cm_id->service_id = param->service_id;
+ 	cm_id->service_mask = ~cpu_to_be64(0);
+@@ -1465,7 +1473,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ 
+ 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
+ 	if (ret)
+-		goto error1;
++		goto out;
+ 
+ 	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
+ 	cm_format_req(req_msg, cm_id_priv, param);
+@@ -1488,7 +1496,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ 	return 0;
+ 
+ error2:	cm_free_msg(cm_id_priv->msg);
+-error1:	kfree(cm_id_priv->timewait_info);
+ out:	return ret;
+ }
+ EXPORT_SYMBOL(ib_send_cm_req);
+@@ -1973,7 +1980,7 @@ static int cm_req_handler(struct cm_work *work)
+ 		pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
+ 			 be32_to_cpu(cm_id->local_id));
+ 		ret = -EINVAL;
+-		goto free_timeinfo;
++		goto destroy;
+ 	}
+ 
+ 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
+@@ -2057,8 +2064,6 @@ static int cm_req_handler(struct cm_work *work)
+ rejected:
+ 	atomic_dec(&cm_id_priv->refcount);
+ 	cm_deref_id(listen_cm_id_priv);
+-free_timeinfo:
+-	kfree(cm_id_priv->timewait_info);
+ destroy:
+ 	ib_destroy_cm_id(cm_id);
+ 	return ret;
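+
+The destroy path now owns timewait_info cleanup and takes
+cm_id_priv->lock first with cm.lock nested inside, consistently in
+that order. spin_lock_irq() on the outer lock disables interrupts
+once, so the inner acquisition can use the cheaper non-irq variant:
+
+	spin_lock_irq(&outer_lock);	/* IRQs off from here */
+	spin_lock(&inner_lock);		/* already IRQ-safe context */
+	/* ... state changes guarded by both locks ... */
+	spin_unlock(&inner_lock);
+	spin_unlock_irq(&outer_lock);	/* IRQs back on */
+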
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 16145b0a14583..3fd3dfa3478b7 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -3293,7 +3293,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 		if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
+ 			err = pick_local_ipaddrs(dev, cm_id);
+ 			if (err)
+-				goto fail2;
++				goto fail3;
+ 		}
+ 
+ 		/* find a route */
+@@ -3315,7 +3315,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
+ 			err = pick_local_ip6addrs(dev, cm_id);
+ 			if (err)
+-				goto fail2;
++				goto fail3;
+ 		}
+ 
+ 		/* find a route */
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 4321b9e3dbb4b..0273d0404e740 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -2071,9 +2071,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
+ 	dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
+ 	if (!dst || dst->error) {
+ 		if (dst) {
+-			dst_release(dst);
+ 			i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
+ 				     dst->error);
++			dst_release(dst);
+ 		}
+ 		return rc;
+ 	}
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index 2566715773675..e908dfbaa1378 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -460,10 +460,10 @@ qedr_addr6_resolve(struct qedr_dev *dev,
+ 
+ 	if ((!dst) || dst->error) {
+ 		if (dst) {
+-			dst_release(dst);
+ 			DP_ERR(dev,
+ 			       "ip6_route_output returned dst->error = %d\n",
+ 			       dst->error);
++			dst_release(dst);
+ 		}
+ 		return -EINVAL;
+ 	}
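
The i40iw and qedr hunks above make the same reorder: the error
message that reads dst->error now prints before dst_release(), since
dropping the last reference may free the dst and turn the read into a
use-after-free. A toy refcounted object shows the log-then-put rule
(names illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	struct dst {
		int refcnt;
		int error;
	};

	static void dst_put(struct dst *d)
	{
		if (--d->refcnt == 0)
			free(d);	/* d->error is dead from here on */
	}

	int main(void)
	{
		struct dst *d = malloc(sizeof(*d));

		if (!d)
			return 1;
		d->refcnt = 1;
		d->error = -113;

		/* correct order: consume the fields, then drop the ref */
		printf("route lookup failed: error = %d\n", d->error);
		dst_put(d);
		return 0;
	}
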
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
+index 94dedabe648c2..6589ff51eaf5c 100644
+--- a/drivers/infiniband/sw/rxe/rxe.c
++++ b/drivers/infiniband/sw/rxe/rxe.c
+@@ -121,6 +121,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
+ 	rxe->attr.max_fast_reg_page_list_len	= RXE_MAX_FMR_PAGE_LIST_LEN;
+ 	rxe->attr.max_pkeys			= RXE_MAX_PKEYS;
+ 	rxe->attr.local_ca_ack_delay		= RXE_LOCAL_CA_ACK_DELAY;
++	addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
++			rxe->ndev->dev_addr);
+ 
+ 	rxe->max_ucontext			= RXE_MAX_UCONTEXT;
+ }
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 230697fa31fe3..8a22ab8b29e9b 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -583,15 +583,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
+ 	int err;
+ 
+ 	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
+-		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
++		int max_rd_atomic = attr->max_rd_atomic ?
++			roundup_pow_of_two(attr->max_rd_atomic) : 0;
+ 
+ 		qp->attr.max_rd_atomic = max_rd_atomic;
+ 		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
+ 	}
+ 
+ 	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+-		int max_dest_rd_atomic =
+-			__roundup_pow_of_two(attr->max_dest_rd_atomic);
++		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
++			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;
+ 
+ 		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
+ 
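
roundup_pow_of_two() is only defined for inputs of at least 1 (it
effectively computes 1 << fls(n - 1), and n - 1 underflows for zero),
so the hunks above special-case a zero max_rd_atomic or
max_dest_rd_atomic, which simply disables RDMA atomic operations. A
userspace equivalent, assuming a GCC/Clang __builtin_clz():

	#include <assert.h>

	static unsigned int roundup_pow_of_two(unsigned int n)
	{
		/* defined for 1 <= n <= 2^31, mirrors the kernel helper */
		return n == 1 ? 1 : 1u << (32 - __builtin_clz(n - 1));
	}

	static unsigned int guarded_round(unsigned int n)
	{
		return n ? roundup_pow_of_two(n) : 0;	/* the patch's idiom */
	}

	int main(void)
	{
		assert(guarded_round(0) == 0);	/* was undefined behaviour */
		assert(guarded_round(1) == 1);
		assert(guarded_round(5) == 8);
		return 0;
	}
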
+diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
+index 1ee48cb21df95..022e973dc7c31 100644
+--- a/drivers/leds/leds-mlxreg.c
++++ b/drivers/leds/leds-mlxreg.c
+@@ -209,8 +209,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv)
+ 			brightness = LED_OFF;
+ 			led_data->base_color = MLXREG_LED_GREEN_SOLID;
+ 		}
+-		sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg",
+-			data->label);
++		snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name),
++			 "mlxreg:%s", data->label);
+ 		led_cdev->name = led_data->led_cdev_name;
+ 		led_cdev->brightness = brightness;
+ 		led_cdev->max_brightness = LED_ON;
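
Switching from sprintf() to snprintf() bounds the LED name to the
fixed led_cdev_name buffer, so an oversized label gets truncated
instead of overrunning the structure. A small demo of the pattern,
with NAME_LEN standing in for the driver's buffer size:

	#include <stdio.h>

	#define NAME_LEN 16		/* stand-in for the fixed buffer */

	int main(void)
	{
		char name[NAME_LEN];
		const char *label = "a-rather-long-port-label";

		/* writes at most NAME_LEN - 1 chars plus the NUL */
		int n = snprintf(name, sizeof(name), "mlxreg:%s", label);

		if (n >= (int)sizeof(name))
			fprintf(stderr, "truncated, %d chars needed\n", n);
		printf("%s\n", name);
		return 0;
	}
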
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 1cc6ae3e058c6..6a380ed4919a0 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -585,6 +585,7 @@ struct cache_set {
+ 	 */
+ 	wait_queue_head_t	btree_cache_wait;
+ 	struct task_struct	*btree_cache_alloc_lock;
++	spinlock_t		btree_cannibalize_lock;
+ 
+ 	/*
+ 	 * When we free a btree node, we increment the gen of the bucket the
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index d320574b9a4c8..e388e7bb7b5db 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -876,15 +876,17 @@ out:
+ 
+ static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
+ {
+-	struct task_struct *old;
+-
+-	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
+-	if (old && old != current) {
++	spin_lock(&c->btree_cannibalize_lock);
++	if (likely(c->btree_cache_alloc_lock == NULL)) {
++		c->btree_cache_alloc_lock = current;
++	} else if (c->btree_cache_alloc_lock != current) {
+ 		if (op)
+ 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
+ 					TASK_UNINTERRUPTIBLE);
++		spin_unlock(&c->btree_cannibalize_lock);
+ 		return -EINTR;
+ 	}
++	spin_unlock(&c->btree_cannibalize_lock);
+ 
+ 	return 0;
+ }
+@@ -919,10 +921,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
+  */
+ static void bch_cannibalize_unlock(struct cache_set *c)
+ {
++	spin_lock(&c->btree_cannibalize_lock);
+ 	if (c->btree_cache_alloc_lock == current) {
+ 		c->btree_cache_alloc_lock = NULL;
+ 		wake_up(&c->btree_cache_wait);
+ 	}
++	spin_unlock(&c->btree_cannibalize_lock);
+ }
+ 
+ static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 825bfde10c694..7787ec42f81e1 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1737,6 +1737,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ 	sema_init(&c->sb_write_mutex, 1);
+ 	mutex_init(&c->bucket_lock);
+ 	init_waitqueue_head(&c->btree_cache_wait);
++	spin_lock_init(&c->btree_cannibalize_lock);
+ 	init_waitqueue_head(&c->bucket_wait);
+ 	init_waitqueue_head(&c->gc_wait);
+ 	sema_init(&c->uuid_write_mutex, 1);
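
The bcache hunks replace the lock-free cmpxchg() handshake with a
spinlock around both ends of the cannibalize lock. Under the old
scheme a waiter could see the lock held while the holder cleared it
and issued the wake-up before the waiter reached prepare_to_wait(),
so the wake-up was lost. Serializing the try-lock and the
unlock/wake-up under btree_cannibalize_lock closes that window. A
pthread sketch of the same shape, with a mutex for the spinlock and a
condition variable for the wait queue (all names illustrative):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
	static int held;
	static pthread_t owner;

	static int cannibalize_trylock(void)
	{
		int ret = 0;

		pthread_mutex_lock(&lock);
		if (!held) {
			held = 1;
			owner = pthread_self();
		} else if (!pthread_equal(owner, pthread_self())) {
			/* a real waiter would block on waitq here,
			 * registered while still holding the lock, so
			 * the broadcast below can never be missed */
			ret = -1;
		}
		pthread_mutex_unlock(&lock);
		return ret;
	}

	static void cannibalize_unlock(void)
	{
		pthread_mutex_lock(&lock);
		if (held && pthread_equal(owner, pthread_self())) {
			held = 0;
			pthread_cond_broadcast(&waitq);
		}
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		if (cannibalize_trylock() == 0) {
			puts("cannibalize lock taken");
			cannibalize_unlock();
		}
		return 0;
	}
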
+diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
+index 097c42d3f8c26..df0c7243eafe4 100644
+--- a/drivers/media/dvb-frontends/tda10071.c
++++ b/drivers/media/dvb-frontends/tda10071.c
+@@ -483,10 +483,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status)
+ 			goto error;
+ 
+ 		if (dev->delivery_system == SYS_DVBS) {
+-			dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 |
+-					 buf[2] << 8 | buf[3] << 0;
+-			dev->post_bit_error += buf[0] << 24 | buf[1] << 16 |
+-					       buf[2] << 8 | buf[3] << 0;
++			u32 bit_error = buf[0] << 24 | buf[1] << 16 |
++					buf[2] << 8 | buf[3] << 0;
++
++			dev->dvbv3_ber = bit_error;
++			dev->post_bit_error += bit_error;
+ 			c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ 			c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+ 			dev->block_error += buf[4] << 8 | buf[5] << 0;
+diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
+index 4731e1c72f960..0a434bdce3b3b 100644
+--- a/drivers/media/i2c/smiapp/smiapp-core.c
++++ b/drivers/media/i2c/smiapp/smiapp-core.c
+@@ -2337,11 +2337,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
+ 		if (rval < 0) {
+ 			if (rval != -EBUSY && rval != -EAGAIN)
+ 				pm_runtime_set_active(&client->dev);
+-			pm_runtime_put(&client->dev);
++			pm_runtime_put_noidle(&client->dev);
+ 			return -ENODEV;
+ 		}
+ 
+ 		if (smiapp_read_nvm(sensor, sensor->nvm)) {
++			pm_runtime_put(&client->dev);
+ 			dev_err(&client->dev, "nvm read failed\n");
+ 			return -ENODEV;
+ 		}
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+index ed518b1f82e4a..d04ed438a45de 100644
+--- a/drivers/media/media-device.c
++++ b/drivers/media/media-device.c
+@@ -568,6 +568,38 @@ static void media_device_release(struct media_devnode *devnode)
+ 	dev_dbg(devnode->parent, "Media device released\n");
+ }
+ 
++static void __media_device_unregister_entity(struct media_entity *entity)
++{
++	struct media_device *mdev = entity->graph_obj.mdev;
++	struct media_link *link, *tmp;
++	struct media_interface *intf;
++	unsigned int i;
++
++	ida_free(&mdev->entity_internal_idx, entity->internal_idx);
++
++	/* Remove all interface links pointing to this entity */
++	list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
++		list_for_each_entry_safe(link, tmp, &intf->links, list) {
++			if (link->entity == entity)
++				__media_remove_intf_link(link);
++		}
++	}
++
++	/* Remove all data links that belong to this entity */
++	__media_entity_remove_links(entity);
++
++	/* Remove all pads that belong to this entity */
++	for (i = 0; i < entity->num_pads; i++)
++		media_gobj_destroy(&entity->pads[i].graph_obj);
++
++	/* Remove the entity */
++	media_gobj_destroy(&entity->graph_obj);
++
++	/* invoke entity_notify callbacks to handle entity removal?? */
++
++	entity->graph_obj.mdev = NULL;
++}
++
+ /**
+  * media_device_register_entity - Register an entity with a media device
+  * @mdev:	The media device
+@@ -625,6 +657,7 @@ int __must_check media_device_register_entity(struct media_device *mdev,
+ 		 */
+ 		ret = media_graph_walk_init(&new, mdev);
+ 		if (ret) {
++			__media_device_unregister_entity(entity);
+ 			mutex_unlock(&mdev->graph_mutex);
+ 			return ret;
+ 		}
+@@ -637,38 +670,6 @@ int __must_check media_device_register_entity(struct media_device *mdev,
+ }
+ EXPORT_SYMBOL_GPL(media_device_register_entity);
+ 
+-static void __media_device_unregister_entity(struct media_entity *entity)
+-{
+-	struct media_device *mdev = entity->graph_obj.mdev;
+-	struct media_link *link, *tmp;
+-	struct media_interface *intf;
+-	unsigned int i;
+-
+-	ida_free(&mdev->entity_internal_idx, entity->internal_idx);
+-
+-	/* Remove all interface links pointing to this entity */
+-	list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
+-		list_for_each_entry_safe(link, tmp, &intf->links, list) {
+-			if (link->entity == entity)
+-				__media_remove_intf_link(link);
+-		}
+-	}
+-
+-	/* Remove all data links that belong to this entity */
+-	__media_entity_remove_links(entity);
+-
+-	/* Remove all pads that belong to this entity */
+-	for (i = 0; i < entity->num_pads; i++)
+-		media_gobj_destroy(&entity->pads[i].graph_obj);
+-
+-	/* Remove the entity */
+-	media_gobj_destroy(&entity->graph_obj);
+-
+-	/* invoke entity_notify callbacks to handle entity removal?? */
+-
+-	entity->graph_obj.mdev = NULL;
+-}
+-
+ void media_device_unregister_entity(struct media_entity *entity)
+ {
+ 	struct media_device *mdev = entity->graph_obj.mdev;
+diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
+index be3155275a6ba..d945323fc437d 100644
+--- a/drivers/media/platform/ti-vpe/cal.c
++++ b/drivers/media/platform/ti-vpe/cal.c
+@@ -684,12 +684,13 @@ static void pix_proc_config(struct cal_ctx *ctx)
+ }
+ 
+ static void cal_wr_dma_config(struct cal_ctx *ctx,
+-			      unsigned int width)
++			      unsigned int width, unsigned int height)
+ {
+ 	u32 val;
+ 
+ 	val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
+ 	set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
++	set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
+ 	set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
+ 		  CAL_WR_DMA_CTRL_DTAG_MASK);
+ 	set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
+@@ -1315,7 +1316,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
+ 	csi2_lane_config(ctx);
+ 	csi2_ctx_config(ctx);
+ 	pix_proc_config(ctx);
+-	cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
++	cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
++			  ctx->v_fmt.fmt.pix.height);
+ 	cal_wr_dma_addr(ctx, addr);
+ 	csi2_ppi_enable(ctx);
+ 
+diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
+index 19c6a0354ce00..b84a6f6548610 100644
+--- a/drivers/media/usb/go7007/go7007-usb.c
++++ b/drivers/media/usb/go7007/go7007-usb.c
+@@ -1052,6 +1052,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
+ 	struct go7007_usb *usb;
+ 	const struct go7007_usb_board *board;
+ 	struct usb_device *usbdev = interface_to_usbdev(intf);
++	struct usb_host_endpoint *ep;
+ 	unsigned num_i2c_devs;
+ 	char *name;
+ 	int video_pipe, i, v_urb_len;
+@@ -1148,7 +1149,8 @@ static int go7007_usb_probe(struct usb_interface *intf,
+ 	if (usb->intr_urb->transfer_buffer == NULL)
+ 		goto allocfail;
+ 
+-	if (go->board_id == GO7007_BOARDID_SENSORAY_2250)
++	ep = usb->usbdev->ep_in[4];
++	if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
+ 		usb_fill_bulk_urb(usb->intr_urb, usb->usbdev,
+ 			usb_rcvbulkpipe(usb->usbdev, 4),
+ 			usb->intr_urb->transfer_buffer, 2*sizeof(u16),
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 182973df1aed4..77c965c6a65f1 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -32,6 +32,11 @@ int mfd_cell_enable(struct platform_device *pdev)
+ 	const struct mfd_cell *cell = mfd_get_cell(pdev);
+ 	int err = 0;
+ 
++	if (!cell->enable) {
++		dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
++		return 0;
++	}
++
+ 	/* only call enable hook if the cell wasn't previously enabled */
+ 	if (atomic_inc_return(cell->usage_count) == 1)
+ 		err = cell->enable(pdev);
+@@ -49,6 +54,11 @@ int mfd_cell_disable(struct platform_device *pdev)
+ 	const struct mfd_cell *cell = mfd_get_cell(pdev);
+ 	int err = 0;
+ 
++	if (!cell->disable) {
++		dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
++		return 0;
++	}
++
+ 	/* only disable if no other clients are using it */
+ 	if (atomic_dec_return(cell->usage_count) == 0)
+ 		err = cell->disable(pdev);
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 5ca53e225382d..4b18034537f53 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -300,7 +300,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
+ 	}
+ }
+ 
+-static void mmc_part_add(struct mmc_card *card, unsigned int size,
++static void mmc_part_add(struct mmc_card *card, u64 size,
+ 			 unsigned int part_cfg, char *name, int idx, bool ro,
+ 			 int area_type)
+ {
+@@ -316,7 +316,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+ {
+ 	int idx;
+ 	u8 hc_erase_grp_sz, hc_wp_grp_sz;
+-	unsigned int part_size;
++	u64 part_size;
+ 
+ 	/*
+ 	 * General purpose partition feature support --
+@@ -346,8 +346,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+ 				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
+ 				<< 8) +
+ 				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
+-			part_size *= (size_t)(hc_erase_grp_sz *
+-				hc_wp_grp_sz);
++			part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
+ 			mmc_part_add(card, part_size << 19,
+ 				EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
+ 				"gp%d", idx, false,
+@@ -365,7 +364,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ {
+ 	int err = 0, idx;
+-	unsigned int part_size;
++	u64 part_size;
+ 	struct device_node *np;
+ 	bool broken_hpi = false;
+ 
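
mmc_manage_gp_partitions() multiplies the EXT_CSD size multipliers
and then shifts left by 19 (512 KiB units), so with a 32-bit
part_size any partition of 4 GiB or more wraps; the hunks widen the
locals and the mmc_part_add() parameter to u64. The wraparound is
easy to reproduce (multiplier values below are made up for an 8 GiB
partition):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int mult = 16, grp = 1024;	/* 16384 x 512 KiB */

		uint32_t bad  = (uint32_t)(mult * grp) << 19;	/* wraps to 0 */
		uint64_t good = (uint64_t)(mult * grp) << 19;

		printf("32-bit: %" PRIu32 " bytes\n", bad);
		printf("64-bit: %" PRIu64 " bytes\n", good);
		return 0;
	}
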
+diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
+index 1dbc9554a0786..3ab75d3e2ce32 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -727,7 +727,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
+ 	kfree(mtd->eraseregions);
+ 	kfree(mtd);
+ 	kfree(cfi->cmdset_priv);
+-	kfree(cfi->cfiq);
+ 	return NULL;
+ }
+ 
+diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
+index 3ea44cff9b759..c29205ee82e20 100644
+--- a/drivers/mtd/cmdlinepart.c
++++ b/drivers/mtd/cmdlinepart.c
+@@ -231,12 +231,29 @@ static int mtdpart_setup_real(char *s)
+ 		struct cmdline_mtd_partition *this_mtd;
+ 		struct mtd_partition *parts;
+ 		int mtd_id_len, num_parts;
+-		char *p, *mtd_id;
++		char *p, *mtd_id, *semicol;
++
++		/*
++		 * Replace the first ';' by a NULL char so strrchr can work
++		 * properly.
++		 */
++		semicol = strchr(s, ';');
++		if (semicol)
++			*semicol = '\0';
+ 
+ 		mtd_id = s;
+ 
+-		/* fetch <mtd-id> */
+-		p = strchr(s, ':');
++		/*
++		 * fetch <mtd-id>. We use strrchr to ignore all ':' that could
++		 * be present in the MTD name, only the last one is interpreted
++		 * as an <mtd-id>/<part-definition> separator.
++		 */
++		p = strrchr(s, ':');
++
++		/* Restore the ';' now. */
++		if (semicol)
++			*semicol = ';';
++
+ 		if (!p) {
+ 			pr_err("no mtd-id\n");
+ 			return -EINVAL;
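
The cmdlinepart fix copes with MTD ids that themselves contain ':'
by temporarily NUL-terminating at the next ';' (so the search cannot
leak into the following definition) and then taking the last ':' via
strrchr() as the id/partition separator. A standalone sketch of the
same parse, on a hypothetical command line:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char s[] = "spi0.0:nor:1M(boot),-(rootfs);nand:-(data)";
		char *semicol = strchr(s, ';');
		char *p;

		if (semicol)
			*semicol = '\0';	/* fence off the next definition */

		p = strrchr(s, ':');		/* last ':' is the separator */
		if (p)
			printf("mtd-id: %.*s  parts: %s\n",
			       (int)(p - s), s, p + 1);

		if (semicol)
			*semicol = ';';		/* restore for the next pass */
		return 0;
	}
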
+diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
+index a3f32f939cc17..6736777a41567 100644
+--- a/drivers/mtd/nand/raw/omap_elm.c
++++ b/drivers/mtd/nand/raw/omap_elm.c
+@@ -421,6 +421,7 @@ static int elm_probe(struct platform_device *pdev)
+ 	pm_runtime_enable(&pdev->dev);
+ 	if (pm_runtime_get_sync(&pdev->dev) < 0) {
+ 		ret = -EINVAL;
++		pm_runtime_put_sync(&pdev->dev);
+ 		pm_runtime_disable(&pdev->dev);
+ 		dev_err(&pdev->dev, "can't enable clock\n");
+ 		return ret;
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index 98f7d6be8d1fc..e08f6b4637dda 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -48,6 +48,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+ 	return victim;
+ }
+ 
++static inline void return_unused_peb(struct ubi_device *ubi,
++				     struct ubi_wl_entry *e)
++{
++	wl_tree_add(e, &ubi->free);
++	ubi->free_count++;
++}
++
+ /**
+  * return_unused_pool_pebs - returns unused PEB to the free tree.
+  * @ubi: UBI device description object
+@@ -61,23 +68,10 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
+ 
+ 	for (i = pool->used; i < pool->size; i++) {
+ 		e = ubi->lookuptbl[pool->pebs[i]];
+-		wl_tree_add(e, &ubi->free);
+-		ubi->free_count++;
++		return_unused_peb(ubi, e);
+ 	}
+ }
+ 
+-static int anchor_pebs_available(struct rb_root *root)
+-{
+-	struct rb_node *p;
+-	struct ubi_wl_entry *e;
+-
+-	ubi_rb_for_each_entry(p, e, root, u.rb)
+-		if (e->pnum < UBI_FM_MAX_START)
+-			return 1;
+-
+-	return 0;
+-}
+-
+ /**
+  * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+  * @ubi: UBI device description object
+@@ -286,8 +280,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+ {
+ 	struct ubi_work *wrk;
++	struct ubi_wl_entry *anchor;
+ 
+ 	spin_lock(&ubi->wl_lock);
++
++	/* Do we already have an anchor? */
++	if (ubi->fm_anchor) {
++		spin_unlock(&ubi->wl_lock);
++		return 0;
++	}
++
++	/* See if we can find an anchor PEB on the list of free PEBs */
++	anchor = ubi_wl_get_fm_peb(ubi, 1);
++	if (anchor) {
++		ubi->fm_anchor = anchor;
++		spin_unlock(&ubi->wl_lock);
++		return 0;
++	}
++
++	/* No luck, trigger wear leveling to produce a new anchor PEB */
++	ubi->fm_do_produce_anchor = 1;
+ 	if (ubi->wl_scheduled) {
+ 		spin_unlock(&ubi->wl_lock);
+ 		return 0;
+@@ -303,7 +315,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+ 		return -ENOMEM;
+ 	}
+ 
+-	wrk->anchor = 1;
+ 	wrk->func = &wear_leveling_worker;
+ 	__schedule_ubi_work(ubi, wrk);
+ 	return 0;
+@@ -365,6 +376,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
+ 	return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ 	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+ 
++	if (ubi->fm_anchor) {
++		return_unused_peb(ubi, ubi->fm_anchor);
++		ubi->fm_anchor = NULL;
++	}
++
+ 	if (ubi->fm) {
+ 		for (i = 0; i < ubi->fm->used_blocks; i++)
+ 			kfree(ubi->fm->e[i]);
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 8e292992f84c7..b88ef875236cc 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -1552,14 +1552,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
+ 		return 0;
+ 	}
+ 
+-	ret = ubi_ensure_anchor_pebs(ubi);
+-	if (ret) {
+-		up_write(&ubi->fm_eba_sem);
+-		up_write(&ubi->work_sem);
+-		up_write(&ubi->fm_protect);
+-		return ret;
+-	}
+-
+ 	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ 	if (!new_fm) {
+ 		up_write(&ubi->fm_eba_sem);
+@@ -1630,7 +1622,8 @@ int ubi_update_fastmap(struct ubi_device *ubi)
+ 	}
+ 
+ 	spin_lock(&ubi->wl_lock);
+-	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
++	tmp_e = ubi->fm_anchor;
++	ubi->fm_anchor = NULL;
+ 	spin_unlock(&ubi->wl_lock);
+ 
+ 	if (old_fm) {
+@@ -1682,6 +1675,9 @@ out_unlock:
+ 	up_write(&ubi->work_sem);
+ 	up_write(&ubi->fm_protect);
+ 	kfree(old_fm);
++
++	ubi_ensure_anchor_pebs(ubi);
++
+ 	return ret;
+ 
+ err:
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index d47b9e436e673..d248ec371cc17 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -504,6 +504,8 @@ struct ubi_debug_info {
+  * @fm_work: fastmap work queue
+  * @fm_work_scheduled: non-zero if fastmap work was scheduled
+  * @fast_attach: non-zero if UBI was attached by fastmap
++ * @fm_anchor: The next anchor PEB to use for fastmap
++ * @fm_do_produce_anchor: If true produce an anchor PEB in wl
+  *
+  * @used: RB-tree of used physical eraseblocks
+  * @erroneous: RB-tree of erroneous used physical eraseblocks
+@@ -612,6 +614,8 @@ struct ubi_device {
+ 	struct work_struct fm_work;
+ 	int fm_work_scheduled;
+ 	int fast_attach;
++	struct ubi_wl_entry *fm_anchor;
++	int fm_do_produce_anchor;
+ 
+ 	/* Wear-leveling sub-system's stuff */
+ 	struct rb_root used;
+@@ -802,7 +806,6 @@ struct ubi_attach_info {
+  * @vol_id: the volume ID on which this erasure is being performed
+  * @lnum: the logical eraseblock number
+  * @torture: if the physical eraseblock has to be tortured
+- * @anchor: produce a anchor PEB to by used by fastmap
+  *
+  * The @func pointer points to the worker function. If the @shutdown argument is
+  * not zero, the worker has to free the resources and exit immediately as the
+@@ -818,7 +821,6 @@ struct ubi_work {
+ 	int vol_id;
+ 	int lnum;
+ 	int torture;
+-	int anchor;
+ };
+ 
+ #include "debug.h"
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 6f2ac865ff05e..80d64d7e7a8be 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -331,13 +331,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ 		}
+ 	}
+ 
+-	/* If no fastmap has been written and this WL entry can be used
+-	 * as anchor PEB, hold it back and return the second best WL entry
+-	 * such that fastmap can use the anchor PEB later. */
+-	if (prev_e && !ubi->fm_disabled &&
+-	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
+-		return prev_e;
+-
+ 	return e;
+ }
+ 
+@@ -648,9 +641,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ {
+ 	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+ 	int erase = 0, keep = 0, vol_id = -1, lnum = -1;
+-#ifdef CONFIG_MTD_UBI_FASTMAP
+-	int anchor = wrk->anchor;
+-#endif
+ 	struct ubi_wl_entry *e1, *e2;
+ 	struct ubi_vid_io_buf *vidb;
+ 	struct ubi_vid_hdr *vid_hdr;
+@@ -690,11 +680,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 	}
+ 
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+-	/* Check whether we need to produce an anchor PEB */
+-	if (!anchor)
+-		anchor = !anchor_pebs_available(&ubi->free);
+-
+-	if (anchor) {
++	if (ubi->fm_do_produce_anchor) {
+ 		e1 = find_anchor_wl_entry(&ubi->used);
+ 		if (!e1)
+ 			goto out_cancel;
+@@ -705,6 +691,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 		self_check_in_wl_tree(ubi, e1, &ubi->used);
+ 		rb_erase(&e1->u.rb, &ubi->used);
+ 		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
++		ubi->fm_do_produce_anchor = 0;
+ 	} else if (!ubi->scrub.rb_node) {
+ #else
+ 	if (!ubi->scrub.rb_node) {
+@@ -1037,7 +1024,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
+ 		goto out_cancel;
+ 	}
+ 
+-	wrk->anchor = 0;
+ 	wrk->func = &wear_leveling_worker;
+ 	if (nested)
+ 		__schedule_ubi_work(ubi, wrk);
+@@ -1079,8 +1065,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
+ 	err = sync_erase(ubi, e, wl_wrk->torture);
+ 	if (!err) {
+ 		spin_lock(&ubi->wl_lock);
+-		wl_tree_add(e, &ubi->free);
+-		ubi->free_count++;
++
++		if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
++			ubi->fm_anchor = e;
++			ubi->fm_do_produce_anchor = 0;
++		} else {
++			wl_tree_add(e, &ubi->free);
++			ubi->free_count++;
++		}
++
+ 		spin_unlock(&ubi->wl_lock);
+ 
+ 		/*
+@@ -1724,6 +1717,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ 	if (err)
+ 		goto out_free;
+ 
++#ifdef CONFIG_MTD_UBI_FASTMAP
++	ubi_ensure_anchor_pebs(ubi);
++#endif
+ 	return 0;
+ 
+ out_free:
+diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
+index a9e2d669acd81..c93a532937863 100644
+--- a/drivers/mtd/ubi/wl.h
++++ b/drivers/mtd/ubi/wl.h
+@@ -2,7 +2,6 @@
+ #ifndef UBI_WL_H
+ #define UBI_WL_H
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+-static int anchor_pebs_available(struct rb_root *root);
+ static void update_fastmap_work_fn(struct work_struct *wrk);
+ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
+ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 47b867c64b147..195108858f38f 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter)
+ 	WARN_ON(in_interrupt());
+ 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ 		msleep(1);
+-	e1000_down(adapter);
+-	e1000_up(adapter);
++
++	/* only run the task if not already down */
++	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
++		e1000_down(adapter);
++		e1000_up(adapter);
++	}
++
+ 	clear_bit(__E1000_RESETTING, &adapter->flags);
+ }
+ 
+@@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev)
+ 	struct e1000_hw *hw = &adapter->hw;
+ 	int count = E1000_CHECK_RESET_COUNT;
+ 
+-	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
++	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ 		usleep_range(10000, 20000);
+ 
+-	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
++	WARN_ON(count < 0);
++
++	/* signal that we're down so that the reset task will no longer run */
++	set_bit(__E1000_DOWN, &adapter->flags);
++	clear_bit(__E1000_RESETTING, &adapter->flags);
++
+ 	e1000_down(adapter);
+ 	e1000_power_down_phy(adapter);
+ 	e1000_free_irq(adapter);
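
The e1000 hunks close a race between close and the reset task:
e1000_close() now claims the __E1000_RESETTING bit itself with
test_and_set_bit() instead of merely polling it, and marks the
adapter __E1000_DOWN before releasing the bit, so
e1000_reinit_locked() can skip the down/up cycle on a dead interface.
The shape of that claim-and-flag protocol, sketched with C11 atomics
(the driver sleeps where this demo spins):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_flag resetting = ATOMIC_FLAG_INIT;
	static atomic_bool down;

	static void close_path(void)
	{
		/* claim the bit, do not just wait for it to clear */
		while (atomic_flag_test_and_set(&resetting))
			;			/* usleep_range() in the driver */
		atomic_store(&down, true);	/* reset task sees this... */
		atomic_flag_clear(&resetting);	/* ...before the bit is free */
		puts("device down");
	}

	static void reset_task(void)
	{
		while (atomic_flag_test_and_set(&resetting))
			;			/* msleep(1) in the driver */
		if (!atomic_load(&down))	/* only reset a live device */
			puts("down/up cycle");
		atomic_flag_clear(&resetting);
	}

	int main(void)
	{
		reset_task();	/* runs */
		close_path();
		reset_task();	/* now a no-op */
		return 0;
	}
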
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+index 71a7af134dd8e..886c7aae662fa 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+@@ -96,6 +96,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
+ 		p_ramrod->personality = PERSONALITY_ETH;
+ 		break;
+ 	case QED_PCI_ETH_ROCE:
++	case QED_PCI_ETH_IWARP:
+ 		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ 		break;
+ 	default:
+diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
+index 71be8524cca87..a686926bba71e 100644
+--- a/drivers/net/ieee802154/adf7242.c
++++ b/drivers/net/ieee802154/adf7242.c
+@@ -883,7 +883,9 @@ static int adf7242_rx(struct adf7242_local *lp)
+ 	int ret;
+ 	u8 lqi, len_u8, *data;
+ 
+-	adf7242_read_reg(lp, 0, &len_u8);
++	ret = adf7242_read_reg(lp, 0, &len_u8);
++	if (ret)
++		return ret;
+ 
+ 	len = len_u8;
+ 
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index 38a41651e451c..deace0aadad24 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -2923,6 +2923,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv)
+ 	);
+ 	if (!priv->irq_workqueue) {
+ 		dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
++		destroy_workqueue(priv->mlme_workqueue);
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
+index da2d179430ca5..4c57e79e5779a 100644
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
+@@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = {
+ 	AR5523_DEVICE_UX(0x0846, 0x4300),	/* Netgear / WG111U */
+ 	AR5523_DEVICE_UG(0x0846, 0x4250),	/* Netgear / WG111T */
+ 	AR5523_DEVICE_UG(0x0846, 0x5f00),	/* Netgear / WPN111 */
++	AR5523_DEVICE_UG(0x083a, 0x4506),	/* SMC / EZ Connect
++						   SMCWUSBT-G2 */
+ 	AR5523_DEVICE_UG(0x157e, 0x3006),	/* Umedia / AR5523_1 */
+ 	AR5523_DEVICE_UX(0x157e, 0x3205),	/* Umedia / AR5523_2 */
+ 	AR5523_DEVICE_UG(0x157e, 0x3006),	/* Umedia / TEW444UBEU */
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index 0baaad90b8d18..4e980e78ba95c 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -1521,7 +1521,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
+ 	*len += scnprintf(buf + *len, buf_len - *len,
+ 			  "No.  Preamble Rate_code ");
+ 
+-	for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++)
++	for (i = 0; i < tpc_stats->num_tx_chain; i++)
+ 		*len += scnprintf(buf + *len, buf_len - *len,
+ 				  "tpc_value%d ", i);
+ 
+@@ -2365,6 +2365,7 @@ void ath10k_debug_destroy(struct ath10k *ar)
+ 	ath10k_debug_fw_stats_reset(ar);
+ 
+ 	kfree(ar->debug.tpc_stats);
++	kfree(ar->debug.tpc_stats_final);
+ }
+ 
+ int ath10k_debug_register(struct ath10k *ar)
+diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
+index 0ecaba824fb28..0cdaecb0e28a9 100644
+--- a/drivers/net/wireless/ath/ath10k/sdio.c
++++ b/drivers/net/wireless/ath/ath10k/sdio.c
+@@ -1567,23 +1567,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ 				     size_t buf_len)
+ {
+ 	int ret;
++	void *mem;
++
++	mem = kzalloc(buf_len, GFP_KERNEL);
++	if (!mem)
++		return -ENOMEM;
+ 
+ 	/* set window register to start read cycle */
+ 	ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
+ 	if (ret) {
+ 		ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
+-		return ret;
++		goto out;
+ 	}
+ 
+ 	/* read the data */
+-	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
++	ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
+ 	if (ret) {
+ 		ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
+ 			    ret);
+-		return ret;
++		goto out;
+ 	}
+ 
+-	return 0;
++	memcpy(buf, mem, buf_len);
++
++out:
++	kfree(mem);
++
++	return ret;
+ }
+ 
+ static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
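
ath10k_sdio_hif_diag_read() used to hand the caller's buffer, which
may live on the stack, straight to the SDIO layer for DMA; the fix
bounces the transfer through a kzalloc'd buffer and copies the result
out on success. A userspace sketch of the bounce pattern, with
sdio_read() standing in for the real bus accessor:

	#include <stdlib.h>
	#include <string.h>

	static int sdio_read(void *dst, size_t len)
	{
		memset(dst, 0xab, len);	/* pretend the device filled it */
		return 0;
	}

	static int diag_read(void *buf, size_t buf_len)
	{
		void *mem = calloc(1, buf_len);	/* DMA-safe bounce buffer */
		int ret;

		if (!mem)
			return -1;

		ret = sdio_read(mem, buf_len);
		if (ret == 0)
			memcpy(buf, mem, buf_len);

		free(mem);
		return ret;
	}

	int main(void)
	{
		char stackbuf[16];	/* the kind of buffer DMA must avoid */

		return diag_read(stackbuf, sizeof(stackbuf));
	}
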
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 3372dfa0deccf..3f3fbee631c34 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -4550,16 +4550,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
+ 	}
+ 
+ 	pream_idx = 0;
+-	for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
++	for (i = 0; i < tpc_stats->rate_max; i++) {
+ 		memset(tpc_value, 0, sizeof(tpc_value));
+ 		memset(buff, 0, sizeof(buff));
+ 		if (i == pream_table[pream_idx])
+ 			pream_idx++;
+ 
+-		for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
+-			if (j >= __le32_to_cpu(ev->num_tx_chain))
+-				break;
+-
++		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ 			tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
+ 							    rate_code[i],
+ 							    type);
+@@ -4672,7 +4669,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
+ 
+ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+ {
+-	u32 num_tx_chain;
++	u32 num_tx_chain, rate_max;
+ 	u8 rate_code[WMI_TPC_RATE_MAX];
+ 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ 	struct wmi_pdev_tpc_config_event *ev;
+@@ -4688,6 +4685,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+ 		return;
+ 	}
+ 
++	rate_max = __le32_to_cpu(ev->rate_max);
++	if (rate_max > WMI_TPC_RATE_MAX) {
++		ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
++			    rate_max, WMI_TPC_RATE_MAX);
++		rate_max = WMI_TPC_RATE_MAX;
++	}
++
+ 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ 	if (!tpc_stats)
+ 		return;
+@@ -4704,8 +4708,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+ 		__le32_to_cpu(ev->twice_antenna_reduction);
+ 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+-	tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+-	tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
++	tpc_stats->num_tx_chain = num_tx_chain;
++	tpc_stats->rate_max = rate_max;
+ 
+ 	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ 				      rate_code, pream_table,
+@@ -4900,16 +4904,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
+ 	}
+ 
+ 	pream_idx = 0;
+-	for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
++	for (i = 0; i < tpc_stats->rate_max; i++) {
+ 		memset(tpc_value, 0, sizeof(tpc_value));
+ 		memset(buff, 0, sizeof(buff));
+ 		if (i == pream_table[pream_idx])
+ 			pream_idx++;
+ 
+-		for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
+-			if (j >= __le32_to_cpu(ev->num_tx_chain))
+-				break;
+-
++		for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ 			tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
+ 							       rate_code[i],
+ 							       type, pream_idx);
+@@ -4925,7 +4926,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
+ 
+ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+ {
+-	u32 num_tx_chain;
++	u32 num_tx_chain, rate_max;
+ 	u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
+ 	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ 	struct wmi_pdev_tpc_final_table_event *ev;
+@@ -4933,12 +4934,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+ 
+ 	ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
+ 
++	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
++	if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
++		ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
++			    num_tx_chain, WMI_TPC_TX_N_CHAIN);
++		return;
++	}
++
++	rate_max = __le32_to_cpu(ev->rate_max);
++	if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
++		ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
++			    rate_max, WMI_TPC_FINAL_RATE_MAX);
++		rate_max = WMI_TPC_FINAL_RATE_MAX;
++	}
++
+ 	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ 	if (!tpc_stats)
+ 		return;
+ 
+-	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+-
+ 	ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
+ 					    num_tx_chain);
+ 
+@@ -4951,8 +4964,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+ 		__le32_to_cpu(ev->twice_antenna_reduction);
+ 	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ 	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+-	tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+-	tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
++	tpc_stats->num_tx_chain = num_tx_chain;
++	tpc_stats->rate_max = rate_max;
+ 
+ 	ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
+ 					       rate_code, pream_table,
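
Both TPC event handlers now treat num_tx_chain and rate_max as
untrusted firmware input: the values are checked against
WMI_TPC_TX_N_CHAIN and WMI_TPC_RATE_MAX / WMI_TPC_FINAL_RATE_MAX
before being stored, and the display loops iterate over the sanitized
copies rather than re-reading the event. The clamp-before-use pattern
in isolation (limits are illustrative):

	#include <stdio.h>

	#define TPC_RATE_MAX 160	/* capacity of the fixed array */

	static unsigned int clamp_report(unsigned int val, unsigned int max,
					 const char *what)
	{
		if (val > max) {
			fprintf(stderr, "%s %u exceeds limit %u, clamping\n",
				what, val, max);
			return max;
		}
		return val;
	}

	int main(void)
	{
		unsigned int fw_rate_max = 500;	/* buggy firmware value */
		unsigned int i, rate_max;

		rate_max = clamp_report(fw_rate_max, TPC_RATE_MAX, "rate_max");
		for (i = 0; i < rate_max; i++)
			;	/* safe: never walks past the array */
		printf("iterated %u rates\n", rate_max);
		return 0;
	}
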
+diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
+index 1fb76d2f5d3fd..8b9d0809daf62 100644
+--- a/drivers/net/wireless/marvell/mwifiex/fw.h
++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
+@@ -953,7 +953,7 @@ struct mwifiex_tkip_param {
+ struct mwifiex_aes_param {
+ 	u8 pn[WPA_PN_SIZE];
+ 	__le16 key_len;
+-	u8 key[WLAN_KEY_LEN_CCMP];
++	u8 key[WLAN_KEY_LEN_CCMP_256];
+ } __packed;
+ 
+ struct mwifiex_wapi_param {
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+index 797c2e9783943..7003767eef423 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+@@ -620,7 +620,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+ 	key_v2 = &resp->params.key_material_v2;
+ 
+ 	len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len);
+-	if (len > WLAN_KEY_LEN_CCMP)
++	if (len > sizeof(key_v2->key_param_set.key_params.aes.key))
+ 		return -EINVAL;
+ 
+ 	if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
+@@ -636,7 +636,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+ 		return 0;
+ 
+ 	memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
+-	       WLAN_KEY_LEN_CCMP);
++	       sizeof(key_v2->key_param_set.key_params.aes.key));
+ 	priv->aes_key_v2.key_param_set.key_params.aes.key_len =
+ 				cpu_to_le16(len);
+ 	memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
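
With the AES key buffer widened from WLAN_KEY_LEN_CCMP to
WLAN_KEY_LEN_CCMP_256, the length check and the memset switch to
sizeof() on the destination field itself, so the bounds keep tracking
the buffer if it is ever resized again. A compact illustration
(struct layout and constant are simplified):

	#include <stdio.h>
	#include <string.h>

	#define KEY_LEN_CCMP_256 32

	struct aes_param {
		unsigned short key_len;
		unsigned char key[KEY_LEN_CCMP_256];
	};

	static int set_key(struct aes_param *p, const void *key, size_t len)
	{
		if (len > sizeof(p->key))	/* tracks the field itself */
			return -1;
		memset(p->key, 0, sizeof(p->key));
		memcpy(p->key, key, len);
		p->key_len = (unsigned short)len;
		return 0;
	}

	int main(void)
	{
		struct aes_param p;
		unsigned char key[32] = { 0 };

		printf("%d\n", set_key(&p, key, sizeof(key)));	/* 0: fits */
		return 0;
	}
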
+diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
+index d44d57e6eb27a..97df6b3a472b1 100644
+--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
++++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
+@@ -278,6 +278,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
+ 		if (!skb)
+ 			continue;
+ 
++		tid->reorder_buf[i] = NULL;
+ 		tid->nframes--;
+ 		dev_kfree_skb(skb);
+ 	}
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 2ca5658bbc2ab..43c7b37dec0c9 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -3671,8 +3671,10 @@ void wlcore_regdomain_config(struct wl1271 *wl)
+ 		goto out;
+ 
+ 	ret = pm_runtime_get_sync(wl->dev);
+-	if (ret < 0)
++	if (ret < 0) {
++		pm_runtime_put_autosuspend(wl->dev);
+ 		goto out;
++	}
+ 
+ 	ret = wlcore_cmd_regdomain_config_locked(wl);
+ 	if (ret < 0) {
+diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
+index b6e19c2d66b0a..250bcbf4ea2f2 100644
+--- a/drivers/net/wireless/ti/wlcore/tx.c
++++ b/drivers/net/wireless/ti/wlcore/tx.c
+@@ -877,6 +877,7 @@ void wl1271_tx_work(struct work_struct *work)
+ 
+ 	ret = wlcore_tx_work_locked(wl);
+ 	if (ret < 0) {
++		pm_runtime_put_noidle(wl->dev);
+ 		wl12xx_queue_recovery_work(wl);
+ 		goto out;
+ 	}
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 0d60f2f8f3eec..33dad9774da01 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -255,11 +255,8 @@ void nvme_complete_rq(struct request *req)
+ 	trace_nvme_complete_rq(req);
+ 
+ 	if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
+-		if ((req->cmd_flags & REQ_NVME_MPATH) &&
+-		    blk_path_error(status)) {
+-			nvme_failover_req(req);
++		if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
+ 			return;
+-		}
+ 
+ 		if (!blk_queue_dying(req->q)) {
+ 			nvme_req(req)->retries++;
+@@ -1602,7 +1599,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ 	if (ns->head->disk) {
+ 		nvme_update_disk_info(ns->head->disk, ns, id);
+ 		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+-		revalidate_disk(ns->head->disk);
++		nvme_mpath_update_disk_size(ns->head->disk);
+ 	}
+ #endif
+ }
+@@ -2859,6 +2856,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
+ {
+ 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+ 
++	/* Can't delete non-created controllers */
++	if (!ctrl->created)
++		return -EBUSY;
++
+ 	if (device_remove_file_self(dev, attr))
+ 		nvme_delete_ctrl_sync(ctrl);
+ 	return count;
+@@ -3579,6 +3580,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
+ 		queue_work(nvme_wq, &ctrl->async_event_work);
+ 		nvme_start_queues(ctrl);
+ 	}
++	ctrl->created = true;
+ }
+ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
+ 
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 2e63c1106030b..e71075338ff5c 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -73,17 +73,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ 	}
+ }
+ 
+-void nvme_failover_req(struct request *req)
++bool nvme_failover_req(struct request *req)
+ {
+ 	struct nvme_ns *ns = req->q->queuedata;
+ 	u16 status = nvme_req(req)->status;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&ns->head->requeue_lock, flags);
+-	blk_steal_bios(&ns->head->requeue_list, req);
+-	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+-	blk_mq_end_request(req, 0);
+-
+ 	switch (status & 0x7ff) {
+ 	case NVME_SC_ANA_TRANSITION:
+ 	case NVME_SC_ANA_INACCESSIBLE:
+@@ -111,15 +106,17 @@ void nvme_failover_req(struct request *req)
+ 		nvme_mpath_clear_current_path(ns);
+ 		break;
+ 	default:
+-		/*
+-		 * Reset the controller for any non-ANA error as we don't know
+-		 * what caused the error.
+-		 */
+-		nvme_reset_ctrl(ns->ctrl);
+-		break;
++		/* This was a non-ANA error so follow the normal error path. */
++		return false;
+ 	}
+ 
++	spin_lock_irqsave(&ns->head->requeue_lock, flags);
++	blk_steal_bios(&ns->head->requeue_list, req);
++	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
++	blk_mq_end_request(req, 0);
++
+ 	kblockd_schedule_work(&ns->head->requeue_work);
++	return true;
+ }
+ 
+ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
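
The nvme change inverts the failover flow: nvme_failover_req() now
classifies the status first, and only for ANA/path errors does it
steal the bios, end the request and kick the requeue work, returning
true; anything else returns false and takes the regular
retry/completion path instead of forcing a controller reset. A sketch
of that decide-then-commit split (the status codes are illustrative,
not the full NVMe set):

	#include <stdbool.h>
	#include <stdio.h>

	enum { SC_ANA_TRANSITION = 0x303, SC_MEDIA_ERROR = 0x281 };

	static bool failover_req(int status)
	{
		switch (status & 0x7ff) {
		case SC_ANA_TRANSITION:
			break;		/* path error: take ownership */
		default:
			return false;	/* not ours: normal error path */
		}
		puts("requeued on another path");
		return true;
	}

	static void complete_rq(int status)
	{
		if (status && failover_req(status))
			return;
		puts(status ? "retry/fail on this path" : "ok");
	}

	int main(void)
	{
		complete_rq(0);
		complete_rq(SC_MEDIA_ERROR);
		complete_rq(SC_ANA_TRANSITION);
		return 0;
	}
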
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index cc4273f119894..9c2e7a151e400 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -206,6 +206,7 @@ struct nvme_ctrl {
+ 	struct nvme_command ka_cmd;
+ 	struct work_struct fw_act_work;
+ 	unsigned long events;
++	bool created;
+ 
+ #ifdef CONFIG_NVME_MULTIPATH
+ 	/* asymmetric namespace access: */
+@@ -477,7 +478,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
+ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
+ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ 			struct nvme_ctrl *ctrl, int *flags);
+-void nvme_failover_req(struct request *req);
++bool nvme_failover_req(struct request *req);
+ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
+ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
+ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
+@@ -503,6 +504,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+ 		kblockd_schedule_work(&head->requeue_work);
+ }
+ 
++static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
++{
++	struct block_device *bdev = bdget_disk(disk, 0);
++
++	if (bdev) {
++		bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
++		bdput(bdev);
++	}
++}
++
+ extern struct device_attribute dev_attr_ana_grpid;
+ extern struct device_attribute dev_attr_ana_state;
+ 
+@@ -521,8 +532,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ 	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+ }
+ 
+-static inline void nvme_failover_req(struct request *req)
++static inline bool nvme_failover_req(struct request *req)
+ {
++	return false;
+ }
+ static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+ {
+@@ -568,6 +580,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+ static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+ {
+ }
++static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
++{
++}
+ #endif /* CONFIG_NVME_MULTIPATH */
+ 
+ #ifdef CONFIG_NVM
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 08f997a390d5d..cfd26437aeaea 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -83,6 +83,7 @@ enum nvmet_rdma_queue_state {
+ 
+ struct nvmet_rdma_queue {
+ 	struct rdma_cm_id	*cm_id;
++	struct ib_qp		*qp;
+ 	struct nvmet_port	*port;
+ 	struct ib_cq		*cq;
+ 	atomic_t		sq_wr_avail;
+@@ -471,7 +472,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
+ 	if (ndev->srq)
+ 		ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
+ 	else
+-		ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
++		ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
+ 
+ 	if (unlikely(ret))
+ 		pr_err("post_recv cmd failed\n");
+@@ -510,7 +511,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
+ 	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
+ 
+ 	if (rsp->n_rdma) {
+-		rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
++		rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
+ 				queue->cm_id->port_num, rsp->req.sg,
+ 				rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
+ 	}
+@@ -594,7 +595,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
+ 
+ 	WARN_ON(rsp->n_rdma <= 0);
+ 	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
+-	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
++	rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
+ 			queue->cm_id->port_num, rsp->req.sg,
+ 			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
+ 	rsp->n_rdma = 0;
+@@ -737,7 +738,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
+ 	}
+ 
+ 	if (nvmet_rdma_need_data_in(rsp)) {
+-		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
++		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
+ 				queue->cm_id->port_num, &rsp->read_cqe, NULL))
+ 			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
+ 	} else {
+@@ -1020,6 +1021,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
+ 		pr_err("failed to create_qp ret= %d\n", ret);
+ 		goto err_destroy_cq;
+ 	}
++	queue->qp = queue->cm_id->qp;
+ 
+ 	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
+ 
+@@ -1048,11 +1050,10 @@ err_destroy_cq:
+ 
+ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
+ {
+-	struct ib_qp *qp = queue->cm_id->qp;
+-
+-	ib_drain_qp(qp);
+-	rdma_destroy_id(queue->cm_id);
+-	ib_destroy_qp(qp);
++	ib_drain_qp(queue->qp);
++	if (queue->cm_id)
++		rdma_destroy_id(queue->cm_id);
++	ib_destroy_qp(queue->qp);
+ 	ib_free_cq(queue->cq);
+ }
+ 
+@@ -1286,9 +1287,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
+ 
+ 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
+ 	if (ret) {
+-		schedule_work(&queue->release_work);
+-		/* Destroying rdma_cm id is not needed here */
+-		return 0;
++		/*
++		 * Don't destroy the cm_id in free path, as we implicitly
++		 * destroy the cm_id here with non-zero ret code.
++		 */
++		queue->cm_id = NULL;
++		goto free_queue;
+ 	}
+ 
+ 	mutex_lock(&nvmet_rdma_queue_mutex);
+@@ -1297,6 +1301,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
+ 
+ 	return 0;
+ 
++free_queue:
++	nvmet_rdma_free_queue(queue);
+ put_device:
+ 	kref_put(&ndev->ref, nvmet_rdma_free_dev);
+ 
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index 6f86583605a46..097c02197ec8f 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -2400,7 +2400,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
+ 	err = pm_runtime_get_sync(pcie->dev);
+ 	if (err < 0) {
+ 		dev_err(dev, "fail to enable pcie controller: %d\n", err);
+-		goto teardown_msi;
++		goto pm_runtime_put;
+ 	}
+ 
+ 	err = tegra_pcie_request_resources(pcie);
+@@ -2440,7 +2440,6 @@ free_resources:
+ pm_runtime_put:
+ 	pm_runtime_put_sync(pcie->dev);
+ 	pm_runtime_disable(pcie->dev);
+-teardown_msi:
+ 	tegra_pcie_msi_teardown(pcie);
+ put_resources:
+ 	tegra_pcie_put_resources(pcie);
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 07940d1d83b70..005817e40ad39 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -530,7 +530,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ 	struct controller *ctrl = (struct controller *)dev_id;
+ 	struct pci_dev *pdev = ctrl_dev(ctrl);
+ 	struct device *parent = pdev->dev.parent;
+-	u16 status, events;
++	u16 status, events = 0;
+ 
+ 	/*
+ 	 * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
+@@ -553,6 +553,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ 		}
+ 	}
+ 
++read_status:
+ 	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
+ 	if (status == (u16) ~0) {
+ 		ctrl_info(ctrl, "%s: no response from device\n", __func__);
+@@ -565,24 +566,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ 	 * Slot Status contains plain status bits as well as event
+ 	 * notification bits; right now we only want the event bits.
+ 	 */
+-	events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+-			   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+-			   PCI_EXP_SLTSTA_DLLSC);
++	status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
++		  PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
++		  PCI_EXP_SLTSTA_DLLSC;
+ 
+ 	/*
+ 	 * If we've already reported a power fault, don't report it again
+ 	 * until we've done something to handle it.
+ 	 */
+ 	if (ctrl->power_fault_detected)
+-		events &= ~PCI_EXP_SLTSTA_PFD;
++		status &= ~PCI_EXP_SLTSTA_PFD;
+ 
++	events |= status;
+ 	if (!events) {
+ 		if (parent)
+ 			pm_runtime_put(parent);
+ 		return IRQ_NONE;
+ 	}
+ 
+-	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
++	if (status) {
++		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
++
++		/*
++		 * In MSI mode, all event bits must be zero before the port
++		 * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
++		 * So re-read the Slot Status register in case a bit was set
++		 * between read and write.
++		 */
++		if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
++			goto read_status;
++	}
++
+ 	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
+ 	if (parent)
+ 		pm_runtime_put(parent);
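
With MSI the port only raises a new interrupt once every Slot Status
event bit is clear, so an event that lands between the ISR's read and
its write-one-to-clear would be lost for good; the reworked
pciehp_isr() therefore accumulates events and jumps back to
read_status until the register comes back clean. A self-contained
model of that read/accumulate/ack loop (the fake register below
injects one racing event):

	#include <stdio.h>

	static unsigned short slot_status = 0x0008;	/* pending events */
	static int injected;

	static unsigned short read_status(void)
	{
		return slot_status;
	}

	static void write_to_clear(unsigned short bits)
	{
		slot_status &= ~bits;
		if (!injected++)	/* one event races the ack */
			slot_status |= 0x0100;
	}

	int main(void)
	{
		unsigned short events = 0, status;

		do {
			status = read_status();
			events |= status;
			if (status)
				write_to_clear(status);
		} while (status);	/* re-read until the ack sticks */

		printf("handled events %#06x\n", (unsigned int)events);
		return 0;
	}
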
+diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
+index 137bf0cee897c..8fc9a4e911e3a 100644
+--- a/drivers/pci/rom.c
++++ b/drivers/pci/rom.c
+@@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
+ 		pci_disable_rom(pdev);
+ }
+ EXPORT_SYMBOL(pci_unmap_rom);
+-
+-/**
+- * pci_platform_rom - provides a pointer to any ROM image provided by the
+- * platform
+- * @pdev: pointer to pci device struct
+- * @size: pointer to receive size of pci window over ROM
+- */
+-void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
+-{
+-	if (pdev->rom && pdev->romlen) {
+-		*size = pdev->romlen;
+-		return phys_to_virt((phys_addr_t)pdev->rom);
+-	}
+-
+-	return NULL;
+-}
+-EXPORT_SYMBOL(pci_platform_rom);
+diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c
+index f6f72339bbc32..bb7fdf491c1c2 100644
+--- a/drivers/phy/samsung/phy-s5pv210-usb2.c
++++ b/drivers/phy/samsung/phy-s5pv210-usb2.c
+@@ -142,6 +142,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
+ 		udelay(10);
+ 		rst &= ~rstbits;
+ 		writel(rst, drv->reg_phy + S5PV210_UPHYRST);
++		/* The following delay is necessary for the reset sequence to be
++		 * completed
++		 */
++		udelay(80);
+ 	} else {
+ 		pwr = readl(drv->reg_phy + S5PV210_UPHYPWR);
+ 		pwr |= phypwr;
+diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
+index 33c40f79d23d5..2c35c13ad546f 100644
+--- a/drivers/power/supply/max17040_battery.c
++++ b/drivers/power/supply/max17040_battery.c
+@@ -109,7 +109,7 @@ static void max17040_get_vcell(struct i2c_client *client)
+ 
+ 	vcell = max17040_read_reg(client, MAX17040_VCELL);
+ 
+-	chip->vcell = vcell;
++	chip->vcell = (vcell >> 4) * 1250;
+ }
+ 
+ static void max17040_get_soc(struct i2c_client *client)
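
The MAX17040 VCELL register carries a 12-bit ADC code in its upper
bits with a 1.25 mV LSB, so the raw register value is not a voltage;
the fix shifts out the 4 pad bits and scales to the microvolts the
power-supply core expects. The arithmetic on an example reading:

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg = 0xC9C0;	/* example raw register */
		unsigned int uv = (reg >> 4) * 1250;

		/* 0xC9C0 >> 4 = 3228; 3228 * 1.25 mV = 4.035 V */
		printf("vcell = %u uV\n", uv);
		return 0;
	}
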
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
+index 5940780648e0f..f36a8a5261a13 100644
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -2385,13 +2385,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
+ 	cdev_init(&md->cdev, &mport_fops);
+ 	md->cdev.owner = THIS_MODULE;
+ 
+-	ret = cdev_device_add(&md->cdev, &md->dev);
+-	if (ret) {
+-		rmcd_error("Failed to register mport %d (err=%d)",
+-		       mport->id, ret);
+-		goto err_cdev;
+-	}
+-
+ 	INIT_LIST_HEAD(&md->doorbells);
+ 	spin_lock_init(&md->db_lock);
+ 	INIT_LIST_HEAD(&md->portwrites);
+@@ -2411,6 +2404,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
+ #else
+ 	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
+ #endif
++
++	ret = cdev_device_add(&md->cdev, &md->dev);
++	if (ret) {
++		rmcd_error("Failed to register mport %d (err=%d)",
++		       mport->id, ret);
++		goto err_cdev;
++	}
+ 	ret = rio_query_mport(mport, &attr);
+ 	if (!ret) {
+ 		md->properties.flags = attr.flags;
+diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
+index 38a2e9e684df4..77a106e90124b 100644
+--- a/drivers/rtc/rtc-ds1374.c
++++ b/drivers/rtc/rtc-ds1374.c
+@@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client,
+ 	if (!ds1374)
+ 		return -ENOMEM;
+ 
++	ds1374->rtc = devm_rtc_allocate_device(&client->dev);
++	if (IS_ERR(ds1374->rtc))
++		return PTR_ERR(ds1374->rtc);
++
+ 	ds1374->client = client;
+ 	i2c_set_clientdata(client, ds1374);
+ 
+@@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client,
+ 		device_set_wakeup_capable(&client->dev, 1);
+ 	}
+ 
+-	ds1374->rtc = devm_rtc_device_register(&client->dev, client->name,
+-						&ds1374_rtc_ops, THIS_MODULE);
+-	if (IS_ERR(ds1374->rtc)) {
+-		dev_err(&client->dev, "unable to register the class device\n");
+-		return PTR_ERR(ds1374->rtc);
+-	}
++	ds1374->rtc->ops = &ds1374_rtc_ops;
++
++	ret = rtc_register_device(ds1374->rtc);
++	if (ret)
++		return ret;
+ 
+ #ifdef CONFIG_RTC_DRV_DS1374_WDT
+ 	save_client = client;
+diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
+index 304d905cb23fd..56f625371735f 100644
+--- a/drivers/rtc/rtc-sa1100.c
++++ b/drivers/rtc/rtc-sa1100.c
+@@ -186,7 +186,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
+ 
+ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
+ {
+-	struct rtc_device *rtc;
+ 	int ret;
+ 
+ 	spin_lock_init(&info->lock);
+@@ -215,15 +214,14 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
+ 		writel_relaxed(0, info->rcnr);
+ 	}
+ 
+-	rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops,
+-					THIS_MODULE);
+-	if (IS_ERR(rtc)) {
++	info->rtc->ops = &sa1100_rtc_ops;
++	info->rtc->max_user_freq = RTC_FREQ;
++
++	ret = rtc_register_device(info->rtc);
++	if (ret) {
+ 		clk_disable_unprepare(info->clk);
+-		return PTR_ERR(rtc);
++		return ret;
+ 	}
+-	info->rtc = rtc;
+-
+-	rtc->max_user_freq = RTC_FREQ;
+ 
+ 	/* Fix for a nasty initialization problem the in SA11xx RTSR register.
+ 	 * See also the comments in sa1100_rtc_interrupt().
+@@ -272,6 +270,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
+ 	info->irq_1hz = irq_1hz;
+ 	info->irq_alarm = irq_alarm;
+ 
++	info->rtc = devm_rtc_allocate_device(&pdev->dev);
++	if (IS_ERR(info->rtc))
++		return PTR_ERR(info->rtc);
++
+ 	ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0,
+ 			       "rtc 1Hz", &pdev->dev);
+ 	if (ret) {
+diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
+index 56007a3e7f110..fab09455ba944 100644
+--- a/drivers/s390/block/dasd_fba.c
++++ b/drivers/s390/block/dasd_fba.c
+@@ -40,6 +40,7 @@
+ MODULE_LICENSE("GPL");
+ 
+ static struct dasd_discipline dasd_fba_discipline;
++static void *dasd_fba_zero_page;
+ 
+ struct dasd_fba_private {
+ 	struct dasd_fba_characteristics rdc_data;
+@@ -270,7 +271,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count)
+ 	ccw->cmd_code = DASD_FBA_CCW_WRITE;
+ 	ccw->flags |= CCW_FLAG_SLI;
+ 	ccw->count = count;
+-	ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0));
++	ccw->cda = (__u32) (addr_t) dasd_fba_zero_page;
+ }
+ 
+ /*
+@@ -811,6 +812,11 @@ dasd_fba_init(void)
+ 	int ret;
+ 
+ 	ASCEBC(dasd_fba_discipline.ebcname, 4);
++
++	dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
++	if (!dasd_fba_zero_page)
++		return -ENOMEM;
++
+ 	ret = ccw_driver_register(&dasd_fba_driver);
+ 	if (!ret)
+ 		wait_for_device_probe();
+@@ -822,6 +828,7 @@ static void __exit
+ dasd_fba_cleanup(void)
+ {
+ 	ccw_driver_unregister(&dasd_fba_driver);
++	free_page((unsigned long)dasd_fba_zero_page);
+ }
+ 
+ module_init(dasd_fba_init);
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index 23c24a699cefe..b7cb897cd83e0 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -915,7 +915,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		if (!reqcnt)
+ 			return -ENOMEM;
+ 		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
+-		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
++		if (copy_to_user((int __user *) arg, reqcnt,
++				 sizeof(u32) * AP_DEVICES))
+ 			rc = -EFAULT;
+ 		kfree(reqcnt);
+ 		return rc;
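
In zcrypt_unlocked_ioctl() the reqcnt buffer is dynamically
allocated, so sizeof(reqcnt) is the size of a pointer (8 bytes on
s390x) and only the first two of the AP_DEVICES counters ever reached
userspace; the fix spells the length out as sizeof(u32) * AP_DEVICES.
The classic pitfall, reproduced in userspace:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define AP_DEVICES 256

	int main(void)
	{
		unsigned int *reqcnt = calloc(AP_DEVICES, sizeof(*reqcnt));
		unsigned int out[AP_DEVICES] = { 0 };

		if (!reqcnt)
			return 1;

		memcpy(out, reqcnt, sizeof(reqcnt));	/* 8 bytes only! */
		printf("broken copy: %zu bytes\n", sizeof(reqcnt));

		memcpy(out, reqcnt, sizeof(*reqcnt) * AP_DEVICES);
		printf("fixed copy:  %zu bytes\n",
		       sizeof(*reqcnt) * AP_DEVICES);

		free(reqcnt);
		return 0;
	}
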
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
+index 6e356325d8d98..54717fb84a54c 100644
+--- a/drivers/scsi/aacraid/aachba.c
++++ b/drivers/scsi/aacraid/aachba.c
+@@ -2481,13 +2481,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
+ 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ 			SAM_STAT_CHECK_CONDITION;
+ 		set_sense(&dev->fsa_dev[cid].sense_data,
+-			  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
++			  ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
+ 			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ 		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ 		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ 			     SCSI_SENSE_BUFFERSIZE));
+ 		scsicmd->scsi_done(scsicmd);
+-		return 1;
++		return 0;
+ 	}
+ 
+ 	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
+@@ -2573,13 +2573,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
+ 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ 			SAM_STAT_CHECK_CONDITION;
+ 		set_sense(&dev->fsa_dev[cid].sense_data,
+-			  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
++			  ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
+ 			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ 		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ 		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ 			     SCSI_SENSE_BUFFERSIZE));
+ 		scsicmd->scsi_done(scsicmd);
+-		return 1;
++		return 0;
+ 	}
+ 
+ 	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
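The return-value change in aac_read()/aac_write() matters because these results propagate out of the queuecommand path: the SCSI mid-layer treats a non-zero return as "command not accepted, retry later", so completing the command via scsi_done() and then returning 1 risks the same command being finished twice. The rule, in a sketch with illustrative helper names:

    static int example_queuecommand(struct Scsi_Host *shost,
                                    struct scsi_cmnd *cmd)
    {
        if (adapter_busy(shost))
            /* Not accepted; the mid-layer will retry it later. */
            return SCSI_MLQUEUE_HOST_BUSY;

        if (lba_out_of_range(cmd)) {
            cmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
            cmd->scsi_done(cmd);
            /* Completed: ownership went back to the mid-layer, so
             * 0 is the only correct return value here. */
            return 0;
        }

        return issue_to_hardware(cmd);
    }
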
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index b7588de4484e5..4cb6ee6e1212e 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -743,7 +743,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
+ 		hbacmd->request_id =
+ 			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
+ 		fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
+-	} else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
++	} else
+ 		return -EINVAL;
+ 
+ 
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 1046947064a0b..eecffc03084c0 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -736,7 +736,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
+ 		status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
+ 				  (fib_callback) aac_hba_callback,
+ 				  (void *) cmd);
+-
++		if (status != -EINPROGRESS) {
++			aac_fib_complete(fib);
++			aac_fib_free(fib);
++			return ret;
++		}
+ 		/* Wait up to 15 secs for completion */
+ 		for (count = 0; count < 15; ++count) {
+ 			if (cmd->SCp.sent_command) {
+@@ -915,11 +919,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
+ 
+ 	info = &aac->hba_map[bus][cid];
+ 
+-	if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
+-	    info->reset_state > 0)
++	if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
++	 !(info->reset_state > 0)))
+ 		return FAILED;
+ 
+-	pr_err("%s: Host adapter reset request. SCSI hang ?\n",
++	pr_err("%s: Host device reset request. SCSI hang ?\n",
+ 	       AAC_DRIVERNAME);
+ 
+ 	fib = aac_fib_alloc(aac);
+@@ -934,7 +938,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
+ 	status = aac_hba_send(command, fib,
+ 			      (fib_callback) aac_tmf_callback,
+ 			      (void *) info);
+-
++	if (status != -EINPROGRESS) {
++		info->reset_state = 0;
++		aac_fib_complete(fib);
++		aac_fib_free(fib);
++		return ret;
++	}
+ 	/* Wait up to 15 seconds for completion */
+ 	for (count = 0; count < 15; ++count) {
+ 		if (info->reset_state == 0) {
+@@ -973,11 +982,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
+ 
+ 	info = &aac->hba_map[bus][cid];
+ 
+-	if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
+-	    info->reset_state > 0)
++	if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
++	 !(info->reset_state > 0)))
+ 		return FAILED;
+ 
+-	pr_err("%s: Host adapter reset request. SCSI hang ?\n",
++	pr_err("%s: Host target reset request. SCSI hang ?\n",
+ 	       AAC_DRIVERNAME);
+ 
+ 	fib = aac_fib_alloc(aac);
+@@ -994,6 +1003,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
+ 			      (fib_callback) aac_tmf_callback,
+ 			      (void *) info);
+ 
++	if (status != -EINPROGRESS) {
++		info->reset_state = 0;
++		aac_fib_complete(fib);
++		aac_fib_free(fib);
++		return ret;
++	}
++
+ 	/* Wait up to 15 seconds for completion */
+ 	for (count = 0; count < 15; ++count) {
+ 		if (info->reset_state <= 0) {
+@@ -1046,7 +1062,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
+ 		}
+ 	}
+ 
+-	pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
++	pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
+ 
+ 	/*
+ 	 * Check the health of the controller
+@@ -1604,7 +1620,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	struct Scsi_Host *shost;
+ 	struct aac_dev *aac;
+ 	struct list_head *insert = &aac_devices;
+-	int error = -ENODEV;
++	int error;
+ 	int unique_id = 0;
+ 	u64 dmamask;
+ 	int mask_bits = 0;
+@@ -1629,7 +1645,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	error = pci_enable_device(pdev);
+ 	if (error)
+ 		goto out;
+-	error = -ENODEV;
+ 
+ 	if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
+ 		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+@@ -1661,8 +1676,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	pci_set_master(pdev);
+ 
+ 	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
+-	if (!shost)
++	if (!shost) {
++		error = -ENOMEM;
+ 		goto out_disable_pdev;
++	}
+ 
+ 	shost->irq = pdev->irq;
+ 	shost->unique_id = unique_id;
+@@ -1687,8 +1704,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
+ 			    sizeof(struct fib),
+ 			    GFP_KERNEL);
+-	if (!aac->fibs)
++	if (!aac->fibs) {
++		error = -ENOMEM;
+ 		goto out_free_host;
++	}
++
+ 	spin_lock_init(&aac->fib_lock);
+ 
+ 	mutex_init(&aac->ioctl_mutex);
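Several of the linit.c hunks share one shape: a fib is allocated, submitted with aac_hba_send(), and the caller then polls for the callback to signal completion. If the send fails outright (any status other than -EINPROGRESS) that callback will never run, so the caller must complete and free the fib itself or it leaks. Distilled to a skeleton:

    static int example_send_tmf(struct aac_dev *aac, u8 command,
                                fib_callback callback, void *context)
    {
        struct fib *fib = aac_fib_alloc(aac);
        int status;

        if (!fib)
            return FAILED;

        status = aac_hba_send(command, fib, callback, context);
        if (status != -EINPROGRESS) {
            /* Nothing else will ever release this fib. */
            aac_fib_complete(fib);
            aac_fib_free(fib);
            return FAILED;
        }

        /* ... wait up to 15s for the callback, as in the hunks above ... */
        return SUCCESS;
    }
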
+diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
+index f987c40c47a13..443813feaef47 100644
+--- a/drivers/scsi/cxlflash/main.c
++++ b/drivers/scsi/cxlflash/main.c
+@@ -3749,6 +3749,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
+ 	cfg->afu_cookie = cfg->ops->create_afu(pdev);
+ 	if (unlikely(!cfg->afu_cookie)) {
+ 		dev_err(dev, "%s: create_afu failed\n", __func__);
++		rc = -ENOMEM;
+ 		goto out_remove;
+ 	}
+ 
+diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
+index 73ffc16ec0225..b521fc7650cb9 100644
+--- a/drivers/scsi/fnic/fnic_scsi.c
++++ b/drivers/scsi/fnic/fnic_scsi.c
+@@ -1034,7 +1034,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
+ 		atomic64_inc(&fnic_stats->io_stats.io_completions);
+ 
+ 
+-	io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
++	io_duration_time = jiffies_to_msecs(jiffies) -
++						jiffies_to_msecs(start_time);
+ 
+ 	if(io_duration_time <= 10)
+ 		atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
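The fnic change swaps io_req->start_time for the start_time value cached earlier in the handler; the switch suggests that by this point the io_req can already have been freed back to its pool, making the old read a use-after-free. The defensive idiom, reduced to three steps:

    unsigned long start_time = io_req->start_time;   /* cache while live */

    mempool_free(io_req, fnic->io_req_pool);         /* io_req now dead  */

    io_duration_time = jiffies_to_msecs(jiffies) -
                       jiffies_to_msecs(start_time); /* cached copy only */
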
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index f570b8c5d857c..11de2198bb87d 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -507,6 +507,12 @@ static ssize_t host_store_rescan(struct device *dev,
+ 	return count;
+ }
+ 
++static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
++{
++	device->offload_enabled = 0;
++	device->offload_to_be_enabled = 0;
++}
++
+ static ssize_t host_show_firmware_revision(struct device *dev,
+ 	     struct device_attribute *attr, char *buf)
+ {
+@@ -1743,8 +1749,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
+ 				__func__,
+ 				h->scsi_host->host_no, logical_drive->bus,
+ 				logical_drive->target, logical_drive->lun);
+-			logical_drive->offload_enabled = 0;
+-			logical_drive->offload_to_be_enabled = 0;
++			hpsa_turn_off_ioaccel_for_device(logical_drive);
+ 			logical_drive->queue_depth = 8;
+ 		}
+ 	}
+@@ -2496,8 +2501,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
+ 			IOACCEL2_SERV_RESPONSE_FAILURE) {
+ 		if (c2->error_data.status ==
+ 			IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
+-			dev->offload_enabled = 0;
+-			dev->offload_to_be_enabled = 0;
++			hpsa_turn_off_ioaccel_for_device(dev);
+ 		}
+ 
+ 		return hpsa_retry_cmd(h, c);
+@@ -3676,10 +3680,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
+ 	this_device->offload_config =
+ 		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+ 	if (this_device->offload_config) {
+-		this_device->offload_to_be_enabled =
++		bool offload_enabled =
+ 			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+-		if (hpsa_get_raid_map(h, scsi3addr, this_device))
+-			this_device->offload_to_be_enabled = 0;
++		/*
++		 * Check to see if offload can be enabled.
++		 */
++		if (offload_enabled) {
++			rc = hpsa_get_raid_map(h, scsi3addr, this_device);
++			if (rc) /* could not load raid_map */
++				goto out;
++			this_device->offload_to_be_enabled = 1;
++		}
+ 	}
+ 
+ out:
+@@ -3998,8 +4009,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
+ 	} else {
+ 		this_device->raid_level = RAID_UNKNOWN;
+ 		this_device->offload_config = 0;
+-		this_device->offload_enabled = 0;
+-		this_device->offload_to_be_enabled = 0;
++		hpsa_turn_off_ioaccel_for_device(this_device);
+ 		this_device->hba_ioaccel_enabled = 0;
+ 		this_device->volume_offline = 0;
+ 		this_device->queue_depth = h->nr_cmds;
+@@ -5213,8 +5223,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+ 		/* Handles load balance across RAID 1 members.
+ 		 * (2-drive R1 and R10 with even # of drives.)
+ 		 * Appropriate for SSDs, not optimal for HDDs
++		 * Ensure we have the correct raid_map.
+ 		 */
+-		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
++		if (le16_to_cpu(map->layout_map_count) != 2) {
++			hpsa_turn_off_ioaccel_for_device(dev);
++			return IO_ACCEL_INELIGIBLE;
++		}
+ 		if (dev->offload_to_mirror)
+ 			map_index += le16_to_cpu(map->data_disks_per_row);
+ 		dev->offload_to_mirror = !dev->offload_to_mirror;
+@@ -5222,8 +5236,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+ 	case HPSA_RAID_ADM:
+ 		/* Handles N-way mirrors  (R1-ADM)
+ 		 * and R10 with # of drives divisible by 3.)
++		 * Ensure we have the correct raid_map.
+ 		 */
+-		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
++		if (le16_to_cpu(map->layout_map_count) != 3) {
++			hpsa_turn_off_ioaccel_for_device(dev);
++			return IO_ACCEL_INELIGIBLE;
++		}
+ 
+ 		offload_to_mirror = dev->offload_to_mirror;
+ 		raid_map_helper(map, offload_to_mirror,
+@@ -5248,7 +5266,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+ 		r5or6_blocks_per_row =
+ 			le16_to_cpu(map->strip_size) *
+ 			le16_to_cpu(map->data_disks_per_row);
+-		BUG_ON(r5or6_blocks_per_row == 0);
++		if (r5or6_blocks_per_row == 0) {
++			hpsa_turn_off_ioaccel_for_device(dev);
++			return IO_ACCEL_INELIGIBLE;
++		}
+ 		stripesize = r5or6_blocks_per_row *
+ 			le16_to_cpu(map->layout_map_count);
+ #if BITS_PER_LONG == 32
+@@ -8218,7 +8239,7 @@ static int detect_controller_lockup(struct ctlr_info *h)
+  *
+  * Called from monitor controller worker (hpsa_event_monitor_worker)
+  *
+- * A Volume (or Volumes that comprise an Array set may be undergoing a
++ * A Volume (or Volumes that comprise an Array set) may be undergoing a
+  * transformation, so we will be turning off ioaccel for all volumes that
+  * make up the Array.
+  */
+@@ -8241,6 +8262,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
+ 	 * Run through current device list used during I/O requests.
+ 	 */
+ 	for (i = 0; i < h->ndevices; i++) {
++		int offload_to_be_enabled = 0;
++		int offload_config = 0;
++
+ 		device = h->dev[i];
+ 
+ 		if (!device)
+@@ -8258,25 +8282,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
+ 			continue;
+ 
+ 		ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+-		device->offload_config =
++
++		/*
++		 * Check if offload is still configured on
++		 */
++		offload_config =
+ 				!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+-		if (device->offload_config)
+-			device->offload_to_be_enabled =
++		/*
++		 * If offload is configured on, check to see if ioaccel
++		 * needs to be enabled.
++		 */
++		if (offload_config)
++			offload_to_be_enabled =
+ 				!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+ 
++		/*
++		 * If ioaccel is to be re-enabled, re-enable later during the
++		 * scan operation so the driver can get a fresh raidmap
++		 * before turning ioaccel back on.
++		 */
++		if (offload_to_be_enabled)
++			continue;
++
+ 		/*
+ 		 * Immediately turn off ioaccel for any volume the
+ 		 * controller tells us to. Some of the reasons could be:
+ 		 *    transformation - change to the LVs of an Array.
+ 		 *    degraded volume - component failure
+-		 *
+-		 * If ioaccel is to be re-enabled, re-enable later during the
+-		 * scan operation so the driver can get a fresh raidmap
+-		 * before turning ioaccel back on.
+-		 *
+ 		 */
+-		if (!device->offload_to_be_enabled)
+-			device->offload_enabled = 0;
++		hpsa_turn_off_ioaccel_for_device(device);
+ 	}
+ 
+ 	kfree(buf);
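A theme of the hpsa hunks: BUG_ON() checks on raid-map fields that arrive from controller firmware are demoted to a graceful fallback, since bad device data should disable the ioaccel fast path for that volume rather than crash the whole kernel. The resulting shape:

    /* map contents come from the controller - validate, never trust */
    if (le16_to_cpu(map->layout_map_count) != expected_count) {
        hpsa_turn_off_ioaccel_for_device(dev);
        return IO_ACCEL_INELIGIBLE;  /* I/O falls back to the RAID path */
    }

with expected_count standing in for the per-RAID-level constant (2 for the mirrored case, 3 for R1-ADM).
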
+diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
+index 90a748551ede5..2b3239765c249 100644
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -145,8 +145,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
+ 	lockdep_assert_held(&lport->disc.disc_mutex);
+ 
+ 	rdata = fc_rport_lookup(lport, port_id);
+-	if (rdata)
++	if (rdata) {
++		kref_put(&rdata->kref, fc_rport_destroy);
+ 		return rdata;
++	}
+ 
+ 	if (lport->rport_priv_size > 0)
+ 		rport_priv_size = lport->rport_priv_size;
+@@ -493,10 +495,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
+ 
+ 	fc_rport_state_enter(rdata, RPORT_ST_DELETE);
+ 
+-	kref_get(&rdata->kref);
+-	if (rdata->event == RPORT_EV_NONE &&
+-	    !queue_work(rport_event_queue, &rdata->event_work))
+-		kref_put(&rdata->kref, fc_rport_destroy);
++	if (rdata->event == RPORT_EV_NONE) {
++		kref_get(&rdata->kref);
++		if (!queue_work(rport_event_queue, &rdata->event_work))
++			kref_put(&rdata->kref, fc_rport_destroy);
++	}
+ 
+ 	rdata->event = event;
+ }
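The fc_rport_create() change balances a leaked reference: fc_rport_lookup() hands its result back with an extra kref taken for the caller, while create() - judging by this fix - promises a pointer whose lifetime is covered by disc_mutex and the list's own reference, not by a caller-held kref. The kref bookkeeping this relies on:

    kref_init(&rdata->kref);                   /* list's reference, at creation */
    ...
    kref_get(&rdata->kref);                    /* lookup: caller's reference    */
    ...
    kref_put(&rdata->kref, fc_rport_destroy);  /* every get needs a put         */

Returning a looked-up object from a path whose contract takes no reference therefore requires the immediate kref_put() seen above.
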
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index fe084d47ed9e5..3447d19d4147a 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -332,7 +332,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ 	if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
+ 		goto buffer_done;
+ 
+-	rcu_read_lock();
+ 	scnprintf(tmp, sizeof(tmp),
+ 		  "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+ 		  phba->brd_no,
+@@ -341,7 +340,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ 		  phba->sli4_hba.scsi_xri_max,
+ 		  lpfc_sli4_get_els_iocb_cnt(phba));
+ 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+-		goto rcu_unlock_buf_done;
++		goto buffer_done;
+ 
+ 	/* Port state is only one of two values for now. */
+ 	if (localport->port_id)
+@@ -357,7 +356,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ 		  wwn_to_u64(vport->fc_nodename.u.wwn),
+ 		  localport->port_id, statep);
+ 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+-		goto rcu_unlock_buf_done;
++		goto buffer_done;
++
++	spin_lock_irq(shost->host_lock);
+ 
+ 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ 		nrport = NULL;
+@@ -384,39 +385,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ 
+ 		/* Tab in to show lport ownership. */
+ 		if (strlcat(buf, "NVME RPORT       ", PAGE_SIZE) >= PAGE_SIZE)
+-			goto rcu_unlock_buf_done;
++			goto unlock_buf_done;
+ 		if (phba->brd_no >= 10) {
+ 			if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
+-				goto rcu_unlock_buf_done;
++				goto unlock_buf_done;
+ 		}
+ 
+ 		scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
+ 			  nrport->port_name);
+ 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+-			goto rcu_unlock_buf_done;
++			goto unlock_buf_done;
+ 
+ 		scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
+ 			  nrport->node_name);
+ 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+-			goto rcu_unlock_buf_done;
++			goto unlock_buf_done;
+ 
+ 		scnprintf(tmp, sizeof(tmp), "DID x%06x ",
+ 			  nrport->port_id);
+ 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+-			goto rcu_unlock_buf_done;
++			goto unlock_buf_done;
+ 
+ 		/* An NVME rport can have multiple roles. */
+ 		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
+ 			if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
+-				goto rcu_unlock_buf_done;
++				goto unlock_buf_done;
+ 		}
+ 		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
+ 			if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
+-				goto rcu_unlock_buf_done;
++				goto unlock_buf_done;
+ 		}
+ 		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
+ 			if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
+-				goto rcu_unlock_buf_done;
++				goto unlock_buf_done;
+ 		}
+ 		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+ 					  FC_PORT_ROLE_NVME_TARGET |
+@@ -424,14 +425,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ 			scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
+ 				  nrport->port_role);
+ 			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+-				goto rcu_unlock_buf_done;
++				goto unlock_buf_done;
+ 		}
+ 
+ 		scnprintf(tmp, sizeof(tmp), "%s\n", statep);
+ 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+-			goto rcu_unlock_buf_done;
++			goto unlock_buf_done;
+ 	}
+-	rcu_read_unlock();
++	spin_unlock_irq(shost->host_lock);
+ 
+ 	if (!lport)
+ 		goto buffer_done;
+@@ -491,11 +492,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ 		  atomic_read(&lport->cmpl_fcp_err));
+ 	strlcat(buf, tmp, PAGE_SIZE);
+ 
+-	/* RCU is already unlocked. */
++	/* host_lock is already unlocked. */
+ 	goto buffer_done;
+ 
+- rcu_unlock_buf_done:
+-	rcu_read_unlock();
++ unlock_buf_done:
++	spin_unlock_irq(shost->host_lock);
+ 
+  buffer_done:
+ 	len = strnlen(buf, PAGE_SIZE);
+diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
+index 384f5cd7c3c81..99b4ff78f9dce 100644
+--- a/drivers/scsi/lpfc/lpfc_ct.c
++++ b/drivers/scsi/lpfc/lpfc_ct.c
+@@ -1737,8 +1737,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, sizeof(struct lpfc_name));
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
+ 	       sizeof(struct lpfc_name));
+@@ -1754,8 +1754,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	/* This string MUST be consistent with other FC platforms
+ 	 * supported by Broadcom.
+@@ -1779,8 +1779,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	strncpy(ae->un.AttrString, phba->SerialNumber,
+ 		sizeof(ae->un.AttrString));
+@@ -1801,8 +1801,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	strncpy(ae->un.AttrString, phba->ModelName,
+ 		sizeof(ae->un.AttrString));
+@@ -1822,8 +1822,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	strncpy(ae->un.AttrString, phba->ModelDesc,
+ 		sizeof(ae->un.AttrString));
+@@ -1845,8 +1845,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t i, j, incr, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	/* Convert JEDEC ID to ascii for hardware version */
+ 	incr = vp->rev.biuRev;
+@@ -1875,8 +1875,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	strncpy(ae->un.AttrString, lpfc_release_version,
+ 		sizeof(ae->un.AttrString));
+@@ -1897,8 +1897,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	if (phba->sli_rev == LPFC_SLI_REV4)
+ 		lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+@@ -1922,8 +1922,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+ 	len = strnlen(ae->un.AttrString,
+@@ -1942,8 +1942,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
+ 		 init_utsname()->sysname,
+@@ -1965,7 +1965,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 
+ 	ae->un.AttrInt =  cpu_to_be32(LPFC_MAX_CT_SIZE);
+ 	size = FOURBYTES + sizeof(uint32_t);
+@@ -1981,8 +1981,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	len = lpfc_vport_symbolic_node_name(vport,
+ 				ae->un.AttrString, 256);
+@@ -2000,7 +2000,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 
+ 	/* Nothing is defined for this currently */
+ 	ae->un.AttrInt =  cpu_to_be32(0);
+@@ -2017,7 +2017,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 
+ 	/* Each driver instance corresponds to a single port */
+ 	ae->un.AttrInt =  cpu_to_be32(1);
+@@ -2034,8 +2034,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, sizeof(struct lpfc_name));
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
+ 	       sizeof(struct lpfc_name));
+@@ -2053,8 +2053,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+ 	len = strnlen(ae->un.AttrString,
+@@ -2073,7 +2073,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 
+ 	/* Driver doesn't have access to this information */
+ 	ae->un.AttrInt =  cpu_to_be32(0);
+@@ -2090,8 +2090,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	strncpy(ae->un.AttrString, "EMULEX",
+ 		sizeof(ae->un.AttrString));
+@@ -2112,8 +2112,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 32);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+ 	ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+@@ -2134,7 +2134,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 
+ 	ae->un.AttrInt = 0;
+ 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+@@ -2186,7 +2186,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 
+ 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ 		switch (phba->fc_linkspeed) {
+@@ -2253,7 +2253,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 
+ 	hsp = (struct serv_parm *)&vport->fc_sparam;
+ 	ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
+@@ -2273,8 +2273,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
+ 		 "/sys/class/scsi_host/host%d", shost->host_no);
+@@ -2294,8 +2294,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+ 		 init_utsname()->nodename);
+@@ -2315,8 +2315,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0,  sizeof(struct lpfc_name));
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
+ 	       sizeof(struct lpfc_name));
+@@ -2333,8 +2333,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0,  sizeof(struct lpfc_name));
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
+ 	       sizeof(struct lpfc_name));
+@@ -2351,8 +2351,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
+ 	len += (len & 3) ? (4 - (len & 3)) : 4;
+@@ -2370,7 +2370,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
+ 		ae->un.AttrInt =  cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
+ 	else
+@@ -2388,7 +2388,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 	ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
+ 	size = FOURBYTES + sizeof(uint32_t);
+ 	ad->AttrLen = cpu_to_be16(size);
+@@ -2403,8 +2403,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0,  sizeof(struct lpfc_name));
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
+ 	       sizeof(struct lpfc_name));
+@@ -2421,8 +2421,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 32);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+ 	ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+@@ -2442,7 +2442,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 	/* Link Up - operational */
+ 	ae->un.AttrInt =  cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
+ 	size = FOURBYTES + sizeof(uint32_t);
+@@ -2458,7 +2458,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 	vport->fdmi_num_disc = lpfc_find_map_node(vport);
+ 	ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
+ 	size = FOURBYTES + sizeof(uint32_t);
+@@ -2474,7 +2474,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 	ae->un.AttrInt =  cpu_to_be32(vport->fc_myDID);
+ 	size = FOURBYTES + sizeof(uint32_t);
+ 	ad->AttrLen = cpu_to_be16(size);
+@@ -2489,8 +2489,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	strncpy(ae->un.AttrString, "Smart SAN Initiator",
+ 		sizeof(ae->un.AttrString));
+@@ -2510,8 +2510,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
+ 	       sizeof(struct lpfc_name));
+@@ -2531,8 +2531,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
+ 		sizeof(ae->un.AttrString));
+@@ -2553,8 +2553,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t len, size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+-	memset(ae, 0, 256);
++	ae = &ad->AttrValue;
++	memset(ae, 0, sizeof(*ae));
+ 
+ 	strncpy(ae->un.AttrString, phba->ModelName,
+ 		sizeof(ae->un.AttrString));
+@@ -2573,7 +2573,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 
+ 	/* SRIOV (type 3) is not supported */
+ 	if (vport->vpi)
+@@ -2593,7 +2593,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 	ae->un.AttrInt =  cpu_to_be32(0);
+ 	size = FOURBYTES + sizeof(uint32_t);
+ 	ad->AttrLen = cpu_to_be16(size);
+@@ -2608,7 +2608,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
+ 	struct lpfc_fdmi_attr_entry *ae;
+ 	uint32_t size;
+ 
+-	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++	ae = &ad->AttrValue;
+ 	ae->un.AttrInt =  cpu_to_be32(1);
+ 	size = FOURBYTES + sizeof(uint32_t);
+ 	ad->AttrLen = cpu_to_be16(size);
+@@ -2756,7 +2756,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			/* Registered Port List */
+ 			/* One entry (port) per adapter */
+ 			rh->rpl.EntryCnt = cpu_to_be32(1);
+-			memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName,
++			memcpy(&rh->rpl.pe.PortName,
++			       &phba->pport->fc_sparam.portName,
+ 			       sizeof(struct lpfc_name));
+ 
+ 			/* point to the HBA attribute block */
+diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
+index 009aa0eee0408..48d4d576d588e 100644
+--- a/drivers/scsi/lpfc/lpfc_hw.h
++++ b/drivers/scsi/lpfc/lpfc_hw.h
+@@ -1333,25 +1333,8 @@ struct fc_rdp_res_frame {
+ /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
+ #define  SLI_CT_FDMI_Subtypes     0x10	/* Management Service Subtype */
+ 
+-/*
+- * Registered Port List Format
+- */
+-struct lpfc_fdmi_reg_port_list {
+-	uint32_t EntryCnt;
+-	uint32_t pe;		/* Variable-length array */
+-};
+-
+-
+ /* Definitions for HBA / Port attribute entries */
+ 
+-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
+-	/* Structure is in Big Endian format */
+-	uint32_t AttrType:16;
+-	uint32_t AttrLen:16;
+-	uint32_t AttrValue;  /* Marks start of Value (ATTRIBUTE_ENTRY) */
+-};
+-
+-
+ /* Attribute Entry */
+ struct lpfc_fdmi_attr_entry {
+ 	union {
+@@ -1362,7 +1345,13 @@ struct lpfc_fdmi_attr_entry {
+ 	} un;
+ };
+ 
+-#define LPFC_FDMI_MAX_AE_SIZE	sizeof(struct lpfc_fdmi_attr_entry)
++struct lpfc_fdmi_attr_def { /* Defined in TLV format */
++	/* Structure is in Big Endian format */
++	uint32_t AttrType:16;
++	uint32_t AttrLen:16;
++	/* Marks start of Value (ATTRIBUTE_ENTRY) */
++	struct lpfc_fdmi_attr_entry AttrValue;
++} __packed;
+ 
+ /*
+  * HBA Attribute Block
+@@ -1386,13 +1375,20 @@ struct lpfc_fdmi_hba_ident {
+ 	struct lpfc_name PortName;
+ };
+ 
++/*
++ * Registered Port List Format
++ */
++struct lpfc_fdmi_reg_port_list {
++	uint32_t EntryCnt;
++	struct lpfc_fdmi_port_entry pe;
++} __packed;
++
+ /*
+  * Register HBA(RHBA)
+  */
+ struct lpfc_fdmi_reg_hba {
+ 	struct lpfc_fdmi_hba_ident hi;
+-	struct lpfc_fdmi_reg_port_list rpl;	/* variable-length array */
+-/* struct lpfc_fdmi_attr_block   ab; */
++	struct lpfc_fdmi_reg_port_list rpl;
+ };
+ 
+ /*
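This struct rework also explains the matching lpfc_ct.c changes earlier in the patch: with AttrValue declared as a real struct lpfc_fdmi_attr_entry member (rather than a uint32_t that every call site cast), the compiler carries the size, and the hand-counted clear lengths (256, 32, sizeof(struct lpfc_name)) collapse into one correct form:

    /* before: size knowledge duplicated, and wrong for some attributes */
    ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
    memset(ae, 0, 256);

    /* after: the declaration is the single source of truth */
    ae = &ad->AttrValue;
    memset(ae, 0, sizeof(*ae));

The added __packed keeps the TLV layout free of compiler padding, which matters for a structure documented as big-endian wire format.
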
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index a56a939792ac1..2ab351260e815 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -17413,6 +17413,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
+ 			list_add_tail(&iocbq->list, &first_iocbq->list);
+ 		}
+ 	}
++	/* Free the sequence's header buffer */
++	if (!first_iocbq)
++		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
++
+ 	return first_iocbq;
+ }
+ 
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index ba79b37d8cf7e..5becdde3ea324 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -1184,8 +1184,8 @@ int pm8001_abort_task(struct sas_task *task)
+ 	pm8001_ha = pm8001_find_ha_by_dev(dev);
+ 	device_id = pm8001_dev->device_id;
+ 	phy_id = pm8001_dev->attached_phy;
+-	rc = pm8001_find_tag(task, &tag);
+-	if (rc == 0) {
++	ret = pm8001_find_tag(task, &tag);
++	if (ret == 0) {
+ 		pm8001_printk("no tag for task:%p\n", task);
+ 		return TMF_RESP_FUNC_FAILED;
+ 	}
+@@ -1223,26 +1223,50 @@ int pm8001_abort_task(struct sas_task *task)
+ 
+ 			/* 2. Send Phy Control Hard Reset */
+ 			reinit_completion(&completion);
++			phy->port_reset_status = PORT_RESET_TMO;
+ 			phy->reset_success = false;
+ 			phy->enable_completion = &completion;
+ 			phy->reset_completion = &completion_reset;
+ 			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+ 				PHY_HARD_RESET);
+-			if (ret)
+-				goto out;
+-			PM8001_MSG_DBG(pm8001_ha,
+-				pm8001_printk("Waiting for local phy ctl\n"));
+-			wait_for_completion(&completion);
+-			if (!phy->reset_success)
++			if (ret) {
++				phy->enable_completion = NULL;
++				phy->reset_completion = NULL;
+ 				goto out;
++			}
+ 
+-			/* 3. Wait for Port Reset complete / Port reset TMO */
++			/* In the case of the reset timeout/fail we still
++			 * abort the command at the firmware. The assumption
++			 * here is that the drive is off doing something so
++			 * that it's not processing requests, and we want to
++			 * avoid getting a completion for this and either
++			 * leaking the task in libsas or losing the race and
++			 * getting a double free.
++			 */
+ 			PM8001_MSG_DBG(pm8001_ha,
++				pm8001_printk("Waiting for local phy ctl\n"));
++			ret = wait_for_completion_timeout(&completion,
++					PM8001_TASK_TIMEOUT * HZ);
++			if (!ret || !phy->reset_success) {
++				phy->enable_completion = NULL;
++				phy->reset_completion = NULL;
++			} else {
++				/* 3. Wait for Port Reset complete or
++				 * Port reset TMO
++				 */
++				PM8001_MSG_DBG(pm8001_ha,
+ 				pm8001_printk("Waiting for Port reset\n"));
+-			wait_for_completion(&completion_reset);
+-			if (phy->port_reset_status) {
+-				pm8001_dev_gone_notify(dev);
+-				goto out;
++				ret = wait_for_completion_timeout(
++					&completion_reset,
++					PM8001_TASK_TIMEOUT * HZ);
++				if (!ret)
++					phy->reset_completion = NULL;
++				WARN_ON(phy->port_reset_status ==
++						PORT_RESET_TMO);
++				if (phy->port_reset_status == PORT_RESET_TMO) {
++					pm8001_dev_gone_notify(dev);
++					goto out;
++				}
+ 			}
+ 
+ 			/*
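The pm8001 abort path stops waiting unboundedly on completions that the firmware may never deliver; wait_for_completion_timeout() turns a potential permanent hang into a bounded stall followed by cleanup. Its one subtlety is the return value, sketched here with an illustrative submit helper:

    reinit_completion(&completion);
    phy->reset_completion = &completion;
    if (submit_phy_hard_reset(phy))          /* illustrative helper */
        goto out;

    /* returns 0 on timeout, remaining jiffies otherwise */
    if (!wait_for_completion_timeout(&completion,
                                     PM8001_TASK_TIMEOUT * HZ)) {
        /* completer must no longer touch our stack completion */
        phy->reset_completion = NULL;
        /* recover - the abort is still sent to the firmware */
    }
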
+diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
+index 751941a3ed303..aa451c8b49e56 100644
+--- a/drivers/scsi/qedi/qedi_iscsi.c
++++ b/drivers/scsi/qedi/qedi_iscsi.c
+@@ -1065,6 +1065,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+ 		break;
+ 	}
+ 
++	if (!abrt_conn)
++		wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
++
+ 	qedi_ep->state = EP_STATE_DISCONN_START;
+ 	ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
+ 	if (ret) {
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index eb10a5cacd90c..b2cbdd01ab10b 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -353,27 +353,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
+ 	u8 opcode = 0;
+ 	u32 intr, doorbell;
+ 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
++	struct scsi_cmnd *cmd = lrbp->cmd;
+ 	int transfer_len = -1;
+ 
+ 	if (!trace_ufshcd_command_enabled()) {
+ 		/* trace UPIU W/O tracing command */
+-		if (lrbp->cmd)
++		if (cmd)
+ 			ufshcd_add_cmd_upiu_trace(hba, tag, str);
+ 		return;
+ 	}
+ 
+-	if (lrbp->cmd) { /* data phase exists */
++	if (cmd) { /* data phase exists */
+ 		/* trace UPIU also */
+ 		ufshcd_add_cmd_upiu_trace(hba, tag, str);
+-		opcode = (u8)(*lrbp->cmd->cmnd);
++		opcode = cmd->cmnd[0];
+ 		if ((opcode == READ_10) || (opcode == WRITE_10)) {
+ 			/*
+ 			 * Currently we only fully trace read(10) and write(10)
+ 			 * commands
+ 			 */
+-			if (lrbp->cmd->request && lrbp->cmd->request->bio)
+-				lba =
+-				  lrbp->cmd->request->bio->bi_iter.bi_sector;
++			if (cmd->request && cmd->request->bio)
++				lba = cmd->request->bio->bi_iter.bi_sector;
+ 			transfer_len = be32_to_cpu(
+ 				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ 		}
+@@ -1910,12 +1910,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+ {
+ 	hba->lrb[task_tag].issue_time_stamp = ktime_get();
+ 	hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
++	ufshcd_add_command_trace(hba, task_tag, "send");
+ 	ufshcd_clk_scaling_start_busy(hba);
+ 	__set_bit(task_tag, &hba->outstanding_reqs);
+ 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ 	/* Make sure that doorbell is committed immediately */
+ 	wmb();
+-	ufshcd_add_command_trace(hba, task_tag, "send");
+ }
+ 
+ /**
+diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
+index 256039ce561e6..81a3370551dbc 100644
+--- a/drivers/staging/media/imx/imx-media-capture.c
++++ b/drivers/staging/media/imx/imx-media-capture.c
+@@ -678,7 +678,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
+ 	/* setup default format */
+ 	fmt_src.pad = priv->src_sd_pad;
+ 	fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+-	v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
++	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
+ 	if (ret) {
+ 		v4l2_err(sd, "failed to get src_sd format\n");
+ 		goto unreg;
+diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
+index 17b4b9257b495..0ddf41b5a734a 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
++++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
+@@ -1535,21 +1535,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
+ 
+ 		/* Allocate new skb for releasing to upper layer */
+ 		sub_skb = dev_alloc_skb(nSubframe_Length + 12);
+-		if (sub_skb) {
+-			skb_reserve(sub_skb, 12);
+-			skb_put_data(sub_skb, pdata, nSubframe_Length);
+-		} else {
+-			sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
+-			if (sub_skb) {
+-				sub_skb->data = pdata;
+-				sub_skb->len = nSubframe_Length;
+-				skb_set_tail_pointer(sub_skb, nSubframe_Length);
+-			} else {
+-				DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes);
+-				break;
+-			}
++		if (!sub_skb) {
++			DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes);
++			break;
+ 		}
+ 
++		skb_reserve(sub_skb, 12);
++		skb_put_data(sub_skb, pdata, nSubframe_Length);
++
+ 		subframes[nr_subframes++] = sub_skb;
+ 
+ 		if (nr_subframes >= MAX_SUBFRAME_COUNT) {
+diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
+index 4dc30e7890f6c..140386d7c75a3 100644
+--- a/drivers/thermal/rcar_thermal.c
++++ b/drivers/thermal/rcar_thermal.c
+@@ -505,8 +505,10 @@ static int rcar_thermal_probe(struct platform_device *pdev)
+ 			res = platform_get_resource(pdev, IORESOURCE_MEM,
+ 						    mres++);
+ 			common->base = devm_ioremap_resource(dev, res);
+-			if (IS_ERR(common->base))
+-				return PTR_ERR(common->base);
++			if (IS_ERR(common->base)) {
++				ret = PTR_ERR(common->base);
++				goto error_unregister;
++			}
+ 
+ 			idle = 0; /* polling delay is not needed */
+ 		}
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index a019286f8bb65..cbd006fb7fbb9 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -781,7 +781,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
+ 	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ 
+ 	count = dma->rx_size - state.residue;
+-
++	if (count < dma->rx_size)
++		dmaengine_terminate_async(dma->rxchan);
++	if (!count)
++		goto unlock;
+ 	ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);
+ 
+ 	p->port.icount.rx += ret;
+@@ -843,7 +846,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
+ 	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
+ 
+ 	__dma_rx_do_complete(p);
+-	dmaengine_terminate_all(dma->rxchan);
+ }
+ 
+ static int omap_8250_rx_dma(struct uart_8250_port *p)
+@@ -1227,11 +1229,11 @@ static int omap8250_probe(struct platform_device *pdev)
+ 	spin_lock_init(&priv->rx_dma_lock);
+ 
+ 	device_init_wakeup(&pdev->dev, true);
++	pm_runtime_enable(&pdev->dev);
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
+ 
+ 	pm_runtime_irq_safe(&pdev->dev);
+-	pm_runtime_enable(&pdev->dev);
+ 
+ 	pm_runtime_get_sync(&pdev->dev);
+ 
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 09f0dc3b967b1..60ca19eca1f63 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1861,6 +1861,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ 	unsigned char status;
+ 	unsigned long flags;
+ 	struct uart_8250_port *up = up_to_u8250p(port);
++	bool skip_rx = false;
+ 
+ 	if (iir & UART_IIR_NO_INT)
+ 		return 0;
+@@ -1869,7 +1870,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ 
+ 	status = serial_port_in(port, UART_LSR);
+ 
+-	if (status & (UART_LSR_DR | UART_LSR_BI)) {
++	/*
++	 * If port is stopped and there are no error conditions in the
++	 * FIFO, then don't drain the FIFO, as this may lead to TTY buffer
++	 * overflow. Not servicing, RX FIFO would trigger auto HW flow
++	 * overflow. Not servicing the RX FIFO would trigger auto HW flow
++	 * halting RX. This only works when auto HW flow control is
++	 * available.
++	 */
++	if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) &&
++	    (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) &&
++	    !(port->read_status_mask & UART_LSR_DR))
++		skip_rx = true;
++
++	if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
+ 		if (!up->dma || handle_rx_dma(up, iir))
+ 			status = serial8250_rx_chars(up, status);
+ 	}
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index fcb89bf2524d1..1528a7ba2bf4d 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1187,14 +1187,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
+ 	struct s3c24xx_uart_info *info = ourport->info;
+ 	struct clk *clk;
+ 	unsigned long rate;
+-	unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
++	unsigned int cnt, baud, quot, best_quot = 0;
+ 	char clkname[MAX_CLK_NAME_LENGTH];
+ 	int calc_deviation, deviation = (1 << 30) - 1;
+ 
+-	clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
+-			ourport->info->def_clk_sel;
+ 	for (cnt = 0; cnt < info->num_clks; cnt++) {
+-		if (!(clk_sel & (1 << cnt)))
++		/* Keep selected clock if provided */
++		if (ourport->cfg->clk_sel &&
++			!(ourport->cfg->clk_sel & (1 << cnt)))
+ 			continue;
+ 
+ 		sprintf(clkname, "clk_uart_baud%d", cnt);
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index 31950a38f0fb7..23f9b0cdff086 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -1236,6 +1236,7 @@ static int cdns_uart_console_setup(struct console *co, char *options)
+ 	int bits = 8;
+ 	int parity = 'n';
+ 	int flow = 'n';
++	unsigned long time_out;
+ 
+ 	if (!port->membase) {
+ 		pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n",
+@@ -1246,6 +1247,13 @@ static int cdns_uart_console_setup(struct console *co, char *options)
+ 	if (options)
+ 		uart_parse_options(options, &baud, &parity, &bits, &flow);
+ 
++	/* Wait for tx_empty before setting up the console */
++	time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
++
++	while (time_before(jiffies, time_out) &&
++	       cdns_uart_tx_empty(port) != TIOCSER_TEMT)
++		cpu_relax();
++
+ 	return uart_set_options(port, co, baud, parity, bits, flow);
+ }
+ 
+diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
+index 58b454c34560a..10a832a2135e2 100644
+--- a/drivers/tty/vcc.c
++++ b/drivers/tty/vcc.c
+@@ -604,6 +604,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 	port->index = vcc_table_add(port);
+ 	if (port->index == -1) {
+ 		pr_err("VCC: no more TTY indices left for allocation\n");
++		rv = -ENOMEM;
+ 		goto free_ldc;
+ 	}
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 7bf2573dd459e..37cc3fd7c3cad 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -270,7 +270,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+ {
+ 	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
+ 	struct dwc3		*dwc = dep->dwc;
+-	u32			timeout = 1000;
++	u32			timeout = 5000;
+ 	u32			saved_config = 0;
+ 	u32			reg;
+ 
+diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
+index de764459e05a6..9d93e7441bbca 100644
+--- a/drivers/usb/host/ehci-mv.c
++++ b/drivers/usb/host/ehci-mv.c
+@@ -192,12 +192,10 @@ static int mv_ehci_probe(struct platform_device *pdev)
+ 	hcd->rsrc_len = resource_size(r);
+ 	hcd->regs = ehci_mv->op_regs;
+ 
+-	hcd->irq = platform_get_irq(pdev, 0);
+-	if (!hcd->irq) {
+-		dev_err(&pdev->dev, "Cannot get irq.");
+-		retval = -ENODEV;
++	retval = platform_get_irq(pdev, 0);
++	if (retval < 0)
+ 		goto err_disable_clk;
+-	}
++	hcd->irq = retval;
+ 
+ 	ehci = hcd_to_ehci(hcd);
+ 	ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
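The ehci-mv fix is worth generalizing: platform_get_irq() reports failure as a negative errno, so a !irq test lets errors (including -EPROBE_DEFER) slip through as bogus interrupt numbers. The canonical sequence:

    int irq = platform_get_irq(pdev, 0);

    if (irq < 0)
        return irq;     /* propagates -EPROBE_DEFER, -ENXIO, ... */
    hcd->irq = irq;

Checking the signed return value before storing it also avoids laundering the errno through a field where its meaning is lost.
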
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 9f72a6ee13b53..58e7336b2748b 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -409,6 +409,19 @@ static void vfio_pci_release(void *device_data)
+ 	if (!(--vdev->refcnt)) {
+ 		vfio_spapr_pci_eeh_release(vdev->pdev);
+ 		vfio_pci_disable(vdev);
++		mutex_lock(&vdev->igate);
++		if (vdev->err_trigger) {
++			eventfd_ctx_put(vdev->err_trigger);
++			vdev->err_trigger = NULL;
++		}
++		mutex_unlock(&vdev->igate);
++
++		mutex_lock(&vdev->igate);
++		if (vdev->req_trigger) {
++			eventfd_ctx_put(vdev->req_trigger);
++			vdev->req_trigger = NULL;
++		}
++		mutex_unlock(&vdev->igate);
+ 	}
+ 
+ 	mutex_unlock(&driver_lock);
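On last close, the vfio hunk drops any user-armed eventfd contexts so that a stale trigger cannot fire into the next consumer of the device. Each trigger follows the usual eventfd ownership pattern; the two back-to-back critical sections above could just as well be folded into one igate-protected block:

    mutex_lock(&vdev->igate);
    if (vdev->err_trigger) {
        eventfd_ctx_put(vdev->err_trigger);  /* drop our reference */
        vdev->err_trigger = NULL;            /* mark as disarmed   */
    }
    if (vdev->req_trigger) {
        eventfd_ctx_put(vdev->req_trigger);
        vdev->req_trigger = NULL;
    }
    mutex_unlock(&vdev->igate);
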
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 8ac8f7469354b..9f3faac490259 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1793,6 +1793,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
+ 	struct gendisk *disk = bdev->bd_disk;
+ 	struct block_device *victim = NULL;
+ 
++	/*
++	 * Sync early if it looks like we're the last one.  If someone else
++	 * opens the block device between now and the decrement of bd_openers
++	 * then we did a sync that we didn't need to, but that's not the end
++	 * of the world and we want to avoid long (could be several minute)
++	 * syncs while holding the mutex.
++	 */
++	if (bdev->bd_openers == 1)
++		sync_blockdev(bdev);
++
+ 	mutex_lock_nested(&bdev->bd_mutex, for_part);
+ 	if (for_part)
+ 		bdev->bd_part_count--;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 319a89d4d0735..ce5e0f6c6af4f 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -9098,8 +9098,6 @@ out:
+ 	 */
+ 	if (!for_reloc && !root_dropped)
+ 		btrfs_add_dead_root(root);
+-	if (err && err != -EAGAIN)
+-		btrfs_handle_fs_error(fs_info, err, NULL);
+ 	return err;
+ }
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index bdfe159a60da6..64d459ca76d06 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8913,20 +8913,17 @@ again:
+ 	/*
+ 	 * Qgroup reserved space handler
+ 	 * Page here will be either
+-	 * 1) Already written to disk
+-	 *    In this case, its reserved space is released from data rsv map
+-	 *    and will be freed by delayed_ref handler finally.
+-	 *    So even we call qgroup_free_data(), it won't decrease reserved
+-	 *    space.
+-	 * 2) Not written to disk
+-	 *    This means the reserved space should be freed here. However,
+-	 *    if a truncate invalidates the page (by clearing PageDirty)
+-	 *    and the page is accounted for while allocating extent
+-	 *    in btrfs_check_data_free_space() we let delayed_ref to
+-	 *    free the entire extent.
++	 * 1) Already written to disk or ordered extent already submitted
++	 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
++	 *    Qgroup will be handled by its qgroup_record then.
++	 *    btrfs_qgroup_free_data() call will do nothing here.
++	 *
++	 * 2) Not written to disk yet
++	 *    Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
++	 *    bit of its io_tree and free the qgroup reserved data space,
++	 *    since the IO will never happen for this page.
+ 	 */
+-	if (PageDirty(page))
+-		btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
++	btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
+ 	if (!inode_evicting) {
+ 		clear_extent_bit(tree, page_start, page_end,
+ 				 EXTENT_LOCKED | EXTENT_DIRTY |
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index a2d4eed27f804..c0dbf8b7762b4 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2015,12 +2015,24 @@ ack:
+ 			if (mutex_trylock(&session->s_mutex) == 0) {
+ 				dout("inverting session/ino locks on %p\n",
+ 				     session);
++				session = ceph_get_mds_session(session);
+ 				spin_unlock(&ci->i_ceph_lock);
+ 				if (took_snap_rwsem) {
+ 					up_read(&mdsc->snap_rwsem);
+ 					took_snap_rwsem = 0;
+ 				}
+-				mutex_lock(&session->s_mutex);
++				if (session) {
++					mutex_lock(&session->s_mutex);
++					ceph_put_mds_session(session);
++				} else {
++					/*
++					 * Because we take the reference while
++					 * holding the i_ceph_lock, it should
++					 * never be NULL. Throw a warning if it
++					 * ever is.
++					 */
++					WARN_ON_ONCE(true);
++				}
+ 				goto retry;
+ 			}
+ 		}
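The ceph fix is a textbook answer to lock-order inversion: s_mutex must be taken before i_ceph_lock, so when the trylock fails the code has to drop i_ceph_lock first - and in that unlocked window the session could otherwise be freed. Pinning the object while still under the lock keeps the pointer valid across the gap. The generic shape, with illustrative names:

    spin_lock(&inner_lock);
    ...
    obj = object_get(obj);          /* take a ref while still protected */
    spin_unlock(&inner_lock);

    mutex_lock(&obj->outer_mutex);
    object_put(obj);                /* the mutex (plus re-validation on
                                       the retry path) now pins it */
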
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 1e438e0faf77e..3c24fb77ef325 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -764,8 +764,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ 	info_caps = le32_to_cpu(info->cap.caps);
+ 
+ 	/* prealloc new cap struct */
+-	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP)
++	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
+ 		new_cap = ceph_get_cap(mdsc, caps_reservation);
++		if (!new_cap)
++			return -ENOMEM;
++	}
+ 
+ 	/*
+ 	 * prealloc xattr data, if it looks like we'll need it.  only
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 71c2dd0c7f038..2c632793c88c5 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -259,8 +259,9 @@ struct smb_version_operations {
+ 	int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
+ 	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+ 	int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
+-	void (*downgrade_oplock)(struct TCP_Server_Info *,
+-					struct cifsInodeInfo *, bool);
++	void (*downgrade_oplock)(struct TCP_Server_Info *server,
++				 struct cifsInodeInfo *cinode, __u32 oplock,
++				 unsigned int epoch, bool *purge_cache);
+ 	/* process transaction2 response */
+ 	bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
+ 			     char *, int);
+@@ -1160,6 +1161,8 @@ struct cifsFileInfo {
+ 	unsigned int f_flags;
+ 	bool invalidHandle:1;	/* file closed via session abend */
+ 	bool oplock_break_cancelled:1;
++	unsigned int oplock_epoch; /* epoch from the lease break */
++	__u32 oplock_level; /* oplock/lease level from the lease break */
+ 	int count;
+ 	spinlock_t file_info_lock; /* protects four flag/count fields above */
+ 	struct mutex fh_mutex; /* prevents reopen race after dead ses*/
+@@ -1300,7 +1303,7 @@ struct cifsInodeInfo {
+ 	unsigned int epoch;		/* used to track lease state changes */
+ #define CIFS_INODE_PENDING_OPLOCK_BREAK   (0) /* oplock break in progress */
+ #define CIFS_INODE_PENDING_WRITERS	  (1) /* Writes in progress */
+-#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */
++#define CIFS_INODE_FLAG_UNUSED		  (2) /* Unused flag */
+ #define CIFS_INO_DELETE_PENDING		  (3) /* delete pending on server */
+ #define CIFS_INO_INVALID_MAPPING	  (4) /* pagecache is invalid */
+ #define CIFS_INO_LOCK			  (5) /* lock bit for synchronization */
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 128cbd69911b4..5cb15649adb07 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -3804,7 +3804,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
+ 			break;
+ 
+ 		__SetPageLocked(page);
+-		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
++		rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
++		if (rc) {
+ 			__ClearPageLocked(page);
+ 			break;
+ 		}
+@@ -3820,6 +3821,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
+ 	struct list_head *page_list, unsigned num_pages)
+ {
+ 	int rc;
++	int err = 0;
+ 	struct list_head tmplist;
+ 	struct cifsFileInfo *open_file = file->private_data;
+ 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+@@ -3860,7 +3862,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
+ 	 * the order of declining indexes. When we put the pages in
+ 	 * the rdata->pages, then we want them in increasing order.
+ 	 */
+-	while (!list_empty(page_list)) {
++	while (!list_empty(page_list) && !err) {
+ 		unsigned int i, nr_pages, bytes, rsize;
+ 		loff_t offset;
+ 		struct page *page, *tpage;
+@@ -3883,9 +3885,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
+ 			return 0;
+ 		}
+ 
+-		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
++		nr_pages = 0;
++		err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
+ 					 &nr_pages, &offset, &bytes);
+-		if (rc) {
++		if (!nr_pages) {
+ 			add_credits_and_wake_if(server, credits, 0);
+ 			break;
+ 		}
+@@ -4185,12 +4188,13 @@ void cifs_oplock_break(struct work_struct *work)
+ 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 	int rc = 0;
++	bool purge_cache = false;
+ 
+ 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ 			TASK_UNINTERRUPTIBLE);
+ 
+-	server->ops->downgrade_oplock(server, cinode,
+-		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
++	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
++				      cfile->oplock_epoch, &purge_cache);
+ 
+ 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
+ 						cifs_has_mand_locks(cinode)) {
+@@ -4205,18 +4209,21 @@ void cifs_oplock_break(struct work_struct *work)
+ 		else
+ 			break_lease(inode, O_WRONLY);
+ 		rc = filemap_fdatawrite(inode->i_mapping);
+-		if (!CIFS_CACHE_READ(cinode)) {
++		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
+ 			rc = filemap_fdatawait(inode->i_mapping);
+ 			mapping_set_error(inode->i_mapping, rc);
+ 			cifs_zap_mapping(inode);
+ 		}
+ 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
++		if (CIFS_CACHE_WRITE(cinode))
++			goto oplock_break_ack;
+ 	}
+ 
+ 	rc = cifs_push_locks(cfile);
+ 	if (rc)
+ 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
+ 
++oplock_break_ack:
+ 	/*
+ 	 * releasing stale oplock after recent reconnect of smb session using
+ 	 * a now incorrect file handle is not a data integrity issue but do
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index e45f8e321371c..dd67f56ea61e5 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -477,21 +477,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ 				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
+ 					&pCifsInode->flags);
+ 
+-				/*
+-				 * Set flag if the server downgrades the oplock
+-				 * to L2 else clear.
+-				 */
+-				if (pSMB->OplockLevel)
+-					set_bit(
+-					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+-					   &pCifsInode->flags);
+-				else
+-					clear_bit(
+-					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+-					   &pCifsInode->flags);
+-
+-				cifs_queue_oplock_break(netfile);
++				netfile->oplock_epoch = 0;
++				netfile->oplock_level = pSMB->OplockLevel;
+ 				netfile->oplock_break_cancelled = false;
++				cifs_queue_oplock_break(netfile);
+ 
+ 				spin_unlock(&tcon->open_file_lock);
+ 				spin_unlock(&cifs_tcp_ses_lock);
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index c7f0c85664425..0b7f924512848 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -381,12 +381,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
+ 
+ static void
+ cifs_downgrade_oplock(struct TCP_Server_Info *server,
+-			struct cifsInodeInfo *cinode, bool set_level2)
++		      struct cifsInodeInfo *cinode, __u32 oplock,
++		      unsigned int epoch, bool *purge_cache)
+ {
+-	if (set_level2)
+-		cifs_set_oplock_level(cinode, OPLOCK_READ);
+-	else
+-		cifs_set_oplock_level(cinode, 0);
++	cifs_set_oplock_level(cinode, oplock);
+ }
+ 
+ static bool
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 2fc96f7923ee5..7d875a47d0226 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -550,7 +550,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
+ 
+ 		cifs_dbg(FYI, "found in the open list\n");
+ 		cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
+-			 le32_to_cpu(rsp->NewLeaseState));
++			 lease_state);
+ 
+ 		if (ack_req)
+ 			cfile->oplock_break_cancelled = false;
+@@ -559,17 +559,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
+ 
+ 		set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
+ 
+-		/*
+-		 * Set or clear flags depending on the lease state being READ.
+-		 * HANDLE caching flag should be added when the client starts
+-		 * to defer closing remote file handles with HANDLE leases.
+-		 */
+-		if (lease_state & SMB2_LEASE_READ_CACHING_HE)
+-			set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+-				&cinode->flags);
+-		else
+-			clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+-				  &cinode->flags);
++		cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
++		cfile->oplock_level = lease_state;
+ 
+ 		cifs_queue_oplock_break(cfile);
+ 		return true;
+@@ -599,7 +590,7 @@ smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
+ 
+ 		cifs_dbg(FYI, "found in the pending open list\n");
+ 		cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
+-			 le32_to_cpu(rsp->NewLeaseState));
++			 lease_state);
+ 
+ 		open->oplock = lease_state;
+ 	}
+@@ -732,18 +723,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ 				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
+ 					&cinode->flags);
+ 
+-				/*
+-				 * Set flag if the server downgrades the oplock
+-				 * to L2 else clear.
+-				 */
+-				if (rsp->OplockLevel)
+-					set_bit(
+-					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+-					   &cinode->flags);
+-				else
+-					clear_bit(
+-					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+-					   &cinode->flags);
++				cfile->oplock_epoch = 0;
++				cfile->oplock_level = rsp->OplockLevel;
++
+ 				spin_unlock(&cfile->file_info_lock);
+ 
+ 				cifs_queue_oplock_break(cfile);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 2a523139a05fb..947a40069d246 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2358,22 +2358,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
+ 
+ static void
+ smb2_downgrade_oplock(struct TCP_Server_Info *server,
+-			struct cifsInodeInfo *cinode, bool set_level2)
++		      struct cifsInodeInfo *cinode, __u32 oplock,
++		      unsigned int epoch, bool *purge_cache)
+ {
+-	if (set_level2)
+-		server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
+-						0, NULL);
+-	else
+-		server->ops->set_oplock_level(cinode, 0, 0, NULL);
++	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
+ }
+ 
+ static void
+-smb21_downgrade_oplock(struct TCP_Server_Info *server,
+-		       struct cifsInodeInfo *cinode, bool set_level2)
++smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
++		       unsigned int epoch, bool *purge_cache);
++
++static void
++smb3_downgrade_oplock(struct TCP_Server_Info *server,
++		       struct cifsInodeInfo *cinode, __u32 oplock,
++		       unsigned int epoch, bool *purge_cache)
+ {
+-	server->ops->set_oplock_level(cinode,
+-				      set_level2 ? SMB2_LEASE_READ_CACHING_HE :
+-				      0, 0, NULL);
++	unsigned int old_state = cinode->oplock;
++	unsigned int old_epoch = cinode->epoch;
++	unsigned int new_state;
++
++	if (epoch > old_epoch) {
++		smb21_set_oplock_level(cinode, oplock, 0, NULL);
++		cinode->epoch = epoch;
++	}
++
++	new_state = cinode->oplock;
++	*purge_cache = false;
++
++	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
++	    (new_state & CIFS_CACHE_READ_FLG) == 0)
++		*purge_cache = true;
++	else if (old_state == new_state && (epoch - old_epoch > 1))
++		*purge_cache = true;
+ }
+ 
+ static void
+@@ -3449,7 +3465,7 @@ struct smb_version_operations smb21_operations = {
+ 	.print_stats = smb2_print_stats,
+ 	.is_oplock_break = smb2_is_valid_oplock_break,
+ 	.handle_cancelled_mid = smb2_handle_cancelled_mid,
+-	.downgrade_oplock = smb21_downgrade_oplock,
++	.downgrade_oplock = smb2_downgrade_oplock,
+ 	.need_neg = smb2_need_neg,
+ 	.negotiate = smb2_negotiate,
+ 	.negotiate_wsize = smb2_negotiate_wsize,
+@@ -3546,7 +3562,7 @@ struct smb_version_operations smb30_operations = {
+ 	.dump_share_caps = smb2_dump_share_caps,
+ 	.is_oplock_break = smb2_is_valid_oplock_break,
+ 	.handle_cancelled_mid = smb2_handle_cancelled_mid,
+-	.downgrade_oplock = smb21_downgrade_oplock,
++	.downgrade_oplock = smb3_downgrade_oplock,
+ 	.need_neg = smb2_need_neg,
+ 	.negotiate = smb2_negotiate,
+ 	.negotiate_wsize = smb2_negotiate_wsize,
+@@ -3651,7 +3667,7 @@ struct smb_version_operations smb311_operations = {
+ 	.dump_share_caps = smb2_dump_share_caps,
+ 	.is_oplock_break = smb2_is_valid_oplock_break,
+ 	.handle_cancelled_mid = smb2_handle_cancelled_mid,
+-	.downgrade_oplock = smb21_downgrade_oplock,
++	.downgrade_oplock = smb3_downgrade_oplock,
+ 	.need_neg = smb2_need_neg,
+ 	.negotiate = smb2_negotiate,
+ 	.negotiate_wsize = smb2_negotiate_wsize,
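The epoch logic in smb3_downgrade_oplock() above is easiest to follow with concrete values. Below is a minimal userspace sketch of just the purge_cache decision (editorial, not part of the patch; CACHE_READ stands in for CIFS_CACHE_READ_FLG and the state handling is simplified to plain integers):

	#include <stdbool.h>
	#include <stdio.h>

	#define CACHE_READ 0x1

	static bool purge_cache(unsigned int old_state, unsigned int new_state,
				unsigned int old_epoch, unsigned int epoch)
	{
		if ((old_state & CACHE_READ) && !(new_state & CACHE_READ))
			return true;	/* read caching lost: cached pages are stale */
		if (old_state == new_state && epoch - old_epoch > 1)
			return true;	/* a lease break was missed: state unreliable */
		return false;
	}

	int main(void)
	{
		/* lost READ caching across one epoch -> purge (prints 1) */
		printf("%d\n", purge_cache(CACHE_READ, 0, 1, 2));
		/* same state but an epoch was skipped -> purge (prints 1) */
		printf("%d\n", purge_cache(CACHE_READ, CACHE_READ, 1, 3));
		/* normal single-step break keeping READ -> keep cache (prints 0) */
		printf("%d\n", purge_cache(CACHE_READ, CACHE_READ, 1, 2));
		return 0;
	}

The second case is the interesting one: identical before/after states separated by more than one epoch mean an intermediate break was never observed, so the cached data cannot be trusted.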
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 308c682fa4d3b..44501f8cbd75e 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -1209,7 +1209,7 @@ struct smb2_oplock_break {
+ struct smb2_lease_break {
+ 	struct smb2_sync_hdr sync_hdr;
+ 	__le16 StructureSize; /* Must be 44 */
+-	__le16 Reserved;
++	__le16 Epoch;
+ 	__le32 Flags;
+ 	__u8   LeaseKey[16];
+ 	__le32 CurrentLeaseState;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 6e0022326afe3..20370a0997bf9 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -864,17 +864,19 @@ struct dentry *dget_parent(struct dentry *dentry)
+ {
+ 	int gotref;
+ 	struct dentry *ret;
++	unsigned seq;
+ 
+ 	/*
+ 	 * Do optimistic parent lookup without any
+ 	 * locking.
+ 	 */
+ 	rcu_read_lock();
++	seq = raw_seqcount_begin(&dentry->d_seq);
+ 	ret = READ_ONCE(dentry->d_parent);
+ 	gotref = lockref_get_not_zero(&ret->d_lockref);
+ 	rcu_read_unlock();
+ 	if (likely(gotref)) {
+-		if (likely(ret == READ_ONCE(dentry->d_parent)))
++		if (!read_seqcount_retry(&dentry->d_seq, seq))
+ 			return ret;
+ 		dput(ret);
+ 	}
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index cd833f4e64ef1..52be4c9650241 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5315,7 +5315,7 @@ static int ext4_do_update_inode(handle_t *handle,
+ 		raw_inode->i_file_acl_high =
+ 			cpu_to_le16(ei->i_file_acl >> 32);
+ 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
+-	if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
++	if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
+ 		ext4_isize_set(raw_inode, ei->i_disksize);
+ 		need_datasync = 1;
+ 	}
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 8dd54a8a03610..054cfdd007d69 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
+ 		BUG_ON(buddy == NULL);
+ 
+ 		k = mb_find_next_zero_bit(buddy, max, 0);
+-		BUG_ON(k >= max);
+-
++		if (k >= max) {
++			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
++				"%d free clusters of order %d. But found 0",
++				grp->bb_counters[i], i);
++			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
++					 e4b->bd_group,
++					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
++			break;
++		}
+ 		ac->ac_found++;
+ 
+ 		ac->ac_b_ex.fe_len = 1 << i;
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 01e6ea11822bf..c51c9a6881e49 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -831,7 +831,6 @@ static int fuse_check_page(struct page *page)
+ {
+ 	if (page_mapcount(page) ||
+ 	    page->mapping != NULL ||
+-	    page_count(page) != 1 ||
+ 	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
+ 	     ~(1 << PG_locked |
+ 	       1 << PG_referenced |
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index d968b5c5df217..a52b8b0dceeb9 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -715,7 +715,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 
+ 	error = gfs2_trans_begin(sdp, blocks, 0);
+ 	if (error)
+-		goto fail_gunlock2;
++		goto fail_free_inode;
+ 
+ 	if (blocks > 1) {
+ 		ip->i_eattr = ip->i_no_addr + 1;
+@@ -726,7 +726,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 
+ 	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ 	if (error)
+-		goto fail_gunlock2;
++		goto fail_free_inode;
+ 
+ 	BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
+ 
+@@ -735,7 +735,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 		goto fail_gunlock2;
+ 
+ 	glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+-	gfs2_glock_put(io_gl);
+ 	gfs2_set_iop(inode);
+ 	insert_inode_hash(inode);
+ 
+@@ -768,6 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 
+ 	mark_inode_dirty(inode);
+ 	d_instantiate(dentry, inode);
++	/* After instantiate, errors should result in evict, which will
++	 * destroy both the inode and iopen glocks properly. */
+ 	if (file) {
+ 		file->f_mode |= FMODE_CREATED;
+ 		error = finish_open(file, dentry, gfs2_open_common);
+@@ -775,15 +776,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ 	gfs2_glock_dq_uninit(ghs);
+ 	gfs2_glock_dq_uninit(ghs + 1);
+ 	clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++	gfs2_glock_put(io_gl);
+ 	return error;
+ 
+ fail_gunlock3:
+ 	glock_clear_object(io_gl, ip);
+ 	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+-	gfs2_glock_put(io_gl);
+ fail_gunlock2:
+-	if (io_gl)
+-		clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++	clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++	gfs2_glock_put(io_gl);
+ fail_free_inode:
+ 	if (ip->i_gl) {
+ 		glock_clear_object(ip->i_gl, ip);
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 5dae7c85d9b6e..2c7d76b4c5e18 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -132,47 +132,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
+ EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
+ 
+ /*
+- * nfs_page_group_lock - lock the head of the page group
+- * @req - request in group that is to be locked
++ * nfs_page_set_headlock - set the request PG_HEADLOCK
++ * @req: request that is to be locked
+  *
+- * this lock must be held when traversing or modifying the page
+- * group list
++ * this lock must be held when modifying req->wb_head
+  *
+  * return 0 on success, < 0 on error
+  */
+ int
+-nfs_page_group_lock(struct nfs_page *req)
++nfs_page_set_headlock(struct nfs_page *req)
+ {
+-	struct nfs_page *head = req->wb_head;
+-
+-	WARN_ON_ONCE(head != head->wb_head);
+-
+-	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
++	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
+ 		return 0;
+ 
+-	set_bit(PG_CONTENDED1, &head->wb_flags);
++	set_bit(PG_CONTENDED1, &req->wb_flags);
+ 	smp_mb__after_atomic();
+-	return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
++	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
+ 				TASK_UNINTERRUPTIBLE);
+ }
+ 
+ /*
+- * nfs_page_group_unlock - unlock the head of the page group
+- * @req - request in group that is to be unlocked
++ * nfs_page_clear_headlock - clear the request PG_HEADLOCK
++ * @req: request that is to be locked
+  */
+ void
+-nfs_page_group_unlock(struct nfs_page *req)
++nfs_page_clear_headlock(struct nfs_page *req)
+ {
+-	struct nfs_page *head = req->wb_head;
+-
+-	WARN_ON_ONCE(head != head->wb_head);
+-
+ 	smp_mb__before_atomic();
+-	clear_bit(PG_HEADLOCK, &head->wb_flags);
++	clear_bit(PG_HEADLOCK, &req->wb_flags);
+ 	smp_mb__after_atomic();
+-	if (!test_bit(PG_CONTENDED1, &head->wb_flags))
++	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
+ 		return;
+-	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
++	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
++}
++
++/*
++ * nfs_page_group_lock - lock the head of the page group
++ * @req: request in group that is to be locked
++ *
++ * this lock must be held when traversing or modifying the page
++ * group list
++ *
++ * return 0 on success, < 0 on error
++ */
++int
++nfs_page_group_lock(struct nfs_page *req)
++{
++	int ret;
++
++	ret = nfs_page_set_headlock(req);
++	if (ret || req->wb_head == req)
++		return ret;
++	return nfs_page_set_headlock(req->wb_head);
++}
++
++/*
++ * nfs_page_group_unlock - unlock the head of the page group
++ * @req: request in group that is to be unlocked
++ */
++void
++nfs_page_group_unlock(struct nfs_page *req)
++{
++	if (req != req->wb_head)
++		nfs_page_clear_headlock(req->wb_head);
++	nfs_page_clear_headlock(req);
+ }
+ 
+ /*
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 63d20308a9bb7..d419d89b91f7c 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -416,22 +416,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+ 		destroy_list = (subreq->wb_this_page == old_head) ?
+ 				   NULL : subreq->wb_this_page;
+ 
++		/* Note: lock subreq in order to change subreq->wb_head */
++		nfs_page_set_headlock(subreq);
+ 		WARN_ON_ONCE(old_head != subreq->wb_head);
+ 
+ 		/* make sure old group is not used */
+ 		subreq->wb_this_page = subreq;
++		subreq->wb_head = subreq;
+ 
+ 		clear_bit(PG_REMOVE, &subreq->wb_flags);
+ 
+ 		/* Note: races with nfs_page_group_destroy() */
+ 		if (!kref_read(&subreq->wb_kref)) {
+ 			/* Check if we raced with nfs_page_group_destroy() */
+-			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
++			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
++				nfs_page_clear_headlock(subreq);
+ 				nfs_free_request(subreq);
++			} else
++				nfs_page_clear_headlock(subreq);
+ 			continue;
+ 		}
++		nfs_page_clear_headlock(subreq);
+ 
+-		subreq->wb_head = subreq;
+ 		nfs_release_request(old_head);
+ 
+ 		if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index c24306af9758f..655079ae1dd1f 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -471,6 +471,8 @@ find_any_file(struct nfs4_file *f)
+ {
+ 	struct file *ret;
+ 
++	if (!f)
++		return NULL;
+ 	spin_lock(&f->fi_lock);
+ 	ret = __nfs4_get_fd(f, O_RDWR);
+ 	if (!ret) {
+@@ -1207,6 +1209,12 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
+ 	nfs4_free_stateowner(sop);
+ }
+ 
++static bool
++nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
++{
++	return list_empty(&stp->st_perfile);
++}
++
+ static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
+ {
+ 	struct nfs4_file *fp = stp->st_stid.sc_file;
+@@ -1274,9 +1282,11 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+ 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+ 
++	if (!unhash_ol_stateid(stp))
++		return false;
+ 	list_del_init(&stp->st_locks);
+ 	nfs4_unhash_stid(&stp->st_stid);
+-	return unhash_ol_stateid(stp);
++	return true;
+ }
+ 
+ static void release_lock_stateid(struct nfs4_ol_stateid *stp)
+@@ -1341,13 +1351,12 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
+ static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
+ 				struct list_head *reaplist)
+ {
+-	bool unhashed;
+-
+ 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+ 
+-	unhashed = unhash_ol_stateid(stp);
++	if (!unhash_ol_stateid(stp))
++		return false;
+ 	release_open_stateid_locks(stp, reaplist);
+-	return unhashed;
++	return true;
+ }
+ 
+ static void release_open_stateid(struct nfs4_ol_stateid *stp)
+@@ -5774,21 +5783,21 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
+ }
+ 
+ static struct nfs4_ol_stateid *
+-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
++find_lock_stateid(const struct nfs4_lockowner *lo,
++		  const struct nfs4_ol_stateid *ost)
+ {
+ 	struct nfs4_ol_stateid *lst;
+-	struct nfs4_client *clp = lo->lo_owner.so_client;
+ 
+-	lockdep_assert_held(&clp->cl_lock);
++	lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
+ 
+-	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
+-		if (lst->st_stid.sc_type != NFS4_LOCK_STID)
+-			continue;
+-		if (lst->st_stid.sc_file == fp) {
+-			refcount_inc(&lst->st_stid.sc_count);
+-			return lst;
++	/* If ost is not hashed, ost->st_locks will not be valid */
++	if (!nfs4_ol_stateid_unhashed(ost))
++		list_for_each_entry(lst, &ost->st_locks, st_locks) {
++			if (lst->st_stateowner == &lo->lo_owner) {
++				refcount_inc(&lst->st_stid.sc_count);
++				return lst;
++			}
+ 		}
+-	}
+ 	return NULL;
+ }
+ 
+@@ -5804,11 +5813,11 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
+ 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
+ retry:
+ 	spin_lock(&clp->cl_lock);
+-	spin_lock(&fp->fi_lock);
+-	retstp = find_lock_stateid(lo, fp);
++	if (nfs4_ol_stateid_unhashed(open_stp))
++		goto out_close;
++	retstp = find_lock_stateid(lo, open_stp);
+ 	if (retstp)
+-		goto out_unlock;
+-
++		goto out_found;
+ 	refcount_inc(&stp->st_stid.sc_count);
+ 	stp->st_stid.sc_type = NFS4_LOCK_STID;
+ 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
+@@ -5817,22 +5826,26 @@ retry:
+ 	stp->st_access_bmap = 0;
+ 	stp->st_deny_bmap = open_stp->st_deny_bmap;
+ 	stp->st_openstp = open_stp;
++	spin_lock(&fp->fi_lock);
+ 	list_add(&stp->st_locks, &open_stp->st_locks);
+ 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
+ 	list_add(&stp->st_perfile, &fp->fi_stateids);
+-out_unlock:
+ 	spin_unlock(&fp->fi_lock);
+ 	spin_unlock(&clp->cl_lock);
+-	if (retstp) {
+-		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+-			nfs4_put_stid(&retstp->st_stid);
+-			goto retry;
+-		}
+-		/* To keep mutex tracking happy */
+-		mutex_unlock(&stp->st_mutex);
+-		stp = retstp;
+-	}
+ 	return stp;
++out_found:
++	spin_unlock(&clp->cl_lock);
++	if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
++		nfs4_put_stid(&retstp->st_stid);
++		goto retry;
++	}
++	/* To keep mutex tracking happy */
++	mutex_unlock(&stp->st_mutex);
++	return retstp;
++out_close:
++	spin_unlock(&clp->cl_lock);
++	mutex_unlock(&stp->st_mutex);
++	return NULL;
+ }
+ 
+ static struct nfs4_ol_stateid *
+@@ -5847,7 +5860,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
+ 
+ 	*new = false;
+ 	spin_lock(&clp->cl_lock);
+-	lst = find_lock_stateid(lo, fi);
++	lst = find_lock_stateid(lo, ost);
+ 	spin_unlock(&clp->cl_lock);
+ 	if (lst != NULL) {
+ 		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
+diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
+index 099bec94b8207..fab29f899f913 100644
+--- a/fs/ubifs/io.c
++++ b/fs/ubifs/io.c
+@@ -237,7 +237,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
+ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
+ 		     int offs, int quiet, int must_chk_crc)
+ {
+-	int err = -EINVAL, type, node_len;
++	int err = -EINVAL, type, node_len, dump_node = 1;
+ 	uint32_t crc, node_crc, magic;
+ 	const struct ubifs_ch *ch = buf;
+ 
+@@ -290,10 +290,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
+ out_len:
+ 	if (!quiet)
+ 		ubifs_err(c, "bad node length %d", node_len);
++	if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ)
++		dump_node = 0;
+ out:
+ 	if (!quiet) {
+ 		ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
+-		ubifs_dump_node(c, buf);
++		if (dump_node) {
++			ubifs_dump_node(c, buf);
++		} else {
++			int safe_len = min3(node_len, c->leb_size - offs,
++				(int)UBIFS_MAX_DATA_NODE_SZ);
++			pr_err("\tprevent out-of-bounds memory access\n");
++			pr_err("\ttruncated data node length      %d\n", safe_len);
++			pr_err("\tcorrupted data node:\n");
++			print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1,
++					buf, safe_len, 0);
++		}
+ 		dump_stack();
+ 	}
+ 	return err;
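The min3() clamp above is the core of the fix: a corrupted node header can claim an arbitrary length, so the dump must be bounded by what is actually addressable. A standalone sketch of the same idea (editorial; the buffer size and names are made up):

	#include <stdio.h>
	#include <string.h>

	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	/* Never trust a length field read from on-disk metadata; bound it
	 * by the bytes actually present before touching the buffer. */
	static void dump_untrusted(const unsigned char *buf, int claimed_len,
				   int bytes_left_in_region, int max_node_size)
	{
		int safe_len = MIN(claimed_len,
				   MIN(bytes_left_in_region, max_node_size));

		for (int i = 0; i < safe_len; i++)
			printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
		putchar('\n');
	}

	int main(void)
	{
		unsigned char node[64];

		memset(node, 0xab, sizeof(node));
		/* corrupted header claims 9972 bytes; only 64 really exist */
		dump_untrusted(node, 9972, sizeof(node), 4096);
		return 0;
	}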
+diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
+index bd37f4a292c3b..efb586ea508bf 100644
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -1438,7 +1438,9 @@ xfs_attr3_leaf_add_work(
+ 	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ 		if (ichdr->freemap[i].base == tmp) {
+ 			ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
+-			ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
++			ichdr->freemap[i].size -=
++				min_t(uint16_t, ichdr->freemap[i].size,
++						sizeof(xfs_attr_leaf_entry_t));
+ 		}
+ 	}
+ 	ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
+diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
+index f1bb3434f51c7..01e99806b941f 100644
+--- a/fs/xfs/libxfs/xfs_dir2_node.c
++++ b/fs/xfs/libxfs/xfs_dir2_node.c
+@@ -214,6 +214,7 @@ __xfs_dir3_free_read(
+ 	if (fa) {
+ 		xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
+ 		xfs_trans_brelse(tp, *bpp);
++		*bpp = NULL;
+ 		return -EFSCORRUPTED;
+ 	}
+ 
+diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
+index f99a7aefe4184..2b3cc5a8ced1b 100644
+--- a/fs/xfs/libxfs/xfs_trans_resv.c
++++ b/fs/xfs/libxfs/xfs_trans_resv.c
+@@ -197,6 +197,24 @@ xfs_calc_inode_chunk_res(
+ 	return res;
+ }
+ 
++/*
++ * Per-extent log reservation for the btree changes involved in freeing or
++ * allocating a realtime extent.  We have to be able to log as many rtbitmap
++ * blocks as needed to mark in use MAXEXTLEN blocks' worth of realtime extents,
++ * as well as the realtime summary block.
++ */
++unsigned int
++xfs_rtalloc_log_count(
++	struct xfs_mount	*mp,
++	unsigned int		num_ops)
++{
++	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);
++	unsigned int		rtbmp_bytes;
++
++	rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY;
++	return (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
++}
++
+ /*
+  * Various log reservation values.
+  *
+@@ -219,13 +237,21 @@ xfs_calc_inode_chunk_res(
+ 
+ /*
+  * In a write transaction we can allocate a maximum of 2
+- * extents.  This gives:
++ * extents.  This gives (t1):
+  *    the inode getting the new extents: inode size
+  *    the inode's bmap btree: max depth * block size
+  *    the agfs of the ags from which the extents are allocated: 2 * sector
+  *    the superblock free block counter: sector size
+  *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+- * And the bmap_finish transaction can free bmap blocks in a join:
++ * Or, if we're writing to a realtime file (t2):
++ *    the inode getting the new extents: inode size
++ *    the inode's bmap btree: max depth * block size
++ *    the agfs of the ags from which the extents are allocated: 2 * sector
++ *    the superblock free block counter: sector size
++ *    the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes
++ *    the realtime summary: 1 block
++ *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
++ * And the bmap_finish transaction can free bmap blocks in a join (t3):
+  *    the agfs of the ags containing the blocks: 2 * sector size
+  *    the agfls of the ags containing the blocks: 2 * sector size
+  *    the super block free block counter: sector size
+@@ -235,40 +261,72 @@ STATIC uint
+ xfs_calc_write_reservation(
+ 	struct xfs_mount	*mp)
+ {
+-	return XFS_DQUOT_LOGRES(mp) +
+-		max((xfs_calc_inode_res(mp, 1) +
++	unsigned int		t1, t2, t3;
++	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);
++
++	t1 = xfs_calc_inode_res(mp, 1) +
++	     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
++	     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
++	     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
++
++	if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
++		t2 = xfs_calc_inode_res(mp, 1) +
+ 		     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
+-				      XFS_FSB_TO_B(mp, 1)) +
++				     blksz) +
+ 		     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
+-		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
+-				      XFS_FSB_TO_B(mp, 1))),
+-		    (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+-		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
+-				      XFS_FSB_TO_B(mp, 1))));
++		     xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) +
++		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz);
++	} else {
++		t2 = 0;
++	}
++
++	t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
++	     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
++
++	return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
+ }
+ 
+ /*
+- * In truncating a file we free up to two extents at once.  We can modify:
++ * In truncating a file we free up to two extents at once.  We can modify (t1):
+  *    the inode being truncated: inode size
+  *    the inode's bmap btree: (max depth + 1) * block size
+- * And the bmap_finish transaction can free the blocks and bmap blocks:
++ * And the bmap_finish transaction can free the blocks and bmap blocks (t2):
+  *    the agf for each of the ags: 4 * sector size
+  *    the agfl for each of the ags: 4 * sector size
+  *    the super block to reflect the freed blocks: sector size
+  *    worst case split in allocation btrees per extent assuming 4 extents:
+  *		4 exts * 2 trees * (2 * max depth - 1) * block size
++ * Or, if it's a realtime file (t3):
++ *    the agf for each of the ags: 2 * sector size
++ *    the agfl for each of the ags: 2 * sector size
++ *    the super block to reflect the freed blocks: sector size
++ *    the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes
++ *    the realtime summary: 2 exts * 1 block
++ *    worst case split in allocation btrees per extent assuming 2 extents:
++ *		2 exts * 2 trees * (2 * max depth - 1) * block size
+  */
+ STATIC uint
+ xfs_calc_itruncate_reservation(
+ 	struct xfs_mount	*mp)
+ {
+-	return XFS_DQUOT_LOGRES(mp) +
+-		max((xfs_calc_inode_res(mp, 1) +
+-		     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
+-				      XFS_FSB_TO_B(mp, 1))),
+-		    (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
+-		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
+-				      XFS_FSB_TO_B(mp, 1))));
++	unsigned int		t1, t2, t3;
++	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);
++
++	t1 = xfs_calc_inode_res(mp, 1) +
++	     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);
++
++	t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
++	     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz);
++
++	if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
++		t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
++		     xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) +
++		     xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
++	} else {
++		t3 = 0;
++	}
++
++	return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
+ }
+ 
+ /*
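Plugging representative numbers into xfs_rtalloc_log_count() above makes the reservation concrete. Assuming 4 KiB filesystem blocks and a realtime extent size of one block (both assumptions, chosen for round numbers), one op reserves 65 blocks; a standalone check:

	#include <stdio.h>

	#define MAXEXTLEN	((1u << 21) - 1)	/* worst-case extent length, in fs blocks */
	#define NBBY		8			/* bits per byte */
	#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

	int main(void)
	{
		unsigned int blksz = 4096;	/* assumed fs block size */
		unsigned int rextsize = 1;	/* assumed rt extent size, in blocks */
		unsigned int num_ops = 1;
		unsigned int rtbmp_bytes = (MAXEXTLEN / rextsize) / NBBY;

		/* 64 rtbitmap blocks + 1 summary block = 65 */
		printf("%u blocks per op\n",
		       (howmany(rtbmp_bytes, blksz) + 1) * num_ops);
		return 0;
	}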
+diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
+index cd3e4d768a18c..33dfcba72c7a0 100644
+--- a/fs/xfs/scrub/dir.c
++++ b/fs/xfs/scrub/dir.c
+@@ -156,6 +156,9 @@ xchk_dir_actor(
+ 	xname.type = XFS_DIR3_FT_UNKNOWN;
+ 
+ 	error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
++	/* ENOENT means the hash lookup failed and the dir is corrupt */
++	if (error == -ENOENT)
++		error = -EFSCORRUPTED;
+ 	if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ 			&error))
+ 		goto out;
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
+index 3b0ba54cc4d5b..3bc1034c57e66 100644
+--- a/include/linux/debugfs.h
++++ b/include/linux/debugfs.h
+@@ -54,6 +54,8 @@ static const struct file_operations __fops = {				\
+ 	.llseek  = no_llseek,						\
+ }
+ 
++typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
++
+ #if defined(CONFIG_DEBUG_FS)
+ 
+ struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
+@@ -75,7 +77,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
+ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
+ 				      const char *dest);
+ 
+-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+ struct dentry *debugfs_create_automount(const char *name,
+ 					struct dentry *parent,
+ 					debugfs_automount_t f,
+@@ -204,7 +205,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
+ 
+ static inline struct dentry *debugfs_create_automount(const char *name,
+ 					struct dentry *parent,
+-					struct vfsmount *(*f)(void *),
++					debugfs_automount_t f,
+ 					void *data)
+ {
+ 	return ERR_PTR(-ENODEV);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index afc1d72161ba5..3d076aca7ac2a 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -503,6 +503,7 @@ enum hsm_task_states {
+ };
+ 
+ enum ata_completion_errors {
++	AC_ERR_OK		= 0,	    /* no error */
+ 	AC_ERR_DEV		= (1 << 0), /* device reported error */
+ 	AC_ERR_HSM		= (1 << 1), /* host state machine violation */
+ 	AC_ERR_TIMEOUT		= (1 << 2), /* timeout */
+@@ -912,9 +913,9 @@ struct ata_port_operations {
+ 	/*
+ 	 * Command execution
+ 	 */
+-	int  (*qc_defer)(struct ata_queued_cmd *qc);
+-	int  (*check_atapi_dma)(struct ata_queued_cmd *qc);
+-	void (*qc_prep)(struct ata_queued_cmd *qc);
++	int (*qc_defer)(struct ata_queued_cmd *qc);
++	int (*check_atapi_dma)(struct ata_queued_cmd *qc);
++	enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
+ 	unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
+ 	bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ 
+@@ -1181,7 +1182,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode);
+ extern const char *ata_mode_string(unsigned long xfer_mask);
+ extern unsigned long ata_id_xfermask(const u16 *id);
+ extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
+-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
++extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
+ extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
+ 		 unsigned int n_elem);
+ extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+@@ -1916,9 +1917,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
+ 	.sg_tablesize		= LIBATA_MAX_PRD,		\
+ 	.dma_boundary		= ATA_DMA_BOUNDARY
+ 
+-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
++extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+ extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
+-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
++extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+ extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
+ 				      struct ata_queued_cmd *qc);
+ extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index 8ef330027b134..3f8e84a80b4ad 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -227,7 +227,7 @@ struct mmc_queue_req;
+  * MMC Physical partitions
+  */
+ struct mmc_part {
+-	unsigned int	size;	/* partition size (in bytes) */
++	u64		size;	/* partition size (in bytes) */
+ 	unsigned int	part_cfg;	/* partition type */
+ 	char	name[MAX_MMC_PART_NAME_LEN];
+ 	bool	force_ro;	/* to make boot parts RO by default */
+diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
+index ad69430fd0eb5..5162fc1533c2f 100644
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -142,6 +142,8 @@ extern	void nfs_unlock_and_release_request(struct nfs_page *);
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
++extern	int nfs_page_set_headlock(struct nfs_page *req);
++extern void nfs_page_clear_headlock(struct nfs_page *req);
+ extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
+ 
+ /*
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 2517492dd1855..2fda9893962d1 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1144,7 +1144,6 @@ int pci_enable_rom(struct pci_dev *pdev);
+ void pci_disable_rom(struct pci_dev *pdev);
+ void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
+ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
+-void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
+ 
+ /* Power management related routines */
+ int pci_save_state(struct pci_dev *dev);
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index bcf4cf26b8c89..a42a29952889c 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -243,6 +243,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
+  * usual consistency guarantee. It is one wmb cheaper, because we can
+  * collapse the two back-to-back wmb()s.
+  *
++ * Note that writes surrounding the barrier should be declared atomic (e.g.
++ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
++ * atomically, avoiding compiler optimizations; b) to document which writes are
++ * meant to propagate to the reader critical section. This is necessary because
++ * neither the writes before nor those after the barrier are enclosed in a
++ * seq-writer critical section that would ensure readers are aware of ongoing writes.
++ *
+  *      seqcount_t seq;
+  *      bool X = true, Y = false;
+  *
+@@ -262,11 +269,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
+  *
+  *      void write(void)
+  *      {
+- *              Y = true;
++ *              WRITE_ONCE(Y, true);
+  *
+  *              raw_write_seqcount_barrier(seq);
+  *
+- *              X = false;
++ *              WRITE_ONCE(X, false);
+  *      }
+  */
+ static inline void raw_write_seqcount_barrier(seqcount_t *s)
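For symmetry with the write() example above, the reader side retries until it observes a quiescent sequence; a sketch consistent with the new guidance, pairing READ_ONCE() with the WRITE_ONCE()s (editorial, schematic kernel-style code, not part of the patch):

	void read(void)
	{
		bool x, y;
		unsigned int s;

		do {
			s = read_seqcount_begin(&seq);
			x = READ_ONCE(X);
			y = READ_ONCE(Y);
		} while (read_seqcount_retry(&seq, s));

		/* write() makes Y visible before clearing X, so the
		 * pair can never be observed as (false, false) */
		BUG_ON(!x && !y);
	}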
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index cbc0294f39899..703ce71caeacb 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1688,6 +1688,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
+ 	return list_->qlen;
+ }
+ 
++/**
++ *	skb_queue_len_lockless	- get queue length
++ *	@list_: list to measure
++ *
++ *	Return the length of an &sk_buff queue.
++ *	This variant can be used in lockless contexts.
++ */
++static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
++{
++	return READ_ONCE(list_->qlen);
++}
++
+ /**
+  *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
+  *	@list: queue to initialize
+@@ -1895,7 +1907,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+ {
+ 	struct sk_buff *next, *prev;
+ 
+-	list->qlen--;
++	WRITE_ONCE(list->qlen, list->qlen - 1);
+ 	next	   = skb->next;
+ 	prev	   = skb->prev;
+ 	skb->next  = skb->prev = NULL;
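A plausible use of the new helper above is an opportunistic, lock-free check where a slightly stale answer is acceptable; a hedged kernel-style sketch (the function and threshold are invented for illustration):

	/* The READ_ONCE() in skb_queue_len_lockless() pairs with the
	 * WRITE_ONCE() now used in __skb_unlink(), so the load is not
	 * torn; the value may still be stale by the time it is acted
	 * on, which is fine for a heuristic. */
	static bool tx_queue_congested(const struct sk_buff_head *q)
	{
		return skb_queue_len_lockless(q) > 128;	/* arbitrary threshold */
	}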
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 77f36257cac97..bc752237dff3f 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -900,11 +900,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+ 	skb_dst_force(skb);
+ 
+ 	if (!sk->sk_backlog.tail)
+-		sk->sk_backlog.head = skb;
++		WRITE_ONCE(sk->sk_backlog.head, skb);
+ 	else
+ 		sk->sk_backlog.tail->next = skb;
+ 
+-	sk->sk_backlog.tail = skb;
++	WRITE_ONCE(sk->sk_backlog.tail, skb);
+ 	skb->next = NULL;
+ }
+ 
+diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
+index 7475c7be165aa..d4aac34365955 100644
+--- a/include/trace/events/sctp.h
++++ b/include/trace/events/sctp.h
+@@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe,
+ 		__entry->pathmtu = asoc->pathmtu;
+ 		__entry->rwnd = asoc->peer.rwnd;
+ 		__entry->unack_data = asoc->unack_data;
+-
+-		if (trace_sctp_probe_path_enabled()) {
+-			struct sctp_transport *sp;
+-
+-			list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+-					    transports) {
+-				trace_sctp_probe_path(sp, asoc);
+-			}
+-		}
+ 	),
+ 
+ 	TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 4f7262eba73d8..50952d6d81209 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -317,8 +317,6 @@ static void audit_update_watch(struct audit_parent *parent,
+ 			if (oentry->rule.exe)
+ 				audit_remove_mark(oentry->rule.exe);
+ 
+-			audit_watch_log_rule_change(r, owatch, "updated_rules");
+-
+ 			call_rcu(&oentry->rcu, audit_free_rule_rcu);
+ 		}
+ 
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 1b28fb006763a..3f3ed33bd2fdc 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -667,15 +667,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
+ 	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
+ 	struct bpf_htab *htab = l->htab;
+ 
+-	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
+-	 * we're calling kfree, otherwise deadlock is possible if kprobes
+-	 * are placed somewhere inside of slub
+-	 */
+-	preempt_disable();
+-	__this_cpu_inc(bpf_prog_active);
+ 	htab_elem_free(htab, l);
+-	__this_cpu_dec(bpf_prog_active);
+-	preempt_enable();
+ }
+ 
+ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index c04815bb15cc1..11fade89c1f38 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -207,10 +207,12 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
+ 	else
+ 		prev_key = key;
+ 
++	rcu_read_lock();
+ 	if (map->ops->map_get_next_key(map, prev_key, key)) {
+ 		map_iter(m)->done = true;
+-		return NULL;
++		key = NULL;
+ 	}
++	rcu_read_unlock();
+ 	return key;
+ }
+ 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 230d9d599b5aa..2161f519d4812 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1065,9 +1065,20 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
+ 	return ret;
+ }
+ #else	/* !CONFIG_KPROBES_ON_FTRACE */
+-#define prepare_kprobe(p)	arch_prepare_kprobe(p)
+-#define arm_kprobe_ftrace(p)	(-ENODEV)
+-#define disarm_kprobe_ftrace(p)	(-ENODEV)
++static inline int prepare_kprobe(struct kprobe *p)
++{
++	return arch_prepare_kprobe(p);
++}
++
++static inline int arm_kprobe_ftrace(struct kprobe *p)
++{
++	return -ENODEV;
++}
++
++static inline int disarm_kprobe_ftrace(struct kprobe *p)
++{
++	return -ENODEV;
++}
+ #endif
+ 
+ /* Arm a kprobe with text_mutex */
+@@ -2083,9 +2094,10 @@ static void kill_kprobe(struct kprobe *p)
+ 
+ 	/*
+ 	 * The module is going away. We should disarm the kprobe which
+-	 * is using ftrace.
++	 * is using ftrace, because the ftrace framework is still available
++	 * at the MODULE_STATE_GOING notification.
+ 	 */
+-	if (kprobe_ftrace(p))
++	if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
+ 		disarm_kprobe_ftrace(p);
+ }
+ 
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 3cb0e5b479ff3..cf272aba362be 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2148,6 +2148,9 @@ static int __init console_setup(char *str)
+ 	char *s, *options, *brl_options = NULL;
+ 	int idx;
+ 
++	if (str[0] == 0)
++		return 1;
++
+ 	if (_braille_console_setup(&str, &brl_options))
+ 		return 1;
+ 
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 096932a450466..baf60a3aa34b7 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1275,11 +1275,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
+ 
+ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
+ {
+-	struct oldold_utsname tmp = {};
++	struct oldold_utsname tmp;
+ 
+ 	if (!name)
+ 		return -EFAULT;
+ 
++	memset(&tmp, 0, sizeof(tmp));
++
+ 	down_read(&uts_sem);
+ 	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
+ 	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 81ee5b83c9200..c66fd11d94bc4 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1004,9 +1004,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
+ 	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
+ 		return -EOVERFLOW;
+ 	tmp *= mult;
+-	rem *= mult;
+ 
+-	do_div(rem, div);
++	rem = div64_u64(rem * mult, div);
+ 	*base = tmp + rem;
+ 	return 0;
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 4966410bb0f4d..6bf617ff03694 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3037,6 +3037,9 @@ int trace_array_printk(struct trace_array *tr,
+ 	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
+ 		return 0;
+ 
++	if (!tr)
++		return -ENOENT;
++
+ 	va_start(ap, fmt);
+ 	ret = trace_array_vprintk(tr, ip, fmt, ap);
+ 	va_end(ap);
+@@ -8526,7 +8529,7 @@ __init static int tracer_alloc_buffers(void)
+ 		goto out_free_buffer_mask;
+ 
+ 	/* Only allocate trace_printk buffers if a trace_printk exists */
+-	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
++	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
+ 		/* Must be called before global_trace.buffer is allocated */
+ 		trace_printk_init_buffers();
+ 
+diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
+index 06bb2fd9a56c5..a97aad105d367 100644
+--- a/kernel/trace/trace_entries.h
++++ b/kernel/trace/trace_entries.h
+@@ -179,7 +179,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
+ 
+ 	F_STRUCT(
+ 		__field(	int,		size	)
+-		__dynamic_array(unsigned long,	caller	)
++		__array(	unsigned long,	caller,	FTRACE_STACK_ENTRIES	)
+ 	),
+ 
+ 	F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 27726121d332c..0fc06a7da87fb 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -800,6 +800,8 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+ 	char *event = NULL, *sub = NULL, *match;
+ 	int ret;
+ 
++	if (!tr)
++		return -ENOENT;
+ 	/*
+ 	 * The buf format can be <subsystem>:<event-name>
+ 	 *  *:<event-name> means any event by that name.
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index dbd3c97d1501a..3ed2d7f7e5712 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -4225,7 +4225,6 @@ static int parse_var_defs(struct hist_trigger_data *hist_data)
+ 
+ 			s = kstrdup(field_str, GFP_KERNEL);
+ 			if (!s) {
+-				kfree(hist_data->attrs->var_defs.name[n_vars]);
+ 				ret = -ENOMEM;
+ 				goto free;
+ 			}
+diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
+index 71f553cceb3c1..0e373cb0106bb 100644
+--- a/kernel/trace/trace_preemptirq.c
++++ b/kernel/trace/trace_preemptirq.c
+@@ -59,14 +59,14 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
+ 
+ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+ {
++	lockdep_hardirqs_off(CALLER_ADDR0);
++
+ 	if (!this_cpu_read(tracing_irq_cpu)) {
+ 		this_cpu_write(tracing_irq_cpu, 1);
+ 		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
+ 		if (!in_nmi())
+ 			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+ 	}
+-
+-	lockdep_hardirqs_off(CALLER_ADDR0);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off_caller);
+ #endif /* CONFIG_TRACE_IRQFLAGS */
+diff --git a/lib/string.c b/lib/string.c
+index 72125fd5b4a64..edf4907ec946f 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -236,6 +236,30 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
+ EXPORT_SYMBOL(strscpy);
+ #endif
+ 
++/**
++ * stpcpy - copy a string from src to dest returning a pointer to the new end
++ *          of dest, including src's %NUL-terminator. May overrun dest.
++ * @dest: pointer to end of string being copied into. Must be large enough
++ *        to receive copy.
++ * @src: pointer to the beginning of string being copied from. Must not overlap
++ *       dest.
++ *
++ * stpcpy differs from strcpy in a key way: the return value is a pointer
++ * to the new %NUL-terminating character in @dest. (For strcpy, the return
++ * value is a pointer to the start of @dest). This interface is considered
++ * unsafe as it doesn't perform bounds checking of the inputs. As such it's
++ * not recommended for use. Instead, its definition is provided in case
++ * the compiler lowers other libcalls to stpcpy.
++ */
++char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
++char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
++{
++	while ((*dest++ = *src++) != '\0')
++		/* nothing */;
++	return --dest;
++}
++EXPORT_SYMBOL(stpcpy);
++
+ #ifndef __HAVE_ARCH_STRCAT
+ /**
+  * strcat - Append one %NUL-terminated string to another
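The return value documented above is what makes stpcpy useful for chained concatenation: each call resumes exactly where the previous one left off. A small userspace illustration using a local copy of the same semantics (not from the patch):

	#include <stdio.h>

	/* local copy of the semantics above, for a standalone demo */
	static char *my_stpcpy(char *dest, const char *src)
	{
		while ((*dest++ = *src++) != '\0')
			/* nothing */;
		return --dest;	/* points at the NUL just written */
	}

	int main(void)
	{
		char buf[32];
		char *p = buf;

		/* each call resumes where the previous one ended */
		p = my_stpcpy(p, "Linux ");
		p = my_stpcpy(p, "4.19.149");
		printf("%s (%zu chars)\n", buf, (size_t)(p - buf));
		return 0;
	}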
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 45f1c6d73b5b0..f2e777003b901 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2889,6 +2889,14 @@ filler:
+ 		unlock_page(page);
+ 		goto out;
+ 	}
++
++	/*
++	 * A previous I/O error may have been due to temporary
++	 * failures.
++	 * Clear the page error before the actual read; PG_error
++	 * will be set again if the read fails.
++	 */
++	ClearPageError(page);
+ 	goto filler;
+ 
+ out:
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 5eeabece0c178..f54734abf9466 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -2039,7 +2039,7 @@ void __init kmemleak_init(void)
+ 	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+ 		      KMEMLEAK_GREY, GFP_ATOMIC);
+ 	/* only register .data..ro_after_init if not within .data */
+-	if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
++	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
+ 		create_object((unsigned long)__start_ro_after_init,
+ 			      __end_ro_after_init - __start_ro_after_init,
+ 			      KMEMLEAK_GREY, GFP_ATOMIC);
+diff --git a/mm/memory.c b/mm/memory.c
+index bbf0cc4066c84..eeae63bd95027 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -116,6 +116,18 @@ int randomize_va_space __read_mostly =
+ 					2;
+ #endif
+ 
++#ifndef arch_faults_on_old_pte
++static inline bool arch_faults_on_old_pte(void)
++{
++	/*
++	 * Architectures without a hardware "accessed" flag need to
++	 * implement their own helper. By default, "true" means a page
++	 * fault will be taken on an old pte.
++	 */
++	return true;
++}
++#endif
++
+ static int __init disable_randmaps(char *s)
+ {
+ 	randomize_va_space = 0;
+@@ -2335,32 +2347,101 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
+ 	return same;
+ }
+ 
+-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
++static inline bool cow_user_page(struct page *dst, struct page *src,
++				 struct vm_fault *vmf)
+ {
++	bool ret;
++	void *kaddr;
++	void __user *uaddr;
++	bool locked = false;
++	struct vm_area_struct *vma = vmf->vma;
++	struct mm_struct *mm = vma->vm_mm;
++	unsigned long addr = vmf->address;
++
+ 	debug_dma_assert_idle(src);
+ 
++	if (likely(src)) {
++		copy_user_highpage(dst, src, addr, vma);
++		return true;
++	}
++
+ 	/*
+ 	 * If the source page was a PFN mapping, we don't have
+ 	 * a "struct page" for it. We do a best-effort copy by
+ 	 * just copying from the original user address. If that
+ 	 * fails, we just zero-fill it. Live with it.
+ 	 */
+-	if (unlikely(!src)) {
+-		void *kaddr = kmap_atomic(dst);
+-		void __user *uaddr = (void __user *)(va & PAGE_MASK);
++	kaddr = kmap_atomic(dst);
++	uaddr = (void __user *)(addr & PAGE_MASK);
++
++	/*
++	 * On architectures with software "accessed" bits, we would
++	 * take a double page fault, so mark it accessed here.
++	 */
++	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
++		pte_t entry;
++
++		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
++		locked = true;
++		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
++			/*
++			 * Another thread has already handled the fault
++			 * and we don't need to do anything. If that's
++			 * not the case, the fault will be triggered
++			 * again on the same address.
++			 */
++			ret = false;
++			goto pte_unlock;
++		}
++
++		entry = pte_mkyoung(vmf->orig_pte);
++		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
++			update_mmu_cache(vma, addr, vmf->pte);
++	}
++
++	/*
++	 * This really shouldn't fail, because the page is there
++	 * in the page tables. But it might just be unreadable,
++	 * in which case we just give up and fill the result with
++	 * zeroes.
++	 */
++	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
++		if (locked)
++			goto warn;
++
++		/* Re-validate under PTL if the page is still mapped */
++		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
++		locked = true;
++		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
++			/* The PTE changed under us. Retry page fault. */
++			ret = false;
++			goto pte_unlock;
++		}
+ 
+ 		/*
+-		 * This really shouldn't fail, because the page is there
+-		 * in the page tables. But it might just be unreadable,
+-		 * in which case we just give up and fill the result with
+-		 * zeroes.
++		 * The same page can be mapped back in since the last
++		 * copy attempt. Try to copy again under the PTL.
+ 		 */
+-		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
++		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
++			/*
++			 * Give a warn in case there can be some obscure
++			 * use-case
++			 */
++warn:
++			WARN_ON_ONCE(1);
+ 			clear_page(kaddr);
+-		kunmap_atomic(kaddr);
+-		flush_dcache_page(dst);
+-	} else
+-		copy_user_highpage(dst, src, va, vma);
++		}
++	}
++
++	ret = true;
++
++pte_unlock:
++	if (locked)
++		pte_unmap_unlock(vmf->pte, vmf->ptl);
++	kunmap_atomic(kaddr);
++	flush_dcache_page(dst);
++
++	return ret;
+ }
+ 
+ static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
+@@ -2514,7 +2595,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+ 				vmf->address);
+ 		if (!new_page)
+ 			goto oom;
+-		cow_user_page(new_page, old_page, vmf->address, vma);
++
++		if (!cow_user_page(new_page, old_page, vmf)) {
++			/*
++			 * COW failed; if the fault was resolved by another
++			 * thread, that's fine. If not, userspace will
++			 * re-fault on the same address and we will handle
++			 * the fault on the second attempt.
++			 */
++			put_page(new_page);
++			if (old_page)
++				put_page(old_page);
++			return 0;
++		}
+ 	}
+ 
+ 	if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
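The default arch_faults_on_old_pte() above is deliberately pessimistic. An architecture whose MMU maintains the accessed bit in hardware could override it roughly as follows (hypothetical sketch; a real port would typically gate this on a runtime CPU-feature check rather than returning a constant):

	/* In the architecture's <asm/pgtable.h> (hypothetical): */
	#define arch_faults_on_old_pte arch_faults_on_old_pte
	static inline bool arch_faults_on_old_pte(void)
	{
		/* Hardware sets the accessed flag, so an old pte does not
		 * cause the extra fault that cow_user_page() guards against. */
		return false;
	}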
+diff --git a/mm/mmap.c b/mm/mmap.c
+index e84fd3347a518..f875386e7acd4 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2077,6 +2077,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 	info.low_limit = mm->mmap_base;
+ 	info.high_limit = TASK_SIZE;
+ 	info.align_mask = 0;
++	info.align_offset = 0;
+ 	return vm_unmapped_area(&info);
+ }
+ #endif
+@@ -2118,6 +2119,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ 	info.high_limit = mm->mmap_base;
+ 	info.align_mask = 0;
++	info.align_offset = 0;
+ 	addr = vm_unmapped_area(&info);
+ 
+ 	/*
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index c3084ff2569d2..3c0930d94a295 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -15,9 +15,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ 		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
+ 		if (err)
+ 		       break;
+-		addr += PAGE_SIZE;
+-		if (addr == end)
++		if (addr >= end - PAGE_SIZE)
+ 			break;
++		addr += PAGE_SIZE;
+ 		pte++;
+ 	}
+ 
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index 09731f4174c7e..3febffe0fca4a 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -537,10 +537,11 @@ static unsigned long swapin_nr_pages(unsigned long offset)
+ 		return 1;
+ 
+ 	hits = atomic_xchg(&swapin_readahead_hits, 0);
+-	pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
++	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
++				  max_pages,
+ 				  atomic_read(&last_readahead_pages));
+ 	if (!hits)
+-		prev_offset = offset;
++		WRITE_ONCE(prev_offset, offset);
+ 	atomic_set(&last_readahead_pages, pages);
+ 
+ 	return pages;
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 0047dcaf93697..adeb49fcad23e 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -998,7 +998,7 @@ start_over:
+ 			goto nextsi;
+ 		}
+ 		if (size == SWAPFILE_CLUSTER) {
+-			if (!(si->flags & SWP_FILE))
++			if (si->flags & SWP_BLKDEV)
+ 				n_ret = swap_alloc_cluster(si, swp_entries);
+ 		} else
+ 			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
+@@ -2738,10 +2738,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
+ 	else
+ 		type = si->type + 1;
+ 
++	++(*pos);
+ 	for (; (si = swap_type_to_swap_info(type)); type++) {
+ 		if (!(si->flags & SWP_USED) || !si->swap_map)
+ 			continue;
+-		++*pos;
+ 		return si;
+ 	}
+ 
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index b93dc8fc6007f..b7d7f6d65bd5b 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3109,8 +3109,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
+ 
+ 	/* kswapd must be awake if processes are being throttled */
+ 	if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
+-		pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
+-						(enum zone_type)ZONE_NORMAL);
++		if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL)
++			WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL);
++
+ 		wake_up_interruptible(&pgdat->kswapd_wait);
+ 	}
+ 
+@@ -3626,9 +3627,9 @@ out:
+ static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
+ 					   enum zone_type prev_classzone_idx)
+ {
+-	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
+-		return prev_classzone_idx;
+-	return pgdat->kswapd_classzone_idx;
++	enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
++
++	return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx;
+ }
+ 
+ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
+@@ -3672,8 +3673,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o
+ 		 * the previous request that slept prematurely.
+ 		 */
+ 		if (remaining) {
+-			pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
+-			pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
++			WRITE_ONCE(pgdat->kswapd_classzone_idx,
++				   kswapd_classzone_idx(pgdat, classzone_idx));
++
++			if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
++				WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
+ 		}
+ 
+ 		finish_wait(&pgdat->kswapd_wait, &wait);
+@@ -3755,12 +3759,12 @@ static int kswapd(void *p)
+ 	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+ 	set_freezable();
+ 
+-	pgdat->kswapd_order = 0;
+-	pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
++	WRITE_ONCE(pgdat->kswapd_order, 0);
++	WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
+ 	for ( ; ; ) {
+ 		bool ret;
+ 
+-		alloc_order = reclaim_order = pgdat->kswapd_order;
++		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
+ 		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
+ 
+ kswapd_try_sleep:
+@@ -3768,10 +3772,10 @@ kswapd_try_sleep:
+ 					classzone_idx);
+ 
+ 		/* Read the new order and classzone_idx */
+-		alloc_order = reclaim_order = pgdat->kswapd_order;
++		alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
+ 		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
+-		pgdat->kswapd_order = 0;
+-		pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
++		WRITE_ONCE(pgdat->kswapd_order, 0);
++		WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
+ 
+ 		ret = try_to_freeze();
+ 		if (kthread_should_stop())
+@@ -3816,20 +3820,23 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
+ 		   enum zone_type classzone_idx)
+ {
+ 	pg_data_t *pgdat;
++	enum zone_type curr_idx;
+ 
+ 	if (!managed_zone(zone))
+ 		return;
+ 
+ 	if (!cpuset_zone_allowed(zone, gfp_flags))
+ 		return;
++
+ 	pgdat = zone->zone_pgdat;
++	curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
++
++	if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx)
++		WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx);
++
++	if (READ_ONCE(pgdat->kswapd_order) < order)
++		WRITE_ONCE(pgdat->kswapd_order, order);
+ 
+-	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
+-		pgdat->kswapd_classzone_idx = classzone_idx;
+-	else
+-		pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
+-						  classzone_idx);
+-	pgdat->kswapd_order = max(pgdat->kswapd_order, order);
+ 	if (!waitqueue_active(&pgdat->kswapd_wait))
+ 		return;
+ 
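
The mm hunks above all apply one idiom, which recurs later in the tcp_recvmsg(), llc_ui_recvmsg() and af_unix hunks: fields that are read and written without a common lock are wrapped in READ_ONCE()/WRITE_ONCE() so the compiler cannot tear, refetch, or fuse the accesses. A minimal sketch of the pattern, using a hypothetical field rather than the kernel's pgdat:

    /* Sketch only: hypothetical shared field, not kernel code. */
    #include <linux/compiler.h>

    struct demand {
            int order;      /* written by wakers, read by a kthread, no lock */
    };

    static void waker(struct demand *d, int order)
    {
            /* publish the larger request with a single, untorn store */
            if (READ_ONCE(d->order) < order)
                    WRITE_ONCE(d->order, order);
    }

    static int worker(struct demand *d)
    {
            int order = READ_ONCE(d->order);        /* one untorn load */

            WRITE_ONCE(d->order, 0);                /* consume the request */
            return order;
    }
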
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index ad4f829193f05..5a6186b809874 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -1270,6 +1270,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
+ 		entry->vcc = NULL;
+ 	}
+ 	if (entry->recv_vcc) {
++		struct atm_vcc *vcc = entry->recv_vcc;
++		struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
++
++		kfree(vpriv);
++		vcc->user_back = NULL;
++
+ 		entry->recv_vcc->push = entry->old_recv_push;
+ 		vcc_release_async(entry->recv_vcc, -EPIPE);
+ 		entry->recv_vcc = NULL;
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index 9b8bf06ccb613..1401031f4bb4a 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -37,6 +37,7 @@
+ #include <linux/lockdep.h>
+ #include <linux/netdevice.h>
+ #include <linux/netlink.h>
++#include <linux/preempt.h>
+ #include <linux/rculist.h>
+ #include <linux/rcupdate.h>
+ #include <linux/seq_file.h>
+@@ -96,11 +97,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
+  */
+ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
+ {
+-	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
++	const struct batadv_bla_backbone_gw *gw;
+ 	u32 hash = 0;
+ 
+-	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
+-	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
++	gw = (struct batadv_bla_backbone_gw *)data;
++	hash = jhash(&gw->orig, sizeof(gw->orig), hash);
++	hash = jhash(&gw->vid, sizeof(gw->vid), hash);
+ 
+ 	return hash % size;
+ }
+@@ -1592,13 +1594,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
+ }
+ 
+ /**
+- * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
++ * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
+  * @bat_priv: the bat priv with all the soft interface information
+- * @skb: contains the bcast_packet to be checked
++ * @skb: contains the multicast packet to be checked
++ * @payload_ptr: pointer to position inside the head buffer of the skb
++ *  marking the start of the data to be CRC'ed
++ * @orig: originator mac address, NULL if unknown
+  *
+- * check if it is on our broadcast list. Another gateway might
+- * have sent the same packet because it is connected to the same backbone,
+- * so we have to remove this duplicate.
++ * Check if it is on our broadcast list. Another gateway might have sent the
++ * same packet because it is connected to the same backbone, so we have to
++ * remove this duplicate.
+  *
+  * This is performed by checking the CRC, which will tell us
+  * with a good chance that it is the same packet. If it is furthermore
+@@ -1607,19 +1612,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
+  *
+  * Return: true if a packet is in the duplicate list, false otherwise.
+  */
+-bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+-				    struct sk_buff *skb)
++static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
++				     struct sk_buff *skb, u8 *payload_ptr,
++				     const u8 *orig)
+ {
+-	int i, curr;
+-	__be32 crc;
+-	struct batadv_bcast_packet *bcast_packet;
+ 	struct batadv_bcast_duplist_entry *entry;
+ 	bool ret = false;
+-
+-	bcast_packet = (struct batadv_bcast_packet *)skb->data;
++	int i, curr;
++	__be32 crc;
+ 
+ 	/* calculate the crc ... */
+-	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
++	crc = batadv_skb_crc32(skb, payload_ptr);
+ 
+ 	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
+ 
+@@ -1638,8 +1641,21 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ 		if (entry->crc != crc)
+ 			continue;
+ 
+-		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
+-			continue;
++		/* are the originators both known and not anonymous? */
++		if (orig && !is_zero_ether_addr(orig) &&
++		    !is_zero_ether_addr(entry->orig)) {
++			/* If known, check if the new frame came from
++			 * the same originator:
++			 * We are safe to take identical frames from the
++			 * same orig, if known, as multiplications in
++			 * the mesh are detected via the (orig, seqno) pair.
++			 * So we can be a bit more liberal here and allow
++			 * identical frames from the same orig which the source
++			 * host might have sent multiple times on purpose.
++			 */
++			if (batadv_compare_eth(entry->orig, orig))
++				continue;
++		}
+ 
+ 		/* this entry seems to match: same crc, not too old,
+ 		 * and from another gw. therefore return true to forbid it.
+@@ -1655,7 +1671,14 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ 	entry = &bat_priv->bla.bcast_duplist[curr];
+ 	entry->crc = crc;
+ 	entry->entrytime = jiffies;
+-	ether_addr_copy(entry->orig, bcast_packet->orig);
++
++	/* known originator */
++	if (orig)
++		ether_addr_copy(entry->orig, orig);
++	/* anonymous originator */
++	else
++		eth_zero_addr(entry->orig);
++
+ 	bat_priv->bla.bcast_duplist_curr = curr;
+ 
+ out:
+@@ -1664,6 +1687,48 @@ out:
+ 	return ret;
+ }
+ 
++/**
++ * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup.
++ * @bat_priv: the bat priv with all the soft interface information
++ * @skb: contains the multicast packet to be checked, decapsulated from a
++ *  unicast_packet
++ *
++ * Check if it is on our broadcast list. Another gateway might have sent the
++ * same packet because it is connected to the same backbone, so we have to
++ * remove this duplicate.
++ *
++ * Return: true if a packet is in the duplicate list, false otherwise.
++ */
++static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
++					   struct sk_buff *skb)
++{
++	return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
++}
++
++/**
++ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
++ * @bat_priv: the bat priv with all the soft interface information
++ * @skb: contains the bcast_packet to be checked
++ *
++ * Check if it is on our broadcast list. Another gateway might have sent the
++ * same packet because it is connected to the same backbone, so we have to
++ * remove this duplicate.
++ *
++ * Return: true if a packet is in the duplicate list, false otherwise.
++ */
++bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
++				    struct sk_buff *skb)
++{
++	struct batadv_bcast_packet *bcast_packet;
++	u8 *payload_ptr;
++
++	bcast_packet = (struct batadv_bcast_packet *)skb->data;
++	payload_ptr = (u8 *)(bcast_packet + 1);
++
++	return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
++					bcast_packet->orig);
++}
++
+ /**
+  * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
+  *  the VLAN identified by vid.
+@@ -1825,7 +1890,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
+  * @bat_priv: the bat priv with all the soft interface information
+  * @skb: the frame to be checked
+  * @vid: the VLAN ID of the frame
+- * @is_bcast: the packet came in a broadcast packet type.
++ * @packet_type: the batman packet type this frame came in
+  *
+  * batadv_bla_rx avoidance checks if:
+  *  * we have to race for a claim
+@@ -1837,7 +1902,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
+  * further process the skb.
+  */
+ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+-		   unsigned short vid, bool is_bcast)
++		   unsigned short vid, int packet_type)
+ {
+ 	struct batadv_bla_backbone_gw *backbone_gw;
+ 	struct ethhdr *ethhdr;
+@@ -1859,9 +1924,32 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ 		goto handled;
+ 
+ 	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
+-		/* don't allow broadcasts while requests are in flight */
+-		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
+-			goto handled;
++		/* don't allow multicast packets while requests are in flight */
++		if (is_multicast_ether_addr(ethhdr->h_dest))
++			/* Both broadcast flooding or multicast-via-unicasts
++			 * delivery might send to multiple backbone gateways
++			 * sharing the same LAN and therefore need to coordinate
++			 * which backbone gateway forwards into the LAN,
++			 * by claiming the payload source address.
++			 *
++			 * Broadcast flooding and multicast-via-unicasts
++			 * delivery use the following two batman packet types.
++			 * Note: explicitly exclude BATADV_UNICAST_4ADDR,
++			 * as the DHCP gateway feature will send explicitly
++			 * to only one BLA gateway, so the claiming process
++			 * should be avoided there.
++			 */
++			if (packet_type == BATADV_BCAST ||
++			    packet_type == BATADV_UNICAST)
++				goto handled;
++
++	/* potential duplicates from foreign BLA backbone gateways via
++	 * multicast-in-unicast packets
++	 */
++	if (is_multicast_ether_addr(ethhdr->h_dest) &&
++	    packet_type == BATADV_UNICAST &&
++	    batadv_bla_check_ucast_duplist(bat_priv, skb))
++		goto handled;
+ 
+ 	ether_addr_copy(search_claim.addr, ethhdr->h_source);
+ 	search_claim.vid = vid;
+@@ -1896,13 +1984,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ 		goto allow;
+ 	}
+ 
+-	/* if it is a broadcast ... */
+-	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
++	/* if it is a multicast ... */
++	if (is_multicast_ether_addr(ethhdr->h_dest) &&
++	    (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
+ 		/* ... drop it. the responsible gateway is in charge.
+ 		 *
+-		 * We need to check is_bcast because with the gateway
++		 * We need to check packet type because with the gateway
+ 		 * feature, broadcasts (like DHCP requests) may be sent
+-		 * using a unicast packet type.
++		 * using a unicast 4 address packet type. See comment above.
+ 		 */
+ 		goto handled;
+ 	} else {
+diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
+index 71f95a3e4d3f3..af28fdb01467c 100644
+--- a/net/batman-adv/bridge_loop_avoidance.h
++++ b/net/batman-adv/bridge_loop_avoidance.h
+@@ -48,7 +48,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
+ 
+ #ifdef CONFIG_BATMAN_ADV_BLA
+ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+-		   unsigned short vid, bool is_bcast);
++		   unsigned short vid, int packet_type);
+ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ 		   unsigned short vid);
+ bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
+@@ -79,7 +79,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
+ 
+ static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
+ 				 struct sk_buff *skb, unsigned short vid,
+-				 bool is_bcast)
++				 int packet_type)
+ {
+ 	return false;
+ }
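
A note on the header hunk: it follows the usual kernel compile-out pattern, where the real function is declared under the CONFIG option and an inline stub with the same (updated) signature is provided otherwise, so call sites never need their own #ifdefs. A generic sketch with a hypothetical feature name:

    /* Generic sketch; CONFIG_DEMO_FEATURE and demo_feature_rx are hypothetical. */
    #include <linux/types.h>

    struct sk_buff;

    #ifdef CONFIG_DEMO_FEATURE
    bool demo_feature_rx(struct sk_buff *skb, int packet_type);
    #else
    static inline bool demo_feature_rx(struct sk_buff *skb, int packet_type)
    {
            return false;   /* feature compiled out: never claim the frame */
    }
    #endif
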
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index cc3ed93a6d513..98af41e3810dc 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -838,6 +838,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+ 	vid = batadv_get_vid(skb, hdr_len);
+ 	ethhdr = (struct ethhdr *)(skb->data + hdr_len);
+ 
++	/* do not reroute multicast frames in a unicast header */
++	if (is_multicast_ether_addr(ethhdr->h_dest))
++		return true;
++
+ 	/* check if the destination client was served by this node and it is now
+ 	 * roaming. In this case, it means that the node has got a ROAM_ADV
+ 	 * message and that it knows the new destination in the mesh to re-route
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index a2976adeeedce..6ff78080ec7fb 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -426,10 +426,10 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	struct vlan_ethhdr *vhdr;
+ 	struct ethhdr *ethhdr;
+ 	unsigned short vid;
+-	bool is_bcast;
++	int packet_type;
+ 
+ 	batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
+-	is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
++	packet_type = batadv_bcast_packet->packet_type;
+ 
+ 	skb_pull_rcsum(skb, hdr_size);
+ 	skb_reset_mac_header(skb);
+@@ -472,7 +472,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	/* Let the bridge loop avoidance check the packet. If it will
+ 	 * not handle it, we can safely push it up.
+ 	 */
+-	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
++	if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
+ 		goto out;
+ 
+ 	if (orig_node)
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 2b4a7cf03041b..310622086f74b 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -41,12 +41,27 @@
+ 
+ /* Handle HCI Event packets */
+ 
+-static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
++static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
++				  u8 *new_status)
+ {
+ 	__u8 status = *((__u8 *) skb->data);
+ 
+ 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ 
++	/* It is possible that we receive the Inquiry Complete event right
++	 * before the Inquiry Cancel Command Complete event, in which case
++	 * the latter should have a status of Command Disallowed (0x0c).
++	 * This should not be treated as an error, since
++	 * we actually achieve what Inquiry Cancel wants to achieve,
++	 * which is to end the last Inquiry session.
++	 */
++	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
++		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
++		status = 0x00;
++	}
++
++	*new_status = status;
++
+ 	if (status)
+ 		return;
+ 
+@@ -3039,7 +3054,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
+ 
+ 	switch (*opcode) {
+ 	case HCI_OP_INQUIRY_CANCEL:
+-		hci_cc_inquiry_cancel(hdev, skb);
++		hci_cc_inquiry_cancel(hdev, skb, status);
+ 		break;
+ 
+ 	case HCI_OP_PERIODIC_INQ:
+@@ -5738,6 +5753,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ 	u8 status = 0, event = hdr->evt, req_evt = 0;
+ 	u16 opcode = HCI_OP_NOP;
+ 
++	if (!event) {
++		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
++		goto done;
++	}
++
+ 	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
+ 		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
+ 		opcode = __le16_to_cpu(cmd_hdr->opcode);
+@@ -5949,6 +5969,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ 		req_complete_skb(hdev, status, opcode, orig_skb);
+ 	}
+ 
++done:
+ 	kfree_skb(orig_skb);
+ 	kfree_skb(skb);
+ 	hdev->stat.evt_rx++;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 0d84d1f820d4c..c04107d446016 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -414,6 +414,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
+ 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+ 
+ 	mutex_lock(&conn->chan_lock);
++	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
++	 * this work. No need to call l2cap_chan_hold(chan) here again.
++	 */
+ 	l2cap_chan_lock(chan);
+ 
+ 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
+@@ -426,12 +429,12 @@ static void l2cap_chan_timeout(struct work_struct *work)
+ 
+ 	l2cap_chan_close(chan, reason);
+ 
+-	l2cap_chan_unlock(chan);
+-
+ 	chan->ops->close(chan);
+-	mutex_unlock(&conn->chan_lock);
+ 
++	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
++
++	mutex_unlock(&conn->chan_lock);
+ }
+ 
+ struct l2cap_chan *l2cap_chan_create(void)
+@@ -1725,9 +1728,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
+ 
+ 		l2cap_chan_del(chan, err);
+ 
+-		l2cap_chan_unlock(chan);
+-
+ 		chan->ops->close(chan);
++
++		l2cap_chan_unlock(chan);
+ 		l2cap_chan_put(chan);
+ 	}
+ 
+@@ -4114,7 +4117,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 		return 0;
+ 	}
+ 
+-	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
++	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
++	    chan->state != BT_CONNECTED) {
+ 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+ 				       chan->dcid);
+ 		goto unlock;
+@@ -4337,6 +4341,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+ 		return 0;
+ 	}
+ 
++	l2cap_chan_hold(chan);
+ 	l2cap_chan_lock(chan);
+ 
+ 	rsp.dcid = cpu_to_le16(chan->scid);
+@@ -4345,12 +4350,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+ 
+ 	chan->ops->set_shutdown(chan);
+ 
+-	l2cap_chan_hold(chan);
+ 	l2cap_chan_del(chan, ECONNRESET);
+ 
+-	l2cap_chan_unlock(chan);
+-
+ 	chan->ops->close(chan);
++
++	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
+ 
+ 	mutex_unlock(&conn->chan_lock);
+@@ -4382,20 +4386,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ 		return 0;
+ 	}
+ 
++	l2cap_chan_hold(chan);
+ 	l2cap_chan_lock(chan);
+ 
+ 	if (chan->state != BT_DISCONN) {
+ 		l2cap_chan_unlock(chan);
++		l2cap_chan_put(chan);
+ 		mutex_unlock(&conn->chan_lock);
+ 		return 0;
+ 	}
+ 
+-	l2cap_chan_hold(chan);
+ 	l2cap_chan_del(chan, 0);
+ 
+-	l2cap_chan_unlock(chan);
+-
+ 	chan->ops->close(chan);
++
++	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
+ 
+ 	mutex_unlock(&conn->chan_lock);
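
All three l2cap_core.c hunks converge on the same ordering: take a reference before the channel lock so the object cannot be freed underneath it, run the ->close() callback while the lock is still held, and only then unlock and drop the reference. A compact sketch of that ordering with a hypothetical object (the real code uses l2cap_chan_hold/put, not kref directly):

    #include <linux/kref.h>
    #include <linux/mutex.h>

    /* Hypothetical channel; mirrors the hold/lock/close/unlock/put order. */
    struct demo_chan {
            struct kref ref;
            struct mutex lock;
            void (*close)(struct demo_chan *chan);
    };

    static void demo_chan_release(struct kref *ref)
    {
            /* last reference gone: safe to free the object here */
    }

    static void demo_chan_teardown(struct demo_chan *chan)
    {
            kref_get(&chan->ref);           /* pin before locking */
            mutex_lock(&chan->lock);

            chan->close(chan);              /* callback runs under the lock */

            mutex_unlock(&chan->lock);
            kref_put(&chan->ref, demo_chan_release);        /* may free now */
    }
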
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index a3a2cd55e23a9..5572042f04531 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1039,7 +1039,7 @@ done:
+ }
+ 
+ /* Kill socket (only if zapped and orphan)
+- * Must be called on unlocked socket.
++ * Must be called on an unlocked socket, with the l2cap channel lock held.
+  */
+ static void l2cap_sock_kill(struct sock *sk)
+ {
+@@ -1190,6 +1190,7 @@ static int l2cap_sock_release(struct socket *sock)
+ {
+ 	struct sock *sk = sock->sk;
+ 	int err;
++	struct l2cap_chan *chan;
+ 
+ 	BT_DBG("sock %p, sk %p", sock, sk);
+ 
+@@ -1199,9 +1200,17 @@ static int l2cap_sock_release(struct socket *sock)
+ 	bt_sock_unlink(&l2cap_sk_list, sk);
+ 
+ 	err = l2cap_sock_shutdown(sock, 2);
++	chan = l2cap_pi(sk)->chan;
++
++	l2cap_chan_hold(chan);
++	l2cap_chan_lock(chan);
+ 
+ 	sock_orphan(sk);
+ 	l2cap_sock_kill(sk);
++
++	l2cap_chan_unlock(chan);
++	l2cap_chan_put(chan);
++
+ 	return err;
+ }
+ 
+@@ -1219,12 +1228,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
+ 		BT_DBG("child chan %p state %s", chan,
+ 		       state_to_string(chan->state));
+ 
++		l2cap_chan_hold(chan);
+ 		l2cap_chan_lock(chan);
++
+ 		__clear_chan_timer(chan);
+ 		l2cap_chan_close(chan, ECONNRESET);
+-		l2cap_chan_unlock(chan);
+-
+ 		l2cap_sock_kill(sk);
++
++		l2cap_chan_unlock(chan);
++		l2cap_chan_put(chan);
+ 	}
+ }
+ 
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 25a2c3186e14a..557bd5cc8f94c 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -5418,8 +5418,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
+ 	bool indirect = BPF_MODE(orig->code) == BPF_IND;
+ 	struct bpf_insn *insn = insn_buf;
+ 
+-	/* We're guaranteed here that CTX is in R6. */
+-	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
+ 	if (!indirect) {
+ 		*insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
+ 	} else {
+@@ -5427,6 +5425,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
+ 		if (orig->imm)
+ 			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
+ 	}
++	/* We're guaranteed here that CTX is in R6. */
++	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
+ 
+ 	switch (BPF_SIZE(orig->code)) {
+ 	case BPF_B:
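
The filter.c hunk only moves the CTX-into-R1 instruction, but the order is the whole fix: in the indirect case R2 is loaded from the original source register, which may itself be R1, so emitting the R1 move first would clobber the offset before it is read. The rule generalizes to any fixed-register calling convention; a toy illustration:

    /* Toy sketch: consume all sources before overwriting any register
     * that might hold one. (Illustrative register file, not BPF code.)
     */
    static void marshal_args(long regs[], int src_reg)
    {
            /* The wrong order, regs[1] = regs[6]; regs[2] = regs[src_reg];,
             * destroys the source whenever src_reg == 1.
             */
            regs[2] = regs[src_reg];        /* read the source first */
            regs[1] = regs[6];              /* then place ctx into R1 */
    }
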
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index bf738ec68cb53..6e890f51b7d86 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2844,6 +2844,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ 		*pos = cpu+1;
+ 		return per_cpu_ptr(tbl->stats, cpu);
+ 	}
++	(*pos)++;
+ 	return NULL;
+ }
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 84de87b7eedcd..3db428242b22d 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -274,6 +274,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ 		*pos = cpu+1;
+ 		return &per_cpu(rt_cache_stat, cpu);
+ 	}
++	(*pos)++;
+ 	return NULL;
+ 
+ }
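
The neighbour.c and route.c hunks above (and the swap_next(), selinuxfs and ip6_fib changes elsewhere in this patch) all repair the same seq_file contract: a ->next() handler must advance *pos on every call, including the final one that returns NULL, otherwise the seq_file core may call it again and loop or emit duplicate records. A minimal conforming iterator over a hypothetical fixed table:

    #include <linux/kernel.h>
    #include <linux/seq_file.h>

    /* Hypothetical table; only the *pos handling matters here. */
    static int demo_table[4] = { 1, 2, 3, 4 };

    static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
            (*pos)++;                       /* advance unconditionally */
            if (*pos >= (loff_t)ARRAY_SIZE(demo_table))
                    return NULL;            /* exhausted, but *pos still moved */
            return &demo_table[*pos];
    }
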
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 616ff2970f4fc..4ce3397e6fcf7 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2038,7 +2038,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+ 
+ 		/* Well, if we have backlog, try to process it now yet. */
+ 
+-		if (copied >= target && !sk->sk_backlog.tail)
++		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ 			break;
+ 
+ 		if (copied) {
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 05a206202e23d..b924941b96a31 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -2377,14 +2377,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ 	struct net *net = seq_file_net(seq);
+ 	struct ipv6_route_iter *iter = seq->private;
+ 
++	++(*pos);
+ 	if (!v)
+ 		goto iter_table;
+ 
+ 	n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
+-	if (n) {
+-		++*pos;
++	if (n)
+ 		return n;
+-	}
+ 
+ iter_table:
+ 	ipv6_route_check_sernum(iter);
+@@ -2392,8 +2391,6 @@ iter_table:
+ 	r = fib6_walk_continue(&iter->w);
+ 	spin_unlock_bh(&iter->tbl->tb6_lock);
+ 	if (r > 0) {
+-		if (v)
+-			++*pos;
+ 		return iter->w.leaf;
+ 	} else if (r < 0) {
+ 		fib6_walker_unlink(net, &iter->w);
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 6ead3c39f3566..bcba579e292ff 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -785,7 +785,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 		}
+ 		/* Well, if we have backlog, try to process it now yet. */
+ 
+-		if (copied >= target && !sk->sk_backlog.tail)
++		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ 			break;
+ 
+ 		if (copied) {
+diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
+index bcd1a5e6ebf42..2f873a0dc5836 100644
+--- a/net/mac802154/tx.c
++++ b/net/mac802154/tx.c
+@@ -42,11 +42,11 @@ void ieee802154_xmit_worker(struct work_struct *work)
+ 	if (res)
+ 		goto err_tx;
+ 
+-	ieee802154_xmit_complete(&local->hw, skb, false);
+-
+ 	dev->stats.tx_packets++;
+ 	dev->stats.tx_bytes += skb->len;
+ 
++	ieee802154_xmit_complete(&local->hw, skb, false);
++
+ 	return;
+ 
+ err_tx:
+@@ -86,6 +86,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
+ 
+ 	/* async is priority, otherwise sync is fallback */
+ 	if (local->ops->xmit_async) {
++		unsigned int len = skb->len;
++
+ 		ret = drv_xmit_async(local, skb);
+ 		if (ret) {
+ 			ieee802154_wake_queue(&local->hw);
+@@ -93,7 +95,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
+ 		}
+ 
+ 		dev->stats.tx_packets++;
+-		dev->stats.tx_bytes += skb->len;
++		dev->stats.tx_bytes += len;
+ 	} else {
+ 		local->tx_skb = skb;
+ 		queue_work(local->workqueue, &local->tx_work);
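
Both mac802154 hunks enforce the same lifetime rule: once the skb has been handed to the completion path or to an asynchronous driver callback, it may already have been freed, so anything still needed (here skb->len for the byte counters) must be cached beforehand. A sketch of the rule, where hand_off_to_hw() is a hypothetical stand-in for the driver call:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    int hand_off_to_hw(struct sk_buff *skb);    /* hypothetical helper */

    static int demo_xmit(struct net_device *dev, struct sk_buff *skb)
    {
            unsigned int len = skb->len;    /* cache before ownership moves */
            int ret = hand_off_to_hw(skb);  /* skb may be freed from here on */

            if (!ret) {
                    dev->stats.tx_packets++;
                    dev->stats.tx_bytes += len;     /* never skb->len here */
            }
            return ret;
    }
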
+diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
+index c038e021a5916..5ea2471ffc03f 100644
+--- a/net/openvswitch/meter.c
++++ b/net/openvswitch/meter.c
+@@ -255,8 +255,8 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
+ 		 *
+ 		 * Start with a full bucket.
+ 		 */
+-		band->bucket = (band->burst_size + band->rate) * 1000;
+-		band_max_delta_t = band->bucket / band->rate;
++		band->bucket = (band->burst_size + band->rate) * 1000ULL;
++		band_max_delta_t = div_u64(band->bucket, band->rate);
+ 		if (band_max_delta_t > meter->max_delta_t)
+ 			meter->max_delta_t = band_max_delta_t;
+ 		band++;
+diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h
+index 964ace2650f89..970557ed5b5b6 100644
+--- a/net/openvswitch/meter.h
++++ b/net/openvswitch/meter.h
+@@ -26,7 +26,7 @@ struct dp_meter_band {
+ 	u32 type;
+ 	u32 rate;
+ 	u32 burst_size;
+-	u32 bucket; /* 1/1000 packets, or in bits */
++	u64 bucket; /* 1/1000 packets, or in bits */
+ 	struct ovs_flow_stats stats;
+ };
+ 
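
Two details carry the openvswitch fix: the 1000ULL literal promotes the multiplication to 64 bits before it can wrap in u32 arithmetic, and div_u64() replaces the plain '/' because 32-bit kernels have no native 64-by-32 divide for u64 operands. In sketch form, with illustrative parameter names:

    #include <linux/math64.h>
    #include <linux/types.h>

    static u64 demo_max_delta_t(u32 burst_size, u32 rate)
    {
            /* 1000ULL forces a 64-bit multiply; a plain 1000 would let
             * the product wrap in 32 bits before the widening assignment.
             */
            u64 bucket = (burst_size + rate) * 1000ULL;

            return div_u64(bucket, rate);   /* links on 32-bit kernels too */
    }
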
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index 7bb8e5603298d..d6e83a37a1adf 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -51,6 +51,7 @@
+ #include <net/sctp/sctp.h>
+ #include <net/sctp/sm.h>
+ #include <net/sctp/stream_sched.h>
++#include <trace/events/sctp.h>
+ 
+ /* Declare internal functions here.  */
+ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
+@@ -1257,6 +1258,11 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
+ 	/* Grab the association's destination address list. */
+ 	transport_list = &asoc->peer.transport_addr_list;
+ 
++	/* SCTP path tracepoint for congestion control debugging. */
++	list_for_each_entry(transport, transport_list, transports) {
++		trace_sctp_probe_path(transport, asoc);
++	}
++
+ 	sack_ctsn = ntohl(sack->cum_tsn_ack);
+ 	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
+ 	asoc->stats.gapcnt += gap_ack_blocks;
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index c8ee8e801edb8..709c082dc9059 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -103,8 +103,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
+ }
+ EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
+ 
+-/*
+- * Format the transport list for printing
++/**
++ * svc_print_xprts - Format the transport list for printing
++ * @buf: target buffer for the formatted transport list
++ * @maxlen: length of target buffer
++ *
++ * Fills in @buf with a string containing a list of transport names, each name
++ * terminated with '\n'. If the buffer is too small, some entries may be
++ * missing, but it is guaranteed that all lines in the output buffer are
++ * complete.
++ *
++ * Returns positive length of the filled-in string.
+  */
+ int svc_print_xprts(char *buf, int maxlen)
+ {
+@@ -117,9 +126,9 @@ int svc_print_xprts(char *buf, int maxlen)
+ 	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
+ 		int slen;
+ 
+-		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
+-		slen = strlen(tmpstr);
+-		if (len + slen > maxlen)
++		slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
++				xcl->xcl_name, xcl->xcl_max_payload);
++		if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
+ 			break;
+ 		len += slen;
+ 		strcat(buf, tmpstr);
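
The svc_print_xprts() rewrite leans on the C99 snprintf() contract: the return value is the length the full string would have had, so a result >= the buffer size means the output was truncated. The same reasoning drives the later perf cpumap changes from '== PATH_MAX' to '>= PATH_MAX'. A stand-alone userspace sketch:

    #include <stdio.h>
    #include <string.h>

    /* Append "name value\n" to buf; fail cleanly on any truncation. */
    static int append_line(char *buf, size_t maxlen, const char *name, int val)
    {
            char tmp[64];
            int slen = snprintf(tmp, sizeof(tmp), "%s %d\n", name, val);

            /* would-be length >= buffer size means snprintf truncated */
            if (slen < 0 || slen >= (int)sizeof(tmp) ||
                strlen(buf) + slen >= maxlen)
                    return -1;

            strcat(buf, tmp);
            return slen;
    }
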
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+index b9827665ff355..d183d4aee822c 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+@@ -256,6 +256,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt)
+ {
+ 	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
+ 
++	xprt_rdma_free_addresses(xprt);
+ 	xprt_free(xprt);
+ 	module_put(THIS_MODULE);
+ }
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index 41f4464ac6cc5..ec9a7137d2677 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -407,7 +407,9 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
+ 		return -EWOULDBLOCK;
+ 	if (ret == sizeof(s)) {
+ 		read_lock_bh(&sk->sk_callback_lock);
+-		ret = tipc_conn_rcv_sub(srv, con, &s);
++		/* RACE: the connection can be closed in the meantime */
++		if (likely(connected(con)))
++			ret = tipc_conn_rcv_sub(srv, con, &s);
+ 		read_unlock_bh(&sk->sk_callback_lock);
+ 		if (!ret)
+ 			return 0;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 2318e2e2748f4..2020306468af4 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -192,11 +192,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
+ 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
+ }
+ 
+-static inline int unix_recvq_full(struct sock const *sk)
++static inline int unix_recvq_full(const struct sock *sk)
+ {
+ 	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+ }
+ 
++static inline int unix_recvq_full_lockless(const struct sock *sk)
++{
++	return skb_queue_len_lockless(&sk->sk_receive_queue) >
++		READ_ONCE(sk->sk_max_ack_backlog);
++}
++
+ struct sock *unix_peer_get(struct sock *s)
+ {
+ 	struct sock *peer;
+@@ -1788,7 +1794,8 @@ restart_locked:
+ 	 * - unix_peer(sk) == sk by time of get but disconnected before lock
+ 	 */
+ 	if (other != sk &&
+-	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++	    unlikely(unix_peer(other) != sk &&
++	    unix_recvq_full_lockless(other))) {
+ 		if (timeo) {
+ 			timeo = unix_wait_for_peer(other, timeo);
+ 
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 452254fd89f87..250b725f5754c 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3304,6 +3304,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
+ 		return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
+ 	}
+ 
++	if (!selinux_state.initialized)
++		return (inode_owner_or_capable(inode) ? 0 : -EPERM);
++
+ 	sbsec = inode->i_sb->s_security;
+ 	if (!(sbsec->flags & SBLABEL_MNT))
+ 		return -EOPNOTSUPP;
+@@ -3387,6 +3390,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
+ 		return;
+ 	}
+ 
++	if (!selinux_state.initialized) {
++		/* If we haven't even been initialized, then we can't validate
++		 * against a policy, so leave the label as invalid. It may
++		 * resolve to a valid label on the next revalidation try if
++		 * we've since initialized.
++		 */
++		return;
++	}
++
+ 	rc = security_context_to_sid_force(&selinux_state, value, size,
+ 					   &newsid);
+ 	if (rc) {
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index f3a5a138a096d..60b3f16bb5c7b 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -1509,6 +1509,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
+ 		*idx = cpu + 1;
+ 		return &per_cpu(avc_cache_stats, cpu);
+ 	}
++	(*idx)++;
+ 	return NULL;
+ }
+ 
+diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
+index 714a51721a313..ab9236e4c157e 100644
+--- a/sound/hda/hdac_bus.c
++++ b/sound/hda/hdac_bus.c
+@@ -155,6 +155,7 @@ static void process_unsol_events(struct work_struct *work)
+ 	struct hdac_driver *drv;
+ 	unsigned int rp, caddr, res;
+ 
++	spin_lock_irq(&bus->reg_lock);
+ 	while (bus->unsol_rp != bus->unsol_wp) {
+ 		rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
+ 		bus->unsol_rp = rp;
+@@ -166,10 +167,13 @@ static void process_unsol_events(struct work_struct *work)
+ 		codec = bus->caddr_tbl[caddr & 0x0f];
+ 		if (!codec || !codec->dev.driver)
+ 			continue;
++		spin_unlock_irq(&bus->reg_lock);
+ 		drv = drv_to_hdac_driver(codec->dev.driver);
+ 		if (drv->unsol_event)
+ 			drv->unsol_event(codec, res);
++		spin_lock_irq(&bus->reg_lock);
+ 	}
++	spin_unlock_irq(&bus->reg_lock);
+ }
+ 
+ /**
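
The hdac_bus hunk is the classic drain-a-ring-under-a-spinlock shape: the read/write pointers are only ever touched with the lock held, but the lock is dropped around the codec callback, which may take other locks or block. On re-acquisition the loop condition re-reads the indices, so newly queued events are still picked up. In outline, with a hypothetical ring and handler:

    #include <linux/spinlock.h>

    #define DEMO_RING_SIZE 64

    struct demo_bus {
            spinlock_t lock;
            unsigned int rp, wp;
            unsigned int ring[DEMO_RING_SIZE];
    };

    void demo_handle(unsigned int res);     /* hypothetical; may block */

    static void demo_drain(struct demo_bus *bus)
    {
            spin_lock_irq(&bus->lock);
            while (bus->rp != bus->wp) {
                    unsigned int res;

                    bus->rp = (bus->rp + 1) % DEMO_RING_SIZE;
                    res = bus->ring[bus->rp];

                    spin_unlock_irq(&bus->lock);    /* callback may block */
                    demo_handle(res);
                    spin_lock_irq(&bus->lock);      /* re-check indices */
            }
            spin_unlock_irq(&bus->lock);
    }
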
+diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
+index 7d049569012c1..3f06986fbecf8 100644
+--- a/sound/pci/asihpi/hpioctl.c
++++ b/sound/pci/asihpi/hpioctl.c
+@@ -350,7 +350,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
+ 	struct hpi_message hm;
+ 	struct hpi_response hr;
+ 	struct hpi_adapter adapter;
+-	struct hpi_pci pci;
++	struct hpi_pci pci = { 0 };
+ 
+ 	memset(&adapter, 0, sizeof(adapter));
+ 
+@@ -506,7 +506,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev,
+ 	return 0;
+ 
+ err:
+-	for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) {
++	while (--idx >= 0) {
+ 		if (pci.ap_mem_base[idx]) {
+ 			iounmap(pci.ap_mem_base[idx]);
+ 			pci.ap_mem_base[idx] = NULL;
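
The two asihpi hunks belong together: zero-initializing the hpi_pci struct guarantees the error path never inspects stack garbage, and counting idx back down unwinds exactly the mappings that were established, instead of blindly iterating every slot. The shape of that pattern, with hypothetical BAR arrays:

    #include <linux/errno.h>
    #include <linux/io.h>

    #define DEMO_BARS 4

    static int demo_map_bars(resource_size_t start[DEMO_BARS],
                             size_t len[DEMO_BARS],
                             void __iomem *base[DEMO_BARS])
    {
            int idx;

            for (idx = 0; idx < DEMO_BARS; idx++) {
                    base[idx] = ioremap(start[idx], len[idx]);
                    if (!base[idx])
                            goto err;
            }
            return 0;

    err:
            while (--idx >= 0) {    /* undo only what succeeded */
                    iounmap(base[idx]);
                    base[idx] = NULL;
            }
            return -ENODEV;
    }
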
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index fa261b27d8588..8198d2e53b7df 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -1169,16 +1169,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
+ 		if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
+ 			active = true;
+ 
+-		/* clear rirb int */
+ 		status = azx_readb(chip, RIRBSTS);
+ 		if (status & RIRB_INT_MASK) {
++			/*
++			 * Clearing the interrupt status here ensures that no
++			 * interrupt gets masked after the RIRB wp is read in
++			 * snd_hdac_bus_update_rirb. This avoids a possible
++			 * race condition where codec response in RIRB may
++			 * remain unserviced by IRQ, eventually falling back
++			 * to polling mode in azx_rirb_get_response.
++			 */
++			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
+ 			active = true;
+ 			if (status & RIRB_INT_RESPONSE) {
+ 				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
+ 					udelay(80);
+ 				snd_hdac_bus_update_rirb(bus);
+ 			}
+-			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
+ 		}
+ 	} while (active && ++repeat < 10);
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 9c5b3d19bfa73..24bc9e4460473 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3290,7 +3290,11 @@ static void alc256_shutup(struct hda_codec *codec)
+ 
+ 	/* 3k pull low control for Headset jack. */
+ 	/* NOTE: call this before clearing the pin, otherwise codec stalls */
+-	alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
++	/* If the 3k pulldown control is disabled for alc257, mic detection
++	 * will not work correctly when booting with a headset plugged in,
++	 * so skip setting it for the alc257 codec.
++	 */
++	if (codec->core.vendor_id != 0x10ec0257)
++		alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
+ 
+ 	if (!spec->no_shutup_pins)
+ 		snd_hda_codec_write(codec, hp_pin, 0,
+@@ -5612,6 +5616,7 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
+ #include "hp_x360_helper.c"
+ 
+ enum {
++	ALC269_FIXUP_GPIO2,
+ 	ALC269_FIXUP_SONY_VAIO,
+ 	ALC275_FIXUP_SONY_VAIO_GPIO2,
+ 	ALC269_FIXUP_DELL_M101Z,
+@@ -5764,6 +5769,10 @@ enum {
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
++	[ALC269_FIXUP_GPIO2] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_gpio2,
++	},
+ 	[ALC269_FIXUP_SONY_VAIO] = {
+ 		.type = HDA_FIXUP_PINCTLS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -6559,6 +6568,8 @@ static const struct hda_fixup alc269_fixups[] = {
+ 	[ALC233_FIXUP_LENOVO_MULTI_CODECS] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc233_alc662_fixup_lenovo_dual_codecs,
++		.chained = true,
++		.chain_id = ALC269_FIXUP_GPIO2
+ 	},
+ 	[ALC233_FIXUP_ACER_HEADSET_MIC] = {
+ 		.type = HDA_FIXUP_VERBS,
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 89b6e187ac235..a5b0c40ee545f 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -2130,10 +2130,16 @@ static void max98090_pll_work(struct max98090_priv *max98090)
+ 
+ 	dev_info_ratelimited(component->dev, "PLL unlocked\n");
+ 
++	/*
++	 * As the datasheet suggests, the maximum PLL lock time should be
++	 * 7 msec.  The workaround resets the codec softly by toggling SHDN
++	 * off and on if the PLL fails to lock within 10 msec.  Notably,
++	 * there is no suggested hold time for SHDN off.
++	 */
++
+ 	/* Toggle shutdown OFF then ON */
+ 	snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
+ 			    M98090_SHDNN_MASK, 0);
+-	msleep(10);
+ 	snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
+ 			    M98090_SHDNN_MASK, M98090_SHDNN_MASK);
+ 
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 01acb8da2f48e..e3e069277a3ff 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -3376,6 +3376,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+ 		return -EINVAL;
+ 	}
+ 
++	pm_runtime_get_sync(component->dev);
++
+ 	switch (micbias) {
+ 	case 1:
+ 		micdet = &wm8994->micdet[0];
+@@ -3423,6 +3425,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+ 
+ 	snd_soc_dapm_sync(dapm);
+ 
++	pm_runtime_put(component->dev);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(wm8994_mic_detect);
+@@ -3790,6 +3794,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+ 		return -EINVAL;
+ 	}
+ 
++	pm_runtime_get_sync(component->dev);
++
+ 	if (jack) {
+ 		snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS");
+ 		snd_soc_dapm_sync(dapm);
+@@ -3858,6 +3864,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+ 		snd_soc_dapm_sync(dapm);
+ 	}
+ 
++	pm_runtime_put(component->dev);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(wm8958_mic_detect);
+@@ -4051,11 +4059,13 @@ static int wm8994_component_probe(struct snd_soc_component *component)
+ 			wm8994->hubs.dcs_readback_mode = 2;
+ 			break;
+ 		}
++		wm8994->hubs.micd_scthr = true;
+ 		break;
+ 
+ 	case WM8958:
+ 		wm8994->hubs.dcs_readback_mode = 1;
+ 		wm8994->hubs.hp_startup_mode = 1;
++		wm8994->hubs.micd_scthr = true;
+ 
+ 		switch (control->revision) {
+ 		case 0:
+diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
+index fed6ea9b019f7..da7fa6f5459e6 100644
+--- a/sound/soc/codecs/wm_hubs.c
++++ b/sound/soc/codecs/wm_hubs.c
+@@ -1227,6 +1227,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component,
+ 		snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL,
+ 				    WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
+ 
++	if (!hubs->micd_scthr)
++		return 0;
++
+ 	snd_soc_component_update_bits(component, WM8993_MICBIAS,
+ 			    WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
+ 			    WM8993_MICB1_LVL | WM8993_MICB2_LVL,
+diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
+index ee339ad8514d1..1433d73e09bf8 100644
+--- a/sound/soc/codecs/wm_hubs.h
++++ b/sound/soc/codecs/wm_hubs.h
+@@ -31,6 +31,7 @@ struct wm_hubs_data {
+ 	int hp_startup_mode;
+ 	int series_startup;
+ 	int no_series_update;
++	bool micd_scthr;
+ 
+ 	bool no_cache_dac_hp_direct;
+ 	struct list_head dcs_cache;
+diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c
+index fc2d1dac63339..798ab579564cb 100644
+--- a/sound/soc/img/img-i2s-out.c
++++ b/sound/soc/img/img-i2s-out.c
+@@ -350,8 +350,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ 	chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK;
+ 
+ 	ret = pm_runtime_get_sync(i2s->dev);
+-	if (ret < 0)
++	if (ret < 0) {
++		pm_runtime_put_noidle(i2s->dev);
+ 		return ret;
++	}
+ 
+ 	img_i2s_out_disable(i2s);
+ 
+@@ -491,8 +493,10 @@ static int img_i2s_out_probe(struct platform_device *pdev)
+ 			goto err_pm_disable;
+ 	}
+ 	ret = pm_runtime_get_sync(&pdev->dev);
+-	if (ret < 0)
++	if (ret < 0) {
++		pm_runtime_put_noidle(&pdev->dev);
+ 		goto err_suspend;
++	}
+ 
+ 	reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK;
+ 	img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL);
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 0dcd249877c55..ec630127ef2f3 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -588,6 +588,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ 					BYT_RT5640_SSP0_AIF1 |
+ 					BYT_RT5640_MCLK_EN),
+ 	},
++	{	/* MPMAN Converter 9, similar hw as the I.T.Works TW891 2-in-1 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"),
++		},
++		.driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++					BYT_RT5640_MONO_SPEAKER |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{
+ 		/* MPMAN MPWIN895CL */
+ 		.matches = {
+diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
+index c6a58520d377a..255cc45905b81 100644
+--- a/sound/soc/kirkwood/kirkwood-dma.c
++++ b/sound/soc/kirkwood/kirkwood-dma.c
+@@ -136,7 +136,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
+ 		err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
+ 				  "kirkwood-i2s", priv);
+ 		if (err)
+-			return -EBUSY;
++			return err;
+ 
+ 		/*
+ 		 * Enable Error interrupts. We're only ack'ing them but
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 28a3ad8b1d74b..137e1e8718d6f 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1828,6 +1828,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi,
+ 	return 0;
+ }
+ 
++static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor(
++					struct usb_host_endpoint *hostep)
++{
++	unsigned char *extra = hostep->extra;
++	int extralen = hostep->extralen;
++
++	while (extralen > 3) {
++		struct usb_ms_endpoint_descriptor *ms_ep =
++				(struct usb_ms_endpoint_descriptor *)extra;
++
++		if (ms_ep->bLength > 3 &&
++		    ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT &&
++		    ms_ep->bDescriptorSubtype == UAC_MS_GENERAL)
++			return ms_ep;
++		if (!extra[0])
++			break;
++		extralen -= extra[0];
++		extra += extra[0];
++	}
++	return NULL;
++}
++
+ /*
+  * Returns MIDIStreaming device capabilities.
+  */
+@@ -1865,11 +1887,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
+ 		ep = get_ep_desc(hostep);
+ 		if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep))
+ 			continue;
+-		ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra;
+-		if (hostep->extralen < 4 ||
+-		    ms_ep->bLength < 4 ||
+-		    ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT ||
+-		    ms_ep->bDescriptorSubtype != UAC_MS_GENERAL)
++		ms_ep = find_usb_ms_endpoint_descriptor(hostep);
++		if (!ms_ep)
+ 			continue;
+ 		if (usb_endpoint_dir_out(ep)) {
+ 			if (endpoints[epidx].out_ep) {
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 45bd3d54be54b..451b8ea383c61 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1699,6 +1699,16 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer,
+ 	/* get min/max values */
+ 	get_min_max_with_quirks(cval, 0, kctl);
+ 
++	/* skip a bogus volume range */
++	if (cval->max <= cval->min) {
++		usb_audio_dbg(mixer->chip,
++			      "[%d] FU [%s] skipped due to invalid volume\n",
++			      cval->head.id, kctl->id.name);
++		snd_ctl_free_one(kctl);
++		return;
++	}
++
+ 	if (control == UAC_FU_VOLUME) {
+ 		check_mapped_dB(map, cval);
+ 		if (cval->dBmin < cval->dBmax || !cval->initialized) {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 8d9117312e30c..e6dea1c7112be 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1338,12 +1338,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ 	    && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ 		msleep(20);
+ 
+-	/* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny
+-	 * delay here, otherwise requests like get/set frequency return as
+-	 * failed despite actually succeeding.
++	/* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, and Kingston
++	 * HyperX need a tiny delay here, otherwise requests like get/set
++	 * frequency return as failed despite actually succeeding.
+ 	 */
+ 	if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
+ 	     chip->usb_id == USB_ID(0x046d, 0x0a46) ||
++	     chip->usb_id == USB_ID(0x046d, 0x0a56) ||
+ 	     chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
+ 	     chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
+ 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c
+index 4bcb234c0fcab..3da5462a0c7d3 100644
+--- a/tools/gpio/gpio-hammer.c
++++ b/tools/gpio/gpio-hammer.c
+@@ -138,7 +138,14 @@ int main(int argc, char **argv)
+ 			device_name = optarg;
+ 			break;
+ 		case 'o':
+-			lines[i] = strtoul(optarg, NULL, 10);
++			/*
++			 * Avoid overflow. Do not error out immediately; keep
++			 * counting so that the number of times '-o' was given
++			 * can be reported accurately in the error message.
++			 */
++			if (i < GPIOHANDLES_MAX)
++				lines[i] = strtoul(optarg, NULL, 10);
++
+ 			i++;
+ 			break;
+ 		case '?':
+@@ -146,6 +153,14 @@ int main(int argc, char **argv)
+ 			return -1;
+ 		}
+ 	}
++
++	if (i >= GPIOHANDLES_MAX) {
++		fprintf(stderr,
++			"Only %d occurrences of '-o' are allowed, %d were found\n",
++			GPIOHANDLES_MAX, i + 1);
++		return -1;
++	}
++
+ 	nlines = i;
+ 
+ 	if (!device_name || !nlines) {
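
The gpio-hammer change splits validation from counting: the store into lines[] is guarded so the array can never be overrun, while i keeps counting so the error message can report how many '-o' options were actually seen. A simplified stand-alone version of that loop (hypothetical limit and helper name):

    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_MAX 64

    /* Parse each argv entry as a line number; reject too many entries. */
    static int demo_collect(int argc, char **argv, unsigned int lines[DEMO_MAX])
    {
            int i = 0, a;

            for (a = 1; a < argc; a++) {
                    if (i < DEMO_MAX)       /* guard the store... */
                            lines[i] = (unsigned int)strtoul(argv[a], NULL, 10);
                    i++;                    /* ...but keep the tally */
            }
            if (i > DEMO_MAX) {
                    fprintf(stderr, "at most %d lines, got %d\n", DEMO_MAX, i);
                    return -1;
            }
            return i;       /* number of lines collected */
    }
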
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index fd3071d83deae..c0ab27368a345 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -503,7 +503,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ 		    insn->type != INSN_JUMP_UNCONDITIONAL)
+ 			continue;
+ 
+-		if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
++		if (insn->offset == FAKE_JUMP_OFFSET)
+ 			continue;
+ 
+ 		rela = find_rela_by_dest_range(insn->sec, insn->offset,
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 6aae10ff954c7..adabe9d4dc866 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -422,7 +422,7 @@ static void process_interval(void)
+ 	}
+ 
+ 	init_stats(&walltime_nsecs_stats);
+-	update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
++	update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
+ 	print_counters(&rs, 0, NULL);
+ }
+ 
+diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
+index c17e594041712..6631970f96832 100644
+--- a/tools/perf/pmu-events/jevents.c
++++ b/tools/perf/pmu-events/jevents.c
+@@ -1064,10 +1064,9 @@ static int process_one_file(const char *fpath, const struct stat *sb,
+  */
+ int main(int argc, char *argv[])
+ {
+-	int rc;
++	int rc, ret = 0;
+ 	int maxfds;
+ 	char ldirname[PATH_MAX];
+-
+ 	const char *arch;
+ 	const char *output_file;
+ 	const char *start_dirname;
+@@ -1138,7 +1137,8 @@ int main(int argc, char *argv[])
+ 		/* Make build fail */
+ 		fclose(eventsfp);
+ 		free_arch_std_events();
+-		return 1;
++		ret = 1;
++		goto out_free_mapfile;
+ 	} else if (rc) {
+ 		goto empty_map;
+ 	}
+@@ -1156,14 +1156,17 @@ int main(int argc, char *argv[])
+ 		/* Make build fail */
+ 		fclose(eventsfp);
+ 		free_arch_std_events();
+-		return 1;
++		ret = 1;
+ 	}
+ 
+-	return 0;
++
++	goto out_free_mapfile;
+ 
+ empty_map:
+ 	fclose(eventsfp);
+ 	create_empty_mapping(output_file);
+ 	free_arch_std_events();
+-	return 0;
++out_free_mapfile:
++	free(mapfile);
++	return ret;
+ }
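
The jevents.c change reshapes main() into the usual single-exit form: every failure sets a return code and jumps to one label where mapfile is freed exactly once, closing the leak on the "Make build fail" paths. The pattern in miniature, with a hypothetical worker function:

    #include <stdio.h>
    #include <stdlib.h>

    static int demo_process(const char *path)
    {
            int ret = 0;
            char *map = NULL;
            FILE *f = fopen(path, "r");

            if (!f)
                    return -1;

            map = malloc(128);
            if (!map) {
                    ret = 1;
                    goto out;
            }
            /* ... real work would go here ... */
    out:
            free(map);      /* free(NULL) is a no-op, so always safe */
            fclose(f);
            return ret;
    }
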
+diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+index 7cb99b433888b..c2cc42daf9242 100644
+--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
++++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+@@ -14,7 +14,7 @@ add_probe_vfs_getname() {
+ 	if [ $had_vfs_getname -eq 1 ] ; then
+ 		line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+ 		perf probe -q       "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
+-		perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
++		perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+ 	fi
+ }
+ 
+diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
+index 22c9fc900c847..f8c44a85650be 100755
+--- a/tools/perf/trace/beauty/arch_errno_names.sh
++++ b/tools/perf/trace/beauty/arch_errno_names.sh
+@@ -91,7 +91,7 @@ EoHEADER
+ # in tools/perf/arch
+ archlist=""
+ for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do
+-	test -d arch/$arch && archlist="$archlist $arch"
++	test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch"
+ done
+ 
+ for arch in x86 $archlist generic; do
+diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
+index f93846edc1e0d..827d844f4efb1 100644
+--- a/tools/perf/util/cpumap.c
++++ b/tools/perf/util/cpumap.c
+@@ -462,7 +462,7 @@ static void set_max_cpu_num(void)
+ 
+ 	/* get the highest possible cpu number for a sparse allocation */
+ 	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
+-	if (ret == PATH_MAX) {
++	if (ret >= PATH_MAX) {
+ 		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ 		goto out;
+ 	}
+@@ -473,7 +473,7 @@ static void set_max_cpu_num(void)
+ 
+ 	/* get the highest present cpu number for a sparse allocation */
+ 	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
+-	if (ret == PATH_MAX) {
++	if (ret >= PATH_MAX) {
+ 		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ 		goto out;
+ 	}
+@@ -501,7 +501,7 @@ static void set_max_node_num(void)
+ 
+ 	/* get the highest possible cpu number for a sparse allocation */
+ 	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
+-	if (ret == PATH_MAX) {
++	if (ret >= PATH_MAX) {
+ 		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ 		goto out;
+ 	}
+@@ -586,7 +586,7 @@ int cpu__setup_cpunode_map(void)
+ 		return 0;
+ 
+ 	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
+-	if (n == PATH_MAX) {
++	if (n >= PATH_MAX) {
+ 		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ 		return -1;
+ 	}
+@@ -601,7 +601,7 @@ int cpu__setup_cpunode_map(void)
+ 			continue;
+ 
+ 		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
+-		if (n == PATH_MAX) {
++		if (n >= PATH_MAX) {
+ 			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ 			continue;
+ 		}
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index 4fad92213609f..11a2aa80802d5 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -1290,6 +1290,9 @@ void perf_evsel__exit(struct perf_evsel *evsel)
+ 	thread_map__put(evsel->threads);
+ 	zfree(&evsel->group_name);
+ 	zfree(&evsel->name);
++	zfree(&evsel->pmu_name);
++	zfree(&evsel->per_pkg_mask);
++	zfree(&evsel->metric_events);
+ 	perf_evsel__object.fini(evsel);
+ }
+ 
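
The evsel.c hunk frees three more owned strings/masks with zfree(), perf's free-and-NULL helper, which makes a second teardown pass harmless. Its userspace essence is one macro (a close equivalent of what the perf tree defines, not a verbatim copy):

    #include <stdlib.h>

    /* Free and poison in one step, so double teardown cannot double-free. */
    #define zfree(pp) do { free(*(pp)); *(pp) = NULL; } while (0)

    struct demo_evsel { char *pmu_name; };

    static void demo_exit(struct demo_evsel *e)
    {
            zfree(&e->pmu_name);    /* idempotent: safe to call again */
    }
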
+diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c
+index c6fd81c025863..81c5a2e438b7d 100644
+--- a/tools/perf/util/mem2node.c
++++ b/tools/perf/util/mem2node.c
+@@ -1,5 +1,6 @@
+ #include <errno.h>
+ #include <inttypes.h>
++#include <asm/bug.h>
+ #include <linux/bitmap.h>
+ #include "mem2node.h"
+ #include "util.h"
+@@ -92,7 +93,7 @@ int mem2node__init(struct mem2node *map, struct perf_env *env)
+ 
+ 	/* Cut unused entries, due to merging. */
+ 	tmp_entries = realloc(entries, sizeof(*entries) * j);
+-	if (tmp_entries)
++	if (tmp_entries || WARN_ON_ONCE(j == 0))
+ 		entries = tmp_entries;
+ 
+ 	for (i = 0; i < j; i++) {
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index 8b3dafe3fac3a..6dcc6e1182a54 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -171,6 +171,7 @@ static int metricgroup__setup_events(struct list_head *groups,
+ 		if (!evsel) {
+ 			pr_debug("Cannot resolve %s: %s\n",
+ 					eg->metric_name, eg->metric_expr);
++			free(metric_events);
+ 			continue;
+ 		}
+ 		for (i = 0; i < eg->idnum; i++)
+@@ -178,11 +179,13 @@ static int metricgroup__setup_events(struct list_head *groups,
+ 		me = metricgroup__lookup(metric_events_list, evsel, true);
+ 		if (!me) {
+ 			ret = -ENOMEM;
++			free(metric_events);
+ 			break;
+ 		}
+ 		expr = malloc(sizeof(struct metric_expr));
+ 		if (!expr) {
+ 			ret = -ENOMEM;
++			free(metric_events);
+ 			break;
+ 		}
+ 		expr->metric_expr = eg->metric_expr;
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 95043cae57740..0eff0c3ba9eeb 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -1261,7 +1261,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+ 		attr.type = pmu->type;
+ 		evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
+ 		if (evsel) {
+-			evsel->pmu_name = name;
++			evsel->pmu_name = name ? strdup(name) : NULL;
+ 			evsel->use_uncore_alias = use_uncore_alias;
+ 			return 0;
+ 		} else {
+@@ -1302,7 +1302,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+ 		evsel->snapshot = info.snapshot;
+ 		evsel->metric_expr = info.metric_expr;
+ 		evsel->metric_name = info.metric_name;
+-		evsel->pmu_name = name;
++		evsel->pmu_name = name ? strdup(name) : NULL;
+ 		evsel->use_uncore_alias = use_uncore_alias;
+ 	}
+ 
+@@ -1421,12 +1421,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
+ 		 * event. That can be used to distinguish the leader from
+ 		 * other members, even they have the same event name.
+ 		 */
+-		if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) {
++		if ((leader != evsel) &&
++		    !strcmp(leader->pmu_name, evsel->pmu_name)) {
+ 			is_leader = false;
+ 			continue;
+ 		}
+-		/* The name is always alias name */
+-		WARN_ON(strcmp(leader->name, evsel->name));
+ 
+ 		/* Store the leader event for each PMU */
+ 		leaders[nr_pmu++] = (uintptr_t) evsel;
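
These parse-events hunks pair with the evsel.c free above: once each evsel strdup()s its own pmu_name (so it can safely be zfree'd at exit), two evsels on the same PMU no longer share a pointer, and the leader check must compare string contents rather than addresses. In miniature, with hypothetical helper names:

    #include <stdlib.h>
    #include <string.h>

    struct demo_evsel { char *pmu_name; };

    static void demo_set_pmu(struct demo_evsel *e, const char *name)
    {
            e->pmu_name = name ? strdup(name) : NULL;       /* own the copy */
    }

    static int demo_same_pmu(const struct demo_evsel *a,
                             const struct demo_evsel *b)
    {
            /* equal contents, possibly different addresses */
            return !strcmp(a->pmu_name, b->pmu_name);
    }
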
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index 46daa22b86e3b..85ff4f68adc00 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -2690,7 +2690,7 @@ static char *prefix_if_not_in(const char *pre, char *str)
+ 		return str;
+ 
+ 	if (asprintf(&n, "%s,%s", pre, str) < 0)
+-		return NULL;
++		n = NULL;
+ 
+ 	free(str);
+ 	return n;
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index a701a8a48f005..166c621e02235 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -1421,6 +1421,7 @@ struct kcore_copy_info {
+ 	u64 first_symbol;
+ 	u64 last_symbol;
+ 	u64 first_module;
++	u64 first_module_symbol;
+ 	u64 last_module_symbol;
+ 	size_t phnum;
+ 	struct list_head phdrs;
+@@ -1497,6 +1498,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
+ 		return 0;
+ 
+ 	if (strchr(name, '[')) {
++		if (!kci->first_module_symbol || start < kci->first_module_symbol)
++			kci->first_module_symbol = start;
+ 		if (start > kci->last_module_symbol)
+ 			kci->last_module_symbol = start;
+ 		return 0;
+@@ -1694,6 +1697,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
+ 		kci->etext += page_size;
+ 	}
+ 
++	if (kci->first_module_symbol &&
++	    (!kci->first_module || kci->first_module_symbol < kci->first_module))
++		kci->first_module = kci->first_module_symbol;
++
+ 	kci->first_module = round_down(kci->first_module, page_size);
+ 
+ 	if (kci->last_module_symbol) {
+diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+index 2fa3c5757bcb5..dbed3d213bf17 100755
+--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
++++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+@@ -10,11 +10,11 @@ then this utility enables and collects trace data for a user specified interval
+ and generates performance plots.
+ 
+ Prerequisites:
+-    Python version 2.7.x
++    Python version 2.7.x or higher
+     gnuplot 5.0 or higher
+-    gnuplot-py 1.8
++    gnuplot-py 1.8 or higher
+     (Most of the distributions have these required packages. They may be called
+-     gnuplot-py, phython-gnuplot. )
++     gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
+ 
+     HWP (Hardware P-States are disabled)
+     Kernel config for Linux trace is enabled
+@@ -180,7 +180,7 @@ def plot_pstate_cpu_with_sample():
+         g_plot('set xlabel "Samples"')
+         g_plot('set ylabel "P-State"')
+         g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now()))
+-        title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++        title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+         plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO)
+         g_plot('title_list = "{}"'.format(title_list))
+         g_plot(plot_str)
+@@ -197,7 +197,7 @@ def plot_pstate_cpu():
+ #    the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file.
+ #    plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s'
+ #
+-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO)
+     g_plot('title_list = "{}"'.format(title_list))
+     g_plot(plot_str)
+@@ -211,7 +211,7 @@ def plot_load_cpu():
+     g_plot('set ylabel "CPU load (percent)"')
+     g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
+ 
+-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD)
+     g_plot('title_list = "{}"'.format(title_list))
+     g_plot(plot_str)
+@@ -225,7 +225,7 @@ def plot_frequency_cpu():
+     g_plot('set ylabel "CPU Frequency (GHz)"')
+     g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
+ 
+-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ)
+     g_plot('title_list = "{}"'.format(title_list))
+     g_plot(plot_str)
+@@ -240,7 +240,7 @@ def plot_duration_cpu():
+     g_plot('set ylabel "Timer Duration (MilliSeconds)"')
+     g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
+ 
+-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION)
+     g_plot('title_list = "{}"'.format(title_list))
+     g_plot(plot_str)
+@@ -254,7 +254,7 @@ def plot_scaled_cpu():
+     g_plot('set ylabel "Scaled Busy (Unitless)"')
+     g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
+ 
+-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED)
+     g_plot('title_list = "{}"'.format(title_list))
+     g_plot(plot_str)
+@@ -268,7 +268,7 @@ def plot_boost_cpu():
+     g_plot('set ylabel "CPU IO Boost (percent)"')
+     g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
+ 
+-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST)
+     g_plot('title_list = "{}"'.format(title_list))
+     g_plot(plot_str)
+@@ -282,7 +282,7 @@ def plot_ghz_cpu():
+     g_plot('set ylabel "TSC Frequency (GHz)"')
+     g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
+ 
+-    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+     plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ)
+     g_plot('title_list = "{}"'.format(title_list))
+     g_plot(plot_str)
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+index 27a54a17da65d..f4e92afab14b2 100644
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+@@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
+ ftrace_filter_check 'schedule*' '^schedule.*$'
+ 
+ # filter by *mid*end
+-ftrace_filter_check '*aw*lock' '.*aw.*lock$'
++ftrace_filter_check '*pin*lock' '.*pin.*lock$'
+ 
+ # filter by start*mid*
+ ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
+diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
+index 43fcab367fb0a..74e6b3fc2d09e 100644
+--- a/tools/testing/selftests/x86/syscall_nt.c
++++ b/tools/testing/selftests/x86/syscall_nt.c
+@@ -67,6 +67,7 @@ static void do_it(unsigned long extraflags)
+ 	set_eflags(get_eflags() | extraflags);
+ 	syscall(SYS_getpid);
+ 	flags = get_eflags();
++	set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
+ 	if ((flags & extraflags) == extraflags) {
+ 		printf("[OK]\tThe syscall worked and flags are still set\n");
+ 	} else {
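
The syscall_nt.c hunk restores a known-good EFLAGS value before the test reports its result, presumably so that stray flags such as AC or TF, if a buggy kernel leaked them back to user space, cannot upset the following libc calls or later test iterations. A hedged sketch of how such flag helpers are commonly written with inline asm on x86 (the selftest's own versions may differ):

#include <stdio.h>

#define X86_EFLAGS_FIXED 0x0002UL  /* bit 1 is architecturally always 1 */
#define X86_EFLAGS_IF    0x0200UL  /* interrupts enabled in user code   */

static unsigned long get_eflags(void)
{
	unsigned long eflags;

	asm volatile ("pushf\n\tpop %0" : "=rm" (eflags));
	return eflags;
}

static void set_eflags(unsigned long eflags)
{
	asm volatile ("push %0\n\tpopf" : : "rm" (eflags) : "cc");
}

int main(void)
{
	unsigned long flags = get_eflags();

	/* Put EFLAGS into a sane state *before* calling into libc. */
	set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
	printf("eflags were %#lx\n", flags);
	return 0;
}
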
+diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
+index 878e0edb2e1b7..ff0a1c6083718 100644
+--- a/virt/kvm/arm/mmio.c
++++ b/virt/kvm/arm/mmio.c
+@@ -142,7 +142,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
+ 	bool sign_extend;
+ 	bool sixty_four;
+ 
+-	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
++	if (kvm_vcpu_abt_iss1tw(vcpu)) {
+ 		/* page table accesses IO mem: tell guest to fix its TTBR */
+ 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+ 		return 1;
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 41d6285c3da99..787f7329d1b7f 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1282,6 +1282,9 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
+ 
+ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+ {
++	if (kvm_vcpu_abt_iss1tw(vcpu))
++		return true;
++
+ 	if (kvm_vcpu_trap_is_iabt(vcpu))
+ 		return false;
+ 
+@@ -1496,7 +1499,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 	unsigned long flags = 0;
+ 
+ 	write_fault = kvm_is_write_fault(vcpu);
+-	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
++	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+ 	VM_BUG_ON(write_fault && exec_fault);
+ 
+ 	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
+diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
+index cd75df25fe140..2fc1777da50d2 100644
+--- a/virt/kvm/arm/vgic/vgic-init.c
++++ b/virt/kvm/arm/vgic/vgic-init.c
+@@ -187,6 +187,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
+ 			break;
+ 		default:
+ 			kfree(dist->spis);
++			dist->spis = NULL;
+ 			return -EINVAL;
+ 		}
+ 	}
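
The vgic-init.c hunk clears dist->spis right after kfree() so that a later teardown path freeing the same field operates on NULL, which is a harmless no-op, instead of a dangling pointer (a double free). A userspace miniature of that defensive pattern (hypothetical struct and function names, not the vgic code):

#include <stdlib.h>

struct dist {
	int *spis;
};

static int dist_init(struct dist *d, int fail)
{
	d->spis = calloc(32, sizeof(*d->spis));
	if (!d->spis)
		return -1;

	if (fail) {		/* the error path from the hunk */
		free(d->spis);
		d->spis = NULL;	/* defuse any later free of this field */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct dist d = { 0 };

	(void)dist_init(&d, 1);	/* force the error path */
	free(d.spis);		/* common teardown: free(NULL) is a no-op */
	return 0;
}
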
+diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
+index 9295addea7ecf..f139b1c62ca38 100644
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -107,14 +107,21 @@ out_unlock:
+ 	 * We "cache" the configuration table entries in our struct vgic_irq's.
+ 	 * However we only have those structs for mapped IRQs, so we read in
+ 	 * the respective config data from memory here upon mapping the LPI.
++	 *
++	 * Should any of these fail, behave as if we couldn't create the LPI
++	 * by dropping the refcount and returning the error.
+ 	 */
+ 	ret = update_lpi_config(kvm, irq, NULL, false);
+-	if (ret)
++	if (ret) {
++		vgic_put_irq(kvm, irq);
+ 		return ERR_PTR(ret);
++	}
+ 
+ 	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
+-	if (ret)
++	if (ret) {
++		vgic_put_irq(kvm, irq);
+ 		return ERR_PTR(ret);
++	}
+ 
+ 	return irq;
+ }
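
The vgic-its.c hunk makes both error paths drop the reference taken when the LPI was created, as the new comment spells out; before the fix, each failure leaked one refcount. A compact userspace sketch of the rule that every early exit after a "get" needs a matching "put" (hypothetical obj API, not the vgic one):

#include <stdlib.h>

struct obj {
	int refcount;
};

static void obj_put(struct obj *o)	/* drop one reference */
{
	if (--o->refcount == 0)
		free(o);
}

static struct obj *obj_create(int fail)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	o->refcount = 1;	/* reference taken: we own one from here */

	if (fail) {		/* e.g. a follow-up config step failed */
		obj_put(o);	/* the fix: drop what we took, then bail */
		return NULL;
	}
	return o;
}

int main(void)
{
	struct obj *o = obj_create(0);

	if (o)
		obj_put(o);
	return 0;
}
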
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 6bd01d12df2ec..9312c7e750ed3 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -169,6 +169,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
+ 	 */
+ 	if (pfn_valid(pfn))
+ 		return PageReserved(pfn_to_page(pfn)) &&
++		       !is_zero_pfn(pfn) &&
+ 		       !kvm_is_zone_device_pfn(pfn);
+ 
+ 	return true;

