From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed,  2 Dec 2020 12:50:26 +0000 (UTC)
Message-ID: <1606913414.28deb8e904daee3a90ee2257efcaab7e12f29612.mpagano@gentoo>

commit:     28deb8e904daee3a90ee2257efcaab7e12f29612
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec  2 12:50:14 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec  2 12:50:14 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=28deb8e9

Linux patch 5.4.81

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1080_linux-5.4.81.patch | 3824 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3828 insertions(+)

diff --git a/0000_README b/0000_README
index 429a7ad..53eba0a 100644
--- a/0000_README
+++ b/0000_README
@@ -363,6 +363,10 @@ Patch:  1079_linux-5.4.80.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.4.80
 
+Patch:  1080_linux-5.4.81.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.4.81
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1080_linux-5.4.81.patch b/1080_linux-5.4.81.patch
new file mode 100644
index 0000000..f7377e3
--- /dev/null
+++ b/1080_linux-5.4.81.patch
@@ -0,0 +1,3824 @@
+diff --git a/Makefile b/Makefile
+index 7c58e4ce51385..5bbb7607fa55f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 80
++SUBLEVEL = 81
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 7addd0301c51a..6bdcf9b495b83 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -135,8 +135,10 @@
+ 
+ #ifdef CONFIG_ARC_HAS_PAE40
+ #define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
++#define MAX_POSSIBLE_PHYSMEM_BITS 40
+ #else
+ #define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
+ #endif
+ 
+ /**************************************************************************
+diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
+index 9f6fbe4c1fee1..859e4382ac4bb 100644
+--- a/arch/arm/boot/dts/dra76x.dtsi
++++ b/arch/arm/boot/dts/dra76x.dtsi
+@@ -32,8 +32,8 @@
+ 				interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ 					     <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ 				interrupt-names = "int0", "int1";
+-				clocks = <&mcan_clk>, <&l3_iclk_div>;
+-				clock-names = "cclk", "hclk";
++				clocks = <&l3_iclk_div>, <&mcan_clk>;
++				clock-names = "hclk", "cclk";
+ 				bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
+ 			};
+ 		};
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index 51beec41d48c8..50b51ac91fcbe 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -75,6 +75,8 @@
+ #define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
+ #define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS	32
++
+ /*
+  * PMD_SHIFT determines the size of the area a second-level page table can map
+  * PGDIR_SHIFT determines what a third-level page table entry can map
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index 5b18295021a03..8006a56cc2ce2 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -25,6 +25,8 @@
+ #define PTE_HWTABLE_OFF		(0)
+ #define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS 40
++
+ /*
+  * PGDIR_SHIFT determines the size a top-level page table entry can map.
+  */
+diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
+index a92d277f81a08..c8d317fafe2ea 100644
+--- a/arch/arm/mach-omap2/cpuidle44xx.c
++++ b/arch/arm/mach-omap2/cpuidle44xx.c
+@@ -175,8 +175,11 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ 		if (mpuss_can_lose_context) {
+ 			error = cpu_cluster_pm_enter();
+ 			if (error) {
+-				omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
+-				goto cpu_cluster_pm_out;
++				index = 0;
++				cx = state_ptr + index;
++				pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
++				omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
++				mpuss_can_lose_context = 0;
+ 			}
+ 		}
+ 	}
+@@ -184,7 +187,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
+ 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+ 	cpu_done[dev->cpu] = true;
+ 
+-cpu_cluster_pm_out:
+ 	/* Wakeup CPU1 only if it is not offlined */
+ 	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+ 
+diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+index 5728255bd0c1a..78f7e6e50beb0 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+@@ -692,7 +692,7 @@
+ 
+ 		hsp_aon: hsp@c150000 {
+ 			compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp";
+-			reg = <0x0c150000 0xa0000>;
++			reg = <0x0c150000 0x90000>;
+ 			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ 			             <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+ 			             <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 41dd4b1f0ccba..69dfc340e71b1 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -98,8 +98,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+ #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
+ #define pte_valid_not_user(pte) \
+ 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+-#define pte_valid_young(pte) \
+-	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+ #define pte_valid_user(pte) \
+ 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+ 
+@@ -107,9 +105,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+  * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
+  * so that we don't erroneously return false for pages that have been
+  * remapped as PROT_NONE but are yet to be flushed from the TLB.
++ * Note that we can't make any assumptions based on the state of the access
++ * flag, since ptep_clear_flush_young() elides a DSB when invalidating the
++ * TLB.
+  */
+ #define pte_accessible(mm, pte)	\
+-	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
++	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
+ 
+ /*
+  * p??_access_permitted() is true for valid user mappings (subject to the
+@@ -135,13 +136,6 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
+ 	return pte;
+ }
+ 
+-static inline pte_t pte_wrprotect(pte_t pte)
+-{
+-	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
+-	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+-	return pte;
+-}
+-
+ static inline pte_t pte_mkwrite(pte_t pte)
+ {
+ 	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
+@@ -167,6 +161,20 @@ static inline pte_t pte_mkdirty(pte_t pte)
+ 	return pte;
+ }
+ 
++static inline pte_t pte_wrprotect(pte_t pte)
++{
++	/*
++	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
++	 * clear), set the PTE_DIRTY bit.
++	 */
++	if (pte_hw_dirty(pte))
++		pte = pte_mkdirty(pte);
++
++	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
++	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
++	return pte;
++}
++
+ static inline pte_t pte_mkold(pte_t pte)
+ {
+ 	return clear_pte_bit(pte, __pgprot(PTE_AF));
+@@ -782,12 +790,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
+ 	pte = READ_ONCE(*ptep);
+ 	do {
+ 		old_pte = pte;
+-		/*
+-		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
+-		 * clear), set the PTE_DIRTY bit.
+-		 */
+-		if (pte_hw_dirty(pte))
+-			pte = pte_mkdirty(pte);
+ 		pte = pte_wrprotect(pte);
+ 		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+ 					       pte_val(old_pte), pte_val(pte));
+diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
+index ba967148b016b..2604fab8a92dc 100644
+--- a/arch/mips/include/asm/pgtable-32.h
++++ b/arch/mips/include/asm/pgtable-32.h
+@@ -155,6 +155,7 @@ static inline void pmd_clear(pmd_t *pmdp)
+ 
+ #if defined(CONFIG_XPA)
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS 40
+ #define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
+ static inline pte_t
+ pfn_pte(unsigned long pfn, pgprot_t prot)
+@@ -170,6 +171,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
+ 
+ #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS 36
+ #define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))
+ 
+ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+@@ -184,6 +186,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+ 
+ #else
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
+ #ifdef CONFIG_CPU_VR41XX
+ #define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
+ #define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
+diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
+index 0796533d37dd5..7b6349be621a3 100644
+--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
+@@ -37,8 +37,10 @@ static inline bool pte_user(pte_t pte)
+  */
+ #ifdef CONFIG_PTE_64BIT
+ #define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
++#define MAX_POSSIBLE_PHYSMEM_BITS 36
+ #else
+ #define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
+ #endif
+ 
+ /*
+diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+index c1e45f510591e..a29b64129a7d4 100644
+--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
++++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+@@ -54,6 +54,8 @@
+ 
+ #else /* !__ASSEMBLY__ */
+ 
++#include <linux/jump_label.h>
++
+ DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
+ 
+ #ifdef CONFIG_PPC_KUAP
+diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
+index 552b96eef0c8e..3d32d7103ec8e 100644
+--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
++++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
+@@ -148,8 +148,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+  */
+ #if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+ #define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
++#define MAX_POSSIBLE_PHYSMEM_BITS 36
+ #else
+ #define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
+ #endif
+ 
+ /*
+diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
+index 235d57d6c205e..d78d8487c1d6b 100644
+--- a/arch/powerpc/kvm/book3s_xive_native.c
++++ b/arch/powerpc/kvm/book3s_xive_native.c
+@@ -252,6 +252,13 @@ static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
+ 	}
+ 
+ 	state = &sb->irq_state[src];
++
++	/* Some sanity checking */
++	if (!state->valid) {
++		pr_devel("%s: source %lx invalid !\n", __func__, irq);
++		return VM_FAULT_SIGBUS;
++	}
++
+ 	kvmppc_xive_select_irq(state, &hw_num, &xd);
+ 
+ 	arch_spin_lock(&sb->lock);
+diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
+index b0ab66e5fdb1d..5b2e79e5bfa5b 100644
+--- a/arch/riscv/include/asm/pgtable-32.h
++++ b/arch/riscv/include/asm/pgtable-32.h
+@@ -14,4 +14,6 @@
+ #define PGDIR_SIZE      (_AC(1, UL) << PGDIR_SHIFT)
+ #define PGDIR_MASK      (~(PGDIR_SIZE - 1))
+ 
++#define MAX_POSSIBLE_PHYSMEM_BITS 34
++
+ #endif /* _ASM_RISCV_PGTABLE_32_H */
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 4814c964692cb..0b50119ea12cc 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -107,14 +107,14 @@
+ MODULE_LICENSE("GPL");
+ 
+ #define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
+-static ssize_t __cstate_##_var##_show(struct kobject *kobj,	\
+-				struct kobj_attribute *attr,	\
++static ssize_t __cstate_##_var##_show(struct device *dev,	\
++				struct device_attribute *attr,	\
+ 				char *page)			\
+ {								\
+ 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
+ 	return sprintf(page, _format "\n");			\
+ }								\
+-static struct kobj_attribute format_attr_##_var =		\
++static struct device_attribute format_attr_##_var =		\
+ 	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
+ 
+ static ssize_t cstate_get_attr_cpumask(struct device *dev,
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index 86467f85c3831..a335be03aeef1 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -92,8 +92,8 @@ end:
+ 	return map;
+ }
+ 
+-ssize_t uncore_event_show(struct kobject *kobj,
+-			  struct kobj_attribute *attr, char *buf)
++ssize_t uncore_event_show(struct device *dev,
++			  struct device_attribute *attr, char *buf)
+ {
+ 	struct uncore_event_desc *event =
+ 		container_of(attr, struct uncore_event_desc, attr);
+diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
+index bbfdaa720b456..7b964c63e993c 100644
+--- a/arch/x86/events/intel/uncore.h
++++ b/arch/x86/events/intel/uncore.h
+@@ -144,7 +144,7 @@ struct intel_uncore_box {
+ #define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2
+ 
+ struct uncore_event_desc {
+-	struct kobj_attribute attr;
++	struct device_attribute attr;
+ 	const char *config;
+ };
+ 
+@@ -165,8 +165,8 @@ struct pci2phy_map {
+ struct pci2phy_map *__find_pci2phy_map(int segment);
+ int uncore_pcibus_to_physid(struct pci_bus *bus);
+ 
+-ssize_t uncore_event_show(struct kobject *kobj,
+-			  struct kobj_attribute *attr, char *buf);
++ssize_t uncore_event_show(struct device *dev,
++			  struct device_attribute *attr, char *buf);
+ 
+ #define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
+ {								\
+@@ -175,14 +175,14 @@ ssize_t uncore_event_show(struct kobject *kobj,
+ }
+ 
+ #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
+-static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
+-				struct kobj_attribute *attr,		\
++static ssize_t __uncore_##_var##_show(struct device *dev,		\
++				struct device_attribute *attr,		\
+ 				char *page)				\
+ {									\
+ 	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
+ 	return sprintf(page, _format "\n");				\
+ }									\
+-static struct kobj_attribute format_attr_##_var =			\
++static struct device_attribute format_attr_##_var =			\
+ 	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
+ 
+ static inline bool uncore_pmc_fixed(int idx)
+diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
+index 187c72a58e69c..9050d7b8abc5a 100644
+--- a/arch/x86/events/rapl.c
++++ b/arch/x86/events/rapl.c
+@@ -93,18 +93,6 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
+  * any other bit is reserved
+  */
+ #define RAPL_EVENT_MASK	0xFFULL
+-
+-#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)		\
+-static ssize_t __rapl_##_var##_show(struct kobject *kobj,	\
+-				struct kobj_attribute *attr,	\
+-				char *page)			\
+-{								\
+-	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
+-	return sprintf(page, _format "\n");			\
+-}								\
+-static struct kobj_attribute format_attr_##_var =		\
+-	__ATTR(_name, 0444, __rapl_##_var##_show, NULL)
+-
+ #define RAPL_CNTR_WIDTH 32
+ 
+ #define RAPL_EVENT_ATTR_STR(_name, v, str)					\
+@@ -433,7 +421,7 @@ static struct attribute_group rapl_pmu_events_group = {
+ 	.attrs = attrs_empty,
+ };
+ 
+-DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
++PMU_FORMAT_ATTR(event, "config:0-7");
+ static struct attribute *rapl_formats_attr[] = {
+ 	&format_attr_event.attr,
+ 	NULL,
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index c41686641c3fb..c52b7073a5ab5 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1560,6 +1560,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
+ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
++int kvm_cpu_has_extint(struct kvm_vcpu *v);
+ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
+ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index bdc1ed7ff6692..fcc4238ee95f8 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -733,11 +733,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+ 	if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ 		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ 
++		spectre_v2_user_ibpb = mode;
+ 		switch (cmd) {
+ 		case SPECTRE_V2_USER_CMD_FORCE:
+ 		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+ 		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+ 			static_branch_enable(&switch_mm_always_ibpb);
++			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
+ 			break;
+ 		case SPECTRE_V2_USER_CMD_PRCTL:
+ 		case SPECTRE_V2_USER_CMD_AUTO:
+@@ -751,8 +753,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+ 		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+ 			static_key_enabled(&switch_mm_always_ibpb) ?
+ 			"always-on" : "conditional");
+-
+-		spectre_v2_user_ibpb = mode;
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 92331de16d70e..c2a9762d278dd 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -1361,8 +1361,10 @@ void do_machine_check(struct pt_regs *regs, long error_code)
+ 	 * When there's any problem use only local no_way_out state.
+ 	 */
+ 	if (!lmce) {
+-		if (mce_end(order) < 0)
+-			no_way_out = worst >= MCE_PANIC_SEVERITY;
++		if (mce_end(order) < 0) {
++			if (!no_way_out)
++				no_way_out = worst >= MCE_PANIC_SEVERITY;
++		}
+ 	} else {
+ 		/*
+ 		 * If there was a fatal machine check we should have
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 54b711bc06073..830ccc396e26d 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -507,6 +507,24 @@ unlock:
+ 	return ret ?: nbytes;
+ }
+ 
++/**
++ * rdtgroup_remove - the helper to remove resource group safely
++ * @rdtgrp: resource group to remove
++ *
++ * On resource group creation via a mkdir, an extra kernfs_node reference is
++ * taken to ensure that the rdtgroup structure remains accessible for the
++ * rdtgroup_kn_unlock() calls where it is removed.
++ *
++ * Drop the extra reference here, then free the rdtgroup structure.
++ *
++ * Return: void
++ */
++static void rdtgroup_remove(struct rdtgroup *rdtgrp)
++{
++	kernfs_put(rdtgrp->kn);
++	kfree(rdtgrp);
++}
++
+ struct task_move_callback {
+ 	struct callback_head	work;
+ 	struct rdtgroup		*rdtgrp;
+@@ -529,7 +547,7 @@ static void move_myself(struct callback_head *head)
+ 	    (rdtgrp->flags & RDT_DELETED)) {
+ 		current->closid = 0;
+ 		current->rmid = 0;
+-		kfree(rdtgrp);
++		rdtgroup_remove(rdtgrp);
+ 	}
+ 
+ 	preempt_disable();
+@@ -1618,7 +1636,6 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
+ 	if (IS_ERR(kn_subdir))
+ 		return PTR_ERR(kn_subdir);
+ 
+-	kernfs_get(kn_subdir);
+ 	ret = rdtgroup_kn_set_ugid(kn_subdir);
+ 	if (ret)
+ 		return ret;
+@@ -1641,7 +1658,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
+ 	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
+ 	if (IS_ERR(kn_info))
+ 		return PTR_ERR(kn_info);
+-	kernfs_get(kn_info);
+ 
+ 	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
+ 	if (ret)
+@@ -1662,12 +1678,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
+ 			goto out_destroy;
+ 	}
+ 
+-	/*
+-	 * This extra ref will be put in kernfs_remove() and guarantees
+-	 * that @rdtgrp->kn is always accessible.
+-	 */
+-	kernfs_get(kn_info);
+-
+ 	ret = rdtgroup_kn_set_ugid(kn_info);
+ 	if (ret)
+ 		goto out_destroy;
+@@ -1696,12 +1706,6 @@ mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
+ 	if (dest_kn)
+ 		*dest_kn = kn;
+ 
+-	/*
+-	 * This extra ref will be put in kernfs_remove() and guarantees
+-	 * that @rdtgrp->kn is always accessible.
+-	 */
+-	kernfs_get(kn);
+-
+ 	ret = rdtgroup_kn_set_ugid(kn);
+ 	if (ret)
+ 		goto out_destroy;
+@@ -1928,8 +1932,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
+ 		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
+ 			rdtgroup_pseudo_lock_remove(rdtgrp);
+ 		kernfs_unbreak_active_protection(kn);
+-		kernfs_put(rdtgrp->kn);
+-		kfree(rdtgrp);
++		rdtgroup_remove(rdtgrp);
+ 	} else {
+ 		kernfs_unbreak_active_protection(kn);
+ 	}
+@@ -1988,13 +1991,11 @@ static int rdt_get_tree(struct fs_context *fc)
+ 					  &kn_mongrp);
+ 		if (ret < 0)
+ 			goto out_info;
+-		kernfs_get(kn_mongrp);
+ 
+ 		ret = mkdir_mondata_all(rdtgroup_default.kn,
+ 					&rdtgroup_default, &kn_mondata);
+ 		if (ret < 0)
+ 			goto out_mongrp;
+-		kernfs_get(kn_mondata);
+ 		rdtgroup_default.mon.mon_data_kn = kn_mondata;
+ 	}
+ 
+@@ -2223,7 +2224,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
+ 		if (atomic_read(&sentry->waitcount) != 0)
+ 			sentry->flags = RDT_DELETED;
+ 		else
+-			kfree(sentry);
++			rdtgroup_remove(sentry);
+ 	}
+ }
+ 
+@@ -2265,7 +2266,7 @@ static void rmdir_all_sub(void)
+ 		if (atomic_read(&rdtgrp->waitcount) != 0)
+ 			rdtgrp->flags = RDT_DELETED;
+ 		else
+-			kfree(rdtgrp);
++			rdtgroup_remove(rdtgrp);
+ 	}
+ 	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
+ 	update_closid_rmid(cpu_online_mask, &rdtgroup_default);
+@@ -2365,11 +2366,6 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
+ 	if (IS_ERR(kn))
+ 		return PTR_ERR(kn);
+ 
+-	/*
+-	 * This extra ref will be put in kernfs_remove() and guarantees
+-	 * that kn is always accessible.
+-	 */
+-	kernfs_get(kn);
+ 	ret = rdtgroup_kn_set_ugid(kn);
+ 	if (ret)
+ 		goto out_destroy;
+@@ -2705,8 +2701,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
+ 	/*
+ 	 * kernfs_remove() will drop the reference count on "kn" which
+ 	 * will free it. But we still need it to stick around for the
+-	 * rdtgroup_kn_unlock(kn} call below. Take one extra reference
+-	 * here, which will be dropped inside rdtgroup_kn_unlock().
++	 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
++	 * which will be dropped by kernfs_put() in rdtgroup_remove().
+ 	 */
+ 	kernfs_get(kn);
+ 
+@@ -2747,6 +2743,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
+ out_idfree:
+ 	free_rmid(rdtgrp->mon.rmid);
+ out_destroy:
++	kernfs_put(rdtgrp->kn);
+ 	kernfs_remove(rdtgrp->kn);
+ out_free_rgrp:
+ 	kfree(rdtgrp);
+@@ -2759,7 +2756,7 @@ static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
+ {
+ 	kernfs_remove(rgrp->kn);
+ 	free_rmid(rgrp->mon.rmid);
+-	kfree(rgrp);
++	rdtgroup_remove(rgrp);
+ }
+ 
+ /*
+@@ -2921,11 +2918,6 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
+ 	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
+ 	list_del(&rdtgrp->mon.crdtgrp_list);
+ 
+-	/*
+-	 * one extra hold on this, will drop when we kfree(rdtgrp)
+-	 * in rdtgroup_kn_unlock()
+-	 */
+-	kernfs_get(kn);
+ 	kernfs_remove(rdtgrp->kn);
+ 
+ 	return 0;
+@@ -2937,11 +2929,6 @@ static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
+ 	rdtgrp->flags = RDT_DELETED;
+ 	list_del(&rdtgrp->rdtgroup_list);
+ 
+-	/*
+-	 * one extra hold on this, will drop when we kfree(rdtgrp)
+-	 * in rdtgroup_kn_unlock()
+-	 */
+-	kernfs_get(kn);
+ 	kernfs_remove(rdtgrp->kn);
+ 	return 0;
+ }
+diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
+index e330e7d125f72..896db1aa77e7f 100644
+--- a/arch/x86/kvm/irq.c
++++ b/arch/x86/kvm/irq.c
+@@ -40,29 +40,10 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
+  * check if there is pending interrupt from
+  * non-APIC source without intack.
+  */
+-static int kvm_cpu_has_extint(struct kvm_vcpu *v)
+-{
+-	u8 accept = kvm_apic_accept_pic_intr(v);
+-
+-	if (accept) {
+-		if (irqchip_split(v->kvm))
+-			return pending_userspace_extint(v);
+-		else
+-			return v->kvm->arch.vpic->output;
+-	} else
+-		return 0;
+-}
+-
+-/*
+- * check if there is injectable interrupt:
+- * when virtual interrupt delivery enabled,
+- * interrupt from apic will handled by hardware,
+- * we don't need to check it here.
+- */
+-int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
++int kvm_cpu_has_extint(struct kvm_vcpu *v)
+ {
+ 	/*
+-	 * FIXME: interrupt.injected represents an interrupt that it's
++	 * FIXME: interrupt.injected represents an interrupt whose
+ 	 * side-effects have already been applied (e.g. bit from IRR
+ 	 * already moved to ISR). Therefore, it is incorrect to rely
+ 	 * on interrupt.injected to know if there is a pending
+@@ -75,6 +56,23 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+ 	if (!lapic_in_kernel(v))
+ 		return v->arch.interrupt.injected;
+ 
++	if (!kvm_apic_accept_pic_intr(v))
++		return 0;
++
++	if (irqchip_split(v->kvm))
++		return pending_userspace_extint(v);
++	else
++		return v->kvm->arch.vpic->output;
++}
++
++/*
++ * check if there is injectable interrupt:
++ * when virtual interrupt delivery enabled,
++ * interrupt from apic will handled by hardware,
++ * we don't need to check it here.
++ */
++int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
++{
+ 	if (kvm_cpu_has_extint(v))
+ 		return 1;
+ 
+@@ -90,20 +88,6 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
+  */
+ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
+ {
+-	/*
+-	 * FIXME: interrupt.injected represents an interrupt that it's
+-	 * side-effects have already been applied (e.g. bit from IRR
+-	 * already moved to ISR). Therefore, it is incorrect to rely
+-	 * on interrupt.injected to know if there is a pending
+-	 * interrupt in the user-mode LAPIC.
+-	 * This leads to nVMX/nSVM not be able to distinguish
+-	 * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on
+-	 * pending interrupt or should re-inject an injected
+-	 * interrupt.
+-	 */
+-	if (!lapic_in_kernel(v))
+-		return v->arch.interrupt.injected;
+-
+ 	if (kvm_cpu_has_extint(v))
+ 		return 1;
+ 
+@@ -117,16 +101,21 @@ EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
+  */
+ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+ {
+-	if (kvm_cpu_has_extint(v)) {
+-		if (irqchip_split(v->kvm)) {
+-			int vector = v->arch.pending_external_vector;
+-
+-			v->arch.pending_external_vector = -1;
+-			return vector;
+-		} else
+-			return kvm_pic_read_irq(v->kvm); /* PIC */
+-	} else
++	if (!kvm_cpu_has_extint(v)) {
++		WARN_ON(!lapic_in_kernel(v));
+ 		return -1;
++	}
++
++	if (!lapic_in_kernel(v))
++		return v->arch.interrupt.nr;
++
++	if (irqchip_split(v->kvm)) {
++		int vector = v->arch.pending_external_vector;
++
++		v->arch.pending_external_vector = -1;
++		return vector;
++	} else
++		return kvm_pic_read_irq(v->kvm); /* PIC */
+ }
+ 
+ /*
+@@ -134,13 +123,7 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v)
+  */
+ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
+ {
+-	int vector;
+-
+-	if (!lapic_in_kernel(v))
+-		return v->arch.interrupt.nr;
+-
+-	vector = kvm_cpu_get_extint(v);
+-
++	int vector = kvm_cpu_get_extint(v);
+ 	if (vector != -1)
+ 		return vector;			/* PIC */
+ 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 9f793c9649cdf..3f6b866c644d5 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2330,7 +2330,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+ 	u32 ppr;
+ 
+-	if (!kvm_apic_hw_enabled(apic))
++	if (!kvm_apic_present(vcpu))
+ 		return -1;
+ 
+ 	__apic_update_ppr(apic, &ppr);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 880a24889291c..b7f86acb8c911 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3624,21 +3624,23 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
+ 
+ static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
+ {
++	/*
++	 * We can accept userspace's request for interrupt injection
++	 * as long as we have a place to store the interrupt number.
++	 * The actual injection will happen when the CPU is able to
++	 * deliver the interrupt.
++	 */
++	if (kvm_cpu_has_extint(vcpu))
++		return false;
++
++	/* Acknowledging ExtINT does not happen if LINT0 is masked.  */
+ 	return (!lapic_in_kernel(vcpu) ||
+ 		kvm_apic_accept_pic_intr(vcpu));
+ }
+ 
+-/*
+- * if userspace requested an interrupt window, check that the
+- * interrupt window is open.
+- *
+- * No need to exit to userspace if we already have an interrupt queued.
+- */
+ static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
+ {
+ 	return kvm_arch_interrupt_allowed(vcpu) &&
+-		!kvm_cpu_has_interrupt(vcpu) &&
+-		!kvm_event_needs_reinjection(vcpu) &&
+ 		kvm_cpu_accept_dm_intr(vcpu);
+ }
+ 
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index 6deb49094c605..d817b7c862a62 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -93,10 +93,20 @@ void xen_init_lock_cpu(int cpu)
+ 
+ void xen_uninit_lock_cpu(int cpu)
+ {
++	int irq;
++
+ 	if (!xen_pvspin)
+ 		return;
+ 
+-	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
++	/*
++	 * When booting the kernel with 'mitigations=auto,nosmt', the secondary
++	 * CPUs are not activated, and lock_kicker_irq is not initialized.
++	 */
++	irq = per_cpu(lock_kicker_irq, cpu);
++	if (irq == -1)
++		return;
++
++	unbind_from_irqhandler(irq, NULL);
+ 	per_cpu(lock_kicker_irq, cpu) = -1;
+ 	kfree(per_cpu(irq_name, cpu));
+ 	per_cpu(irq_name, cpu) = NULL;
+diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
+index 3f80386f18838..5cb24a789e9e1 100644
+--- a/arch/xtensa/include/asm/uaccess.h
++++ b/arch/xtensa/include/asm/uaccess.h
+@@ -300,7 +300,7 @@ strncpy_from_user(char *dst, const char *src, long count)
+ 	return -EFAULT;
+ }
+ #else
+-long strncpy_from_user(char *dst, const char *src, long count);
++long strncpy_from_user(char *dst, const char __user *src, long count);
+ #endif
+ 
+ /*
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 770a780dfa544..3934ce3385ac3 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -192,6 +192,9 @@ static int sysc_wait_softreset(struct sysc *ddata)
+ 	u32 sysc_mask, syss_done, rstval;
+ 	int syss_offset, error = 0;
+ 
++	if (ddata->cap->regbits->srst_shift < 0)
++		return 0;
++
+ 	syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+ 	sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+ 
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index cd81d10974a29..57b6555d6d042 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2793,7 +2793,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ 	 * If burst size is smaller than bus width then make sure we only
+ 	 * transfer one at a time to avoid a burst stradling an MFIFO entry.
+ 	 */
+-	if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
++	if (burst * 8 < pl330->pcfg.data_bus_width)
+ 		desc->rqcfg.brst_len = 1;
+ 
+ 	desc->bytes_requested = len;
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index 43acba2a1c0ee..a6abfe702c5a3 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -454,8 +454,8 @@ struct xilinx_dma_device {
+ #define to_dma_tx_descriptor(tx) \
+ 	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
+ #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+-	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+-			   cond, delay_us, timeout_us)
++	readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
++				  val, cond, delay_us, timeout_us)
+ 
+ /* IO accessors */
+ static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
+diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
+index 6a6b412206ec0..3222645c95b33 100644
+--- a/drivers/firmware/efi/Kconfig
++++ b/drivers/firmware/efi/Kconfig
+@@ -216,7 +216,7 @@ config EFI_DEV_PATH_PARSER
+ 
+ config EFI_EARLYCON
+ 	def_bool y
+-	depends on SERIAL_EARLYCON && !ARM && !IA64
++	depends on EFI && SERIAL_EARLYCON && !ARM && !IA64
+ 	select FONT_SUPPORT
+ 	select ARCH_USE_MEMREMAP_PROT
+ 
+diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
+index a50ba4a4a1d71..b88f889b3932e 100644
+--- a/drivers/hid/hid-cypress.c
++++ b/drivers/hid/hid-cypress.c
+@@ -23,19 +23,17 @@
+ #define CP_2WHEEL_MOUSE_HACK		0x02
+ #define CP_2WHEEL_MOUSE_HACK_ON		0x04
+ 
++#define VA_INVAL_LOGICAL_BOUNDARY	0x08
++
+ /*
+  * Some USB barcode readers from cypress have usage min and usage max in
+  * the wrong order
+  */
+-static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++static __u8 *cp_rdesc_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 		unsigned int *rsize)
+ {
+-	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ 	unsigned int i;
+ 
+-	if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
+-		return rdesc;
+-
+ 	if (*rsize < 4)
+ 		return rdesc;
+ 
+@@ -48,6 +46,40 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 	return rdesc;
+ }
+ 
++static __u8 *va_logical_boundary_fixup(struct hid_device *hdev, __u8 *rdesc,
++		unsigned int *rsize)
++{
++	/*
++	 * Varmilo VA104M (with VID Cypress and device ID 07B1) incorrectly
++	 * reports Logical Minimum of its Consumer Control device as 572
++	 * (0x02 0x3c). Fix this by setting its Logical Minimum to zero.
++	 */
++	if (*rsize == 25 &&
++			rdesc[0] == 0x05 && rdesc[1] == 0x0c &&
++			rdesc[2] == 0x09 && rdesc[3] == 0x01 &&
++			rdesc[6] == 0x19 && rdesc[7] == 0x00 &&
++			rdesc[11] == 0x16 && rdesc[12] == 0x3c && rdesc[13] == 0x02) {
++		hid_info(hdev,
++			 "fixing up varmilo VA104M consumer control report descriptor\n");
++		rdesc[12] = 0x00;
++		rdesc[13] = 0x00;
++	}
++	return rdesc;
++}
++
++static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++		unsigned int *rsize)
++{
++	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
++
++	if (quirks & CP_RDESC_SWAPPED_MIN_MAX)
++		rdesc = cp_rdesc_fixup(hdev, rdesc, rsize);
++	if (quirks & VA_INVAL_LOGICAL_BOUNDARY)
++		rdesc = va_logical_boundary_fixup(hdev, rdesc, rsize);
++
++	return rdesc;
++}
++
+ static int cp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ 		struct hid_field *field, struct hid_usage *usage,
+ 		unsigned long **bit, int *max)
+@@ -128,6 +160,8 @@ static const struct hid_device_id cp_devices[] = {
+ 		.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
+ 		.driver_data = CP_2WHEEL_MOUSE_HACK },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1),
++		.driver_data = VA_INVAL_LOGICAL_BOUNDARY },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, cp_devices);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 7363d0b488bd8..2aa810665a78c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -337,6 +337,8 @@
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_4	0xed81
+ #define USB_DEVICE_ID_CYPRESS_TRUETOUCH	0xc001
+ 
++#define USB_DEVICE_ID_CYPRESS_VARMILO_VA104M_07B1   0X07b1
++
+ #define USB_VENDOR_ID_DATA_MODUL	0x7374
+ #define USB_VENDOR_ID_DATA_MODUL_EASYMAXTOUCH	0x1201
+ 
+@@ -449,6 +451,10 @@
+ #define USB_VENDOR_ID_FRUCTEL	0x25B6
+ #define USB_DEVICE_ID_GAMETEL_MT_MODE	0x0002
+ 
++#define USB_VENDOR_ID_GAMEVICE	0x27F8
++#define USB_DEVICE_ID_GAMEVICE_GV186	0x0BBE
++#define USB_DEVICE_ID_GAMEVICE_KISHI	0x0BBF
++
+ #define USB_VENDOR_ID_GAMERON		0x0810
+ #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR	0x0001
+ #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR	0x0002
+@@ -487,6 +493,7 @@
+ #define USB_DEVICE_ID_PENPOWER		0x00f4
+ 
+ #define USB_VENDOR_ID_GREENASIA		0x0e8f
++#define USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR 0x3010
+ #define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD	0x3013
+ 
+ #define USB_VENDOR_ID_GRETAGMACBETH	0x0971
+@@ -743,6 +750,7 @@
+ #define USB_VENDOR_ID_LOGITECH		0x046d
+ #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
+ #define USB_DEVICE_ID_LOGITECH_T651	0xb00c
++#define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD	0xb309
+ #define USB_DEVICE_ID_LOGITECH_C007	0xc007
+ #define USB_DEVICE_ID_LOGITECH_C077	0xc077
+ #define USB_DEVICE_ID_LOGITECH_RECEIVER	0xc101
+@@ -1292,6 +1300,7 @@
+ 
+ #define USB_VENDOR_ID_UGTIZER			0x2179
+ #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610	0x0053
++#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040	0x0077
+ 
+ #define USB_VENDOR_ID_VIEWSONIC			0x0543
+ #define USB_DEVICE_ID_VIEWSONIC_PD1011		0xe621
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index b2bff932c524f..b2da8476d0d30 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -319,6 +319,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ 		USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
++		USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{}
+ };
+ 
+diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
+index 044a93f3c1178..742c052b0110a 100644
+--- a/drivers/hid/hid-ite.c
++++ b/drivers/hid/hid-ite.c
+@@ -11,6 +11,48 @@
+ 
+ #include "hid-ids.h"
+ 
++#define QUIRK_TOUCHPAD_ON_OFF_REPORT		BIT(0)
++
++static __u8 *ite_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize)
++{
++	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
++
++	if (quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) {
++		if (*rsize == 188 && rdesc[162] == 0x81 && rdesc[163] == 0x02) {
++			hid_info(hdev, "Fixing up ITE keyboard report descriptor\n");
++			rdesc[163] = HID_MAIN_ITEM_RELATIVE;
++		}
++	}
++
++	return rdesc;
++}
++
++static int ite_input_mapping(struct hid_device *hdev,
++		struct hid_input *hi, struct hid_field *field,
++		struct hid_usage *usage, unsigned long **bit,
++		int *max)
++{
++
++	unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
++
++	if ((quirks & QUIRK_TOUCHPAD_ON_OFF_REPORT) &&
++	    (usage->hid & HID_USAGE_PAGE) == 0x00880000) {
++		if (usage->hid == 0x00880078) {
++			/* Touchpad on, userspace expects F22 for this */
++			hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F22);
++			return 1;
++		}
++		if (usage->hid == 0x00880079) {
++			/* Touchpad off, userspace expects F23 for this */
++			hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_F23);
++			return 1;
++		}
++		return -1;
++	}
++
++	return 0;
++}
++
+ static int ite_event(struct hid_device *hdev, struct hid_field *field,
+ 		     struct hid_usage *usage, __s32 value)
+ {
+@@ -37,13 +79,27 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
+ 	return 0;
+ }
+ 
++static int ite_probe(struct hid_device *hdev, const struct hid_device_id *id)
++{
++	int ret;
++
++	hid_set_drvdata(hdev, (void *)id->driver_data);
++
++	ret = hid_open_report(hdev);
++	if (ret)
++		return ret;
++
++	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
++}
++
+ static const struct hid_device_id ite_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+ 	/* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_SYNAPTICS,
+-		     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
++		     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012),
++	  .driver_data = QUIRK_TOUCHPAD_ON_OFF_REPORT },
+ 	/* ITE8910 USB kbd ctlr, with Synaptics touchpad connected to it. */
+ 	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ 		     USB_VENDOR_ID_SYNAPTICS,
+@@ -55,6 +111,9 @@ MODULE_DEVICE_TABLE(hid, ite_devices);
+ static struct hid_driver ite_driver = {
+ 	.name = "itetech",
+ 	.id_table = ite_devices,
++	.probe = ite_probe,
++	.report_fixup = ite_report_fixup,
++	.input_mapping = ite_input_mapping,
+ 	.event = ite_event,
+ };
+ module_hid_driver(ite_driver);
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index e49d36de07968..919551ed5809c 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -3789,6 +3789,9 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	{ /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
+ 	  LDJ_DEVICE(0xb305),
+ 	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
++	{ /* Dinovo Edge (Bluetooth-receiver in HID proxy mode) */
++	  LDJ_DEVICE(0xb309),
++	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ 	{ /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
+ 	  LDJ_DEVICE(0xb30b),
+ 	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+@@ -3831,6 +3834,9 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	{ /* MX5000 keyboard over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
+ 	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
++	{ /* Dinovo Edge keyboard over Bluetooth */
++	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb309),
++	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ 	{ /* MX5500 keyboard over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
+ 	  .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 0440e2f6e8a3c..60d188a704e5e 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -83,7 +83,12 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_SAT_ADAPTOR), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_GV186),
++		HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMEVICE, USB_DEVICE_ID_GAMEVICE_KISHI),
++		HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index 94c7398b5c279..3dd7d32467378 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -483,7 +483,8 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
+ 		return 1;
+ 
+ 	ptr = raw_data;
+-	ptr++; /* Skip report id */
++	if (report->id)
++		ptr++; /* Skip report id */
+ 
+ 	spin_lock_irqsave(&pdata->lock, flags);
+ 
+diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
+index 86b568037cb8a..8e9c9e646cb7d 100644
+--- a/drivers/hid/hid-uclogic-core.c
++++ b/drivers/hid/hid-uclogic-core.c
+@@ -385,6 +385,8 @@ static const struct hid_device_id uclogic_devices[] = {
+ 				USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
+ 				USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
++				USB_DEVICE_ID_UGTIZER_TABLET_GT5040) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ 				USB_DEVICE_ID_UGEE_TABLET_G5) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
+index 78a364ae2f685..e80c812f44a77 100644
+--- a/drivers/hid/hid-uclogic-params.c
++++ b/drivers/hid/hid-uclogic-params.c
+@@ -997,6 +997,8 @@ int uclogic_params_init(struct uclogic_params *params,
+ 		break;
+ 	case VID_PID(USB_VENDOR_ID_UGTIZER,
+ 		     USB_DEVICE_ID_UGTIZER_TABLET_GP0610):
++	case VID_PID(USB_VENDOR_ID_UGTIZER,
++		     USB_DEVICE_ID_UGTIZER_TABLET_GT5040):
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+ 		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540):
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index bb75328193957..e8933daab4995 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2423,6 +2423,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
+ 
+ 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
++	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
+ 
+ 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
+ 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
+@@ -4614,11 +4615,11 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ 					      V2_QPC_BYTE_28_AT_M,
+ 					      V2_QPC_BYTE_28_AT_S);
+ 	qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
+-					    V2_QPC_BYTE_212_RETRY_CNT_M,
+-					    V2_QPC_BYTE_212_RETRY_CNT_S);
++					    V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
++					    V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
+ 	qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
+-					    V2_QPC_BYTE_244_RNR_CNT_M,
+-					    V2_QPC_BYTE_244_RNR_CNT_S);
++					    V2_QPC_BYTE_244_RNR_NUM_INIT_M,
++					    V2_QPC_BYTE_244_RNR_NUM_INIT_S);
+ 
+ done:
+ 	qp_attr->cur_qp_state = qp_attr->qp_state;
+diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
+index c3cfea243af8c..119b2573c9a08 100644
+--- a/drivers/infiniband/hw/mthca/mthca_cq.c
++++ b/drivers/infiniband/hw/mthca/mthca_cq.c
+@@ -803,8 +803,10 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
+ 	}
+ 
+ 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+-	if (IS_ERR(mailbox))
++	if (IS_ERR(mailbox)) {
++		err = PTR_ERR(mailbox);
+ 		goto err_out_arm;
++	}
+ 
+ 	cq_context = mailbox->buf;
+ 
+@@ -846,9 +848,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
+ 	}
+ 
+ 	spin_lock_irq(&dev->cq_table.lock);
+-	if (mthca_array_set(&dev->cq_table.cq,
+-			    cq->cqn & (dev->limits.num_cqs - 1),
+-			    cq)) {
++	err = mthca_array_set(&dev->cq_table.cq,
++			      cq->cqn & (dev->limits.num_cqs - 1), cq);
++	if (err) {
+ 		spin_unlock_irq(&dev->cq_table.lock);
+ 		goto err_out_free_mr;
+ 	}
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 20ff2bed3917a..5a89c1cfdaa97 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -121,6 +121,7 @@ module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600);
+ MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive data) of normally sanitize-filtered kbd data traffic debug log [pre-condition: i8042.debug=1 enabled]");
+ #endif
+ 
++static bool i8042_present;
+ static bool i8042_bypass_aux_irq_test;
+ static char i8042_kbd_firmware_id[128];
+ static char i8042_aux_firmware_id[128];
+@@ -341,6 +342,9 @@ int i8042_command(unsigned char *param, int command)
+ 	unsigned long flags;
+ 	int retval;
+ 
++	if (!i8042_present)
++		return -1;
++
+ 	spin_lock_irqsave(&i8042_lock, flags);
+ 	retval = __i8042_command(param, command);
+ 	spin_unlock_irqrestore(&i8042_lock, flags);
+@@ -1609,12 +1613,15 @@ static int __init i8042_init(void)
+ 
+ 	err = i8042_platform_init();
+ 	if (err)
+-		return err;
++		return (err == -ENODEV) ? 0 : err;
+ 
+ 	err = i8042_controller_check();
+ 	if (err)
+ 		goto err_platform_exit;
+ 
++	/* Set this before creating the dev to allow i8042_command to work right away */
++	i8042_present = true;
++
+ 	pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
+ 	if (IS_ERR(pdev)) {
+ 		err = PTR_ERR(pdev);
+@@ -1633,6 +1640,9 @@ static int __init i8042_init(void)
+ 
+ static void __exit i8042_exit(void)
+ {
++	if (!i8042_present)
++		return;
++
+ 	platform_device_unregister(i8042_platform_device);
+ 	platform_driver_unregister(&i8042_driver);
+ 	i8042_platform_exit();
+diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
+index 1d027623c7760..abd011fcecf4a 100644
+--- a/drivers/irqchip/irq-sni-exiu.c
++++ b/drivers/irqchip/irq-sni-exiu.c
+@@ -136,7 +136,7 @@ static int exiu_domain_translate(struct irq_domain *domain,
+ 		if (fwspec->param_count != 2)
+ 			return -EINVAL;
+ 		*hwirq = fwspec->param[0];
+-		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
++		*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 246fa2657d744..f9a2a9ecbac9e 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -990,7 +990,7 @@ static const struct can_bittiming_const m_can_bittiming_const_31X = {
+ 	.name = KBUILD_MODNAME,
+ 	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
+ 	.tseg1_max = 256,
+-	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
++	.tseg2_min = 2,		/* Time segment 2 = phase_seg2 */
+ 	.tseg2_max = 128,
+ 	.sjw_max = 128,
+ 	.brp_min = 1,
+@@ -1605,7 +1605,7 @@ static int m_can_open(struct net_device *dev)
+ 		INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+ 
+ 		err = request_threaded_irq(dev->irq, NULL, m_can_isr,
+-					   IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
++					   IRQF_ONESHOT,
+ 					   dev->name, dev);
+ 	} else {
+ 		err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index a4b4b742c80c3..0ad13d78815c5 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -63,21 +63,27 @@ enum gs_can_identify_mode {
+ };
+ 
+ /* data types passed between host and device */
++
++/* The firmware on the original USB2CAN by Geschwister Schneider
++ * Technologie Entwicklungs- und Vertriebs UG exchanges all data
++ * between the host and the device in host byte order. This is done
++ * with the struct gs_host_config::byte_order member, which is sent
++ * first to indicate the desired byte order.
++ *
++ * The widely used open source firmware candleLight doesn't support
++ * this feature and exchanges the data in little endian byte order.
++ */
+ struct gs_host_config {
+-	u32 byte_order;
++	__le32 byte_order;
+ } __packed;
+-/* All data exchanged between host and device is exchanged in host byte order,
+- * thanks to the struct gs_host_config byte_order member, which is sent first
+- * to indicate the desired byte order.
+- */
+ 
+ struct gs_device_config {
+ 	u8 reserved1;
+ 	u8 reserved2;
+ 	u8 reserved3;
+ 	u8 icount;
+-	u32 sw_version;
+-	u32 hw_version;
++	__le32 sw_version;
++	__le32 hw_version;
+ } __packed;
+ 
+ #define GS_CAN_MODE_NORMAL               0
+@@ -87,26 +93,26 @@ struct gs_device_config {
+ #define GS_CAN_MODE_ONE_SHOT             BIT(3)
+ 
+ struct gs_device_mode {
+-	u32 mode;
+-	u32 flags;
++	__le32 mode;
++	__le32 flags;
+ } __packed;
+ 
+ struct gs_device_state {
+-	u32 state;
+-	u32 rxerr;
+-	u32 txerr;
++	__le32 state;
++	__le32 rxerr;
++	__le32 txerr;
+ } __packed;
+ 
+ struct gs_device_bittiming {
+-	u32 prop_seg;
+-	u32 phase_seg1;
+-	u32 phase_seg2;
+-	u32 sjw;
+-	u32 brp;
++	__le32 prop_seg;
++	__le32 phase_seg1;
++	__le32 phase_seg2;
++	__le32 sjw;
++	__le32 brp;
+ } __packed;
+ 
+ struct gs_identify_mode {
+-	u32 mode;
++	__le32 mode;
+ } __packed;
+ 
+ #define GS_CAN_FEATURE_LISTEN_ONLY      BIT(0)
+@@ -117,23 +123,23 @@ struct gs_identify_mode {
+ #define GS_CAN_FEATURE_IDENTIFY         BIT(5)
+ 
+ struct gs_device_bt_const {
+-	u32 feature;
+-	u32 fclk_can;
+-	u32 tseg1_min;
+-	u32 tseg1_max;
+-	u32 tseg2_min;
+-	u32 tseg2_max;
+-	u32 sjw_max;
+-	u32 brp_min;
+-	u32 brp_max;
+-	u32 brp_inc;
++	__le32 feature;
++	__le32 fclk_can;
++	__le32 tseg1_min;
++	__le32 tseg1_max;
++	__le32 tseg2_min;
++	__le32 tseg2_max;
++	__le32 sjw_max;
++	__le32 brp_min;
++	__le32 brp_max;
++	__le32 brp_inc;
+ } __packed;
+ 
+ #define GS_CAN_FLAG_OVERFLOW 1
+ 
+ struct gs_host_frame {
+ 	u32 echo_id;
+-	u32 can_id;
++	__le32 can_id;
+ 
+ 	u8 can_dlc;
+ 	u8 channel;
+@@ -329,13 +335,13 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
+ 		if (!skb)
+ 			return;
+ 
+-		cf->can_id = hf->can_id;
++		cf->can_id = le32_to_cpu(hf->can_id);
+ 
+ 		cf->can_dlc = get_can_dlc(hf->can_dlc);
+ 		memcpy(cf->data, hf->data, 8);
+ 
+ 		/* ERROR frames tell us information about the controller */
+-		if (hf->can_id & CAN_ERR_FLAG)
++		if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
+ 			gs_update_state(dev, cf);
+ 
+ 		netdev->stats.rx_packets++;
+@@ -418,11 +424,11 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
+ 	if (!dbt)
+ 		return -ENOMEM;
+ 
+-	dbt->prop_seg = bt->prop_seg;
+-	dbt->phase_seg1 = bt->phase_seg1;
+-	dbt->phase_seg2 = bt->phase_seg2;
+-	dbt->sjw = bt->sjw;
+-	dbt->brp = bt->brp;
++	dbt->prop_seg = cpu_to_le32(bt->prop_seg);
++	dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
++	dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
++	dbt->sjw = cpu_to_le32(bt->sjw);
++	dbt->brp = cpu_to_le32(bt->brp);
+ 
+ 	/* request bit timings */
+ 	rc = usb_control_msg(interface_to_usbdev(intf),
+@@ -503,7 +509,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
+ 
+ 	cf = (struct can_frame *)skb->data;
+ 
+-	hf->can_id = cf->can_id;
++	hf->can_id = cpu_to_le32(cf->can_id);
+ 	hf->can_dlc = cf->can_dlc;
+ 	memcpy(hf->data, cf->data, cf->can_dlc);
+ 
+@@ -573,6 +579,7 @@ static int gs_can_open(struct net_device *netdev)
+ 	int rc, i;
+ 	struct gs_device_mode *dm;
+ 	u32 ctrlmode;
++	u32 flags = 0;
+ 
+ 	rc = open_candev(netdev);
+ 	if (rc)
+@@ -640,24 +647,24 @@ static int gs_can_open(struct net_device *netdev)
+ 
+ 	/* flags */
+ 	ctrlmode = dev->can.ctrlmode;
+-	dm->flags = 0;
+ 
+ 	if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
+-		dm->flags |= GS_CAN_MODE_LOOP_BACK;
++		flags |= GS_CAN_MODE_LOOP_BACK;
+ 	else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+-		dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
++		flags |= GS_CAN_MODE_LISTEN_ONLY;
+ 
+ 	/* Controller is not allowed to retry TX
+ 	 * this mode is unavailable on atmels uc3c hardware
+ 	 */
+ 	if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+-		dm->flags |= GS_CAN_MODE_ONE_SHOT;
++		flags |= GS_CAN_MODE_ONE_SHOT;
+ 
+ 	if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+-		dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
++		flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ 
+ 	/* finally start device */
+-	dm->mode = GS_CAN_MODE_START;
++	dm->mode = cpu_to_le32(GS_CAN_MODE_START);
++	dm->flags = cpu_to_le32(flags);
+ 	rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ 			     usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
+ 			     GS_USB_BREQ_MODE,
+@@ -737,9 +744,9 @@ static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+ 		return -ENOMEM;
+ 
+ 	if (do_identify)
+-		imode->mode = GS_CAN_IDENTIFY_ON;
++		imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
+ 	else
+-		imode->mode = GS_CAN_IDENTIFY_OFF;
++		imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
+ 
+ 	rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ 			     usb_sndctrlpipe(interface_to_usbdev(dev->iface),
+@@ -790,6 +797,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
+ 	struct net_device *netdev;
+ 	int rc;
+ 	struct gs_device_bt_const *bt_const;
++	u32 feature;
+ 
+ 	bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
+ 	if (!bt_const)
+@@ -830,14 +838,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
+ 
+ 	/* dev settup */
+ 	strcpy(dev->bt_const.name, "gs_usb");
+-	dev->bt_const.tseg1_min = bt_const->tseg1_min;
+-	dev->bt_const.tseg1_max = bt_const->tseg1_max;
+-	dev->bt_const.tseg2_min = bt_const->tseg2_min;
+-	dev->bt_const.tseg2_max = bt_const->tseg2_max;
+-	dev->bt_const.sjw_max = bt_const->sjw_max;
+-	dev->bt_const.brp_min = bt_const->brp_min;
+-	dev->bt_const.brp_max = bt_const->brp_max;
+-	dev->bt_const.brp_inc = bt_const->brp_inc;
++	dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
++	dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
++	dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
++	dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
++	dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
++	dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
++	dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
++	dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
+ 
+ 	dev->udev = interface_to_usbdev(intf);
+ 	dev->iface = intf;
+@@ -854,28 +862,29 @@ static struct gs_can *gs_make_candev(unsigned int channel,
+ 
+ 	/* can settup */
+ 	dev->can.state = CAN_STATE_STOPPED;
+-	dev->can.clock.freq = bt_const->fclk_can;
++	dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
+ 	dev->can.bittiming_const = &dev->bt_const;
+ 	dev->can.do_set_bittiming = gs_usb_set_bittiming;
+ 
+ 	dev->can.ctrlmode_supported = 0;
+ 
+-	if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
++	feature = le32_to_cpu(bt_const->feature);
++	if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
+ 		dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+ 
+-	if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
++	if (feature & GS_CAN_FEATURE_LOOP_BACK)
+ 		dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
+ 
+-	if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
++	if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
+ 		dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ 
+-	if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
++	if (feature & GS_CAN_FEATURE_ONE_SHOT)
+ 		dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+ 
+ 	SET_NETDEV_DEV(netdev, &intf->dev);
+ 
+-	if (dconf->sw_version > 1)
+-		if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY)
++	if (le32_to_cpu(dconf->sw_version) > 1)
++		if (feature & GS_CAN_FEATURE_IDENTIFY)
+ 			netdev->ethtool_ops = &gs_usb_ethtool_ops;
+ 
+ 	kfree(bt_const);
+@@ -910,7 +919,7 @@ static int gs_usb_probe(struct usb_interface *intf,
+ 	if (!hconf)
+ 		return -ENOMEM;
+ 
+-	hconf->byte_order = 0x0000beef;
++	hconf->byte_order = cpu_to_le32(0x0000beef);
+ 
+ 	/* send host config */
+ 	rc = usb_control_msg(interface_to_usbdev(intf),
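
The gs_usb hunks above all apply one pattern: every multi-byte value that crosses the USB wire lives in a fixed little-endian field, so the driver converts with cpu_to_le32() on the way out and le32_to_cpu() on the way in instead of copying raw host-order integers. Below is a minimal kernel-context sketch of that pattern only; struct demo_wire_msg, demo_fill() and demo_mode() are hypothetical names, not part of the driver.

/* Sketch of the host-order <-> wire-order conversion used above. The
 * struct and helpers are hypothetical; only the __le32 / cpu_to_le32() /
 * le32_to_cpu() usage mirrors the driver hunks.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_wire_msg {
	__le32 mode;	/* little-endian on the wire */
	__le32 flags;
};

static void demo_fill(struct demo_wire_msg *msg, u32 mode, u32 flags)
{
	msg->mode  = cpu_to_le32(mode);		/* host -> wire */
	msg->flags = cpu_to_le32(flags);
}

static u32 demo_mode(const struct demo_wire_msg *msg)
{
	return le32_to_cpu(msg->mode);		/* wire -> host */
}

On little-endian hosts the conversions compile away; on big-endian hosts they are what keeps the device protocol intact, which is the point of the hunks above.
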
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 92e4d140df6fa..469b155df4885 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -2143,6 +2143,8 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+ 		usleep_range(10000, 20000);
+ 		gpiod_set_value_cansleep(gpiod, 0);
+ 		usleep_range(10000, 20000);
++
++		mv88e6xxx_g1_wait_eeprom_done(chip);
+ 	}
+ }
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
+index 8a903624fdd7c..938dd146629f1 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.c
++++ b/drivers/net/dsa/mv88e6xxx/global1.c
+@@ -75,6 +75,37 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
+ 	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
+ }
+ 
++void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
++{
++	const unsigned long timeout = jiffies + 1 * HZ;
++	u16 val;
++	int err;
++
++	/* Wait up to 1 second for the switch to finish reading the
++	 * EEPROM.
++	 */
++	while (time_before(jiffies, timeout)) {
++		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
++		if (err) {
++			dev_err(chip->dev, "Error reading status");
++			return;
++		}
++
++		/* If the switch is still resetting, it may not
++		 * respond on the bus, and so MDIO read returns
++		 * 0xffff. Differentiate between that, and waiting for
++		 * the EEPROM to be done by bit 0 being set.
++		 */
++		if (val != 0xffff &&
++		    val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
++			return;
++
++		usleep_range(1000, 2000);
++	}
++
++	dev_err(chip->dev, "Timeout waiting for EEPROM done");
++}
++
+ /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
+  * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
+  * Offset 0x03: Switch MAC Address Register Bytes 4 & 5
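
The new mv88e6xxx_g1_wait_eeprom_done() above is a bounded poll: a jiffies deadline, a sleep between reads, and a check that distinguishes "bit not set yet" from "bus not answering at all" (an MDIO read of 0xffff). The sketch below restates that shape in a generalized form; demo_poll_bit() and its read callback are hypothetical.

/* Generalized sketch of the bounded wait added above; only the shape
 * (deadline via jiffies, time_before(), usleep_range() between polls)
 * mirrors the helper. Names are hypothetical.
 */
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/bits.h>
#include <linux/errno.h>

static int demo_poll_bit(int (*read)(void *ctx, u16 *val), void *ctx,
			 unsigned int bit, unsigned long timeout_ms)
{
	const unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
	u16 val;
	int err;

	while (time_before(jiffies, deadline)) {
		err = read(ctx, &val);
		if (err)
			return err;

		/* 0xffff usually means the bus did not answer at all. */
		if (val != 0xffff && (val & BIT(bit)))
			return 0;

		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}
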
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
+index 0ae96a1e919b6..08d66ef6aace6 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.h
++++ b/drivers/net/dsa/mv88e6xxx/global1.h
+@@ -277,6 +277,7 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+ int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
+ int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
++void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
+ 
+ int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
+ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 635345bced313..2e5348ec2a2e9 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -2622,16 +2622,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
+ 		goto err_mmio_read_less;
+ 	}
+ 
+-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
++	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
+ 	if (rc) {
+-		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
+-		goto err_mmio_read_less;
+-	}
+-
+-	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+-	if (rc) {
+-		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
+-			rc);
++		dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
+ 		goto err_mmio_read_less;
+ 	}
+ 
+@@ -3450,6 +3443,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		return rc;
+ 	}
+ 
++	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
++	if (rc) {
++		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
++		goto err_disable_device;
++	}
++
+ 	pci_set_master(pdev);
+ 
+ 	ena_dev = vzalloc(sizeof(*ena_dev));
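
The ena change above folds the pci_set_dma_mask() / pci_set_consistent_dma_mask() pair into a single dma_set_mask_and_coherent() call and also sets a mask in ena_probe() before any DMA allocations happen. A probe-time sketch of that call pattern follows; demo_probe() and DEMO_DMA_BITS are hypothetical, only dma_set_mask_and_coherent() is the real API.

/* Sketch of the probe-time DMA mask setup used above; names are
 * hypothetical.
 */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#define DEMO_DMA_BITS	48	/* hypothetical device address width */

static int demo_probe(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_device_mem(pdev);
	if (rc)
		return rc;

	/* Sets both the streaming and the coherent mask in one call. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(DEMO_DMA_BITS));
	if (rc) {
		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
		pci_disable_device(pdev);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
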
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 6f777e9b4b936..7c8187d386756 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -10826,7 +10826,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+ 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+ 	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+ 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+-		goto init_err_disable;
++		rc = -EIO;
++		goto init_err_release;
+ 	}
+ 
+ 	pci_set_master(pdev);
+@@ -11892,6 +11893,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 				create_singlethread_workqueue("bnxt_pf_wq");
+ 			if (!bnxt_pf_wq) {
+ 				dev_err(&pdev->dev, "Unable to create workqueue.\n");
++				rc = -ENOMEM;
+ 				goto init_err_pci_clean;
+ 			}
+ 		}
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 202af8dc79662..cb50b41cd3df2 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -630,7 +630,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
+ 		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+ 		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+ 		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
+-	fwr->smac_sel = f->smt->idx;
++	if (f->fs.newsmac)
++		fwr->smac_sel = f->smt->idx;
+ 	fwr->rx_chan_rx_rpl_iq =
+ 		htons(FW_FILTER_WR_RX_CHAN_V(0) |
+ 		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index f357b9cbfee72..e53994ca3142c 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1994,8 +1994,11 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ 	for (i = 0; i < adapter->req_rx_queues; i++)
+ 		napi_schedule(&adapter->napi[i]);
+ 
+-	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
++	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
++	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ 		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
++		call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
++	}
+ 
+ 	rc = 0;
+ 
+@@ -2065,6 +2068,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
+ 	if (rc)
+ 		return IBMVNIC_OPEN_FAILED;
+ 
++	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
++	call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
++
+ 	return 0;
+ }
+ 
+@@ -2761,6 +2767,9 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
+ {
+ 	int i, rc;
+ 
++	if (!adapter->tx_scrq || !adapter->rx_scrq)
++		return -EINVAL;
++
+ 	for (i = 0; i < adapter->req_tx_queues; i++) {
+ 		netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
+ 		rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+@@ -4768,6 +4777,9 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
+ 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ 
+ 	/* Clean out the queue */
++	if (!crq->msgs)
++		return -EINVAL;
++
+ 	memset(crq->msgs, 0, PAGE_SIZE);
+ 	crq->cur = 0;
+ 	crq->active = false;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 401304d4d5536..cfe99bae8e362 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -150,6 +150,7 @@ enum i40e_state_t {
+ 	__I40E_CLIENT_RESET,
+ 	__I40E_VIRTCHNL_OP_PENDING,
+ 	__I40E_RECOVERY_MODE,
++	__I40E_VF_RESETS_DISABLED,	/* disable resets during i40e_remove */
+ 	/* This must be last as it determines the size of the BITMAP */
+ 	__I40E_STATE_SIZE__,
+ };
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index b3c3911adfc2e..2b4327416457d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3988,8 +3988,16 @@ static irqreturn_t i40e_intr(int irq, void *data)
+ 	}
+ 
+ 	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+-		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+-		set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
++		/* disable any further VFLR event notifications */
++		if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
++			u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
++
++			reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
++			wr32(hw, I40E_PFINT_ICR0_ENA, reg);
++		} else {
++			ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
++			set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
++		}
+ 	}
+ 
+ 	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
+@@ -15345,6 +15353,11 @@ static void i40e_remove(struct pci_dev *pdev)
+ 	while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ 		usleep_range(1000, 2000);
+ 
++	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
++		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
++		i40e_free_vfs(pf);
++		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
++	}
+ 	/* no more scheduling of any task */
+ 	set_bit(__I40E_SUSPENDED, pf->state);
+ 	set_bit(__I40E_DOWN, pf->state);
+@@ -15371,11 +15384,6 @@ static void i40e_remove(struct pci_dev *pdev)
+ 	 */
+ 	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ 
+-	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+-		i40e_free_vfs(pf);
+-		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+-	}
+-
+ 	i40e_fdir_teardown(pf);
+ 
+ 	/* If there is a switch structure or any orphans, remove them.
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 38042d610f82c..09ff3f335ffa6 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -1335,7 +1335,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
+  * @vf: pointer to the VF structure
+  * @flr: VFLR was issued or not
+  *
+- * Returns true if the VF is reset, false otherwise.
++ * Returns true if the VF is in reset, resets successfully, or resets
++ * are disabled and false otherwise.
+  **/
+ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
+ {
+@@ -1345,11 +1346,14 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
+ 	u32 reg;
+ 	int i;
+ 
++	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
++		return true;
++
+ 	/* If the VFs have been disabled, this means something else is
+ 	 * resetting the VF, so we shouldn't continue.
+ 	 */
+ 	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+-		return false;
++		return true;
+ 
+ 	i40e_trigger_vf_reset(vf, flr);
+ 
+@@ -1513,6 +1517,15 @@ void i40e_free_vfs(struct i40e_pf *pf)
+ 
+ 	i40e_notify_client_of_vf_enable(pf, 0);
+ 
++	/* Disable IOV before freeing resources. This lets any VF drivers
++	 * running in the host get themselves cleaned up before we yank
++	 * the carpet out from underneath their feet.
++	 */
++	if (!pci_vfs_assigned(pf->pdev))
++		pci_disable_sriov(pf->pdev);
++	else
++		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
++
+ 	/* Amortize wait time by stopping all VFs at the same time */
+ 	for (i = 0; i < pf->num_alloc_vfs; i++) {
+ 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
+@@ -1528,15 +1541,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
+ 		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
+ 	}
+ 
+-	/* Disable IOV before freeing resources. This lets any VF drivers
+-	 * running in the host get themselves cleaned up before we yank
+-	 * the carpet out from underneath their feet.
+-	 */
+-	if (!pci_vfs_assigned(pf->pdev))
+-		pci_disable_sriov(pf->pdev);
+-	else
+-		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+-
+ 	/* free up VF resources */
+ 	tmp = pf->num_alloc_vfs;
+ 	pf->num_alloc_vfs = 0;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 01b26b3327b01..73b8bf0fbf16f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -3069,6 +3069,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
+ 			goto out_unlock;
+ 		}
+ 
++		if (vif->type == NL80211_IFTYPE_STATION)
++			vif->bss_conf.he_support = sta->he_cap.has_he;
++
+ 		if (sta->tdls &&
+ 		    (vif->p2p ||
+ 		     iwl_mvm_tdls_sta_count(mvm, NULL) ==
+diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
+index e4f7fa00862de..2505abc8ef281 100644
+--- a/drivers/nfc/s3fwrn5/i2c.c
++++ b/drivers/nfc/s3fwrn5/i2c.c
+@@ -26,8 +26,8 @@ struct s3fwrn5_i2c_phy {
+ 	struct i2c_client *i2c_dev;
+ 	struct nci_dev *ndev;
+ 
+-	unsigned int gpio_en;
+-	unsigned int gpio_fw_wake;
++	int gpio_en;
++	int gpio_fw_wake;
+ 
+ 	struct mutex mutex;
+ 
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index f5d12bf109c78..9b1fc8633cfe1 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -271,9 +271,21 @@ static void nvme_dbbuf_init(struct nvme_dev *dev,
+ 	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
+ }
+ 
++static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
++{
++	if (!nvmeq->qid)
++		return;
++
++	nvmeq->dbbuf_sq_db = NULL;
++	nvmeq->dbbuf_cq_db = NULL;
++	nvmeq->dbbuf_sq_ei = NULL;
++	nvmeq->dbbuf_cq_ei = NULL;
++}
++
+ static void nvme_dbbuf_set(struct nvme_dev *dev)
+ {
+ 	struct nvme_command c;
++	unsigned int i;
+ 
+ 	if (!dev->dbbuf_dbs)
+ 		return;
+@@ -287,6 +299,9 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
+ 		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
+ 		/* Free memory and continue on */
+ 		nvme_dbbuf_dma_free(dev);
++
++		for (i = 1; i <= dev->online_queues; i++)
++			nvme_dbbuf_free(&dev->queues[i]);
+ 	}
+ }
+ 
+diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
+index 2ea8497af82a6..bf5d80b97597b 100644
+--- a/drivers/phy/tegra/xusb.c
++++ b/drivers/phy/tegra/xusb.c
+@@ -949,6 +949,7 @@ power_down:
+ reset:
+ 	reset_control_assert(padctl->rst);
+ remove:
++	platform_set_drvdata(pdev, NULL);
+ 	soc->ops->remove(padctl);
+ 	return err;
+ }
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index abcb336a515a1..5081048f2356e 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -4238,6 +4238,7 @@ static void hotkey_resume(void)
+ 		pr_err("error while attempting to reset the event firmware interface\n");
+ 
+ 	tpacpi_send_radiosw_update();
++	tpacpi_input_send_tabletsw();
+ 	hotkey_tablet_mode_notify_change();
+ 	hotkey_wakeup_reason_notify_change();
+ 	hotkey_wakeup_hotunplug_complete_notify_change();
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index a1e6569427c34..71a969fc3b206 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -1485,7 +1485,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
+ 	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+ 	char *buffer;
+ 	char *cmd;
+-	int lcd_out, crt_out, tv_out;
++	int lcd_out = -1, crt_out = -1, tv_out = -1;
+ 	int remain = count;
+ 	int value;
+ 	int ret;
+@@ -1517,7 +1517,6 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
+ 
+ 	kfree(cmd);
+ 
+-	lcd_out = crt_out = tv_out = -1;
+ 	ret = get_video_status(dev, &video_out);
+ 	if (!ret) {
+ 		unsigned int new_video_out = video_out;
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 820f2c29376c0..93b4cb156b0bc 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -436,10 +436,13 @@ enum qeth_qdio_out_buffer_state {
+ 	QETH_QDIO_BUF_EMPTY,
+ 	/* Filled by driver; owned by hardware in order to be sent. */
+ 	QETH_QDIO_BUF_PRIMED,
+-	/* Identified to be pending in TPQ. */
++	/* Discovered by the TX completion code: */
+ 	QETH_QDIO_BUF_PENDING,
+-	/* Found in completion queue. */
+-	QETH_QDIO_BUF_IN_CQ,
++	/* Finished by the TX completion code: */
++	QETH_QDIO_BUF_NEED_QAOB,
++	/* Received QAOB notification on CQ: */
++	QETH_QDIO_BUF_QAOB_OK,
++	QETH_QDIO_BUF_QAOB_ERROR,
+ 	/* Handled via transfer pending / completion queue. */
+ 	QETH_QDIO_BUF_HANDLED_DELAYED,
+ };
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 5043f0fcf399a..fad1c46d4b0e1 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -31,6 +31,7 @@
+ 
+ #include <net/iucv/af_iucv.h>
+ #include <net/dsfield.h>
++#include <net/sock.h>
+ 
+ #include <asm/ebcdic.h>
+ #include <asm/chpid.h>
+@@ -425,18 +426,13 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
+ 
+ 		}
+ 	}
+-	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+-					QETH_QDIO_BUF_HANDLED_DELAYED)) {
+-		/* for recovery situations */
+-		qeth_init_qdio_out_buf(q, bidx);
+-		QETH_CARD_TEXT(q->card, 2, "clprecov");
+-	}
+ }
+ 
+ 
+ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 				 unsigned long phys_aob_addr)
+ {
++	enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
+ 	struct qaob *aob;
+ 	struct qeth_qdio_out_buffer *buffer;
+ 	enum iucv_tx_notify notification;
+@@ -448,22 +444,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
+ 	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
+ 
+-	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+-			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
+-		notification = TX_NOTIFY_OK;
+-	} else {
+-		WARN_ON_ONCE(atomic_read(&buffer->state) !=
+-							QETH_QDIO_BUF_PENDING);
+-		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
+-		notification = TX_NOTIFY_DELAYED_OK;
+-	}
+-
+-	if (aob->aorc != 0)  {
+-		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
+-		notification = qeth_compute_cq_notification(aob->aorc, 1);
+-	}
+-	qeth_notify_skbs(buffer->q, buffer, notification);
+-
+ 	/* Free dangling allocations. The attached skbs are handled by
+ 	 * qeth_cleanup_handled_pending().
+ 	 */
+@@ -474,7 +454,33 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
+ 			kmem_cache_free(qeth_core_header_cache,
+ 					(void *) aob->sba[i]);
+ 	}
+-	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
++
++	if (aob->aorc) {
++		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
++		new_state = QETH_QDIO_BUF_QAOB_ERROR;
++	}
++
++	switch (atomic_xchg(&buffer->state, new_state)) {
++	case QETH_QDIO_BUF_PRIMED:
++		/* Faster than TX completion code. */
++		notification = qeth_compute_cq_notification(aob->aorc, 0);
++		qeth_notify_skbs(buffer->q, buffer, notification);
++		atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
++		break;
++	case QETH_QDIO_BUF_PENDING:
++		/* TX completion code is active and will handle the async
++		 * completion for us.
++		 */
++		break;
++	case QETH_QDIO_BUF_NEED_QAOB:
++		/* TX completion code is already finished. */
++		notification = qeth_compute_cq_notification(aob->aorc, 1);
++		qeth_notify_skbs(buffer->q, buffer, notification);
++		atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
++		break;
++	default:
++		WARN_ON_ONCE(1);
++	}
+ 
+ 	qdio_release_aob(aob);
+ }
+@@ -1083,7 +1089,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
+ 	skb_queue_walk(&buf->skb_list, skb) {
+ 		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
+ 		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
+-		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
++		if (skb->sk && skb->sk->sk_family == PF_IUCV)
+ 			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
+ 	}
+ }
+@@ -1094,9 +1100,6 @@ static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+ 	struct qeth_qdio_out_q *queue = buf->q;
+ 	struct sk_buff *skb;
+ 
+-	/* release may never happen from within CQ tasklet scope */
+-	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
+-
+ 	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+ 		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
+ 
+@@ -5223,9 +5226,32 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
+ 
+ 		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+ 						   QETH_QDIO_BUF_PENDING) ==
+-		    QETH_QDIO_BUF_PRIMED)
++		    QETH_QDIO_BUF_PRIMED) {
+ 			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
+ 
++			/* Handle race with qeth_qdio_handle_aob(): */
++			switch (atomic_xchg(&buffer->state,
++					    QETH_QDIO_BUF_NEED_QAOB)) {
++			case QETH_QDIO_BUF_PENDING:
++				/* No concurrent QAOB notification. */
++				break;
++			case QETH_QDIO_BUF_QAOB_OK:
++				qeth_notify_skbs(queue, buffer,
++						 TX_NOTIFY_DELAYED_OK);
++				atomic_set(&buffer->state,
++					   QETH_QDIO_BUF_HANDLED_DELAYED);
++				break;
++			case QETH_QDIO_BUF_QAOB_ERROR:
++				qeth_notify_skbs(queue, buffer,
++						 TX_NOTIFY_DELAYED_GENERALERROR);
++				atomic_set(&buffer->state,
++					   QETH_QDIO_BUF_HANDLED_DELAYED);
++				break;
++			default:
++				WARN_ON_ONCE(1);
++			}
++		}
++
+ 		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
+ 
+ 		/* prepare the queue slot for re-use: */
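
The qeth hunks above resolve a race between the TX completion path and the QAOB handler by having each side atomic_xchg() the buffer state and then act on what the other side had already recorded: whichever path runs second finishes the notification. The sketch below reduces that handshake to its shape; the states and functions are hypothetical, only atomic_cmpxchg()/atomic_xchg() are the real primitives, and the terminal "handled" state of the real driver is omitted for brevity.

/* Reduced sketch of the two-party completion handshake above. */
#include <linux/atomic.h>

enum demo_state { DEMO_PRIMED, DEMO_PENDING, DEMO_NEED_ASYNC, DEMO_ASYNC_DONE };

/* TX completion path: */
static void demo_tx_complete(atomic_t *state)
{
	if (atomic_cmpxchg(state, DEMO_PRIMED, DEMO_PENDING) != DEMO_PRIMED)
		return;		/* buffer was completed elsewhere */

	switch (atomic_xchg(state, DEMO_NEED_ASYNC)) {
	case DEMO_PENDING:	/* async handler has not run yet */
		break;
	case DEMO_ASYNC_DONE:	/* async handler beat us; finish here */
		/* notify + recycle buffer */
		break;
	default:
		break;
	}
}

/* Async (QAOB-style) handler: */
static void demo_async_complete(atomic_t *state)
{
	switch (atomic_xchg(state, DEMO_ASYNC_DONE)) {
	case DEMO_PRIMED:	/* faster than TX completion */
	case DEMO_NEED_ASYNC:	/* TX completion already finished */
		/* notify + recycle buffer */
		break;
	case DEMO_PENDING:	/* TX completion is active; it will handle it */
		break;
	default:
		break;
	}
}
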
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 70b99c0e2e678..f954be3d5ee22 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -533,8 +533,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
+ 	if (conn->task == task)
+ 		conn->task = NULL;
+ 
+-	if (conn->ping_task == task)
+-		conn->ping_task = NULL;
++	if (READ_ONCE(conn->ping_task) == task)
++		WRITE_ONCE(conn->ping_task, NULL);
+ 
+ 	/* release get from queueing */
+ 	__iscsi_put_task(task);
+@@ -738,6 +738,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ 						   task->conn->session->age);
+ 	}
+ 
++	if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
++		WRITE_ONCE(conn->ping_task, task);
++
+ 	if (!ihost->workq) {
+ 		if (iscsi_prep_mgmt_task(conn, task))
+ 			goto free_task;
+@@ -941,8 +944,11 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+         struct iscsi_nopout hdr;
+ 	struct iscsi_task *task;
+ 
+-	if (!rhdr && conn->ping_task)
+-		return -EINVAL;
++	if (!rhdr) {
++		if (READ_ONCE(conn->ping_task))
++			return -EINVAL;
++		WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
++	}
+ 
+ 	memset(&hdr, 0, sizeof(struct iscsi_nopout));
+ 	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
+@@ -957,11 +963,12 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ 
+ 	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+ 	if (!task) {
++		if (!rhdr)
++			WRITE_ONCE(conn->ping_task, NULL);
+ 		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
+ 		return -EIO;
+ 	} else if (!rhdr) {
+ 		/* only track our nops */
+-		conn->ping_task = task;
+ 		conn->last_ping = jiffies;
+ 	}
+ 
+@@ -984,7 +991,7 @@ static int iscsi_nop_out_rsp(struct iscsi_task *task,
+ 	struct iscsi_conn *conn = task->conn;
+ 	int rc = 0;
+ 
+-	if (conn->ping_task != task) {
++	if (READ_ONCE(conn->ping_task) != task) {
+ 		/*
+ 		 * If this is not in response to one of our
+ 		 * nops then it must be from userspace.
+@@ -1923,7 +1930,7 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
+  */
+ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
+ {
+-	if (conn->ping_task &&
++	if (READ_ONCE(conn->ping_task) &&
+ 	    time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+ 			   (conn->ping_timeout * HZ), jiffies))
+ 		return 1;
+@@ -2058,7 +2065,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+ 	 * Checking the transport already or nop from a cmd timeout still
+ 	 * running
+ 	 */
+-	if (conn->ping_task) {
++	if (READ_ONCE(conn->ping_task)) {
+ 		task->have_checked_conn = true;
+ 		rc = BLK_EH_RESET_TIMER;
+ 		goto done;
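
The libiscsi hunks above turn conn->ping_task into a lockless published pointer: all accesses go through READ_ONCE()/WRITE_ONCE(), and the INVALID_SCSI_TASK sentinel reserves the slot before the real task exists so a second nop-out cannot be queued in between. The sketch below mirrors only the shape of that reserve-then-publish pattern; the demo_* names are hypothetical.

/* Sketch of the reserve-then-publish pattern above. */
#include <linux/compiler.h>
#include <linux/errno.h>

struct demo_task { int id; };

#define DEMO_RESERVED	((struct demo_task *)-1L)	/* sentinel value */

struct demo_conn {
	struct demo_task *ping_task;	/* written/read without a lock */
};

static int demo_start_ping(struct demo_conn *conn)
{
	if (READ_ONCE(conn->ping_task))
		return -EINVAL;		/* a ping is already in flight */

	WRITE_ONCE(conn->ping_task, DEMO_RESERVED);	/* reserve the slot */
	return 0;
}

static void demo_publish_ping(struct demo_conn *conn, struct demo_task *task)
{
	/* Replace the sentinel with the real task once it is allocated. */
	if (READ_ONCE(conn->ping_task) == DEMO_RESERVED)
		WRITE_ONCE(conn->ping_task, task);
}

static void demo_complete_ping(struct demo_conn *conn, struct demo_task *task)
{
	if (READ_ONCE(conn->ping_task) == task)
		WRITE_ONCE(conn->ping_task, NULL);
}
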
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 0772327f87d93..b6ce880ddd153 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -8160,11 +8160,7 @@ int ufshcd_shutdown(struct ufs_hba *hba)
+ 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+ 		goto out;
+ 
+-	if (pm_runtime_suspended(hba->dev)) {
+-		ret = ufshcd_runtime_resume(hba);
+-		if (ret)
+-			goto out;
+-	}
++	pm_runtime_get_sync(hba->dev);
+ 
+ 	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+ out:
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index d0afe0b1599fd..8a4be34bccfd2 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -1213,7 +1213,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 	if (!of_match_node(bcm_qspi_of_match, dev->of_node))
+ 		return -ENODEV;
+ 
+-	master = spi_alloc_master(dev, sizeof(struct bcm_qspi));
++	master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
+ 	if (!master) {
+ 		dev_err(dev, "error allocating spi_master\n");
+ 		return -ENOMEM;
+@@ -1252,21 +1252,17 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 
+ 	if (res) {
+ 		qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
+-		if (IS_ERR(qspi->base[MSPI])) {
+-			ret = PTR_ERR(qspi->base[MSPI]);
+-			goto qspi_resource_err;
+-		}
++		if (IS_ERR(qspi->base[MSPI]))
++			return PTR_ERR(qspi->base[MSPI]);
+ 	} else {
+-		goto qspi_resource_err;
++		return 0;
+ 	}
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
+ 	if (res) {
+ 		qspi->base[BSPI]  = devm_ioremap_resource(dev, res);
+-		if (IS_ERR(qspi->base[BSPI])) {
+-			ret = PTR_ERR(qspi->base[BSPI]);
+-			goto qspi_resource_err;
+-		}
++		if (IS_ERR(qspi->base[BSPI]))
++			return PTR_ERR(qspi->base[BSPI]);
+ 		qspi->bspi_mode = true;
+ 	} else {
+ 		qspi->bspi_mode = false;
+@@ -1277,18 +1273,14 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs_reg");
+ 	if (res) {
+ 		qspi->base[CHIP_SELECT]  = devm_ioremap_resource(dev, res);
+-		if (IS_ERR(qspi->base[CHIP_SELECT])) {
+-			ret = PTR_ERR(qspi->base[CHIP_SELECT]);
+-			goto qspi_resource_err;
+-		}
++		if (IS_ERR(qspi->base[CHIP_SELECT]))
++			return PTR_ERR(qspi->base[CHIP_SELECT]);
+ 	}
+ 
+ 	qspi->dev_ids = kcalloc(num_irqs, sizeof(struct bcm_qspi_dev_id),
+ 				GFP_KERNEL);
+-	if (!qspi->dev_ids) {
+-		ret = -ENOMEM;
+-		goto qspi_resource_err;
+-	}
++	if (!qspi->dev_ids)
++		return -ENOMEM;
+ 
+ 	for (val = 0; val < num_irqs; val++) {
+ 		irq = -1;
+@@ -1357,7 +1349,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 	qspi->xfer_mode.addrlen = -1;
+ 	qspi->xfer_mode.hp = -1;
+ 
+-	ret = devm_spi_register_master(&pdev->dev, master);
++	ret = spi_register_master(master);
+ 	if (ret < 0) {
+ 		dev_err(dev, "can't register master\n");
+ 		goto qspi_reg_err;
+@@ -1370,8 +1362,6 @@ qspi_reg_err:
+ 	clk_disable_unprepare(qspi->clk);
+ qspi_probe_err:
+ 	kfree(qspi->dev_ids);
+-qspi_resource_err:
+-	spi_master_put(master);
+ 	return ret;
+ }
+ /* probe function to be called by SoC specific platform driver probe */
+@@ -1381,10 +1371,10 @@ int bcm_qspi_remove(struct platform_device *pdev)
+ {
+ 	struct bcm_qspi *qspi = platform_get_drvdata(pdev);
+ 
++	spi_unregister_master(qspi->master);
+ 	bcm_qspi_hw_uninit(qspi);
+ 	clk_disable_unprepare(qspi->clk);
+ 	kfree(qspi->dev_ids);
+-	spi_unregister_master(qspi->master);
+ 
+ 	return 0;
+ }
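
The bcm-qspi conversion above (and the bcm2835/bcm2835aux ones that follow) switches to devm_spi_alloc_master(), so early probe error paths can simply return instead of calling spi_master_put(), and pairs spi_register_master() with an explicit spi_unregister_master() first thing in remove so transfers stop before the hardware is torn down. A minimal probe/remove sketch of that lifetime follows; the demo_* names and private struct are hypothetical.

/* Sketch of the devm-managed SPI controller lifetime used above. */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct demo_spi { void __iomem *regs; };

static int demo_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct demo_spi *priv;
	int ret;

	/* Freed automatically when probe fails or the device is unbound. */
	master = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
	if (!master)
		return -ENOMEM;

	priv = spi_master_get_devdata(master);

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);	/* no spi_master_put() needed */

	platform_set_drvdata(pdev, master);

	/* Registered manually, so remove() controls the teardown order. */
	ret = spi_register_master(master);
	if (ret)
		return ret;

	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);

	spi_unregister_master(master);	/* stop new transfers first */
	/* then quiesce the hardware, disable clocks, free IDs, ... */
	return 0;
}
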
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index 9ae1c96f4d3d4..5bc97b22491cd 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -1264,7 +1264,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
+ 	struct bcm2835_spi *bs;
+ 	int err;
+ 
+-	ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
++	ctlr = devm_spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
+ 						  dma_get_cache_alignment()));
+ 	if (!ctlr)
+ 		return -ENOMEM;
+@@ -1284,23 +1284,19 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
+ 	bs = spi_controller_get_devdata(ctlr);
+ 
+ 	bs->regs = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(bs->regs)) {
+-		err = PTR_ERR(bs->regs);
+-		goto out_controller_put;
+-	}
++	if (IS_ERR(bs->regs))
++		return PTR_ERR(bs->regs);
+ 
+ 	bs->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(bs->clk)) {
+ 		err = PTR_ERR(bs->clk);
+ 		dev_err(&pdev->dev, "could not get clk: %d\n", err);
+-		goto out_controller_put;
++		return err;
+ 	}
+ 
+ 	bs->irq = platform_get_irq(pdev, 0);
+-	if (bs->irq <= 0) {
+-		err = bs->irq ? bs->irq : -ENODEV;
+-		goto out_controller_put;
+-	}
++	if (bs->irq <= 0)
++		return bs->irq ? bs->irq : -ENODEV;
+ 
+ 	clk_prepare_enable(bs->clk);
+ 
+@@ -1330,8 +1326,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
+ 
+ out_clk_disable:
+ 	clk_disable_unprepare(bs->clk);
+-out_controller_put:
+-	spi_controller_put(ctlr);
+ 	return err;
+ }
+ 
+diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
+index 1e5aac1581aa4..8211107bfbe82 100644
+--- a/drivers/spi/spi-bcm2835aux.c
++++ b/drivers/spi/spi-bcm2835aux.c
+@@ -529,8 +529,9 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
+ 
+ 	bs->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(bs->clk)) {
++		err = PTR_ERR(bs->clk);
+ 		dev_err(&pdev->dev, "could not get clk: %d\n", err);
+-		return PTR_ERR(bs->clk);
++		return err;
+ 	}
+ 
+ 	bs->irq = platform_get_irq(pdev, 0);
+diff --git a/drivers/staging/ralink-gdma/Kconfig b/drivers/staging/ralink-gdma/Kconfig
+index 54e8029e6b1af..0017376234e28 100644
+--- a/drivers/staging/ralink-gdma/Kconfig
++++ b/drivers/staging/ralink-gdma/Kconfig
+@@ -2,6 +2,7 @@
+ config DMA_RALINK
+ 	tristate "RALINK DMA support"
+ 	depends on RALINK && !SOC_RT288X
++	depends on DMADEVICES
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+ 
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index bca183369ad8b..3403667a9592f 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -483,8 +483,7 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
+ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ {
+ 	spin_lock_bh(&conn->cmd_lock);
+-	if (!list_empty(&cmd->i_conn_node) &&
+-	    !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
++	if (!list_empty(&cmd->i_conn_node))
+ 		list_del_init(&cmd->i_conn_node);
+ 	spin_unlock_bh(&conn->cmd_lock);
+ 
+@@ -4082,12 +4081,22 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+ 	spin_lock_bh(&conn->cmd_lock);
+ 	list_splice_init(&conn->conn_cmd_list, &tmp_list);
+ 
+-	list_for_each_entry(cmd, &tmp_list, i_conn_node) {
++	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
+ 		struct se_cmd *se_cmd = &cmd->se_cmd;
+ 
+ 		if (se_cmd->se_tfo != NULL) {
+ 			spin_lock_irq(&se_cmd->t_state_lock);
+-			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++			if (se_cmd->transport_state & CMD_T_ABORTED) {
++				/*
++				 * LIO's abort path owns the cleanup for this,
++				 * so put it back on the list and let
++				 * aborted_task handle it.
++				 */
++				list_move_tail(&cmd->i_conn_node,
++					       &conn->conn_cmd_list);
++			} else {
++				se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++			}
+ 			spin_unlock_irq(&se_cmd->t_state_lock);
+ 		}
+ 	}
+diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
+index cf2367ba08d63..aadedec3bfe7b 100644
+--- a/drivers/tee/optee/call.c
++++ b/drivers/tee/optee/call.c
+@@ -530,7 +530,8 @@ void optee_free_pages_list(void *list, size_t num_entries)
+ static bool is_normal_memory(pgprot_t p)
+ {
+ #if defined(CONFIG_ARM)
+-	return (pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC;
++	return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
++		((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
+ #elif defined(CONFIG_ARM64)
+ 	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
+ #else
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index e26a6f18f4210..35e89460b9ca8 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -482,11 +482,11 @@ static void snoop_urb(struct usb_device *udev,
+ 
+ 	if (userurb) {		/* Async */
+ 		if (when == SUBMIT)
+-			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
++			dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
+ 					"length %u\n",
+ 					userurb, ep, t, d, length);
+ 		else
+-			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
++			dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
+ 					"actual_length %u status %d\n",
+ 					userurb, ep, t, d, length,
+ 					timeout_or_status);
+@@ -1992,7 +1992,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
+ 	if (as) {
+ 		int retval;
+ 
+-		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
++		snoop(&ps->dev->dev, "reap %px\n", as->userurb);
+ 		retval = processcompl(as, (void __user * __user *)arg);
+ 		free_async(as);
+ 		return retval;
+@@ -2009,7 +2009,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
+ 
+ 	as = async_getcompleted(ps);
+ 	if (as) {
+-		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
++		snoop(&ps->dev->dev, "reap %px\n", as->userurb);
+ 		retval = processcompl(as, (void __user * __user *)arg);
+ 		free_async(as);
+ 	} else {
+@@ -2139,7 +2139,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
+ 	if (as) {
+ 		int retval;
+ 
+-		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
++		snoop(&ps->dev->dev, "reap %px\n", as->userurb);
+ 		retval = processcompl_compat(as, (void __user * __user *)arg);
+ 		free_async(as);
+ 		return retval;
+@@ -2156,7 +2156,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
+ 
+ 	as = async_getcompleted(ps);
+ 	if (as) {
+-		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
++		snoop(&ps->dev->dev, "reap %px\n", as->userurb);
+ 		retval = processcompl_compat(as, (void __user * __user *)arg);
+ 		free_async(as);
+ 	} else {
+@@ -2621,7 +2621,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
+ #endif
+ 
+ 	case USBDEVFS_DISCARDURB:
+-		snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
++		snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p);
+ 		ret = proc_unlinkurb(ps, p);
+ 		break;
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 5ad14cdd97623..b55c3a699fc65 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -348,6 +348,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Guillemot Webcam Hercules Dualpix Exchange*/
+ 	{ USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Guillemot Hercules DJ Console audio card (BZ 208357) */
++	{ USB_DEVICE(0x06f8, 0xb000), .driver_info =
++			USB_QUIRK_ENDPOINT_BLACKLIST },
++
+ 	/* Midiman M-Audio Keystation 88es */
+ 	{ USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+@@ -421,6 +425,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x1532, 0x0116), .driver_info =
+ 			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ 
++	/* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
++	{ USB_DEVICE(0x17ef, 0xa012), .driver_info =
++			USB_QUIRK_DISCONNECT_SUSPEND },
++
+ 	/* BUILDWIN Photo Frame */
+ 	{ USB_DEVICE(0x1908, 0x1315), .driver_info =
+ 			USB_QUIRK_HONOR_BNUMINTERFACES },
+@@ -521,6 +529,8 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+  * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
+  */
+ static const struct usb_device_id usb_endpoint_blacklist[] = {
++	{ USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x01 },
++	{ USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x81 },
+ 	{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
+ 	{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
+ 	{ }
+diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
+index 46af0aa07e2e3..b2b5b0689667b 100644
+--- a/drivers/usb/gadget/function/f_midi.c
++++ b/drivers/usb/gadget/function/f_midi.c
+@@ -1315,7 +1315,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
+ 	midi->id = kstrdup(opts->id, GFP_KERNEL);
+ 	if (opts->id && !midi->id) {
+ 		status = -ENOMEM;
+-		goto setup_fail;
++		goto midi_free;
+ 	}
+ 	midi->in_ports = opts->in_ports;
+ 	midi->out_ports = opts->out_ports;
+@@ -1327,7 +1327,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
+ 
+ 	status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL);
+ 	if (status)
+-		goto setup_fail;
++		goto midi_free;
+ 
+ 	spin_lock_init(&midi->transmit_lock);
+ 
+@@ -1343,9 +1343,13 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
+ 
+ 	return &midi->func;
+ 
++midi_free:
++	if (midi)
++		kfree(midi->id);
++	kfree(midi);
+ setup_fail:
+ 	mutex_unlock(&opts->lock);
+-	kfree(midi);
++
+ 	return ERR_PTR(status);
+ }
+ 
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index 238f555fe494a..cabcbb47f0ac1 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -2040,6 +2040,9 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
+ 	return 0;
+ 
+ Enomem:
++	kfree(CHIP);
++	CHIP = NULL;
++
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index f63f84a257256..98c484149ac7f 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -320,7 +320,7 @@ static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
+ 	return 1;
+ }
+ 
+-static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
++static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
+ {
+ 	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
+ 				struct vhost_scsi_cmd, tvc_se_cmd);
+@@ -340,6 +340,16 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
+ 	target_free_tag(se_sess, se_cmd);
+ }
+ 
++static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
++{
++	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
++					struct vhost_scsi_cmd, tvc_se_cmd);
++	struct vhost_scsi *vs = cmd->tvc_vhost;
++
++	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
++	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
++}
++
+ static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
+ {
+ 	return 0;
+@@ -362,28 +372,15 @@ static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
+ 	return 0;
+ }
+ 
+-static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
+-{
+-	struct vhost_scsi *vs = cmd->tvc_vhost;
+-
+-	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
+-
+-	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+-}
+-
+ static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
+ {
+-	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+-				struct vhost_scsi_cmd, tvc_se_cmd);
+-	vhost_scsi_complete_cmd(cmd);
++	transport_generic_free_cmd(se_cmd, 0);
+ 	return 0;
+ }
+ 
+ static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
+ {
+-	struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+-				struct vhost_scsi_cmd, tvc_se_cmd);
+-	vhost_scsi_complete_cmd(cmd);
++	transport_generic_free_cmd(se_cmd, 0);
+ 	return 0;
+ }
+ 
+@@ -429,15 +426,6 @@ vhost_scsi_allocate_evt(struct vhost_scsi *vs,
+ 	return evt;
+ }
+ 
+-static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
+-{
+-	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
+-
+-	/* TODO locking against target/backend threads? */
+-	transport_generic_free_cmd(se_cmd, 0);
+-
+-}
+-
+ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
+ {
+ 	return target_put_sess_cmd(se_cmd);
+@@ -556,7 +544,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+ 		} else
+ 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
+ 
+-		vhost_scsi_free_cmd(cmd);
++		vhost_scsi_release_cmd_res(se_cmd);
+ 	}
+ 
+ 	vq = -1;
+@@ -1088,7 +1076,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 						      &prot_iter, exp_data_len,
+ 						      &data_iter))) {
+ 				vq_err(vq, "Failed to map iov to sgl\n");
+-				vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
++				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
+ 				goto err;
+ 			}
+ 		}
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 2dcb7c58b31e1..81671272aa58f 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -703,7 +703,12 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ 		goto err1;
+ 	}
+ 
+-	fb_virt = ioremap(par->mem->start, screen_fb_size);
++	/*
++	 * Map the VRAM cacheable for performance. This is also required for
++	 * VM Connect to display properly for ARM64 Linux VM, as the host also
++	 * maps the VRAM cacheable.
++	 */
++	fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
+ 	if (!fb_virt)
+ 		goto err2;
+ 
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 04fd02e6124dd..d9246fb8cea65 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -488,13 +488,13 @@ next2:
+ 			break;
+ 	}
+ out:
++	btrfs_free_path(path);
+ 	fs_info->qgroup_flags |= flags;
+ 	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+ 		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ 	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
+ 		 ret >= 0)
+ 		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
+-	btrfs_free_path(path);
+ 
+ 	if (ret < 0) {
+ 		ulist_free(fs_info->qgroup_ulist);
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 48e46323d519c..9feb8a1793efb 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -913,6 +913,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
+ 			    "invalid root item size, have %u expect %zu or %u",
+ 			    btrfs_item_size_nr(leaf, slot), sizeof(ri),
+ 			    btrfs_legacy_root_item_size());
++		return -EUCLEAN;
+ 	}
+ 
+ 	/*
+@@ -1268,6 +1269,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ 	"invalid item size, have %u expect aligned to %zu for key type %u",
+ 			    btrfs_item_size_nr(leaf, slot),
+ 			    sizeof(*dref), key->type);
++		return -EUCLEAN;
+ 	}
+ 	if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
+ 		generic_err(leaf, slot,
+@@ -1296,6 +1298,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
+ 			extent_err(leaf, slot,
+ 	"invalid extent data backref offset, have %llu expect aligned to %u",
+ 				   offset, leaf->fs_info->sectorsize);
++			return -EUCLEAN;
+ 		}
+ 	}
+ 	return 0;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 808c5985904ed..457f8f858a3f0 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1122,7 +1122,13 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 			if (device->bdev != path_bdev) {
+ 				bdput(path_bdev);
+ 				mutex_unlock(&fs_devices->device_list_mutex);
+-				btrfs_warn_in_rcu(device->fs_info,
++				/*
++				 * device->fs_info may not be reliable here, so
++				 * pass in a NULL instead. This avoids a
++				 * possible use-after-free when the fs_info and
++				 * fs_info->sb are already torn down.
++				 */
++				btrfs_warn_in_rcu(NULL,
+ 	"duplicate device %s devid %llu generation %llu scanned by %s (%d)",
+ 						  path, devid, found_transid,
+ 						  current->comm,
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index 1619af216677c..1f55072aa3023 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -1198,6 +1198,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+ 		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
+ 	} else if (mode_from_special_sid) {
+ 		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, true);
++		kfree(pntsd);
+ 	} else {
+ 		/* get approximated mode from ACL */
+ 		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr, false);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 776029a57e717..6211f8b731a97 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -259,7 +259,7 @@ smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
+ }
+ 
+ static struct mid_q_entry *
+-smb2_find_mid(struct TCP_Server_Info *server, char *buf)
++__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
+ {
+ 	struct mid_q_entry *mid;
+ 	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
+@@ -276,6 +276,10 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
+ 		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
+ 		    (mid->command == shdr->Command)) {
+ 			kref_get(&mid->refcount);
++			if (dequeue) {
++				list_del_init(&mid->qhead);
++				mid->mid_flags |= MID_DELETED;
++			}
+ 			spin_unlock(&GlobalMid_Lock);
+ 			return mid;
+ 		}
+@@ -284,6 +288,18 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
+ 	return NULL;
+ }
+ 
++static struct mid_q_entry *
++smb2_find_mid(struct TCP_Server_Info *server, char *buf)
++{
++	return __smb2_find_mid(server, buf, false);
++}
++
++static struct mid_q_entry *
++smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
++{
++	return __smb2_find_mid(server, buf, true);
++}
++
+ static void
+ smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
+ {
+@@ -3979,7 +3995,8 @@ init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
+ static int
+ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 		 char *buf, unsigned int buf_len, struct page **pages,
+-		 unsigned int npages, unsigned int page_data_size)
++		 unsigned int npages, unsigned int page_data_size,
++		 bool is_offloaded)
+ {
+ 	unsigned int data_offset;
+ 	unsigned int data_len;
+@@ -4001,7 +4018,8 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 
+ 	if (server->ops->is_session_expired &&
+ 	    server->ops->is_session_expired(buf)) {
+-		cifs_reconnect(server);
++		if (!is_offloaded)
++			cifs_reconnect(server);
+ 		wake_up(&server->response_q);
+ 		return -1;
+ 	}
+@@ -4026,7 +4044,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 		cifs_dbg(FYI, "%s: server returned error %d\n",
+ 			 __func__, rdata->result);
+ 		/* normal error on read response */
+-		dequeue_mid(mid, false);
++		if (is_offloaded)
++			mid->mid_state = MID_RESPONSE_RECEIVED;
++		else
++			dequeue_mid(mid, false);
+ 		return 0;
+ 	}
+ 
+@@ -4050,7 +4071,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
+ 			 __func__, data_offset);
+ 		rdata->result = -EIO;
+-		dequeue_mid(mid, rdata->result);
++		if (is_offloaded)
++			mid->mid_state = MID_RESPONSE_MALFORMED;
++		else
++			dequeue_mid(mid, rdata->result);
+ 		return 0;
+ 	}
+ 
+@@ -4066,21 +4090,30 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
+ 				 __func__, data_offset);
+ 			rdata->result = -EIO;
+-			dequeue_mid(mid, rdata->result);
++			if (is_offloaded)
++				mid->mid_state = MID_RESPONSE_MALFORMED;
++			else
++				dequeue_mid(mid, rdata->result);
+ 			return 0;
+ 		}
+ 
+ 		if (data_len > page_data_size - pad_len) {
+ 			/* data_len is corrupt -- discard frame */
+ 			rdata->result = -EIO;
+-			dequeue_mid(mid, rdata->result);
++			if (is_offloaded)
++				mid->mid_state = MID_RESPONSE_MALFORMED;
++			else
++				dequeue_mid(mid, rdata->result);
+ 			return 0;
+ 		}
+ 
+ 		rdata->result = init_read_bvec(pages, npages, page_data_size,
+ 					       cur_off, &bvec);
+ 		if (rdata->result != 0) {
+-			dequeue_mid(mid, rdata->result);
++			if (is_offloaded)
++				mid->mid_state = MID_RESPONSE_MALFORMED;
++			else
++				dequeue_mid(mid, rdata->result);
+ 			return 0;
+ 		}
+ 
+@@ -4095,7 +4128,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 		/* read response payload cannot be in both buf and pages */
+ 		WARN_ONCE(1, "buf can not contain only a part of read data");
+ 		rdata->result = -EIO;
+-		dequeue_mid(mid, rdata->result);
++		if (is_offloaded)
++			mid->mid_state = MID_RESPONSE_MALFORMED;
++		else
++			dequeue_mid(mid, rdata->result);
+ 		return 0;
+ 	}
+ 
+@@ -4106,7 +4142,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+ 	if (length < 0)
+ 		return length;
+ 
+-	dequeue_mid(mid, false);
++	if (is_offloaded)
++		mid->mid_state = MID_RESPONSE_RECEIVED;
++	else
++		dequeue_mid(mid, false);
+ 	return length;
+ }
+ 
+@@ -4135,15 +4174,34 @@ static void smb2_decrypt_offload(struct work_struct *work)
+ 	}
+ 
+ 	dw->server->lstrp = jiffies;
+-	mid = smb2_find_mid(dw->server, dw->buf);
++	mid = smb2_find_dequeue_mid(dw->server, dw->buf);
+ 	if (mid == NULL)
+ 		cifs_dbg(FYI, "mid not found\n");
+ 	else {
+ 		mid->decrypted = true;
+ 		rc = handle_read_data(dw->server, mid, dw->buf,
+ 				      dw->server->vals->read_rsp_size,
+-				      dw->ppages, dw->npages, dw->len);
+-		mid->callback(mid);
++				      dw->ppages, dw->npages, dw->len,
++				      true);
++		if (rc >= 0) {
++#ifdef CONFIG_CIFS_STATS2
++			mid->when_received = jiffies;
++#endif
++			mid->callback(mid);
++		} else {
++			spin_lock(&GlobalMid_Lock);
++			if (dw->server->tcpStatus == CifsNeedReconnect) {
++				mid->mid_state = MID_RETRY_NEEDED;
++				spin_unlock(&GlobalMid_Lock);
++				mid->callback(mid);
++			} else {
++				mid->mid_state = MID_REQUEST_SUBMITTED;
++				mid->mid_flags &= ~(MID_DELETED);
++				list_add_tail(&mid->qhead,
++					&dw->server->pending_mid_q);
++				spin_unlock(&GlobalMid_Lock);
++			}
++		}
+ 		cifs_mid_q_entry_release(mid);
+ 	}
+ 
+@@ -4246,7 +4304,7 @@ non_offloaded_decrypt:
+ 		(*mid)->decrypted = true;
+ 		rc = handle_read_data(server, *mid, buf,
+ 				      server->vals->read_rsp_size,
+-				      pages, npages, len);
++				      pages, npages, len, false);
+ 	}
+ 
+ free_pages:
+@@ -4391,7 +4449,7 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ 	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
+ 
+ 	return handle_read_data(server, mid, buf, server->pdu_size,
+-				NULL, 0, 0);
++				NULL, 0, 0, false);
+ }
+ 
+ static int
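
The cifs hunks above add a "find and dequeue" variant of the mid lookup: when a response will be completed on the offload worker, the entry is removed from the pending list under GlobalMid_Lock at lookup time so the receive thread cannot also pick it up, and on a soft failure the worker re-queues it. The sketch below shows only the lookup-with-optional-dequeue shape; struct demo_entry, demo_find() and the list/lock names are hypothetical.

/* Reduced sketch of the optional-dequeue lookup above. */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head qhead;
	u64 id;
};

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_pending);

static struct demo_entry *demo_find(u64 id, bool dequeue)
{
	struct demo_entry *e;

	spin_lock(&demo_lock);
	list_for_each_entry(e, &demo_pending, qhead) {
		if (e->id != id)
			continue;
		if (dequeue)
			/* Claim it while still holding the lock, so no
			 * other thread can process it as well.
			 */
			list_del_init(&e->qhead);
		spin_unlock(&demo_lock);
		return e;
	}
	spin_unlock(&demo_lock);
	return NULL;
}
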
+diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
+index 96c0c86f3fffe..0297ad95eb5cc 100644
+--- a/fs/efivarfs/inode.c
++++ b/fs/efivarfs/inode.c
+@@ -7,6 +7,7 @@
+ #include <linux/efi.h>
+ #include <linux/fs.h>
+ #include <linux/ctype.h>
++#include <linux/kmemleak.h>
+ #include <linux/slab.h>
+ #include <linux/uuid.h>
+ 
+@@ -103,6 +104,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ 	var->var.VariableName[i] = '\0';
+ 
+ 	inode->i_private = var;
++	kmemleak_ignore(var);
+ 
+ 	err = efivar_entry_add(var, &efivarfs_list);
+ 	if (err)
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index edcd6769a94b4..9760a52800b42 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -21,7 +21,6 @@ LIST_HEAD(efivarfs_list);
+ static void efivarfs_evict_inode(struct inode *inode)
+ {
+ 	clear_inode(inode);
+-	kfree(inode->i_private);
+ }
+ 
+ static const struct super_operations efivarfs_ops = {
+diff --git a/fs/proc/self.c b/fs/proc/self.c
+index 32af065397f80..582336862d258 100644
+--- a/fs/proc/self.c
++++ b/fs/proc/self.c
+@@ -16,6 +16,13 @@ static const char *proc_self_get_link(struct dentry *dentry,
+ 	pid_t tgid = task_tgid_nr_ns(current, ns);
+ 	char *name;
+ 
++	/*
++	 * Not currently supported. Once we can inherit all of struct pid,
++	 * we can allow this.
++	 */
++	if (current->flags & PF_KTHREAD)
++		return ERR_PTR(-EOPNOTSUPP);
++
+ 	if (!tgid)
+ 		return ERR_PTR(-ENOENT);
+ 	/* max length of unsigned int in decimal + NULL term */
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index ea39a0b54c637..15f77361eb130 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -1159,6 +1159,19 @@ static inline bool arch_has_pfn_modify_check(void)
+ 
+ #endif /* !__ASSEMBLY__ */
+ 
++#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
++#ifdef CONFIG_PHYS_ADDR_T_64BIT
++/*
++ * ZSMALLOC needs to know the highest PFN on 32-bit architectures
++ * with physical address space extension, but falls back to
++ * BITS_PER_LONG otherwise.
++ */
++#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
++#else
++#define MAX_POSSIBLE_PHYSMEM_BITS 32
++#endif
++#endif
++
+ #ifndef has_transparent_hugepage
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define has_transparent_hugepage() 1
+diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
+index 77ebb61faf486..4c0e6539effdd 100644
+--- a/include/linux/netfilter.h
++++ b/include/linux/netfilter.h
+@@ -316,7 +316,7 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ 
+ 	INIT_LIST_HEAD(&sublist);
+ 	list_for_each_entry_safe(skb, next, head, list) {
+-		list_del(&skb->list);
++		skb_list_del_init(skb);
+ 		if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
+ 			list_add_tail(&skb->list, &sublist);
+ 	}
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index c25fb86ffae95..b3bbd10eb3f07 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -132,6 +132,9 @@ struct iscsi_task {
+ 	void			*dd_data;	/* driver/transport data */
+ };
+ 
++/* invalid scsi_task pointer */
++#define	INVALID_SCSI_TASK	(struct iscsi_task *)-1l
++
+ static inline int iscsi_task_has_unsol_data(struct iscsi_task *task)
+ {
+ 	return task->unsol_r2t.data_length > task->unsol_r2t.sent;
+diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
+index 67434278b81dd..a8af22e469ce5 100644
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -192,7 +192,7 @@ TRACE_EVENT(inode_foreign_history,
+ 	),
+ 
+ 	TP_fast_assign(
+-		strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
++		strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
+ 		__entry->ino		= inode->i_ino;
+ 		__entry->cgroup_ino	= __trace_wbc_assign_cgroup(wbc);
+ 		__entry->history	= history;
+@@ -221,7 +221,7 @@ TRACE_EVENT(inode_switch_wbs,
+ 	),
+ 
+ 	TP_fast_assign(
+-		strncpy(__entry->name,	bdi_dev_name(old_wb->bdi), 32);
++		strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
+ 		__entry->ino		= inode->i_ino;
+ 		__entry->old_cgroup_ino	= __trace_wb_assign_cgroup(old_wb);
+ 		__entry->new_cgroup_ino	= __trace_wb_assign_cgroup(new_wb);
+@@ -254,7 +254,7 @@ TRACE_EVENT(track_foreign_dirty,
+ 		struct address_space *mapping = page_mapping(page);
+ 		struct inode *inode = mapping ? mapping->host : NULL;
+ 
+-		strncpy(__entry->name,	bdi_dev_name(wb->bdi), 32);
++		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ 		__entry->bdi_id		= wb->bdi->id;
+ 		__entry->ino		= inode ? inode->i_ino : 0;
+ 		__entry->memcg_id	= wb->memcg_css->id;
+@@ -287,7 +287,7 @@ TRACE_EVENT(flush_foreign,
+ 	),
+ 
+ 	TP_fast_assign(
+-		strncpy(__entry->name,	bdi_dev_name(wb->bdi), 32);
++		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ 		__entry->cgroup_ino	= __trace_wb_assign_cgroup(wb);
+ 		__entry->frn_bdi_id	= frn_bdi_id;
+ 		__entry->frn_memcg_id	= frn_memcg_id;
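
The tracepoint hunks above replace strncpy() with strscpy_pad(): strncpy() leaves the fixed-size field without a terminating NUL whenever the source is at least as long as the field, while strscpy_pad() always NUL-terminates and zero-fills the rest of the destination, which is what a "%s"-printed trace field needs. A small sketch of the difference, with a hypothetical 8-byte field:

/* Sketch of strncpy() vs strscpy_pad() for a fixed-size trace field. */
#include <linux/string.h>

static void demo_copy_name(char dst[8], const char *src)
{
	/* strncpy(dst, src, 8) would leave dst unterminated whenever
	 * strlen(src) >= 8, so a later "%s" print could run past the
	 * buffer. strscpy_pad() truncates safely and zeroes the rest.
	 */
	strscpy_pad(dst, src, 8);
}
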
+diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
+index a2c006a364e0b..24f3371ad8262 100644
+--- a/include/uapi/linux/wireless.h
++++ b/include/uapi/linux/wireless.h
+@@ -74,7 +74,11 @@
+ #include <linux/socket.h>		/* for "struct sockaddr" et al	*/
+ #include <linux/if.h>			/* for IFNAMSIZ and co... */
+ 
+-#include <stddef.h>                     /* for offsetof */
++#ifdef __KERNEL__
++#	include <linux/stddef.h>	/* for offsetof */
++#else
++#	include <stddef.h>		/* for offsetof */
++#endif
+ 
+ /***************************** VERSION *****************************/
+ /*
+diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
+index 9eee32f5e4077..a93c0decfdd53 100644
+--- a/include/uapi/sound/skl-tplg-interface.h
++++ b/include/uapi/sound/skl-tplg-interface.h
+@@ -18,6 +18,8 @@
+  */
+ #define SKL_CONTROL_TYPE_BYTE_TLV	0x100
+ #define SKL_CONTROL_TYPE_MIC_SELECT	0x102
++#define SKL_CONTROL_TYPE_MULTI_IO_SELECT	0x103
++#define SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC	0x104
+ 
+ #define HDA_SST_CFG_MAX	900 /* size of copier cfg*/
+ #define MAX_IN_QUEUE 8
+diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
+index 11941cf1adcc9..87f3de3d4e4ca 100644
+--- a/net/batman-adv/log.c
++++ b/net/batman-adv/log.c
+@@ -180,6 +180,7 @@ static const struct file_operations batadv_log_fops = {
+ 	.read           = batadv_log_read,
+ 	.poll           = batadv_log_poll,
+ 	.llseek         = no_llseek,
++	.owner          = THIS_MODULE,
+ };
+ 
+ /**
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 0a8220d30c992..ed2ab03cf971c 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -706,7 +706,7 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
+ 		cfg->fc_gw4 = *((__be32 *)via->rtvia_addr);
+ 		break;
+ 	case AF_INET6:
+-#ifdef CONFIG_IPV6
++#if IS_ENABLED(CONFIG_IPV6)
+ 		if (alen != sizeof(struct in6_addr)) {
+ 			NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_VIA");
+ 			return -EINVAL;
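
The fib_frontend change matters when IPv6 is built as a module: Kconfig then defines CONFIG_IPV6_MODULE rather than CONFIG_IPV6, so a plain #ifdef compiles the RTA_VIA IPv6 branch out, while IS_ENABLED() is true for both =y and =m. Below is a self-contained re-creation of the trick from include/linux/kconfig.h, simplified in that the runtime || stands in for the preprocessor-safe __or() used by the real header.

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)              ___is_defined(x)
#define ___is_defined(val)           ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option)  __is_defined(option)
#define IS_MODULE(option)   __is_defined(option##_MODULE)
#define IS_ENABLED(option)  (IS_BUILTIN(option) || IS_MODULE(option))

#define CONFIG_IPV6_MODULE 1    /* pretend: IPv6 built as a module (=m) */

int main(void)
{
	int via_ifdef = 0;
#ifdef CONFIG_IPV6
	via_ifdef = 1;
#endif
	printf("#ifdef CONFIG_IPV6 sees it: %d\n", via_ifdef);
	printf("IS_ENABLED(CONFIG_IPV6):    %d\n", IS_ENABLED(CONFIG_IPV6));
	return 0;
}

Compiling and running this prints 0 for the #ifdef probe and 1 for IS_ENABLED(), which is exactly the case the fix addresses.
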
+diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
+index 92a82e6b5fe62..38b9d74940835 100644
+--- a/sound/soc/intel/skylake/bxt-sst.c
++++ b/sound/soc/intel/skylake/bxt-sst.c
+@@ -17,7 +17,6 @@
+ #include "skl.h"
+ 
+ #define BXT_BASEFW_TIMEOUT	3000
+-#define BXT_INIT_TIMEOUT	300
+ #define BXT_ROM_INIT_TIMEOUT	70
+ #define BXT_IPC_PURGE_FW	0x01004000
+ 
+@@ -38,8 +37,6 @@
+ /* Delay before scheduling D0i3 entry */
+ #define BXT_D0I3_DELAY 5000
+ 
+-#define BXT_FW_ROM_INIT_RETRY 3
+-
+ static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
+ {
+ 	 return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
+diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c
+index 4f64f097e9ae3..c6abcd5aa67b9 100644
+--- a/sound/soc/intel/skylake/cnl-sst.c
++++ b/sound/soc/intel/skylake/cnl-sst.c
+@@ -57,18 +57,34 @@ static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize)
+ 	ctx->dsp_ops.stream_tag = stream_tag;
+ 	memcpy(ctx->dmab.area, fwdata, fwsize);
+ 
++	ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK);
++	if (ret < 0) {
++		dev_err(ctx->dev, "dsp core0 power up failed\n");
++		ret = -EIO;
++		goto base_fw_load_failed;
++	}
++
+ 	/* purge FW request */
+ 	sst_dsp_shim_write(ctx, CNL_ADSP_REG_HIPCIDR,
+ 			   CNL_ADSP_REG_HIPCIDR_BUSY | (CNL_IPC_PURGE |
+ 			   ((stream_tag - 1) << CNL_ROM_CTRL_DMA_ID)));
+ 
+-	ret = cnl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
++	ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
+ 	if (ret < 0) {
+-		dev_err(ctx->dev, "dsp boot core failed ret: %d\n", ret);
++		dev_err(ctx->dev, "Start dsp core failed ret: %d\n", ret);
+ 		ret = -EIO;
+ 		goto base_fw_load_failed;
+ 	}
+ 
++	ret = sst_dsp_register_poll(ctx, CNL_ADSP_REG_HIPCIDA,
++				    CNL_ADSP_REG_HIPCIDA_DONE,
++				    CNL_ADSP_REG_HIPCIDA_DONE,
++				    BXT_INIT_TIMEOUT, "HIPCIDA Done");
++	if (ret < 0) {
++		dev_err(ctx->dev, "timeout for purge request: %d\n", ret);
++		goto base_fw_load_failed;
++	}
++
+ 	/* enable interrupt */
+ 	cnl_ipc_int_enable(ctx);
+ 	cnl_ipc_op_int_enable(ctx);
+@@ -109,7 +125,7 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx)
+ {
+ 	struct firmware stripped_fw;
+ 	struct skl_dev *cnl = ctx->thread_context;
+-	int ret;
++	int ret, i;
+ 
+ 	if (!ctx->fw) {
+ 		ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
+@@ -131,12 +147,16 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx)
+ 	stripped_fw.size = ctx->fw->size;
+ 	skl_dsp_strip_extended_manifest(&stripped_fw);
+ 
+-	ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
+-	if (ret < 0) {
+-		dev_err(ctx->dev, "prepare firmware failed: %d\n", ret);
+-		goto cnl_load_base_firmware_failed;
++	for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) {
++		ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
++		if (!ret)
++			break;
++		dev_dbg(ctx->dev, "prepare firmware failed: %d\n", ret);
+ 	}
+ 
++	if (ret < 0)
++		goto cnl_load_base_firmware_failed;
++
+ 	ret = sst_transfer_fw_host_dma(ctx);
+ 	if (ret < 0) {
+ 		dev_err(ctx->dev, "transfer firmware failed: %d\n", ret);
+@@ -158,6 +178,7 @@ static int cnl_load_base_firmware(struct sst_dsp *ctx)
+ 	return 0;
+ 
+ cnl_load_base_firmware_failed:
++	dev_err(ctx->dev, "firmware load failed: %d\n", ret);
+ 	release_firmware(ctx->fw);
+ 	ctx->fw = NULL;
+ 
+diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
+index 19f328d71f244..d9c8f5cb389e3 100644
+--- a/sound/soc/intel/skylake/skl-nhlt.c
++++ b/sound/soc/intel/skylake/skl-nhlt.c
+@@ -182,7 +182,8 @@ void skl_nhlt_remove_sysfs(struct skl_dev *skl)
+ {
+ 	struct device *dev = &skl->pci->dev;
+ 
+-	sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
++	if (skl->nhlt)
++		sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
+ }
+ 
+ /*
+diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
+index cdfec0fca5773..1df9ef422f61d 100644
+--- a/sound/soc/intel/skylake/skl-sst-dsp.h
++++ b/sound/soc/intel/skylake/skl-sst-dsp.h
+@@ -67,6 +67,8 @@ struct skl_dev;
+ 
+ #define SKL_FW_INIT			0x1
+ #define SKL_FW_RFW_START		0xf
++#define BXT_FW_ROM_INIT_RETRY		3
++#define BXT_INIT_TIMEOUT		300
+ 
+ #define SKL_ADSPIC_IPC			1
+ #define SKL_ADSPIS_IPC			1
+diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
+index 4b114ece58c61..aa5833001fde5 100644
+--- a/sound/soc/intel/skylake/skl-topology.c
++++ b/sound/soc/intel/skylake/skl-topology.c
+@@ -579,6 +579,38 @@ static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
+ 	return ret;
+ }
+ 
++static bool skl_tplg_is_multi_fmt(struct skl_dev *skl, struct skl_pipe *pipe)
++{
++	struct skl_pipe_fmt *cur_fmt;
++	struct skl_pipe_fmt *next_fmt;
++	int i;
++
++	if (pipe->nr_cfgs <= 1)
++		return false;
++
++	if (pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
++		return true;
++
++	for (i = 0; i < pipe->nr_cfgs - 1; i++) {
++		if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) {
++			cur_fmt = &pipe->configs[i].out_fmt;
++			next_fmt = &pipe->configs[i + 1].out_fmt;
++		} else {
++			cur_fmt = &pipe->configs[i].in_fmt;
++			next_fmt = &pipe->configs[i + 1].in_fmt;
++		}
++
++		if (!CHECK_HW_PARAMS(cur_fmt->channels, cur_fmt->freq,
++				     cur_fmt->bps,
++				     next_fmt->channels,
++				     next_fmt->freq,
++				     next_fmt->bps))
++			return true;
++	}
++
++	return false;
++}
++
+ /*
+  * Here, we select pipe format based on the pipe type and pipe
+  * direction to determine the current config index for the pipeline.
+@@ -601,6 +633,14 @@ skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
+ 		return 0;
+ 	}
+ 
++	if (skl_tplg_is_multi_fmt(skl, pipe)) {
++		pipe->cur_config_idx = pipe->pipe_config_idx;
++		pipe->memory_pages = pconfig->mem_pages;
++		dev_dbg(skl->dev, "found pipe config idx:%d\n",
++			pipe->cur_config_idx);
++		return 0;
++	}
++
+ 	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
+ 		dev_dbg(skl->dev, "No conn_type detected, take 0th config\n");
+ 		pipe->cur_config_idx = 0;
+@@ -1315,6 +1355,68 @@ static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
+ 	return 0;
+ }
+ 
++static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
++					 struct snd_ctl_elem_value *ucontrol,
++					 bool is_set)
++{
++	struct snd_soc_component *component =
++		snd_soc_kcontrol_component(kcontrol);
++	struct hdac_bus *bus = snd_soc_component_get_drvdata(component);
++	struct skl_dev *skl = bus_to_skl(bus);
++	struct skl_pipeline *ppl;
++	struct skl_pipe *pipe = NULL;
++	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
++	u32 *pipe_id;
++
++	if (!ec)
++		return -EINVAL;
++
++	if (is_set && ucontrol->value.enumerated.item[0] > ec->items)
++		return -EINVAL;
++
++	pipe_id = ec->dobj.private;
++
++	list_for_each_entry(ppl, &skl->ppl_list, node) {
++		if (ppl->pipe->ppl_id == *pipe_id) {
++			pipe = ppl->pipe;
++			break;
++		}
++	}
++	if (!pipe)
++		return -EIO;
++
++	if (is_set)
++		pipe->pipe_config_idx = ucontrol->value.enumerated.item[0];
++	else
++		ucontrol->value.enumerated.item[0]  =  pipe->pipe_config_idx;
++
++	return 0;
++}
++
++static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol,
++				     struct snd_ctl_elem_value *ucontrol)
++{
++	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
++}
++
++static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol,
++				     struct snd_ctl_elem_value *ucontrol)
++{
++	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
++}
++
++static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol,
++					  struct snd_ctl_elem_value *ucontrol)
++{
++	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
++}
++
++static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol,
++					  struct snd_ctl_elem_value *ucontrol)
++{
++	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
++}
++
+ static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
+ 			unsigned int __user *data, unsigned int size)
+ {
+@@ -1854,6 +1956,16 @@ static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
+ 		.get = skl_tplg_mic_control_get,
+ 		.put = skl_tplg_mic_control_set,
+ 	},
++	{
++		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT,
++		.get = skl_tplg_multi_config_get,
++		.put = skl_tplg_multi_config_set,
++	},
++	{
++		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC,
++		.get = skl_tplg_multi_config_get_dmic,
++		.put = skl_tplg_multi_config_set_dmic,
++	}
+ };
+ 
+ static int skl_tplg_fill_pipe_cfg(struct device *dev,
+@@ -3014,12 +3126,21 @@ static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
+ 	case SND_SOC_TPLG_CTL_ENUM:
+ 		tplg_ec = container_of(hdr,
+ 				struct snd_soc_tplg_enum_control, hdr);
+-		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
++		if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) {
+ 			se = (struct soc_enum *)kctl->private_value;
+ 			if (tplg_ec->priv.size)
+-				return skl_init_enum_data(bus->dev, se,
+-						tplg_ec);
++				skl_init_enum_data(bus->dev, se, tplg_ec);
+ 		}
++
++		/*
++		 * now that the control initializations are done, remove
++		 * write permission for the DMIC configuration enums to
++		 * avoid conflicts between NHLT settings and user interaction
++		 */
++
++		if (hdr->ops.get == SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC)
++			kctl->access = SNDRV_CTL_ELEM_ACCESS_READ;
++
+ 		break;
+ 
+ 	default:
+@@ -3489,6 +3610,37 @@ static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
+ 	return 0;
+ }
+ 
++static void skl_tplg_complete(struct snd_soc_component *component)
++{
++	struct snd_soc_dobj *dobj;
++	struct snd_soc_acpi_mach *mach =
++		dev_get_platdata(component->card->dev);
++	int i;
++
++	list_for_each_entry(dobj, &component->dobj_list, list) {
++		struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
++		struct soc_enum *se =
++			(struct soc_enum *)kcontrol->private_value;
++		char **texts = dobj->control.dtexts;
++		char chan_text[4];
++
++		if (dobj->type != SND_SOC_DOBJ_ENUM ||
++		    dobj->control.kcontrol->put !=
++		    skl_tplg_multi_config_set_dmic)
++			continue;
++		sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
++
++		for (i = 0; i < se->items; i++) {
++			struct snd_ctl_elem_value val;
++
++			if (strstr(texts[i], chan_text)) {
++				val.value.enumerated.item[0] = i;
++				kcontrol->put(kcontrol, &val);
++			}
++		}
++	}
++}
++
+ static struct snd_soc_tplg_ops skl_tplg_ops  = {
+ 	.widget_load = skl_tplg_widget_load,
+ 	.control_load = skl_tplg_control_load,
+@@ -3498,6 +3650,7 @@ static struct snd_soc_tplg_ops skl_tplg_ops  = {
+ 	.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
+ 	.manifest = skl_manifest_load,
+ 	.dai_load = skl_dai_load,
++	.complete = skl_tplg_complete,
+ };
+ 
+ /*
+diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
+index e967800dbb623..06576147cc290 100644
+--- a/sound/soc/intel/skylake/skl-topology.h
++++ b/sound/soc/intel/skylake/skl-topology.h
+@@ -306,6 +306,7 @@ struct skl_pipe {
+ 	struct skl_path_config configs[SKL_MAX_PATH_CONFIGS];
+ 	struct list_head w_list;
+ 	bool passthru;
++	u32 pipe_config_idx;
+ };
+ 
+ enum skl_module_state {
+diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
+index 141dbbf975acd..2e5fbd2209235 100644
+--- a/sound/soc/intel/skylake/skl.c
++++ b/sound/soc/intel/skylake/skl.c
+@@ -129,6 +129,7 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
+ 	struct hdac_ext_link *hlink;
+ 	int ret;
+ 
++	snd_hdac_set_codec_wakeup(bus, true);
+ 	skl_enable_miscbdcge(bus->dev, false);
+ 	ret = snd_hdac_bus_init_chip(bus, full_reset);
+ 
+@@ -137,6 +138,7 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
+ 		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);
+ 
+ 	skl_enable_miscbdcge(bus->dev, true);
++	snd_hdac_set_codec_wakeup(bus, false);
+ 
+ 	return ret;
+ }
+@@ -480,13 +482,8 @@ static struct skl_ssp_clk skl_ssp_clks[] = {
+ static struct snd_soc_acpi_mach *skl_find_hda_machine(struct skl_dev *skl,
+ 					struct snd_soc_acpi_mach *machines)
+ {
+-	struct hdac_bus *bus = skl_to_bus(skl);
+ 	struct snd_soc_acpi_mach *mach;
+ 
+-	/* check if we have any codecs detected on bus */
+-	if (bus->codec_mask == 0)
+-		return NULL;
+-
+ 	/* point to common table */
+ 	mach = snd_soc_acpi_intel_hda_machines;
+ 
+@@ -635,6 +632,9 @@ static int skl_clock_device_register(struct skl_dev *skl)
+ 	struct platform_device_info pdevinfo = {NULL};
+ 	struct skl_clk_pdata *clk_pdata;
+ 
++	if (!skl->nhlt)
++		return 0;
++
+ 	clk_pdata = devm_kzalloc(&skl->pci->dev, sizeof(*clk_pdata),
+ 							GFP_KERNEL);
+ 	if (!clk_pdata)
+@@ -807,6 +807,9 @@ static void skl_probe_work(struct work_struct *work)
+ 			return;
+ 	}
+ 
++	skl_init_pci(skl);
++	skl_dum_set(bus);
++
+ 	err = skl_init_chip(bus, true);
+ 	if (err < 0) {
+ 		dev_err(bus->dev, "Init chip failed with err: %d\n", err);
+@@ -922,8 +925,6 @@ static int skl_first_init(struct hdac_bus *bus)
+ 		return -ENXIO;
+ 	}
+ 
+-	snd_hdac_bus_reset_link(bus, true);
+-
+ 	snd_hdac_bus_parse_capabilities(bus);
+ 
+ 	/* check if PPCAP exists */
+@@ -971,11 +972,7 @@ static int skl_first_init(struct hdac_bus *bus)
+ 	if (err < 0)
+ 		return err;
+ 
+-	/* initialize chip */
+-	skl_init_pci(skl);
+-	skl_dum_set(bus);
+-
+-	return skl_init_chip(bus, true);
++	return 0;
+ }
+ 
+ static int skl_probe(struct pci_dev *pci,
+@@ -1080,8 +1077,6 @@ static int skl_probe(struct pci_dev *pci,
+ 	if (bus->mlcap)
+ 		snd_hdac_ext_bus_get_ml_capabilities(bus);
+ 
+-	snd_hdac_bus_stop_chip(bus);
+-
+ 	/* create device for soc dmic */
+ 	err = skl_dmic_device_register(skl);
+ 	if (err < 0) {
+@@ -1098,7 +1093,8 @@ out_dsp_free:
+ out_clk_free:
+ 	skl_clock_device_unregister(skl);
+ out_nhlt_free:
+-	intel_nhlt_free(skl->nhlt);
++	if (skl->nhlt)
++		intel_nhlt_free(skl->nhlt);
+ out_free:
+ 	skl_free(bus);
+ 
+@@ -1147,7 +1143,8 @@ static void skl_remove(struct pci_dev *pci)
+ 	skl_dmic_device_unregister(skl);
+ 	skl_clock_device_unregister(skl);
+ 	skl_nhlt_remove_sysfs(skl);
+-	intel_nhlt_free(skl->nhlt);
++	if (skl->nhlt)
++		intel_nhlt_free(skl->nhlt);
+ 	skl_free(bus);
+ 	dev_set_drvdata(&pci->dev, NULL);
+ }
+diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
+index 5544bfbd0f6c0..ab34ef2c661f8 100644
+--- a/tools/perf/util/dwarf-aux.c
++++ b/tools/perf/util/dwarf-aux.c
+@@ -319,6 +319,7 @@ bool die_is_func_def(Dwarf_Die *dw_die)
+ int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
+ {
+ 	Dwarf_Addr base, end;
++	Dwarf_Attribute attr;
+ 
+ 	if (!addr)
+ 		return -EINVAL;
+@@ -326,6 +327,13 @@ int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
+ 	if (dwarf_entrypc(dw_die, addr) == 0)
+ 		return 0;
+ 
++	/*
++	 *  Since the dwarf_ranges() will return 0 if there is no
++	 * DW_AT_ranges attribute, we should check it first.
++	 */
++	if (!dwarf_attr(dw_die, DW_AT_ranges, &attr))
++		return -ENOENT;
++
+ 	return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
+ }
+ 
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index 373e399e57d28..93147cc40162f 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -316,13 +316,10 @@ static int first_shadow_cpu(struct perf_stat_config *config,
+ 	struct evlist *evlist = evsel->evlist;
+ 	int i;
+ 
+-	if (!config->aggr_get_id)
+-		return 0;
+-
+ 	if (config->aggr_mode == AGGR_NONE)
+ 		return id;
+ 
+-	if (config->aggr_mode == AGGR_GLOBAL)
++	if (!config->aggr_get_id)
+ 		return 0;
+ 
+ 	for (i = 0; i < perf_evsel__nr_cpus(evsel); i++) {
+diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
+index 4c5909e38f78a..b1e639ea22e9a 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
++++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
+@@ -223,6 +223,23 @@ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
+ 	return extract_bytes(value, addr & 7, len);
+ }
+ 
++static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
++						 gpa_t addr, unsigned int len)
++{
++	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
++	int target_vcpu_id = vcpu->vcpu_id;
++	u64 value;
++
++	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
++	value |= ((target_vcpu_id & 0xffff) << 8);
++
++	if (vgic_has_its(vcpu->kvm))
++		value |= GICR_TYPER_PLPIS;
++
++	/* reporting of the Last bit is not supported for userspace */
++	return extract_bytes(value, addr & 7, len);
++}
++
+ static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
+ 					     gpa_t addr, unsigned int len)
+ {
+@@ -528,8 +545,9 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
+ 	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
+ 		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
+ 		VGIC_ACCESS_32bit),
+-	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
+-		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
++	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
++		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
++		vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
+ 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ 	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
+ 		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,

