From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Fri, 28 Feb 2020 16:38:19 +0000 (UTC)
Message-ID: <1582907884.95211ef5ab2b6b97467a0a274eeb89815029df2e.mpagano@gentoo>

commit:     95211ef5ab2b6b97467a0a274eeb89815029df2e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 28 16:38:04 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 28 16:38:04 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=95211ef5

Linux patch 4.19.107

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1106_linux-4.19.107.patch | 4497 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4501 insertions(+)

diff --git a/0000_README b/0000_README
index 3213eab..7d48aad 100644
--- a/0000_README
+++ b/0000_README
@@ -463,6 +463,10 @@ Patch:  1105_linux-4.19.106.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.106
 
+Patch:  1106_linux-4.19.107.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.107
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1106_linux-4.19.107.patch b/1106_linux-4.19.107.patch
new file mode 100644
index 0000000..4d8e950
--- /dev/null
+++ b/1106_linux-4.19.107.patch
@@ -0,0 +1,4497 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index d735500d3dad..b9f9da0b886f 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -7340,7 +7340,7 @@ M:	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+ M:	Rodrigo Vivi <rodrigo.vivi@intel.com>
+ L:	intel-gfx@lists.freedesktop.org
+ W:	https://01.org/linuxgraphics/
+-B:	https://01.org/linuxgraphics/documentation/how-report-bugs
++B:	https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
+ C:	irc://chat.freenode.net/intel-gfx
+ Q:	http://patchwork.freedesktop.org/project/intel-gfx/
+ T:	git git://anongit.freedesktop.org/drm-intel
+diff --git a/Makefile b/Makefile
+index c010fd4a3286..69e2527a6968 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 106
++SUBLEVEL = 107
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
+index b3e8db376ecd..57b3745f7f1b 100644
+--- a/arch/powerpc/kernel/signal.c
++++ b/arch/powerpc/kernel/signal.c
+@@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
+ 	 * normal/non-checkpointed stack pointer.
+ 	 */
+ 
++	unsigned long ret = tsk->thread.regs->gpr[1];
++
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ 	BUG_ON(tsk != current);
+ 
+ 	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
++		preempt_disable();
+ 		tm_reclaim_current(TM_CAUSE_SIGNAL);
+ 		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+-			return tsk->thread.ckpt_regs.gpr[1];
++			ret = tsk->thread.ckpt_regs.gpr[1];
++
++		/*
++		 * If we treclaim, we must clear the current thread's TM bits
++		 * before re-enabling preemption. Otherwise we might be
++		 * preempted and have the live MSR[TS] changed behind our back
++		 * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
++		 * enter the signal handler in non-transactional state.
++		 */
++		tsk->thread.regs->msr &= ~MSR_TS_MASK;
++		preempt_enable();
+ 	}
+ #endif
+-	return tsk->thread.regs->gpr[1];
++	return ret;
+ }
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 906b05c2adae..06b4b828d258 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -493,19 +493,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+  */
+ static int save_tm_user_regs(struct pt_regs *regs,
+ 			     struct mcontext __user *frame,
+-			     struct mcontext __user *tm_frame, int sigret)
++			     struct mcontext __user *tm_frame, int sigret,
++			     unsigned long msr)
+ {
+-	unsigned long msr = regs->msr;
+-
+ 	WARN_ON(tm_suspend_disabled);
+ 
+-	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
+-	 * just indicates to userland that we were doing a transaction, but we
+-	 * don't want to return in transactional state.  This also ensures
+-	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+-	 */
+-	regs->msr &= ~MSR_TS_MASK;
+-
+ 	/* Save both sets of general registers */
+ 	if (save_general_regs(&current->thread.ckpt_regs, frame)
+ 	    || save_general_regs(regs, tm_frame))
+@@ -916,6 +908,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 	int sigret;
+ 	unsigned long tramp;
+ 	struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	/* Save the thread's msr before get_tm_stackpointer() changes it */
++	unsigned long msr = regs->msr;
++#endif
+ 
+ 	BUG_ON(tsk != current);
+ 
+@@ -948,13 +944,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ 	tm_frame = &rt_sf->uc_transact.uc_mcontext;
+-	if (MSR_TM_ACTIVE(regs->msr)) {
++	if (MSR_TM_ACTIVE(msr)) {
+ 		if (__put_user((unsigned long)&rt_sf->uc_transact,
+ 			       &rt_sf->uc.uc_link) ||
+ 		    __put_user((unsigned long)tm_frame,
+ 			       &rt_sf->uc_transact.uc_regs))
+ 			goto badframe;
+-		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
++		if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
+ 			goto badframe;
+ 	}
+ 	else
+@@ -1365,6 +1361,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 	int sigret;
+ 	unsigned long tramp;
+ 	struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	/* Save the thread's msr before get_tm_stackpointer() changes it */
++	unsigned long msr = regs->msr;
++#endif
+ 
+ 	BUG_ON(tsk != current);
+ 
+@@ -1398,9 +1398,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ 	tm_mctx = &frame->mctx_transact;
+-	if (MSR_TM_ACTIVE(regs->msr)) {
++	if (MSR_TM_ACTIVE(msr)) {
+ 		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
+-				      sigret))
++				      sigret, msr))
+ 			goto badframe;
+ 	}
+ 	else
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index b5933d7219db..b088b0700d0d 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -196,7 +196,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
+ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ 				 struct sigcontext __user *tm_sc,
+ 				 struct task_struct *tsk,
+-				 int signr, sigset_t *set, unsigned long handler)
++				 int signr, sigset_t *set, unsigned long handler,
++				 unsigned long msr)
+ {
+ 	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
+ 	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
+@@ -211,12 +212,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ 	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
+ #endif
+ 	struct pt_regs *regs = tsk->thread.regs;
+-	unsigned long msr = tsk->thread.regs->msr;
+ 	long err = 0;
+ 
+ 	BUG_ON(tsk != current);
+ 
+-	BUG_ON(!MSR_TM_ACTIVE(regs->msr));
++	BUG_ON(!MSR_TM_ACTIVE(msr));
+ 
+ 	WARN_ON(tm_suspend_disabled);
+ 
+@@ -226,13 +226,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
+ 	 */
+ 	msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
+ 
+-	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
+-	 * just indicates to userland that we were doing a transaction, but we
+-	 * don't want to return in transactional state.  This also ensures
+-	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
+-	 */
+-	regs->msr &= ~MSR_TS_MASK;
+-
+ #ifdef CONFIG_ALTIVEC
+ 	err |= __put_user(v_regs, &sc->v_regs);
+ 	err |= __put_user(tm_v_regs, &tm_sc->v_regs);
+@@ -803,6 +796,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ 	unsigned long newsp = 0;
+ 	long err = 0;
+ 	struct pt_regs *regs = tsk->thread.regs;
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	/* Save the thread's msr before get_tm_stackpointer() changes it */
++	unsigned long msr = regs->msr;
++#endif
+ 
+ 	BUG_ON(tsk != current);
+ 
+@@ -820,7 +817,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ 	err |= __put_user(0, &frame->uc.uc_flags);
+ 	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+-	if (MSR_TM_ACTIVE(regs->msr)) {
++	if (MSR_TM_ACTIVE(msr)) {
+ 		/* The ucontext_t passed to userland points to the second
+ 		 * ucontext_t (for transactional state) with its uc_link ptr.
+ 		 */
+@@ -828,7 +825,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ 		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
+ 					    &frame->uc_transact.uc_mcontext,
+ 					    tsk, ksig->sig, NULL,
+-					    (unsigned long)ksig->ka.sa.sa_handler);
++					    (unsigned long)ksig->ka.sa.sa_handler,
++					    msr);
+ 	} else
+ #endif
+ 	{
+diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
+index ac3c86b21d79..349b1c1ef779 100644
+--- a/arch/s390/include/asm/page.h
++++ b/arch/s390/include/asm/page.h
+@@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
+ 
+ static inline void storage_key_init_range(unsigned long start, unsigned long end)
+ {
+-	if (PAGE_DEFAULT_KEY)
++	if (PAGE_DEFAULT_KEY != 0)
+ 		__storage_key_init_range(start, end);
+ }
+ 
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 21a58fcc3dd4..067288d4ef6e 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1040,7 +1040,7 @@ struct kvm_x86_ops {
+ 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ 	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
+ 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
+-	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
++	int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ 	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
+ 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
+ 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 0f4feee6d082..d2c25a13e1ce 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -455,6 +455,8 @@
+ #define MSR_K7_HWCR			0xc0010015
+ #define MSR_K7_HWCR_SMMLOCK_BIT		0
+ #define MSR_K7_HWCR_SMMLOCK		BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
++#define MSR_K7_HWCR_IRPERF_EN_BIT	30
++#define MSR_K7_HWCR_IRPERF_EN		BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
+ #define MSR_K7_FID_VID_CTL		0xc0010041
+ #define MSR_K7_FID_VID_STATUS		0xc0010042
+ 
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 75715fa0e822..120769955687 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -25,6 +25,7 @@
+ 
+ static const int amd_erratum_383[];
+ static const int amd_erratum_400[];
++static const int amd_erratum_1054[];
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+ 
+ /*
+@@ -983,6 +984,15 @@ static void init_amd(struct cpuinfo_x86 *c)
+ 	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ 	if (!cpu_has(c, X86_FEATURE_XENPV))
+ 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
++
++	/*
++	 * Turn on the Instructions Retired free counter on machines not
++	 * susceptible to erratum #1054 "Instructions Retired Performance
++	 * Counter May Be Inaccurate".
++	 */
++	if (cpu_has(c, X86_FEATURE_IRPERF) &&
++	    !cpu_has_amd_erratum(c, amd_erratum_1054))
++		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+ }
+ 
+ #ifdef CONFIG_X86_32
+@@ -1110,6 +1120,10 @@ static const int amd_erratum_400[] =
+ static const int amd_erratum_383[] =
+ 	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+ 
++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
++static const int amd_erratum_1054[] =
++	AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
++
+ 
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index da0b6967349a..f878d24ff3c1 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -1117,9 +1117,12 @@ static const struct sysfs_ops threshold_ops = {
+ 	.store			= store,
+ };
+ 
++static void threshold_block_release(struct kobject *kobj);
++
+ static struct kobj_type threshold_ktype = {
+ 	.sysfs_ops		= &threshold_ops,
+ 	.default_attrs		= default_attrs,
++	.release		= threshold_block_release,
+ };
+ 
+ static const char *get_name(unsigned int bank, struct threshold_block *b)
+@@ -1152,8 +1155,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
+ 	return buf_mcatype;
+ }
+ 
+-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+-				     unsigned int block, u32 address)
++static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
++				     unsigned int bank, unsigned int block,
++				     u32 address)
+ {
+ 	struct threshold_block *b = NULL;
+ 	u32 low, high;
+@@ -1197,16 +1201,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+ 
+ 	INIT_LIST_HEAD(&b->miscj);
+ 
+-	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
+-		list_add(&b->miscj,
+-			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
+-	} else {
+-		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
+-	}
++	if (tb->blocks)
++		list_add(&b->miscj, &tb->blocks->miscj);
++	else
++		tb->blocks = b;
+ 
+-	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
+-				   per_cpu(threshold_banks, cpu)[bank]->kobj,
+-				   get_name(bank, b));
++	err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
+ 	if (err)
+ 		goto out_free;
+ recurse:
+@@ -1214,7 +1214,7 @@ recurse:
+ 	if (!address)
+ 		return 0;
+ 
+-	err = allocate_threshold_blocks(cpu, bank, block, address);
++	err = allocate_threshold_blocks(cpu, tb, bank, block, address);
+ 	if (err)
+ 		goto out_free;
+ 
+@@ -1299,8 +1299,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 		goto out_free;
+ 	}
+ 
+-	per_cpu(threshold_banks, cpu)[bank] = b;
+-
+ 	if (is_shared_bank(bank)) {
+ 		refcount_set(&b->cpus, 1);
+ 
+@@ -1311,9 +1309,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 		}
+ 	}
+ 
+-	err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
+-	if (!err)
+-		goto out;
++	err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
++	if (err)
++		goto out_free;
++
++	per_cpu(threshold_banks, cpu)[bank] = b;
++
++	return 0;
+ 
+  out_free:
+ 	kfree(b);
+@@ -1322,8 +1324,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 	return err;
+ }
+ 
+-static void deallocate_threshold_block(unsigned int cpu,
+-						 unsigned int bank)
++static void threshold_block_release(struct kobject *kobj)
++{
++	kfree(to_block(kobj));
++}
++
++static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
+ {
+ 	struct threshold_block *pos = NULL;
+ 	struct threshold_block *tmp = NULL;
+@@ -1333,13 +1339,11 @@ static void deallocate_threshold_block(unsigned int cpu,
+ 		return;
+ 
+ 	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+-		kobject_put(&pos->kobj);
+ 		list_del(&pos->miscj);
+-		kfree(pos);
++		kobject_put(&pos->kobj);
+ 	}
+ 
+-	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
+-	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
++	kobject_put(&head->blocks->kobj);
+ }
+ 
+ static void __threshold_remove_blocks(struct threshold_bank *b)
+diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
+index 3cc3b2d130a0..4d000aea05e0 100644
+--- a/arch/x86/kvm/irq_comm.c
++++ b/arch/x86/kvm/irq_comm.c
+@@ -427,7 +427,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
+ 
+ 			kvm_set_msi_irq(vcpu->kvm, entry, &irq);
+ 
+-			if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
++			if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
+ 						irq.dest_id, irq.dest_mode))
+ 				__set_bit(irq.vector, ioapic_handled_vectors);
+ 		}
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 05905961ecca..8c6392534d14 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -633,9 +633,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
+ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
+ {
+ 	u8 val;
+-	if (pv_eoi_get_user(vcpu, &val) < 0)
++	if (pv_eoi_get_user(vcpu, &val) < 0) {
+ 		apic_debug("Can't read EOI MSR value: 0x%llx\n",
+ 			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
++		return false;
++	}
+ 	return val & 0x1;
+ }
+ 
+@@ -1060,11 +1062,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
+ 				apic_clear_vector(vector, apic->regs + APIC_TMR);
+ 		}
+ 
+-		if (vcpu->arch.apicv_active)
+-			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
+-		else {
++		if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
+ 			kvm_lapic_set_irr(vector, apic);
+-
+ 			kvm_make_request(KVM_REQ_EVENT, vcpu);
+ 			kvm_vcpu_kick(vcpu);
+ 		}
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 7657dcd72134..0219693bf08e 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5140,8 +5140,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ 	return;
+ }
+ 
+-static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
++static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ {
++	if (!vcpu->arch.apicv_active)
++		return -1;
++
+ 	kvm_lapic_set_irr(vec, vcpu->arch.apic);
+ 	smp_mb__after_atomic();
+ 
+@@ -5150,6 +5153,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ 		       kvm_cpu_get_apicid(vcpu->cpu));
+ 	else
+ 		kvm_vcpu_wake_up(vcpu);
++
++	return 0;
+ }
+ 
+ static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index aead984d89ad..9c48484dbe23 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -5725,6 +5725,26 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
+ 		 (ss.selector & SEGMENT_RPL_MASK));
+ }
+ 
++static bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu,
++					unsigned int port, int size);
++static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
++				       struct vmcs12 *vmcs12)
++{
++	unsigned long exit_qualification;
++	unsigned short port;
++	int size;
++
++	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
++		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
++
++	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
++
++	port = exit_qualification >> 16;
++	size = (exit_qualification & 7) + 1;
++
++	return nested_vmx_check_io_bitmaps(vcpu, port, size);
++}
++
+ /*
+  * Check if guest state is valid. Returns true if valid, false if
+  * not.
+@@ -6264,24 +6284,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
+  * 2. If target vcpu isn't running(root mode), kick it to pick up the
+  * interrupt from PIR in next vmentry.
+  */
+-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
++static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 	int r;
+ 
+ 	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
+ 	if (!r)
+-		return;
++		return 0;
++
++	if (!vcpu->arch.apicv_active)
++		return -1;
+ 
+ 	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+-		return;
++		return 0;
+ 
+ 	/* If a previous notification has sent the IPI, nothing to do.  */
+ 	if (pi_test_and_set_on(&vmx->pi_desc))
+-		return;
++		return 0;
+ 
+ 	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+ 		kvm_vcpu_kick(vcpu);
++
++	return 0;
+ }
+ 
+ /*
+@@ -9469,23 +9494,17 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+ static const int kvm_vmx_max_exit_handlers =
+ 	ARRAY_SIZE(kvm_vmx_exit_handlers);
+ 
+-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+-				       struct vmcs12 *vmcs12)
++/*
++ * Return true if an IO instruction with the specified port and size should cause
++ * a VM-exit into L1.
++ */
++bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
++				 int size)
+ {
+-	unsigned long exit_qualification;
++	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ 	gpa_t bitmap, last_bitmap;
+-	unsigned int port;
+-	int size;
+ 	u8 b;
+ 
+-	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+-		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
+-
+-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+-
+-	port = exit_qualification >> 16;
+-	size = (exit_qualification & 7) + 1;
+-
+ 	last_bitmap = (gpa_t)-1;
+ 	b = -1;
+ 
+@@ -13675,6 +13694,39 @@ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
+ 		to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
+ }
+ 
++static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
++				  struct x86_instruction_info *info)
++{
++	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
++	unsigned short port;
++	bool intercept;
++	int size;
++
++	if (info->intercept == x86_intercept_in ||
++	    info->intercept == x86_intercept_ins) {
++		port = info->src_val;
++		size = info->dst_bytes;
++	} else {
++		port = info->dst_val;
++		size = info->src_bytes;
++	}
++
++	/*
++	 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
++	 * VM-exits depend on the 'unconditional IO exiting' VM-execution
++	 * control.
++	 *
++	 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
++	 */
++	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
++		intercept = nested_cpu_has(vmcs12,
++					   CPU_BASED_UNCOND_IO_EXITING);
++	else
++		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
++
++	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
++}
++
+ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ 			       struct x86_instruction_info *info,
+ 			       enum x86_intercept_stage stage)
+@@ -13682,19 +13734,31 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ 
++	switch (info->intercept) {
+ 	/*
+ 	 * RDPID causes #UD if disabled through secondary execution controls.
+ 	 * Because it is marked as EmulateOnUD, we need to intercept it here.
+ 	 */
+-	if (info->intercept == x86_intercept_rdtscp &&
+-	    !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
+-		ctxt->exception.vector = UD_VECTOR;
+-		ctxt->exception.error_code_valid = false;
+-		return X86EMUL_PROPAGATE_FAULT;
+-	}
++	case x86_intercept_rdtscp:
++		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
++			ctxt->exception.vector = UD_VECTOR;
++			ctxt->exception.error_code_valid = false;
++			return X86EMUL_PROPAGATE_FAULT;
++		}
++		break;
++
++	case x86_intercept_in:
++	case x86_intercept_ins:
++	case x86_intercept_out:
++	case x86_intercept_outs:
++		return vmx_check_intercept_io(vcpu, info);
+ 
+ 	/* TODO: check more intercepts... */
+-	return X86EMUL_CONTINUE;
++	default:
++		break;
++	}
++
++	return X86EMUL_UNHANDLEABLE;
+ }
+ 
+ #ifdef CONFIG_X86_64
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index fa1c5a442957..bbc8710704e2 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -96,6 +96,7 @@ enum board_ids {
+ 
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+ static void ahci_remove_one(struct pci_dev *dev);
++static void ahci_shutdown_one(struct pci_dev *dev);
+ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ 				 unsigned long deadline);
+ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+@@ -609,6 +610,7 @@ static struct pci_driver ahci_pci_driver = {
+ 	.id_table		= ahci_pci_tbl,
+ 	.probe			= ahci_init_one,
+ 	.remove			= ahci_remove_one,
++	.shutdown		= ahci_shutdown_one,
+ 	.driver = {
+ 		.pm		= &ahci_pci_pm_ops,
+ 	},
+@@ -1897,6 +1899,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	return 0;
+ }
+ 
++static void ahci_shutdown_one(struct pci_dev *pdev)
++{
++	ata_pci_shutdown_one(pdev);
++}
++
+ static void ahci_remove_one(struct pci_dev *pdev)
+ {
+ 	pm_runtime_get_noresume(&pdev->dev);
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index b45b6f7722ce..75d582ca917f 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -6780,6 +6780,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
+ 	ata_host_detach(host);
+ }
+ 
++void ata_pci_shutdown_one(struct pci_dev *pdev)
++{
++	struct ata_host *host = pci_get_drvdata(pdev);
++	int i;
++
++	for (i = 0; i < host->n_ports; i++) {
++		struct ata_port *ap = host->ports[i];
++
++		ap->pflags |= ATA_PFLAG_FROZEN;
++
++		/* Disable port interrupts */
++		if (ap->ops->freeze)
++			ap->ops->freeze(ap);
++
++		/* Stop the port DMA engines */
++		if (ap->ops->port_stop)
++			ap->ops->port_stop(ap);
++	}
++}
++
+ /* move to PCI subsystem */
+ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
+ {
+@@ -7400,6 +7420,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
+ 
+ #ifdef CONFIG_PCI
+ EXPORT_SYMBOL_GPL(pci_test_config_bits);
++EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
+ EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+ #ifdef CONFIG_PM
+ EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index e71589e244fb..bf222c4b2f82 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -852,14 +852,17 @@ static void reset_fdc_info(int mode)
+ /* selects the fdc and drive, and enables the fdc's input/dma. */
+ static void set_fdc(int drive)
+ {
++	unsigned int new_fdc = fdc;
++
+ 	if (drive >= 0 && drive < N_DRIVE) {
+-		fdc = FDC(drive);
++		new_fdc = FDC(drive);
+ 		current_drive = drive;
+ 	}
+-	if (fdc != 1 && fdc != 0) {
++	if (new_fdc >= N_FDC) {
+ 		pr_info("bad fdc value\n");
+ 		return;
+ 	}
++	fdc = new_fdc;
+ 	set_dor(fdc, ~0, 8);
+ #if N_FDC > 1
+ 	set_dor(1 - fdc, ~8, 0);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 28b110cd3977..53e822793d46 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1609,9 +1609,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
+ 	print_once = true;
+ #endif
+ 	if (__ratelimit(&unseeded_warning))
+-		printk_deferred(KERN_NOTICE "random: %s called from %pS "
+-				"with crng_init=%d\n", func_name, caller,
+-				crng_init);
++		pr_notice("random: %s called from %pS with crng_init=%d\n",
++			  func_name, caller, crng_init);
+ }
+ 
+ /*
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index d66a7fdff898..ceb82e74f5b4 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -738,8 +738,12 @@ static void sdma_start_desc(struct sdma_channel *sdmac)
+ 		return;
+ 	}
+ 	sdmac->desc = desc = to_sdma_desc(&vd->tx);
+-
+-	list_del(&vd->node);
++	/*
++	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
++	 * the desc allocated will never be freed in vchan_dma_desc_free_list
++	 */
++	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
++		list_del(&vd->node);
+ 
+ 	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+ 	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+@@ -1040,6 +1044,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+ 
+ 	spin_lock_irqsave(&sdmac->vc.lock, flags);
+ 	vchan_get_all_descriptors(&sdmac->vc, &head);
++	sdmac->desc = NULL;
+ 	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ 	vchan_dma_desc_free_list(&sdmac->vc, &head);
+ }
+@@ -1047,19 +1052,11 @@ static void sdma_channel_terminate_work(struct work_struct *work)
+ static int sdma_disable_channel_async(struct dma_chan *chan)
+ {
+ 	struct sdma_channel *sdmac = to_sdma_chan(chan);
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&sdmac->vc.lock, flags);
+ 
+ 	sdma_disable_channel(chan);
+ 
+-	if (sdmac->desc) {
+-		vchan_terminate_vdesc(&sdmac->desc->vd);
+-		sdmac->desc = NULL;
++	if (sdmac->desc)
+ 		schedule_work(&sdmac->terminate_worker);
+-	}
+-
+-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 83f2717fcf81..9e74f4304313 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -205,7 +205,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
+ 
+ static u32 soc15_get_xclk(struct amdgpu_device *adev)
+ {
+-	return adev->clock.spll.reference_freq;
++	u32 reference_clock = adev->clock.spll.reference_freq;
++
++	if (adev->asic_type == CHIP_RAVEN)
++		return reference_clock / 4;
++
++	return reference_clock;
+ }
+ 
+ 
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+index b3db4553098d..d343ae66c64f 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+@@ -405,6 +405,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
+ 		asyw->clr.ntfy = armw->ntfy.handle != 0;
+ 		asyw->clr.sema = armw->sema.handle != 0;
+ 		asyw->clr.xlut = armw->xlut.handle != 0;
++		if (asyw->clr.xlut && asyw->visible)
++			asyw->set.xlut = asyw->xlut.handle != 0;
+ 		if (wndw->func->image_clr)
+ 			asyw->clr.image = armw->image.handle[0] != 0;
+ 	}
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 9899f7e155a5..f39670c5c25c 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2584,6 +2584,17 @@ isert_wait4logout(struct isert_conn *isert_conn)
+ 	}
+ }
+ 
++static void
++isert_wait4cmds(struct iscsi_conn *conn)
++{
++	isert_info("iscsi_conn %p\n", conn);
++
++	if (conn->sess) {
++		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
++		target_wait_for_sess_cmds(conn->sess->se_sess);
++	}
++}
++
+ /**
+  * isert_put_unsol_pending_cmds() - Drop commands waiting for
+  *     unsolicitate dataout
+@@ -2631,6 +2642,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ 
+ 	ib_drain_qp(isert_conn->qp);
+ 	isert_put_unsol_pending_cmds(conn);
++	isert_wait4cmds(conn);
+ 	isert_wait4logout(isert_conn);
+ 
+ 	queue_work(isert_release_wq, &isert_conn->release_work);
+diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
+index ee70e9921cf1..9a6ed5eeaad1 100644
+--- a/drivers/iommu/qcom_iommu.c
++++ b/drivers/iommu/qcom_iommu.c
+@@ -333,21 +333,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
+ {
+ 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ 
+-	if (WARN_ON(qcom_domain->iommu))    /* forgot to detach? */
+-		return;
+-
+ 	iommu_put_dma_cookie(domain);
+ 
+-	/* NOTE: unmap can be called after client device is powered off,
+-	 * for example, with GPUs or anything involving dma-buf.  So we
+-	 * cannot rely on the device_link.  Make sure the IOMMU is on to
+-	 * avoid unclocked accesses in the TLB inv path:
+-	 */
+-	pm_runtime_get_sync(qcom_domain->iommu->dev);
+-
+-	free_io_pgtable_ops(qcom_domain->pgtbl_ops);
+-
+-	pm_runtime_put_sync(qcom_domain->iommu->dev);
++	if (qcom_domain->iommu) {
++		/*
++		 * NOTE: unmap can be called after client device is powered
++		 * off, for example, with GPUs or anything involving dma-buf.
++		 * So we cannot rely on the device_link.  Make sure the IOMMU
++		 * is on to avoid unclocked accesses in the TLB inv path:
++		 */
++		pm_runtime_get_sync(qcom_domain->iommu->dev);
++		free_io_pgtable_ops(qcom_domain->pgtbl_ops);
++		pm_runtime_put_sync(qcom_domain->iommu->dev);
++	}
+ 
+ 	kfree(qcom_domain);
+ }
+@@ -392,7 +390,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
+ 	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ 	unsigned i;
+ 
+-	if (!qcom_domain->iommu)
++	if (WARN_ON(!qcom_domain->iommu))
+ 		return;
+ 
+ 	pm_runtime_get_sync(qcom_iommu->dev);
+@@ -405,8 +403,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
+ 		ctx->domain = NULL;
+ 	}
+ 	pm_runtime_put_sync(qcom_iommu->dev);
+-
+-	qcom_domain->iommu = NULL;
+ }
+ 
+ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 838ee58d80cd..e8bc25aed44c 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -569,6 +569,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ 	}
+ 
+ 	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
++	kfree(ctrl->ana_log_buf);
+ 	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
+ 	if (!ctrl->ana_log_buf) {
+ 		error = -ENOMEM;
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index be815330ed95..e3df4bf521b5 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -350,8 +350,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
+ 	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
+ }
+ 
++static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	/* do not allow to mmap ashmem backing shmem file directly */
++	return -EPERM;
++}
++
++static unsigned long
++ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
++				unsigned long len, unsigned long pgoff,
++				unsigned long flags)
++{
++	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
++}
++
+ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ {
++	static struct file_operations vmfile_fops;
+ 	struct ashmem_area *asma = file->private_data;
+ 	int ret = 0;
+ 
+@@ -392,6 +407,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ 		}
+ 		vmfile->f_mode |= FMODE_LSEEK;
+ 		asma->file = vmfile;
++		/*
++		 * override mmap operation of the vmfile so that it can't be
++		 * remapped which would lead to creation of a new vma with no
++		 * asma permission checks. Have to override get_unmapped_area
++		 * as well to prevent VM_BUG_ON check for f_ops modification.
++		 */
++		if (!vmfile_fops.mmap) {
++			vmfile_fops = *vmfile->f_op;
++			vmfile_fops.mmap = ashmem_vmfile_mmap;
++			vmfile_fops.get_unmapped_area =
++					ashmem_vmfile_get_unmapped_area;
++		}
++		vmfile->f_op = &vmfile_fops;
+ 	}
+ 	get_file(asma->file);
+ 
+diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
+index d44b070d8862..0f5c68edf2c1 100644
+--- a/drivers/staging/greybus/audio_manager.c
++++ b/drivers/staging/greybus/audio_manager.c
+@@ -89,8 +89,8 @@ void gb_audio_manager_remove_all(void)
+ 
+ 	list_for_each_entry_safe(module, next, &modules_list, list) {
+ 		list_del(&module->list);
+-		kobject_put(&module->kobj);
+ 		ida_simple_remove(&module_id, module->id);
++		kobject_put(&module->kobj);
+ 	}
+ 
+ 	is_empty = list_empty(&modules_list);
+diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+index 2db4444267a7..0003f0c38038 100644
+--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+@@ -2026,7 +2026,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+ 	struct ieee_param *param;
+ 	uint ret = 0;
+ 
+-	if (p->length < sizeof(struct ieee_param) || !p->pointer) {
++	if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -2819,7 +2819,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+ 		goto out;
+ 	}
+ 
+-	if (!p->pointer) {
++	if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+index 10b3f9733bad..4a27c3927da9 100644
+--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
++++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+@@ -478,14 +478,13 @@ int rtl8723bs_xmit_thread(void *context)
+ 	s32 ret;
+ 	struct adapter *padapter;
+ 	struct xmit_priv *pxmitpriv;
+-	u8 thread_name[20] = "RTWHALXT";
+-
++	u8 thread_name[20];
+ 
+ 	ret = _SUCCESS;
+ 	padapter = context;
+ 	pxmitpriv = &padapter->xmitpriv;
+ 
+-	rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
++	rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter));
+ 	thread_enter(thread_name);
+ 
+ 	DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+index 4f120e72c7d2..466d25ccc4bb 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+@@ -3400,7 +3400,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+ 
+ 	/* down(&ieee->wx_sem); */
+ 
+-	if (p->length < sizeof(struct ieee_param) || !p->pointer) {
++	if (!p->pointer || p->length != sizeof(struct ieee_param)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+@@ -4236,7 +4236,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+ 
+ 
+ 	/* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */
+-	if (!p->pointer) {
++	if (!p->pointer || p->length != sizeof(*param)) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
+index 3b94e80f1d5e..879ceef517fb 100644
+--- a/drivers/staging/vt6656/dpc.c
++++ b/drivers/staging/vt6656/dpc.c
+@@ -130,7 +130,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
+ 
+ 	vnt_rf_rssi_to_dbm(priv, *rssi, &rx_dbm);
+ 
+-	priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
++	priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
+ 	priv->current_rssi = priv->bb_pre_ed_rssi;
+ 
+ 	skb_pull(skb, 8);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 14bd54d0e79d..03e9cb156df9 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1157,9 +1157,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 		hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
+ 		conn->cid);
+ 
+-	if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+-		return iscsit_add_reject_cmd(cmd,
+-				ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
++	target_get_sess_cmd(&cmd->se_cmd, true);
+ 
+ 	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+ 						     scsilun_to_int(&hdr->lun));
+@@ -2000,9 +1998,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 			      conn->sess->se_sess, 0, DMA_NONE,
+ 			      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+ 
+-	if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+-		return iscsit_add_reject_cmd(cmd,
+-				ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
++	target_get_sess_cmd(&cmd->se_cmd, true);
+ 
+ 	/*
+ 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
+@@ -4123,6 +4119,9 @@ int iscsit_close_connection(
+ 	iscsit_stop_nopin_response_timer(conn);
+ 	iscsit_stop_nopin_timer(conn);
+ 
++	if (conn->conn_transport->iscsit_wait_conn)
++		conn->conn_transport->iscsit_wait_conn(conn);
++
+ 	/*
+ 	 * During Connection recovery drop unacknowledged out of order
+ 	 * commands for this connection, and prepare the other commands
+@@ -4205,11 +4204,6 @@ int iscsit_close_connection(
+ 	 * must wait until they have completed.
+ 	 */
+ 	iscsit_check_conn_usage_count(conn);
+-	target_sess_cmd_list_set_waiting(sess->se_sess);
+-	target_wait_for_sess_cmds(sess->se_sess);
+-
+-	if (conn->conn_transport->iscsit_wait_conn)
+-		conn->conn_transport->iscsit_wait_conn(conn);
+ 
+ 	ahash_request_free(conn->conn_tx_hash);
+ 	if (conn->conn_rx_hash) {
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index 678bf3365947..42d90ceec279 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -264,6 +264,12 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
+ 	return ret;
+ }
+ 
++static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
++				 size_t bytes)
++{
++	return -EPERM;
++}
++
+ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+ 			       size_t bytes)
+ {
+@@ -309,6 +315,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+ 		config.read_only = true;
+ 	} else {
+ 		config.name = "nvm_non_active";
++		config.reg_read = tb_switch_nvm_no_read;
+ 		config.reg_write = tb_switch_nvm_write;
+ 		config.root_only = true;
+ 	}
+diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
+index fa1672993b4c..048a7bcae5f9 100644
+--- a/drivers/tty/serdev/serdev-ttyport.c
++++ b/drivers/tty/serdev/serdev-ttyport.c
+@@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+ 					struct device *parent,
+ 					struct tty_driver *drv, int idx)
+ {
+-	const struct tty_port_client_operations *old_ops;
+ 	struct serdev_controller *ctrl;
+ 	struct serport *serport;
+ 	int ret;
+@@ -284,7 +283,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+ 
+ 	ctrl->ops = &ctrl_ops;
+ 
+-	old_ops = port->client_ops;
+ 	port->client_ops = &client_ops;
+ 	port->client_data = ctrl;
+ 
+@@ -297,7 +295,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
+ 
+ err_reset_data:
+ 	port->client_data = NULL;
+-	port->client_ops = old_ops;
++	port->client_ops = &tty_port_default_client_ops;
+ 	serdev_controller_put(ctrl);
+ 
+ 	return ERR_PTR(ret);
+@@ -312,8 +310,8 @@ int serdev_tty_port_unregister(struct tty_port *port)
+ 		return -ENODEV;
+ 
+ 	serdev_controller_remove(ctrl);
+-	port->client_ops = NULL;
+ 	port->client_data = NULL;
++	port->client_ops = &tty_port_default_client_ops;
+ 	serdev_controller_put(ctrl);
+ 
+ 	return 0;
+diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+index 435bec40dee6..2d5c3643e6a5 100644
+--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+@@ -375,7 +375,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
+ 		port.port.line = rc;
+ 
+ 	port.port.irq = irq_of_parse_and_map(np, 0);
+-	port.port.irqflags = IRQF_SHARED;
+ 	port.port.handle_irq = aspeed_vuart_handle_irq;
+ 	port.port.iotype = UPIO_MEM;
+ 	port.port.type = PORT_16550A;
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 69aaee5d7fe1..b9567ef843fc 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -177,7 +177,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
+ 	struct hlist_head *h;
+ 	struct hlist_node *n;
+ 	struct irq_info *i;
+-	int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
++	int ret;
+ 
+ 	mutex_lock(&hash_mutex);
+ 
+@@ -212,9 +212,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
+ 		INIT_LIST_HEAD(&up->list);
+ 		i->head = &up->list;
+ 		spin_unlock_irq(&i->lock);
+-		irq_flags |= up->port.irqflags;
+ 		ret = request_irq(up->port.irq, serial8250_interrupt,
+-				  irq_flags, up->port.name, i);
++				  up->port.irqflags, up->port.name, i);
+ 		if (ret < 0)
+ 			serial_do_unlink(i, up);
+ 	}
+diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
+index 2488de1c4bc4..8fedc075fb1e 100644
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -171,7 +171,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
+ 
+ 	port->type = type;
+ 	port->uartclk = clk;
+-	port->irqflags |= IRQF_SHARED;
+ 
+ 	if (of_property_read_bool(np, "no-loopback-test"))
+ 		port->flags |= UPF_SKIP_TEST;
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index aa4de6907f77..5a04d4ddca73 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2253,6 +2253,10 @@ int serial8250_do_startup(struct uart_port *port)
+ 		}
+ 	}
+ 
++	/* Check if we need to have shared IRQs */
++	if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
++		up->port.irqflags |= IRQF_SHARED;
++
+ 	if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
+ 		unsigned char iir1;
+ 		/*
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index f34520e9ad6e..936d401f20b9 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -490,7 +490,8 @@ static void atmel_stop_tx(struct uart_port *port)
+ 	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
+ 
+ 	if (atmel_uart_is_half_duplex(port))
+-		atmel_start_rx(port);
++		if (!atomic_read(&atmel_port->tasklet_shutdown))
++			atmel_start_rx(port);
+ 
+ }
+ 
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 672e97978279..4066cb2b79cb 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -608,7 +608,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
+ 
+ 	sport->tx_bytes = uart_circ_chars_pending(xmit);
+ 
+-	if (xmit->tail < xmit->head) {
++	if (xmit->tail < xmit->head || xmit->head == 0) {
+ 		sport->dma_tx_nents = 1;
+ 		sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
+ 	} else {
+diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
+index b3f7d1a1e97f..4458419f053b 100644
+--- a/drivers/tty/serial/qcom_geni_serial.c
++++ b/drivers/tty/serial/qcom_geni_serial.c
+@@ -85,7 +85,7 @@
+ #define DEF_FIFO_DEPTH_WORDS	16
+ #define DEF_TX_WM		2
+ #define DEF_FIFO_WIDTH_BITS	32
+-#define UART_CONSOLE_RX_WM	2
++#define UART_RX_WM		2
+ #define MAX_LOOPBACK_CFG	3
+ 
+ #ifdef CONFIG_CONSOLE_POLL
+@@ -101,10 +101,6 @@ struct qcom_geni_serial_port {
+ 	u32 tx_fifo_depth;
+ 	u32 tx_fifo_width;
+ 	u32 rx_fifo_depth;
+-	u32 tx_wm;
+-	u32 rx_wm;
+-	u32 rx_rfr;
+-	enum geni_se_xfer_mode xfer_mode;
+ 	bool setup;
+ 	int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
+ 	unsigned int baud;
+@@ -125,6 +121,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
+ static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
+ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
+ static void qcom_geni_serial_stop_rx(struct uart_port *uport);
++static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
+ 
+ static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
+ 					32000000, 48000000, 64000000, 80000000,
+@@ -226,7 +223,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
+ 	if (uart_console(uport)) {
+ 		mctrl |= TIOCM_CTS;
+ 	} else {
+-		geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
++		geni_ios = readl(uport->membase + SE_GENI_IOS);
+ 		if (!(geni_ios & IO2_DATA_IN))
+ 			mctrl |= TIOCM_CTS;
+ 	}
+@@ -244,7 +241,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
+ 
+ 	if (!(mctrl & TIOCM_RTS))
+ 		uart_manual_rfr = UART_MANUAL_RFR_EN | UART_RFR_NOT_READY;
+-	writel_relaxed(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
++	writel(uart_manual_rfr, uport->membase + SE_UART_MANUAL_RFR);
+ }
+ 
+ static const char *qcom_geni_serial_get_type(struct uart_port *uport)
+@@ -273,9 +270,6 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ 	unsigned int fifo_bits;
+ 	unsigned long timeout_us = 20000;
+ 
+-	/* Ensure polling is not re-ordered before the prior writes/reads */
+-	mb();
+-
+ 	if (uport->private_data) {
+ 		port = to_dev_port(uport, uport);
+ 		baud = port->baud;
+@@ -295,7 +289,7 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
+ 	 */
+ 	timeout_us = DIV_ROUND_UP(timeout_us, 10) * 10;
+ 	while (timeout_us) {
+-		reg = readl_relaxed(uport->membase + offset);
++		reg = readl(uport->membase + offset);
+ 		if ((bool)(reg & field) == set)
+ 			return true;
+ 		udelay(10);
+@@ -308,7 +302,7 @@ static void qcom_geni_serial_setup_tx(struct uart_port *uport, u32 xmit_size)
+ {
+ 	u32 m_cmd;
+ 
+-	writel_relaxed(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
++	writel(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
+ 	m_cmd = UART_START_TX << M_OPCODE_SHFT;
+ 	writel(m_cmd, uport->membase + SE_GENI_M_CMD0);
+ }
+@@ -321,13 +315,13 @@ static void qcom_geni_serial_poll_tx_done(struct uart_port *uport)
+ 	done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ 						M_CMD_DONE_EN, true);
+ 	if (!done) {
+-		writel_relaxed(M_GENI_CMD_ABORT, uport->membase +
++		writel(M_GENI_CMD_ABORT, uport->membase +
+ 						SE_GENI_M_CMD_CTRL_REG);
+ 		irq_clear |= M_CMD_ABORT_EN;
+ 		qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ 							M_CMD_ABORT_EN, true);
+ 	}
+-	writel_relaxed(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
++	writel(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ }
+ 
+ static void qcom_geni_serial_abort_rx(struct uart_port *uport)
+@@ -337,8 +331,8 @@ static void qcom_geni_serial_abort_rx(struct uart_port *uport)
+ 	writel(S_GENI_CMD_ABORT, uport->membase + SE_GENI_S_CMD_CTRL_REG);
+ 	qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+ 					S_GENI_CMD_ABORT, false);
+-	writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
+-	writel_relaxed(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
++	writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
++	writel(FORCE_DEFAULT, uport->membase + GENI_FORCE_DEFAULT_REG);
+ }
+ 
+ #ifdef CONFIG_CONSOLE_POLL
+@@ -347,19 +341,13 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
+ 	u32 rx_fifo;
+ 	u32 status;
+ 
+-	status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
+-	writel_relaxed(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+-
+-	status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
+-	writel_relaxed(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
++	status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
++	writel(status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ 
+-	/*
+-	 * Ensure the writes to clear interrupts is not re-ordered after
+-	 * reading the data.
+-	 */
+-	mb();
++	status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
++	writel(status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ 
+-	status = readl_relaxed(uport->membase + SE_GENI_RX_FIFO_STATUS);
++	status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ 	if (!(status & RX_FIFO_WC_MSK))
+ 		return NO_POLL_CHAR;
+ 
+@@ -370,15 +358,12 @@ static int qcom_geni_serial_get_char(struct uart_port *uport)
+ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
+ 							unsigned char c)
+ {
+-	struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+-
+-	writel_relaxed(port->tx_wm, uport->membase + SE_GENI_TX_WATERMARK_REG);
++	writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ 	qcom_geni_serial_setup_tx(uport, 1);
+ 	WARN_ON(!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ 						M_TX_FIFO_WATERMARK_EN, true));
+-	writel_relaxed(c, uport->membase + SE_GENI_TX_FIFOn);
+-	writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
+-							SE_GENI_M_IRQ_CLEAR);
++	writel(c, uport->membase + SE_GENI_TX_FIFOn);
++	writel(M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ 	qcom_geni_serial_poll_tx_done(uport);
+ }
+ #endif
+@@ -386,7 +371,7 @@ static void qcom_geni_serial_poll_put_char(struct uart_port *uport,
+ #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
+ static void qcom_geni_serial_wr_char(struct uart_port *uport, int ch)
+ {
+-	writel_relaxed(ch, uport->membase + SE_GENI_TX_FIFOn);
++	writel(ch, uport->membase + SE_GENI_TX_FIFOn);
+ }
+ 
+ static void
+@@ -405,7 +390,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
+ 			bytes_to_send++;
+ 	}
+ 
+-	writel_relaxed(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
++	writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
+ 	qcom_geni_serial_setup_tx(uport, bytes_to_send);
+ 	for (i = 0; i < count; ) {
+ 		size_t chars_to_write = 0;
+@@ -423,7 +408,7 @@ __qcom_geni_serial_console_write(struct uart_port *uport, const char *s,
+ 		chars_to_write = min_t(size_t, count - i, avail / 2);
+ 		uart_console_write(uport, s + i, chars_to_write,
+ 						qcom_geni_serial_wr_char);
+-		writel_relaxed(M_TX_FIFO_WATERMARK_EN, uport->membase +
++		writel(M_TX_FIFO_WATERMARK_EN, uport->membase +
+ 							SE_GENI_M_IRQ_CLEAR);
+ 		i += chars_to_write;
+ 	}
+@@ -438,6 +423,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
+ 	bool locked = true;
+ 	unsigned long flags;
+ 	u32 geni_status;
++	u32 irq_en;
+ 
+ 	WARN_ON(co->index < 0 || co->index >= GENI_UART_CONS_PORTS);
+ 
+@@ -451,7 +437,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
+ 	else
+ 		spin_lock_irqsave(&uport->lock, flags);
+ 
+-	geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
++	geni_status = readl(uport->membase + SE_GENI_STATUS);
+ 
+ 	/* Cancel the current write to log the fault */
+ 	if (!locked) {
+@@ -461,17 +447,22 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
+ 			geni_se_abort_m_cmd(&port->se);
+ 			qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ 							M_CMD_ABORT_EN, true);
+-			writel_relaxed(M_CMD_ABORT_EN, uport->membase +
++			writel(M_CMD_ABORT_EN, uport->membase +
+ 							SE_GENI_M_IRQ_CLEAR);
+ 		}
+-		writel_relaxed(M_CMD_CANCEL_EN, uport->membase +
+-							SE_GENI_M_IRQ_CLEAR);
++		writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ 	} else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) {
+ 		/*
+ 		 * It seems we can't interrupt existing transfers if all data
+ 		 * has been sent, in which case we need to look for done first.
+ 		 */
+ 		qcom_geni_serial_poll_tx_done(uport);
++
++		if (uart_circ_chars_pending(&uport->state->xmit)) {
++			irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
++			writel(irq_en | M_TX_FIFO_WATERMARK_EN,
++					uport->membase + SE_GENI_M_IRQ_EN);
++		}
+ 	}
+ 
+ 	__qcom_geni_serial_console_write(uport, s, count);
+@@ -556,29 +547,20 @@ static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
+ static void qcom_geni_serial_start_tx(struct uart_port *uport)
+ {
+ 	u32 irq_en;
+-	struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ 	u32 status;
+ 
+-	if (port->xfer_mode == GENI_SE_FIFO) {
+-		/*
+-		 * readl ensures reading & writing of IRQ_EN register
+-		 * is not re-ordered before checking the status of the
+-		 * Serial Engine.
+-		 */
+-		status = readl(uport->membase + SE_GENI_STATUS);
+-		if (status & M_GENI_CMD_ACTIVE)
+-			return;
++	status = readl(uport->membase + SE_GENI_STATUS);
++	if (status & M_GENI_CMD_ACTIVE)
++		return;
+ 
+-		if (!qcom_geni_serial_tx_empty(uport))
+-			return;
++	if (!qcom_geni_serial_tx_empty(uport))
++		return;
+ 
+-		irq_en = readl_relaxed(uport->membase +	SE_GENI_M_IRQ_EN);
+-		irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
++	irq_en = readl(uport->membase +	SE_GENI_M_IRQ_EN);
++	irq_en |= M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN;
+ 
+-		writel_relaxed(port->tx_wm, uport->membase +
+-						SE_GENI_TX_WATERMARK_REG);
+-		writel_relaxed(irq_en, uport->membase +	SE_GENI_M_IRQ_EN);
+-	}
++	writel(DEF_TX_WM, uport->membase + SE_GENI_TX_WATERMARK_REG);
++	writel(irq_en, uport->membase +	SE_GENI_M_IRQ_EN);
+ }
+ 
+ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
+@@ -587,35 +569,24 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
+ 	u32 status;
+ 	struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ 
+-	irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+-	irq_en &= ~M_CMD_DONE_EN;
+-	if (port->xfer_mode == GENI_SE_FIFO) {
+-		irq_en &= ~M_TX_FIFO_WATERMARK_EN;
+-		writel_relaxed(0, uport->membase +
+-				     SE_GENI_TX_WATERMARK_REG);
+-	}
+-	writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+-	status = readl_relaxed(uport->membase + SE_GENI_STATUS);
++	irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
++	irq_en &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
++	writel(0, uport->membase + SE_GENI_TX_WATERMARK_REG);
++	writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
++	status = readl(uport->membase + SE_GENI_STATUS);
+ 	/* Possible stop tx is called multiple times. */
+ 	if (!(status & M_GENI_CMD_ACTIVE))
+ 		return;
+ 
+-	/*
+-	 * Ensure cancel command write is not re-ordered before checking
+-	 * the status of the Primary Sequencer.
+-	 */
+-	mb();
+-
+ 	geni_se_cancel_m_cmd(&port->se);
+ 	if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ 						M_CMD_CANCEL_EN, true)) {
+ 		geni_se_abort_m_cmd(&port->se);
+ 		qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ 						M_CMD_ABORT_EN, true);
+-		writel_relaxed(M_CMD_ABORT_EN, uport->membase +
+-							SE_GENI_M_IRQ_CLEAR);
++		writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ 	}
+-	writel_relaxed(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
++	writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ }
+ 
+ static void qcom_geni_serial_start_rx(struct uart_port *uport)
+@@ -624,27 +595,19 @@ static void qcom_geni_serial_start_rx(struct uart_port *uport)
+ 	u32 status;
+ 	struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ 
+-	status = readl_relaxed(uport->membase + SE_GENI_STATUS);
++	status = readl(uport->membase + SE_GENI_STATUS);
+ 	if (status & S_GENI_CMD_ACTIVE)
+ 		qcom_geni_serial_stop_rx(uport);
+ 
+-	/*
+-	 * Ensure setup command write is not re-ordered before checking
+-	 * the status of the Secondary Sequencer.
+-	 */
+-	mb();
+-
+ 	geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
+ 
+-	if (port->xfer_mode == GENI_SE_FIFO) {
+-		irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
+-		irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
+-		writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
++	irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
++	irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
++	writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+ 
+-		irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+-		irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+-		writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+-	}
++	irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
++	irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
++	writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ }
+ 
+ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+@@ -652,34 +615,35 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+ 	u32 irq_en;
+ 	u32 status;
+ 	struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+-	u32 irq_clear = S_CMD_DONE_EN;
++	u32 s_irq_status;
+ 
+-	if (port->xfer_mode == GENI_SE_FIFO) {
+-		irq_en = readl_relaxed(uport->membase + SE_GENI_S_IRQ_EN);
+-		irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
+-		writel_relaxed(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
++	irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
++	irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
++	writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+ 
+-		irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+-		irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
+-		writel_relaxed(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+-	}
++	irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
++	irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
++	writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ 
+-	status = readl_relaxed(uport->membase + SE_GENI_STATUS);
++	status = readl(uport->membase + SE_GENI_STATUS);
+ 	/* It is possible that stop_rx is called multiple times. */
+ 	if (!(status & S_GENI_CMD_ACTIVE))
+ 		return;
+ 
++	geni_se_cancel_s_cmd(&port->se);
++	qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
++					S_CMD_CANCEL_EN, true);
+ 	/*
+-	 * Ensure cancel command write is not re-ordered before checking
+-	 * the status of the Secondary Sequencer.
++	 * If a timeout occurs, the secondary engine remains active
++	 * and the abort sequence is executed.
+ 	 */
+-	mb();
++	s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
++	/* Flush the Rx buffer */
++	if (s_irq_status & S_RX_FIFO_LAST_EN)
++		qcom_geni_serial_handle_rx(uport, true);
++	writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ 
+-	geni_se_cancel_s_cmd(&port->se);
+-	qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
+-					S_GENI_CMD_CANCEL, false);
+-	status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+-	writel_relaxed(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
++	status = readl(uport->membase + SE_GENI_STATUS);
+ 	if (status & S_GENI_CMD_ACTIVE)
+ 		qcom_geni_serial_abort_rx(uport);
+ }
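
The stop_rx rework above replaces the old poll on SE_GENI_S_CMD_CTRL_REG with the documented sequence: issue the cancel, poll S_CMD_CANCEL_EN in the secondary IRQ status, flush RX data that already landed if S_RX_FIFO_LAST_EN is set, and escalate to an abort only if the sequencer is still active. All of it leans on the same poll-a-bit-with-timeout idiom, sketched below; the retry budget and delay step are illustrative assumptions, not the driver's actual timeout math.

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>

/*
 * Hedged sketch of the poll-bit idiom behind the cancel/abort
 * escalation above: spin on a status register until a bit reaches the
 * wanted state or the retry budget runs out.  The 1000 iterations and
 * the 10us step are assumptions for illustration.
 */
static bool poll_bit(void __iomem *reg, u32 bit, bool set)
{
	int tries;

	for (tries = 0; tries < 1000; tries++) {
		u32 val = readl(reg);

		if (!!(val & bit) == set)
			return true;
		udelay(10);
	}
	return false;	/* caller escalates, e.g. cancel -> abort */
}
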
+@@ -693,7 +657,7 @@ static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop)
+ 	u32 total_bytes;
+ 	struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ 
+-	status = readl_relaxed(uport->membase +	SE_GENI_RX_FIFO_STATUS);
++	status = readl(uport->membase +	SE_GENI_RX_FIFO_STATUS);
+ 	word_cnt = status & RX_FIFO_WC_MSK;
+ 	last_word_partial = status & RX_LAST;
+ 	last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
+@@ -719,10 +683,11 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
+ 	size_t pending;
+ 	int i;
+ 	u32 status;
++	u32 irq_en;
+ 	unsigned int chunk;
+ 	int tail;
+ 
+-	status = readl_relaxed(uport->membase + SE_GENI_TX_FIFO_STATUS);
++	status = readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
+ 
+ 	/* Complete the current tx command before taking newly added data */
+ 	if (active)
+@@ -747,6 +712,11 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
+ 	if (!port->tx_remaining) {
+ 		qcom_geni_serial_setup_tx(uport, pending);
+ 		port->tx_remaining = pending;
++
++		irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
++		if (!(irq_en & M_TX_FIFO_WATERMARK_EN))
++			writel(irq_en | M_TX_FIFO_WATERMARK_EN,
++					uport->membase + SE_GENI_M_IRQ_EN);
+ 	}
+ 
+ 	remaining = chunk;
+@@ -770,7 +740,23 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
+ 	}
+ 
+ 	xmit->tail = tail & (UART_XMIT_SIZE - 1);
++
++	/*
++	 * The tx fifo watermark is level triggered and latched. Even though
++	 * we cleared it in qcom_geni_serial_isr(), it will have reasserted
++	 * by now, so we must clear it again here after our writes.
++	 */
++	writel(M_TX_FIFO_WATERMARK_EN,
++			uport->membase + SE_GENI_M_IRQ_CLEAR);
++
+ out_write_wakeup:
++	if (!port->tx_remaining) {
++		irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
++		if (irq_en & M_TX_FIFO_WATERMARK_EN)
++			writel(irq_en & ~M_TX_FIFO_WATERMARK_EN,
++					uport->membase + SE_GENI_M_IRQ_EN);
++	}
++
+ 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ 		uart_write_wakeup(uport);
+ }
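
Taken together, the handle_tx hunks implement one policy for the TX FIFO watermark interrupt, which this hardware latches and re-asserts whenever the FIFO sits below the level: enable it when a new transfer is set up, clear the latched status after the FIFO writes (it has re-asserted by then), and disable it in out_write_wakeup once nothing remains to send. A minimal sketch of that read-modify-write policy; the register offsets and the bit position are assumptions for illustration:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/io.h>

#define IRQ_EN		0x10		/* illustrative offsets */
#define IRQ_CLEAR	0x14
#define TX_WM_EN	BIT(30)		/* illustrative bit position */

static void tx_watermark_update(void __iomem *base, bool have_data)
{
	u32 en = readl(base + IRQ_EN);

	if (have_data && !(en & TX_WM_EN))
		writel(en | TX_WM_EN, base + IRQ_EN);
	else if (!have_data && (en & TX_WM_EN))
		writel(en & ~TX_WM_EN, base + IRQ_EN);

	/* level triggered and latched: status re-asserts, clear it last */
	writel(TX_WM_EN, base + IRQ_CLEAR);
}
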
+@@ -791,12 +777,12 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
+ 		return IRQ_NONE;
+ 
+ 	spin_lock_irqsave(&uport->lock, flags);
+-	m_irq_status = readl_relaxed(uport->membase + SE_GENI_M_IRQ_STATUS);
+-	s_irq_status = readl_relaxed(uport->membase + SE_GENI_S_IRQ_STATUS);
+-	geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
+-	m_irq_en = readl_relaxed(uport->membase + SE_GENI_M_IRQ_EN);
+-	writel_relaxed(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
+-	writel_relaxed(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
++	m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
++	s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
++	geni_status = readl(uport->membase + SE_GENI_STATUS);
++	m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
++	writel(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
++	writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ 
+ 	if (WARN_ON(m_irq_status & M_ILLEGAL_CMD_EN))
+ 		goto out_unlock;
+@@ -806,8 +792,7 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
+ 		tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ 	}
+ 
+-	if (m_irq_status & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN) &&
+-	    m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
++	if (m_irq_status & m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
+ 		qcom_geni_serial_handle_tx(uport, m_irq_status & M_CMD_DONE_EN,
+ 					geni_status & M_GENI_CMD_ACTIVE);
+ 
+@@ -842,17 +827,6 @@ static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
+ 		(port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
+ }
+ 
+-static void set_rfr_wm(struct qcom_geni_serial_port *port)
+-{
+-	/*
+-	 * Set RFR (Flow off) to FIFO_DEPTH - 2.
+-	 * RX WM level at 10% RX_FIFO_DEPTH.
+-	 * TX WM level at 10% TX_FIFO_DEPTH.
+-	 */
+-	port->rx_rfr = port->rx_fifo_depth - 2;
+-	port->rx_wm = UART_CONSOLE_RX_WM;
+-	port->tx_wm = DEF_TX_WM;
+-}
+ 
+ static void qcom_geni_serial_shutdown(struct uart_port *uport)
+ {
+@@ -891,21 +865,19 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
+ 
+ 	get_tx_fifo_size(port);
+ 
+-	set_rfr_wm(port);
+-	writel_relaxed(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
++	writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
+ 	/*
+ 	 * Make an unconditional cancel on the main sequencer to reset
+ 	 * it else we could end up in data loss scenarios.
+ 	 */
+-	port->xfer_mode = GENI_SE_FIFO;
+ 	if (uart_console(uport))
+ 		qcom_geni_serial_poll_tx_done(uport);
+ 	geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
+ 						false, true, false);
+ 	geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
+ 						false, false, true);
+-	geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
+-	geni_se_select_mode(&port->se, port->xfer_mode);
++	geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
++	geni_se_select_mode(&port->se, GENI_SE_FIFO);
+ 	if (!uart_console(uport)) {
+ 		port->rx_fifo = devm_kcalloc(uport->dev,
+ 			port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
+@@ -996,10 +968,10 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ 	ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
+ 
+ 	/* parity */
+-	tx_trans_cfg = readl_relaxed(uport->membase + SE_UART_TX_TRANS_CFG);
+-	tx_parity_cfg = readl_relaxed(uport->membase + SE_UART_TX_PARITY_CFG);
+-	rx_trans_cfg = readl_relaxed(uport->membase + SE_UART_RX_TRANS_CFG);
+-	rx_parity_cfg = readl_relaxed(uport->membase + SE_UART_RX_PARITY_CFG);
++	tx_trans_cfg = readl(uport->membase + SE_UART_TX_TRANS_CFG);
++	tx_parity_cfg = readl(uport->membase + SE_UART_TX_PARITY_CFG);
++	rx_trans_cfg = readl(uport->membase + SE_UART_RX_TRANS_CFG);
++	rx_parity_cfg = readl(uport->membase + SE_UART_RX_PARITY_CFG);
+ 	if (termios->c_cflag & PARENB) {
+ 		tx_trans_cfg |= UART_TX_PAR_EN;
+ 		rx_trans_cfg |= UART_RX_PAR_EN;
+@@ -1055,17 +1027,17 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
+ 		uart_update_timeout(uport, termios->c_cflag, baud);
+ 
+ 	if (!uart_console(uport))
+-		writel_relaxed(port->loopback,
++		writel(port->loopback,
+ 				uport->membase + SE_UART_LOOPBACK_CFG);
+-	writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+-	writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+-	writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
+-	writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
+-	writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+-	writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
+-	writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+-	writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
+-	writel_relaxed(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
++	writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
++	writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
++	writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
++	writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
++	writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
++	writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
++	writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
++	writel(ser_clk_cfg, uport->membase + GENI_SER_M_CLK_CFG);
++	writel(ser_clk_cfg, uport->membase + GENI_SER_S_CLK_CFG);
+ out_restart_rx:
+ 	qcom_geni_serial_start_rx(uport);
+ }
+@@ -1156,13 +1128,13 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
+ 	geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
+ 	geni_se_select_mode(&se, GENI_SE_FIFO);
+ 
+-	writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+-	writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+-	writel_relaxed(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
+-	writel_relaxed(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
+-	writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+-	writel_relaxed(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
+-	writel_relaxed(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
++	writel(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
++	writel(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
++	writel(rx_trans_cfg, uport->membase + SE_UART_RX_TRANS_CFG);
++	writel(rx_parity_cfg, uport->membase + SE_UART_RX_PARITY_CFG);
++	writel(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
++	writel(bits_per_char, uport->membase + SE_UART_RX_WORD_LEN);
++	writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+ 
+ 	dev->con->write = qcom_geni_serial_earlycon_write;
+ 	dev->con->setup = NULL;
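
Most of the churn in the qcom_geni_serial diff above is a bulk conversion from readl_relaxed()/writel_relaxed() to readl()/writel(). The relaxed accessors carry no ordering guarantees against other memory accesses, which is why the old code needed explicit mb() calls before the cancel and setup commands; the full accessors include those barriers, so the mb() calls can be dropped along with the comments that justified them. A sketch of the two equivalent forms, with an illustrative register offset:

#include <linux/types.h>
#include <linux/io.h>

#define STATUS_REG	0x40	/* illustrative offset */

/* Before: relaxed MMIO read plus an explicit barrier. */
static u32 read_status_relaxed(void __iomem *base)
{
	u32 s = readl_relaxed(base + STATUS_REG);

	mb();	/* order the read against the command write that follows */
	return s;
}

/* After: readl() already orders against surrounding accesses. */
static u32 read_status(void __iomem *base)
{
	return readl(base + STATUS_REG);
}
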
+diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
+index c699d41a2a48..fbacb00c2601 100644
+--- a/drivers/tty/tty_port.c
++++ b/drivers/tty/tty_port.c
+@@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port)
+ 	}
+ }
+ 
+-static const struct tty_port_client_operations default_client_ops = {
++const struct tty_port_client_operations tty_port_default_client_ops = {
+ 	.receive_buf = tty_port_default_receive_buf,
+ 	.write_wakeup = tty_port_default_wakeup,
+ };
++EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
+ 
+ void tty_port_init(struct tty_port *port)
+ {
+@@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port)
+ 	spin_lock_init(&port->lock);
+ 	port->close_delay = (50 * HZ) / 100;
+ 	port->closing_wait = (3000 * HZ) / 100;
+-	port->client_ops = &default_client_ops;
++	port->client_ops = &tty_port_default_client_ops;
+ 	kref_init(&port->kref);
+ }
+ EXPORT_SYMBOL(tty_port_init);
+diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
+index 07496c711d7d..3ac4fe549c2e 100644
+--- a/drivers/tty/vt/selection.c
++++ b/drivers/tty/vt/selection.c
+@@ -27,6 +27,8 @@
+ #include <linux/console.h>
+ #include <linux/tty_flip.h>
+ 
++#include <linux/sched/signal.h>
++
+ /* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
+ #define isspace(c)	((c) == ' ')
+ 
+@@ -337,6 +339,7 @@ int paste_selection(struct tty_struct *tty)
+ 	unsigned int count;
+ 	struct  tty_ldisc *ld;
+ 	DECLARE_WAITQUEUE(wait, current);
++	int ret = 0;
+ 
+ 	console_lock();
+ 	poke_blanked_console();
+@@ -350,6 +353,10 @@ int paste_selection(struct tty_struct *tty)
+ 	add_wait_queue(&vc->paste_wait, &wait);
+ 	while (sel_buffer && sel_buffer_lth > pasted) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
++		if (signal_pending(current)) {
++			ret = -EINTR;
++			break;
++		}
+ 		if (tty_throttled(tty)) {
+ 			schedule();
+ 			continue;
+@@ -365,5 +372,5 @@ int paste_selection(struct tty_struct *tty)
+ 
+ 	tty_buffer_unlock_exclusive(&vc->port);
+ 	tty_ldisc_deref(ld);
+-	return 0;
++	return ret;
+ }
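
The selection.c change above converts an uninterruptible paste loop into the standard interruptible wait: mark the task TASK_INTERRUPTIBLE, test signal_pending() before sleeping, and return -EINTR so a stuck paste can be killed. The canonical shape of that loop, with the condition and wait queue as placeholders:

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

/*
 * Canonical interruptible wait loop, as adopted by paste_selection()
 * above; 'wq' and 'cond' are placeholders for the real wait queue and
 * wake-up condition.
 */
static int wait_for_cond(wait_queue_head_t *wq, bool (*cond)(void))
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(wq, &wait);
	while (!cond()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			ret = -EINTR;	/* a signal interrupted the wait */
			break;
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
	return ret;
}
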
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index d673e3592662..ddaecb1bd9fd 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -936,10 +936,21 @@ static void flush_scrollback(struct vc_data *vc)
+ 	WARN_CONSOLE_UNLOCKED();
+ 
+ 	set_origin(vc);
+-	if (vc->vc_sw->con_flush_scrollback)
++	if (vc->vc_sw->con_flush_scrollback) {
+ 		vc->vc_sw->con_flush_scrollback(vc);
+-	else
++	} else if (con_is_visible(vc)) {
++		/*
++		 * When no con_flush_scrollback method is provided then the
++		 * legacy way for flushing the scrollback buffer is to use
++		 * a side effect of the con_switch method. We do it only on
++		 * the foreground console as background consoles have no
++		 * scrollback buffers in that case and we obviously don't
++		 * want to switch to them.
++		 */
++		hide_cursor(vc);
+ 		vc->vc_sw->con_switch(vc);
++		set_cursor(vc);
++	}
+ }
+ 
+ /*
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index 73cdc0d633dd..2bb6de89b029 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -876,15 +876,20 @@ int vt_ioctl(struct tty_struct *tty,
+ 			return -EINVAL;
+ 
+ 		for (i = 0; i < MAX_NR_CONSOLES; i++) {
++			struct vc_data *vcp;
++
+ 			if (!vc_cons[i].d)
+ 				continue;
+ 			console_lock();
+-			if (v.v_vlin)
+-				vc_cons[i].d->vc_scan_lines = v.v_vlin;
+-			if (v.v_clin)
+-				vc_cons[i].d->vc_font.height = v.v_clin;
+-			vc_cons[i].d->vc_resize_user = 1;
+-			vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
++			vcp = vc_cons[i].d;
++			if (vcp) {
++				if (v.v_vlin)
++					vcp->vc_scan_lines = v.v_vlin;
++				if (v.v_clin)
++					vcp->vc_font.height = v.v_clin;
++				vcp->vc_resize_user = 1;
++				vc_resize(vcp, v.v_cols, v.v_rows);
++			}
+ 			console_unlock();
+ 		}
+ 		break;
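
The vt_ioctl hunk above fixes a time-of-check/time-of-use race: vc_cons[i].d was tested before console_lock() was taken, so the console could be freed in between. The fix reloads the pointer after acquiring the lock and keeps the pre-lock test only as a cheap skip. The general idiom, with hypothetical names throughout:

#define NR_OBJS 63

struct obj;
extern struct obj *objs[NR_OBJS];	/* torn down under the lock */
extern void lock(void);
extern void unlock(void);
extern void use_obj(struct obj *o);

void for_each_live_obj(void)
{
	int i;

	for (i = 0; i < NR_OBJS; i++) {
		struct obj *o;

		if (!objs[i])		/* cheap, possibly stale pre-check */
			continue;
		lock();
		o = objs[i];		/* reload under the lock */
		if (o)
			use_obj(o);	/* safe against concurrent teardown */
		unlock();
	}
}
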
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 0bf0e62bede3..2025261e97a1 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -256,6 +256,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 		struct usb_host_interface *ifp, int num_ep,
+ 		unsigned char *buffer, int size)
+ {
++	struct usb_device *udev = to_usb_device(ddev);
+ 	unsigned char *buffer0 = buffer;
+ 	struct usb_endpoint_descriptor *d;
+ 	struct usb_host_endpoint *endpoint;
+@@ -297,6 +298,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
+ 		goto skip_to_next_endpoint_or_interface_descriptor;
+ 	}
+ 
++	/* Ignore blacklisted endpoints */
++	if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) {
++		if (usb_endpoint_is_blacklisted(udev, ifp, d)) {
++			dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n",
++					cfgno, inum, asnum,
++					d->bEndpointAddress);
++			goto skip_to_next_endpoint_or_interface_descriptor;
++		}
++	}
++
+ 	endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
+ 	++ifp->desc.bNumEndpoints;
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 6ab4ca1d9ae1..27486b0a027a 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -36,7 +36,9 @@
+ #include "otg_whitelist.h"
+ 
+ #define USB_VENDOR_GENESYS_LOGIC		0x05e3
++#define USB_VENDOR_SMSC				0x0424
+ #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND	0x01
++#define HUB_QUIRK_DISABLE_AUTOSUSPEND		0x02
+ 
+ #define USB_TP_TRANSMISSION_DELAY	40	/* ns */
+ #define USB_TP_TRANSMISSION_DELAY_MAX	65535	/* ns */
+@@ -1190,11 +1192,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ #ifdef CONFIG_PM
+ 			udev->reset_resume = 1;
+ #endif
+-			/* Don't set the change_bits when the device
+-			 * was powered off.
+-			 */
+-			if (test_bit(port1, hub->power_bits))
+-				set_bit(port1, hub->change_bits);
+ 
+ 		} else {
+ 			/* The power session is gone; tell hub_wq */
+@@ -1700,6 +1697,10 @@ static void hub_disconnect(struct usb_interface *intf)
+ 	kfree(hub->buffer);
+ 
+ 	pm_suspend_ignore_children(&intf->dev, false);
++
++	if (hub->quirk_disable_autosuspend)
++		usb_autopm_put_interface(intf);
++
+ 	kref_put(&hub->kref, hub_release);
+ }
+ 
+@@ -1830,6 +1831,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
+ 		hub->quirk_check_port_auto_suspend = 1;
+ 
++	if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
++		hub->quirk_disable_autosuspend = 1;
++		usb_autopm_get_interface(intf);
++	}
++
+ 	if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
+ 		return 0;
+ 
+@@ -5410,6 +5416,10 @@ out_hdev_lock:
+ }
+ 
+ static const struct usb_device_id hub_id_table[] = {
++    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
++      .idVendor = USB_VENDOR_SMSC,
++      .bInterfaceClass = USB_CLASS_HUB,
++      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+     { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ 			| USB_DEVICE_ID_MATCH_INT_CLASS,
+       .idVendor = USB_VENDOR_GENESYS_LOGIC,
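
The SMSC entry added to hub_id_table above works through a simple runtime-PM invariant: probe takes a usage reference with usb_autopm_get_interface() so the hub can never autosuspend while bound, and disconnect drops it. A hedged sketch of that balanced get/put (the real hunk ignores the get's return value; checking it is shown here for completeness):

#include <linux/usb.h>

static int my_probe(struct usb_interface *intf)
{
	int ret;

	ret = usb_autopm_get_interface(intf);	/* pin the interface active */
	if (ret)
		return ret;
	/* ... normal probe setup ... */
	return 0;
}

static void my_disconnect(struct usb_interface *intf)
{
	/* ... normal teardown ... */
	usb_autopm_put_interface(intf);	/* balance the probe-time get */
}
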
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index 4accfb63f7dc..d0bbbd76ba8e 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -61,6 +61,7 @@ struct usb_hub {
+ 	unsigned		quiescing:1;
+ 	unsigned		disconnected:1;
+ 	unsigned		in_reset:1;
++	unsigned		quirk_disable_autosuspend:1;
+ 
+ 	unsigned		quirk_check_port_auto_suspend:1;
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 6b6413073584..2b24336a72e5 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -354,6 +354,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0904, 0x6103), .driver_info =
+ 			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ 
++	/* Sound Devices USBPre2 */
++	{ USB_DEVICE(0x0926, 0x0202), .driver_info =
++			USB_QUIRK_ENDPOINT_BLACKLIST },
++
+ 	/* Keytouch QWERTY Panel keyboard */
+ 	{ USB_DEVICE(0x0926, 0x3333), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+@@ -445,6 +449,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* INTEL VALUE SSD */
+ 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* novation SoundControl XL */
++	{ USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	{ }  /* terminating entry must be last */
+ };
+ 
+@@ -472,6 +479,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+ 	{ }  /* terminating entry must be last */
+ };
+ 
++/*
++ * Entries for blacklisted endpoints that should be ignored when parsing
++ * configuration descriptors.
++ *
++ * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
++ */
++static const struct usb_device_id usb_endpoint_blacklist[] = {
++	{ USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
++	{ }
++};
++
++bool usb_endpoint_is_blacklisted(struct usb_device *udev,
++		struct usb_host_interface *intf,
++		struct usb_endpoint_descriptor *epd)
++{
++	const struct usb_device_id *id;
++	unsigned int address;
++
++	for (id = usb_endpoint_blacklist; id->match_flags; ++id) {
++		if (!usb_match_device(udev, id))
++			continue;
++
++		if (!usb_match_one_id_intf(udev, intf, id))
++			continue;
++
++		address = id->driver_info;
++		if (address == epd->bEndpointAddress)
++			return true;
++	}
++
++	return false;
++}
++
+ static bool usb_match_any_interface(struct usb_device *udev,
+ 				    const struct usb_device_id *id)
+ {
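
usb_endpoint_is_blacklisted() above reuses struct usb_device_id as a match table and carries the offending endpoint address in driver_info, so one walker serves any number of devices. A self-contained userspace sketch of the same table-driven match, seeded with the Sound Devices USBPre2 entry from the hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ep_quirk {
	uint16_t vid, pid;
	uint8_t  ifnum;
	uint8_t  ep_addr;	/* blacklisted bEndpointAddress */
};

static const struct ep_quirk quirks[] = {
	{ 0x0926, 0x0202, 1, 0x85 },	/* Sound Devices USBPre2 */
	{ 0 }
};

static bool ep_is_blacklisted(uint16_t vid, uint16_t pid,
			      uint8_t ifnum, uint8_t ep_addr)
{
	const struct ep_quirk *q;

	for (q = quirks; q->vid; q++)
		if (q->vid == vid && q->pid == pid &&
		    q->ifnum == ifnum && q->ep_addr == ep_addr)
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", ep_is_blacklisted(0x0926, 0x0202, 1, 0x85)); /* 1 */
	return 0;
}
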
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index d95a5358f73d..c0df5a468d78 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *);
+ extern void usb_detect_quirks(struct usb_device *udev);
+ extern void usb_detect_interface_quirks(struct usb_device *udev);
+ extern void usb_release_quirk_list(void);
++extern bool usb_endpoint_is_blacklisted(struct usb_device *udev,
++		struct usb_host_interface *intf,
++		struct usb_endpoint_descriptor *epd);
+ extern int usb_remove_device(struct usb_device *udev);
+ 
+ extern int usb_get_device_descriptor(struct usb_device *dev,
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 17f3e7b4d4fe..d8424834902d 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -1004,11 +1004,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
+ 	else
+ 		packets = 1;	/* send one packet if length is zero. */
+ 
+-	if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
+-		dev_err(hsotg->dev, "req length > maxpacket*mc\n");
+-		return;
+-	}
+-
+ 	if (dir_in && index != 0)
+ 		if (hs_ep->isochronous)
+ 			epsize = DXEPTSIZ_MC(packets);
+@@ -1312,6 +1307,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
+ 	req->actual = 0;
+ 	req->status = -EINPROGRESS;
+ 
++	/* Don't queue ISOC request if length greater than mps*mc */
++	if (hs_ep->isochronous &&
++	    req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
++		dev_err(hs->dev, "req length > maxpacket*mc\n");
++		return -EINVAL;
++	}
++
+ 	/* In DDMA mode for ISOC's don't queue request if length greater
+ 	 * than descriptor limits.
+ 	 */
+@@ -1542,6 +1544,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
+ 	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
+ 	struct dwc2_hsotg_ep *ep;
+ 	__le16 reply;
++	u16 status;
+ 	int ret;
+ 
+ 	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
+@@ -1553,11 +1556,10 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
+ 
+ 	switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ 	case USB_RECIP_DEVICE:
+-		/*
+-		 * bit 0 => self powered
+-		 * bit 1 => remote wakeup
+-		 */
+-		reply = cpu_to_le16(0);
++		status = 1 << USB_DEVICE_SELF_POWERED;
++		status |= hsotg->remote_wakeup_allowed <<
++			  USB_DEVICE_REMOTE_WAKEUP;
++		reply = cpu_to_le16(status);
+ 		break;
+ 
+ 	case USB_RECIP_INTERFACE:
+@@ -1668,7 +1670,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
+ 	case USB_RECIP_DEVICE:
+ 		switch (wValue) {
+ 		case USB_DEVICE_REMOTE_WAKEUP:
+-			hsotg->remote_wakeup_allowed = 1;
++			if (set)
++				hsotg->remote_wakeup_allowed = 1;
++			else
++				hsotg->remote_wakeup_allowed = 0;
+ 			break;
+ 
+ 		case USB_DEVICE_TEST_MODE:
+@@ -1678,16 +1683,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
+ 				return -EINVAL;
+ 
+ 			hsotg->test_mode = wIndex >> 8;
+-			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
+-			if (ret) {
+-				dev_err(hsotg->dev,
+-					"%s: failed to send reply\n", __func__);
+-				return ret;
+-			}
+ 			break;
+ 		default:
+ 			return -ENOENT;
+ 		}
++
++		ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
++		if (ret) {
++			dev_err(hsotg->dev,
++				"%s: failed to send reply\n", __func__);
++			return ret;
++		}
+ 		break;
+ 
+ 	case USB_RECIP_ENDPOINT:
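
The dwc2 GET_STATUS fix above replaces a hardwired zero reply with the real status word: bit 0 reports self-powered, bit 1 reports whether remote wakeup is currently allowed, and the companion req_feature change makes that flag track both SET_FEATURE and CLEAR_FEATURE. The encoding in isolation:

#include <stdint.h>
#include <stdio.h>

#define USB_DEVICE_SELF_POWERED   0	/* feature selectors double as */
#define USB_DEVICE_REMOTE_WAKEUP  1	/* bit positions in the status */

static uint16_t device_status(int self_powered, int wakeup_allowed)
{
	uint16_t status = 0;

	status |= (uint16_t)(self_powered << USB_DEVICE_SELF_POWERED);
	status |= (uint16_t)(wakeup_allowed << USB_DEVICE_REMOTE_WAKEUP);
	return status;	/* the driver sends this via cpu_to_le16() */
}

int main(void)
{
	printf("0x%04x\n", device_status(1, 1));	/* prints 0x0003 */
	return 0;
}
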
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index a6e682a000fc..430cfd620854 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2224,7 +2224,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
+ 	if (event->status & DEPEVT_STATUS_SHORT && !chain)
+ 		return 1;
+ 
+-	if (event->status & DEPEVT_STATUS_IOC)
++	if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
++	    (trb->ctrl & DWC3_TRB_CTRL_LST))
+ 		return 1;
+ 
+ 	return 0;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 33115e19756c..fea7c7e0143f 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -437,12 +437,10 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
+ 		val = CONFIG_USB_GADGET_VBUS_DRAW;
+ 	if (!val)
+ 		return 0;
+-	switch (speed) {
+-	case USB_SPEED_SUPER:
+-		return DIV_ROUND_UP(val, 8);
+-	default:
++	if (speed < USB_SPEED_SUPER)
+ 		return DIV_ROUND_UP(val, 2);
+-	}
++	else
++		return DIV_ROUND_UP(val, 8);
+ }
+ 
+ static int config_buf(struct usb_configuration *config,
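
encode_bMaxPower() above is a units problem: bMaxPower is expressed in 2 mA units up through high speed but in 8 mA units at SuperSpeed and later. Rewriting the switch as a speed comparison makes USB_SPEED_SUPER_PLUS land in the 8 mA branch too, which the old default case got wrong. A small standalone check of the arithmetic; the enum order mirrors the kernel's usb_device_speed:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

enum speed { LOW, FULL, HIGH, WIRELESS, SUPER, SUPER_PLUS };

static unsigned int encode_bMaxPower(enum speed s, unsigned int mA)
{
	if (s < SUPER)
		return DIV_ROUND_UP(mA, 2);	/* 2 mA units */
	return DIV_ROUND_UP(mA, 8);		/* 8 mA units at SS+ */
}

int main(void)
{
	printf("%u\n", encode_bMaxPower(HIGH, 500));	/* 250 */
	printf("%u\n", encode_bMaxPower(SUPER, 900));	/* 113 */
	return 0;
}
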
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 9772c0de59b7..a024230f00e2 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = {
+ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 				     u16 wLength)
+ {
++	struct xhci_port_cap *port_cap = NULL;
+ 	int i, ssa_count;
+ 	u32 temp;
+ 	u16 desc_size, ssp_cap_size, ssa_size = 0;
+@@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 	ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
+ 
+ 	/* does xhci support USB 3.1 Enhanced SuperSpeed */
+-	if (xhci->usb3_rhub.min_rev >= 0x01) {
++	for (i = 0; i < xhci->num_port_caps; i++) {
++		if (xhci->port_caps[i].maj_rev == 0x03 &&
++		    xhci->port_caps[i].min_rev >= 0x01) {
++			usb3_1 = true;
++			port_cap = &xhci->port_caps[i];
++			break;
++		}
++	}
++
++	if (usb3_1) {
+ 		/* does xhci provide a PSI table for SSA speed attributes? */
+-		if (xhci->usb3_rhub.psi_count) {
++		if (port_cap->psi_count) {
+ 			/* two SSA entries for each unique PSI ID, RX and TX */
+-			ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
++			ssa_count = port_cap->psi_uid_count * 2;
+ 			ssa_size = ssa_count * sizeof(u32);
+ 			ssp_cap_size -= 16; /* skip copying the default SSA */
+ 		}
+ 		desc_size += ssp_cap_size;
+-		usb3_1 = true;
+ 	}
+ 	memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
+ 
+@@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 	}
+ 
+ 	/* If PSI table exists, add the custom speed attributes from it */
+-	if (usb3_1 && xhci->usb3_rhub.psi_count) {
++	if (usb3_1 && port_cap->psi_count) {
+ 		u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
+ 		int offset;
+ 
+@@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 
+ 		/* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
+ 		bm_attrib = (ssa_count - 1) & 0x1f;
+-		bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
++		bm_attrib |= (port_cap->psi_uid_count - 1) << 5;
+ 		put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
+ 
+ 		if (wLength < desc_size + ssa_size)
+@@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ 		 * USB 3.1 requires two SSA entries (RX and TX) for every link
+ 		 */
+ 		offset = desc_size;
+-		for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
+-			psi = xhci->usb3_rhub.psi[i];
++		for (i = 0; i < port_cap->psi_count; i++) {
++			psi = port_cap->psi[i];
+ 			psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
+ 			psi_exp = XHCI_EXT_PORT_PSIE(psi);
+ 			psi_mant = XHCI_EXT_PORT_PSIM(psi);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 82ce6d8b708d..9e87c282a743 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 	/* Allow 3 retries for everything but isoc, set CErr = 3 */
+ 	if (!usb_endpoint_xfer_isoc(&ep->desc))
+ 		err_count = 3;
+-	/* Some devices get this wrong */
+-	if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
+-		max_packet = 512;
++	/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
++	if (usb_endpoint_xfer_bulk(&ep->desc)) {
++		if (udev->speed == USB_SPEED_HIGH)
++			max_packet = 512;
++		if (udev->speed == USB_SPEED_FULL) {
++			max_packet = rounddown_pow_of_two(max_packet);
++			max_packet = clamp_val(max_packet, 8, 64);
++		}
++	}
+ 	/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
+ 	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
+ 		avg_trb_len = 8;
+@@ -1909,17 +1915,17 @@ no_bw:
+ 	xhci->usb3_rhub.num_ports = 0;
+ 	xhci->num_active_eps = 0;
+ 	kfree(xhci->usb2_rhub.ports);
+-	kfree(xhci->usb2_rhub.psi);
+ 	kfree(xhci->usb3_rhub.ports);
+-	kfree(xhci->usb3_rhub.psi);
+ 	kfree(xhci->hw_ports);
+ 	kfree(xhci->rh_bw);
+ 	kfree(xhci->ext_caps);
++	for (i = 0; i < xhci->num_port_caps; i++)
++		kfree(xhci->port_caps[i].psi);
++	kfree(xhci->port_caps);
++	xhci->num_port_caps = 0;
+ 
+ 	xhci->usb2_rhub.ports = NULL;
+-	xhci->usb2_rhub.psi = NULL;
+ 	xhci->usb3_rhub.ports = NULL;
+-	xhci->usb3_rhub.psi = NULL;
+ 	xhci->hw_ports = NULL;
+ 	xhci->rh_bw = NULL;
+ 	xhci->ext_caps = NULL;
+@@ -2120,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 	u8 major_revision, minor_revision;
+ 	struct xhci_hub *rhub;
+ 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
++	struct xhci_port_cap *port_cap;
+ 
+ 	temp = readl(addr);
+ 	major_revision = XHCI_EXT_PORT_MAJOR(temp);
+@@ -2154,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 		/* WTF? "Valid values are ‘1’ to MaxPorts" */
+ 		return;
+ 
+-	rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
+-	if (rhub->psi_count) {
+-		rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
+-				    GFP_KERNEL, dev_to_node(dev));
+-		if (!rhub->psi)
+-			rhub->psi_count = 0;
++	port_cap = &xhci->port_caps[xhci->num_port_caps++];
++	if (xhci->num_port_caps > max_caps)
++		return;
++
++	port_cap->maj_rev = major_revision;
++	port_cap->min_rev = minor_revision;
++	port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
+ 
+-		rhub->psi_uid_count++;
+-		for (i = 0; i < rhub->psi_count; i++) {
+-			rhub->psi[i] = readl(addr + 4 + i);
++	if (port_cap->psi_count) {
++		port_cap->psi = kcalloc_node(port_cap->psi_count,
++					     sizeof(*port_cap->psi),
++					     GFP_KERNEL, dev_to_node(dev));
++		if (!port_cap->psi)
++			port_cap->psi_count = 0;
++
++		port_cap->psi_uid_count++;
++		for (i = 0; i < port_cap->psi_count; i++) {
++			port_cap->psi[i] = readl(addr + 4 + i);
+ 
+ 			/* count unique ID values, two consecutive entries can
+ 			 * have the same ID if link is asymmetric
+ 			 */
+-			if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
+-				  XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
+-				rhub->psi_uid_count++;
++			if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
++				  XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
++				port_cap->psi_uid_count++;
+ 
+ 			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
+-				  XHCI_EXT_PORT_PSIV(rhub->psi[i]),
+-				  XHCI_EXT_PORT_PSIE(rhub->psi[i]),
+-				  XHCI_EXT_PORT_PLT(rhub->psi[i]),
+-				  XHCI_EXT_PORT_PFD(rhub->psi[i]),
+-				  XHCI_EXT_PORT_LP(rhub->psi[i]),
+-				  XHCI_EXT_PORT_PSIM(rhub->psi[i]));
++				  XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
++				  XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
++				  XHCI_EXT_PORT_PLT(port_cap->psi[i]),
++				  XHCI_EXT_PORT_PFD(port_cap->psi[i]),
++				  XHCI_EXT_PORT_LP(port_cap->psi[i]),
++				  XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
+ 		}
+ 	}
+ 	/* cache usb2 port capabilities */
+@@ -2225,6 +2240,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+ 			continue;
+ 		}
+ 		hw_port->rhub = rhub;
++		hw_port->port_cap = port_cap;
+ 		rhub->num_ports++;
+ 	}
+ 	/* FIXME: Should we disable ports not in the Extended Capabilities? */
+@@ -2315,6 +2331,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
+ 	if (!xhci->ext_caps)
+ 		return -ENOMEM;
+ 
++	xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
++				flags, dev_to_node(dev));
++	if (!xhci->port_caps)
++		return -ENOMEM;
++
+ 	offset = cap_start;
+ 
+ 	while (offset) {
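
The endpoint-init hunk above sanitizes bogus descriptors: full-speed bulk endpoints may only use a wMaxPacketSize of 8, 16, 32 or 64, so an out-of-spec value is rounded down to a power of two and clamped into that range. A standalone check of the arithmetic, where rd_pow2() and clamp_u() stand in for the kernel's rounddown_pow_of_two() and clamp_val():

#include <stdio.h>

/* Round down to a power of two (v > 0). */
static unsigned int rd_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p * 2 <= v)
		p *= 2;
	return p;
}

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned int fs_bulk_maxpacket(unsigned int desc_val)
{
	return clamp_u(rd_pow2(desc_val), 8, 64);
}

int main(void)
{
	printf("%u\n", fs_bulk_maxpacket(500));	/* 256, clamped to 64 */
	printf("%u\n", fs_bulk_maxpacket(48));	/* rounds down to 32 */
	printf("%u\n", fs_bulk_maxpacket(5));	/* 4, clamped up to 8 */
	return 0;
}
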
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 075c49cfe60f..58cf551a1246 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -41,6 +41,7 @@
+ #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
+ #define PCI_DEVICE_ID_INTEL_APL_XHCI			0x5aa8
+ #define PCI_DEVICE_ID_INTEL_DNV_XHCI			0x19d0
++#define PCI_DEVICE_ID_INTEL_CML_XHCI			0xa3af
+ 
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
+@@ -179,7 +180,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+-		 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
++		 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
+ 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+@@ -283,6 +285,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
+ 	if (!usb_hcd_is_primary_hcd(hcd))
+ 		return 0;
+ 
++	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++		xhci_pme_acpi_rtd3_enable(pdev);
++
+ 	xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
+ 
+ 	/* Find any debug ports */
+@@ -340,9 +345,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 			HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ 		xhci->shared_hcd->can_do_streams = 1;
+ 
+-	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+-		xhci_pme_acpi_rtd3_enable(dev);
+-
+ 	/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ 	pm_runtime_put_noidle(&dev->dev);
+ 
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 98b67605d3cf..509a7fce8f05 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2692,6 +2692,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
+ 	return 1;
+ }
+ 
++/*
++ * Update Event Ring Dequeue Pointer:
++ * - When all events have finished
++ * - To avoid "Event Ring Full Error" condition
++ */
++static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
++		union xhci_trb *event_ring_deq)
++{
++	u64 temp_64;
++	dma_addr_t deq;
++
++	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
++	/* If necessary, update the HW's version of the event ring deq ptr. */
++	if (event_ring_deq != xhci->event_ring->dequeue) {
++		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
++				xhci->event_ring->dequeue);
++		if (deq == 0)
++			xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
++		/*
++		 * Per 4.9.4, Software writes to the ERDP register shall
++		 * always advance the Event Ring Dequeue Pointer value.
++		 */
++		if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
++				((u64) deq & (u64) ~ERST_PTR_MASK))
++			return;
++
++		/* Update HC event ring dequeue pointer */
++		temp_64 &= ERST_PTR_MASK;
++		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
++	}
++
++	/* Clear the event handler busy flag (RW1C) */
++	temp_64 |= ERST_EHB;
++	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
++}
++
+ /*
+  * xHCI spec says we can get an interrupt, and if the HC has an error condition,
+  * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
+@@ -2703,9 +2739,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
+ 	union xhci_trb *event_ring_deq;
+ 	irqreturn_t ret = IRQ_NONE;
+ 	unsigned long flags;
+-	dma_addr_t deq;
+ 	u64 temp_64;
+ 	u32 status;
++	int event_loop = 0;
+ 
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 	/* Check if the xHC generated the interrupt, or the irq is shared */
+@@ -2759,24 +2795,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
+ 	/* FIXME this should be a delayed service routine
+ 	 * that clears the EHB.
+ 	 */
+-	while (xhci_handle_event(xhci) > 0) {}
+-
+-	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+-	/* If necessary, update the HW's version of the event ring deq ptr. */
+-	if (event_ring_deq != xhci->event_ring->dequeue) {
+-		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+-				xhci->event_ring->dequeue);
+-		if (deq == 0)
+-			xhci_warn(xhci, "WARN something wrong with SW event "
+-					"ring dequeue ptr.\n");
+-		/* Update HC event ring dequeue pointer */
+-		temp_64 &= ERST_PTR_MASK;
+-		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
++	while (xhci_handle_event(xhci) > 0) {
++		if (event_loop++ < TRBS_PER_SEGMENT / 2)
++			continue;
++		xhci_update_erst_dequeue(xhci, event_ring_deq);
++		event_loop = 0;
+ 	}
+ 
+-	/* Clear the event handler busy flag (RW1C); event ring is empty. */
+-	temp_64 |= ERST_EHB;
+-	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
++	xhci_update_erst_dequeue(xhci, event_ring_deq);
+ 	ret = IRQ_HANDLED;
+ 
+ out:
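
Before this change the ISR drained every pending event and wrote the dequeue pointer once at the end, so a busy event ring could fill up and the controller would raise an Event Ring Full error. Factoring the ERDP write into xhci_update_erst_dequeue() lets the loop checkpoint the hardware every TRBS_PER_SEGMENT/2 events. The resulting loop shape, with stand-in helpers:

#define TRBS_PER_SEGMENT 256	/* xhci.h value in this kernel */
#define BATCH (TRBS_PER_SEGMENT / 2)

struct ring;
extern int handle_event(struct ring *r);	/* >0 while events remain */
extern void update_dequeue(struct ring *r);	/* write ERDP, clear EHB */

void drain_events(struct ring *r)
{
	int handled = 0;

	while (handle_event(r) > 0) {
		if (++handled < BATCH)
			continue;
		update_dequeue(r);	/* give the HC room mid-drain */
		handled = 0;
	}
	update_dequeue(r);		/* final update once ring is empty */
}
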
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 9b33031cf6fc..4dedc822237f 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1704,11 +1704,21 @@ static inline unsigned int hcd_index(struct usb_hcd *hcd)
+ 	else
+ 		return 1;
+ }
++
++struct xhci_port_cap {
++	u32			*psi;	/* array of protocol speed ID entries */
++	u8			psi_count;
++	u8			psi_uid_count;
++	u8			maj_rev;
++	u8			min_rev;
++};
++
+ struct xhci_port {
+ 	__le32 __iomem		*addr;
+ 	int			hw_portnum;
+ 	int			hcd_portnum;
+ 	struct xhci_hub		*rhub;
++	struct xhci_port_cap	*port_cap;
+ };
+ 
+ struct xhci_hub {
+@@ -1718,9 +1728,6 @@ struct xhci_hub {
+ 	/* supported protocol extended capability values */
+ 	u8			maj_rev;
+ 	u8			min_rev;
+-	u32			*psi;	/* array of protocol speed ID entries */
+-	u8			psi_count;
+-	u8			psi_uid_count;
+ };
+ 
+ /* There is one xhci_hcd structure per controller */
+@@ -1882,6 +1889,9 @@ struct xhci_hcd {
+ 	/* cached usb2 extended protocol capabilities */
+ 	u32                     *ext_caps;
+ 	unsigned int            num_ext_caps;
++	/* cached extended protocol port capabilities */
++	struct xhci_port_cap	*port_caps;
++	unsigned int		num_port_caps;
+ 	/* Compliance Mode Recovery Data */
+ 	struct timer_list	comp_mode_recovery_timer;
+ 	u32			port_status_u0;
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index 2d9d9490cdd4..92875a264b14 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -33,6 +33,14 @@
+ #define USB_DEVICE_ID_CODEMERCS_IOWPV2	0x1512
+ /* full speed iowarrior */
+ #define USB_DEVICE_ID_CODEMERCS_IOW56	0x1503
++/* fuller speed iowarrior */
++#define USB_DEVICE_ID_CODEMERCS_IOW28	0x1504
++#define USB_DEVICE_ID_CODEMERCS_IOW28L	0x1505
++#define USB_DEVICE_ID_CODEMERCS_IOW100	0x1506
++
++/* OEMed devices */
++#define USB_DEVICE_ID_CODEMERCS_IOW24SAG	0x158a
++#define USB_DEVICE_ID_CODEMERCS_IOW56AM		0x158b
+ 
+ /* Get a minor range for your devices from the usb maintainer */
+ #ifdef CONFIG_USB_DYNAMIC_MINORS
+@@ -137,6 +145,11 @@ static const struct usb_device_id iowarrior_ids[] = {
+ 	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
+ 	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)},
+ 	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)},
++	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)},
++	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)},
++	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)},
++	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)},
++	{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)},
+ 	{}			/* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, iowarrior_ids);
+@@ -361,6 +374,7 @@ static ssize_t iowarrior_write(struct file *file,
+ 	}
+ 	switch (dev->product_id) {
+ 	case USB_DEVICE_ID_CODEMERCS_IOW24:
++	case USB_DEVICE_ID_CODEMERCS_IOW24SAG:
+ 	case USB_DEVICE_ID_CODEMERCS_IOWPV1:
+ 	case USB_DEVICE_ID_CODEMERCS_IOWPV2:
+ 	case USB_DEVICE_ID_CODEMERCS_IOW40:
+@@ -375,6 +389,10 @@ static ssize_t iowarrior_write(struct file *file,
+ 		goto exit;
+ 		break;
+ 	case USB_DEVICE_ID_CODEMERCS_IOW56:
++	case USB_DEVICE_ID_CODEMERCS_IOW56AM:
++	case USB_DEVICE_ID_CODEMERCS_IOW28:
++	case USB_DEVICE_ID_CODEMERCS_IOW28L:
++	case USB_DEVICE_ID_CODEMERCS_IOW100:
+ 		/* The IOW56 uses asynchronous IO and more urbs */
+ 		if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
+ 			/* Wait until we are below the limit for submitted urbs */
+@@ -499,6 +517,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
+ 	switch (cmd) {
+ 	case IOW_WRITE:
+ 		if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
++		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG ||
+ 		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
+ 		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
+ 		    dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
+@@ -782,7 +801,11 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 		goto error;
+ 	}
+ 
+-	if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
++	if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
++	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
++	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
++	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
++	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) {
+ 		res = usb_find_last_int_out_endpoint(iface_desc,
+ 				&dev->int_out_endpoint);
+ 		if (res) {
+@@ -795,7 +818,11 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 	/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
+ 	dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
+ 	if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
+-	    (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
++	    ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
++	     (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
++	     (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
++	     (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
++	     (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
+ 		/* IOWarrior56 has wMaxPacketSize different from report size */
+ 		dev->report_size = 7;
+ 
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 1c6eb3a8741e..62ca8e29da48 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -45,6 +45,7 @@ struct uas_dev_info {
+ 	struct scsi_cmnd *cmnd[MAX_CMNDS];
+ 	spinlock_t lock;
+ 	struct work_struct work;
++	struct work_struct scan_work;      /* for async scanning */
+ };
+ 
+ enum {
+@@ -114,6 +115,17 @@ out:
+ 	spin_unlock_irqrestore(&devinfo->lock, flags);
+ }
+ 
++static void uas_scan_work(struct work_struct *work)
++{
++	struct uas_dev_info *devinfo =
++		container_of(work, struct uas_dev_info, scan_work);
++	struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
++
++	dev_dbg(&devinfo->intf->dev, "starting scan\n");
++	scsi_scan_host(shost);
++	dev_dbg(&devinfo->intf->dev, "scan complete\n");
++}
++
+ static void uas_add_work(struct uas_cmd_info *cmdinfo)
+ {
+ 	struct scsi_pointer *scp = (void *)cmdinfo;
+@@ -989,6 +1001,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	init_usb_anchor(&devinfo->data_urbs);
+ 	spin_lock_init(&devinfo->lock);
+ 	INIT_WORK(&devinfo->work, uas_do_work);
++	INIT_WORK(&devinfo->scan_work, uas_scan_work);
+ 
+ 	result = uas_configure_endpoints(devinfo);
+ 	if (result)
+@@ -1005,7 +1018,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ 	if (result)
+ 		goto free_streams;
+ 
+-	scsi_scan_host(shost);
++	/* Submit the work item for async SCSI-device scanning */
++	schedule_work(&devinfo->scan_work);
++
+ 	return result;
+ 
+ free_streams:
+@@ -1173,6 +1188,12 @@ static void uas_disconnect(struct usb_interface *intf)
+ 	usb_kill_anchored_urbs(&devinfo->data_urbs);
+ 	uas_zap_pending(devinfo, DID_NO_CONNECT);
+ 
++	/*
++	 * Prevent SCSI scanning (if it hasn't started yet)
++	 * or wait for the SCSI-scanning routine to stop.
++	 */
++	cancel_work_sync(&devinfo->scan_work);
++
+ 	scsi_remove_host(shost);
+ 	uas_free_streams(devinfo);
+ 	scsi_host_put(shost);
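
scsi_scan_host() can block for a long time, so uas_probe() now defers it to a work item and returns immediately; the cancel_work_sync() added to uas_disconnect() guarantees the scan has either never started or fully finished before the host is removed. The pattern, reduced to its essentials with a placeholder scan routine:

#include <linux/workqueue.h>

struct dev_info {
	struct work_struct scan_work;
};

extern void do_scan(struct dev_info *di);	/* placeholder, may sleep */

static void scan_fn(struct work_struct *work)
{
	struct dev_info *di = container_of(work, struct dev_info, scan_work);

	do_scan(di);
}

static void my_probe(struct dev_info *di)
{
	INIT_WORK(&di->scan_work, scan_fn);
	schedule_work(&di->scan_work);	/* probe returns without waiting */
}

static void my_disconnect(struct dev_info *di)
{
	cancel_work_sync(&di->scan_work); /* never started, or fully done */
}
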
+diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
+index 08cb419eb4e6..5f6b77ea34fb 100644
+--- a/drivers/xen/preempt.c
++++ b/drivers/xen/preempt.c
+@@ -37,7 +37,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
+ 		 * cpu.
+ 		 */
+ 		__this_cpu_write(xen_in_preemptible_hcall, false);
+-		_cond_resched();
++		local_irq_enable();
++		cond_resched();
++		local_irq_disable();
+ 		__this_cpu_write(xen_in_preemptible_hcall, true);
+ 	}
+ }
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index ea45112a98be..b5039b16de93 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3153,6 +3153,7 @@ retry_root_backup:
+ 	if (IS_ERR(fs_info->fs_root)) {
+ 		err = PTR_ERR(fs_info->fs_root);
+ 		btrfs_warn(fs_info, "failed to read fs tree: %d", err);
++		fs_info->fs_root = NULL;
+ 		goto fail_qgroup;
+ 	}
+ 
+@@ -4468,7 +4469,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
+ 	wake_up(&fs_info->transaction_wait);
+ 
+ 	btrfs_destroy_delayed_inodes(fs_info);
+-	btrfs_assert_delayed_root_empty(fs_info);
+ 
+ 	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
+ 				     EXTENT_DIRTY);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 4ea9dd93a545..dec508a28ffa 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -10348,6 +10348,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
+ 	struct btrfs_root *root = BTRFS_I(inode)->root;
+ 	struct btrfs_key ins;
+ 	u64 cur_offset = start;
++	u64 clear_offset = start;
+ 	u64 i_size;
+ 	u64 cur_bytes;
+ 	u64 last_alloc = (u64)-1;
+@@ -10382,6 +10383,15 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
+ 				btrfs_end_transaction(trans);
+ 			break;
+ 		}
++
++		/*
++		 * We've reserved this space, and thus converted it from
++		 * ->bytes_may_use to ->bytes_reserved.  On any error from here
++		 * on out we only need to clear the reservation for the
++		 * remaining unreserved area, so advance clear_offset by the
++		 * extent size.
++		 */
++		clear_offset += ins.offset;
+ 		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+ 
+ 		last_alloc = ins.offset;
+@@ -10462,9 +10472,9 @@ next:
+ 		if (own_trans)
+ 			btrfs_end_transaction(trans);
+ 	}
+-	if (cur_offset < end)
+-		btrfs_free_reserved_data_space(inode, NULL, cur_offset,
+-			end - cur_offset + 1);
++	if (clear_offset < end)
++		btrfs_free_reserved_data_space(inode, NULL, clear_offset,
++			end - clear_offset + 1);
+ 	return ret;
+ }
+ 
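The prealloc fix above splits one cursor into two: cur_offset advances as extents are allocated, while clear_offset advances as reservation is consumed, so error cleanup releases only the tail that was never converted to ->bytes_reserved. The two-cursor shape, with placeholder helpers:

#include <linux/types.h>

struct range_ctx;
extern int reserve_chunk(struct range_ctx *c, u64 cur, u64 end, u64 *len);
extern void release_tail(struct range_ctx *c, u64 start, u64 len);

int fill_range(struct range_ctx *c, u64 start, u64 end)
{
	u64 cur = start, cleared = start;
	int ret = 0;

	while (cur <= end) {
		u64 len;

		ret = reserve_chunk(c, cur, end, &len);
		if (ret)
			break;
		cleared += len;	/* this part is accounted elsewhere now */
		cur += len;
	}
	if (cleared < end)
		release_tail(c, cleared, end - cleared + 1);
	return ret;
}
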
+diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
+index 0c4ef208b8b9..0f6d53ec78ed 100644
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -712,10 +712,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
+ 		}
+ 		btrfs_start_ordered_extent(inode, ordered, 1);
+ 		end = ordered->file_offset;
++		/*
++		 * If the ordered extent had an error save the error but don't
++		 * exit without waiting first for all other ordered extents in
++		 * the range to complete.
++		 */
+ 		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+ 			ret = -EIO;
+ 		btrfs_put_ordered_extent(ordered);
+-		if (ret || end == 0 || end == start)
++		if (end == 0 || end == start)
+ 			break;
+ 		end--;
+ 	}
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index 708f931c36f1..8e5353bd72cf 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -325,8 +325,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
+ 	struct extent_crypt_result ecr;
+ 	int rc = 0;
+ 
+-	BUG_ON(!crypt_stat || !crypt_stat->tfm
+-	       || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
++	if (!crypt_stat || !crypt_stat->tfm
++	       || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
++		return -EINVAL;
++
+ 	if (unlikely(ecryptfs_verbosity > 0)) {
+ 		ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
+ 				crypt_stat->key_size);
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index e74fe84d0886..250cb23ae69f 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -1318,7 +1318,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
+ 		printk(KERN_WARNING "Tag 1 packet contains key larger "
+ 		       "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
+ 		rc = -EINVAL;
+-		goto out;
++		goto out_free;
+ 	}
+ 	memcpy((*new_auth_tok)->session_key.encrypted_key,
+ 	       &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
+diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
+index 9fdd5bcf4564..aa3ddb48ebac 100644
+--- a/fs/ecryptfs/messaging.c
++++ b/fs/ecryptfs/messaging.c
+@@ -392,6 +392,7 @@ int __init ecryptfs_init_messaging(void)
+ 					* ecryptfs_message_buf_len),
+ 				       GFP_KERNEL);
+ 	if (!ecryptfs_msg_ctx_arr) {
++		kfree(ecryptfs_daemon_hash);
+ 		rc = -ENOMEM;
+ 		goto out;
+ 	}
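
Both ecryptfs fixes above are the same class of bug: an early error path that skipped releasing something already allocated (the tag-1 parser now jumps to out_free, and ecryptfs_init_messaging() frees the daemon hash when the second allocation fails). The goto-unwind idiom they restore, reduced to two allocations:

#include <linux/slab.h>

static int setup(void **a, void **b)
{
	int rc;

	*a = kzalloc(64, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;
	*b = kzalloc(64, GFP_KERNEL);
	if (!*b) {
		rc = -ENOMEM;
		goto out_free_a;	/* don't leak the first buffer */
	}
	return 0;

out_free_a:
	kfree(*a);
	*a = NULL;
	return rc;
}
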
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index e5d6ee61ff48..f9645de9d04c 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+ 	ext4_group_t ngroups = ext4_get_groups_count(sb);
+ 	struct ext4_group_desc *desc;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
++	struct buffer_head *bh_p;
+ 
+ 	if (block_group >= ngroups) {
+ 		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
+@@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+ 
+ 	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+ 	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+-	if (!sbi->s_group_desc[group_desc]) {
++	bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
++	/*
++	 * sbi_array_rcu_deref returns with rcu unlocked; this is ok since
++	 * the pointer being dereferenced won't be dereferenced again. As
++	 * the usage in add_new_gdb() shows, the value isn't modified, only
++	 * the pointer, so it remains valid.
++	 */
++	if (!bh_p) {
+ 		ext4_error(sb, "Group descriptor not loaded - "
+ 			   "block_group = %u, group_desc = %u, desc = %u",
+ 			   block_group, group_desc, offset);
+@@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+ 	}
+ 
+ 	desc = (struct ext4_group_desc *)(
+-		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
++		(__u8 *)bh_p->b_data +
+ 		offset * EXT4_DESC_SIZE(sb));
+ 	if (bh)
+-		*bh = sbi->s_group_desc[group_desc];
++		*bh = bh_p;
+ 	return desc;
+ }
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 5c0e06645b1e..0a4461ac4225 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1372,7 +1372,7 @@ struct ext4_sb_info {
+ 	loff_t s_bitmap_maxbytes;	/* max bytes for bitmap files */
+ 	struct buffer_head * s_sbh;	/* Buffer containing the super block */
+ 	struct ext4_super_block *s_es;	/* Pointer to the super block in the buffer */
+-	struct buffer_head **s_group_desc;
++	struct buffer_head * __rcu *s_group_desc;
+ 	unsigned int s_mount_opt;
+ 	unsigned int s_mount_opt2;
+ 	unsigned int s_mount_flags;
+@@ -1430,7 +1430,7 @@ struct ext4_sb_info {
+ #endif
+ 
+ 	/* for buddy allocator */
+-	struct ext4_group_info ***s_group_info;
++	struct ext4_group_info ** __rcu *s_group_info;
+ 	struct inode *s_buddy_cache;
+ 	spinlock_t s_md_lock;
+ 	unsigned short *s_mb_offsets;
+@@ -1480,7 +1480,7 @@ struct ext4_sb_info {
+ 	unsigned int s_extent_max_zeroout_kb;
+ 
+ 	unsigned int s_log_groups_per_flex;
+-	struct flex_groups *s_flex_groups;
++	struct flex_groups * __rcu *s_flex_groups;
+ 	ext4_group_t s_flex_groups_allocated;
+ 
+ 	/* workqueue for reserved extent conversions (buffered io) */
+@@ -1520,8 +1520,11 @@ struct ext4_sb_info {
+ 	struct ratelimit_state s_warning_ratelimit_state;
+ 	struct ratelimit_state s_msg_ratelimit_state;
+ 
+-	/* Barrier between changing inodes' journal flags and writepages ops. */
+-	struct percpu_rw_semaphore s_journal_flag_rwsem;
++	/*
++	 * Barrier between writepages ops and changing any inode's JOURNAL_DATA
++	 * or EXTENTS flag.
++	 */
++	struct percpu_rw_semaphore s_writepages_rwsem;
+ 	struct dax_device *s_daxdev;
+ };
+ 
+@@ -1541,6 +1544,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
+ 		 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
+ }
+ 
++/*
++ * Returns: sbi->field[index]
++ * Used to access an array element from the following sbi fields which require
++ * rcu protection to avoid dereferencing an invalid pointer due to reassignment
++ * - s_group_desc
++ * - s_group_info
++ * - s_flex_group
++ */
++#define sbi_array_rcu_deref(sbi, field, index)				   \
++({									   \
++	typeof(*((sbi)->field)) _v;					   \
++	rcu_read_lock();						   \
++	_v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index];	   \
++	rcu_read_unlock();						   \
++	_v;								   \
++})
++
+ /*
+  * Inode dynamic state flags
+  */
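
sbi_array_rcu_deref() above is the read side of the classic RCU-protected array-resize pattern: readers fetch the array pointer inside an RCU read-side critical section, and a resizer publishes a replacement with rcu_assign_pointer() and frees the old array only after a grace period (the new ext4_kvfree_array_rcu() exists for exactly that). A hedged sketch of both sides, using s_group_desc-style naming and assuming the resize lock is held on the write side:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct buffer_head;

static struct buffer_head * __rcu *group_desc;	/* like sbi->s_group_desc */

/* Read side: mirrors sbi_array_rcu_deref(sbi, s_group_desc, i). */
static struct buffer_head *get_desc(unsigned int i)
{
	struct buffer_head **a, *bh;

	rcu_read_lock();
	a = rcu_dereference(group_desc);
	bh = a[i];	/* the element itself is not freed by a resize */
	rcu_read_unlock();
	return bh;
}

/* Resize side: publish the new array, free the old after a grace period. */
static int grow_desc(unsigned int old_n, unsigned int new_n)
{
	struct buffer_head **old, **new;

	new = kcalloc(new_n, sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	old = rcu_dereference_protected(group_desc, 1);	/* resize lock held */
	memcpy(new, old, old_n * sizeof(*new));
	rcu_assign_pointer(group_desc, new);
	synchronize_rcu();	/* wait out readers of the old array */
	kfree(old);
	return 0;
}
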
+@@ -2564,6 +2584,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
+ extern bool ext4_empty_dir(struct inode *inode);
+ 
+ /* resize.c */
++extern void ext4_kvfree_array_rcu(void *to_free);
+ extern int ext4_group_add(struct super_block *sb,
+ 				struct ext4_new_group_data *input);
+ extern int ext4_group_extend(struct super_block *sb,
+@@ -2811,13 +2832,13 @@ static inline
+ struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+ 					    ext4_group_t group)
+ {
+-	 struct ext4_group_info ***grp_info;
++	 struct ext4_group_info **grp_info;
+ 	 long indexv, indexh;
+ 	 BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
+-	 grp_info = EXT4_SB(sb)->s_group_info;
+ 	 indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
+ 	 indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
+-	 return grp_info[indexv][indexh];
++	 grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
++	 return grp_info[indexh];
+ }
+ 
+ /*
+@@ -2867,7 +2888,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
+ 		     !inode_is_locked(inode));
+ 	down_write(&EXT4_I(inode)->i_data_sem);
+ 	if (newsize > EXT4_I(inode)->i_disksize)
+-		EXT4_I(inode)->i_disksize = newsize;
++		WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
+ 	up_write(&EXT4_I(inode)->i_data_sem);
+ }
+ 
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 091a18a51c99..dafa7e4aaecb 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -330,11 +330,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
+ 
+ 	percpu_counter_inc(&sbi->s_freeinodes_counter);
+ 	if (sbi->s_log_groups_per_flex) {
+-		ext4_group_t f = ext4_flex_group(sbi, block_group);
++		struct flex_groups *fg;
+ 
+-		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
++		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
++					 ext4_flex_group(sbi, block_group));
++		atomic_inc(&fg->free_inodes);
+ 		if (is_directory)
+-			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
++			atomic_dec(&fg->used_dirs);
+ 	}
+ 	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+ 	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
+@@ -370,12 +372,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+ 			    int flex_size, struct orlov_stats *stats)
+ {
+ 	struct ext4_group_desc *desc;
+-	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
+ 
+ 	if (flex_size > 1) {
+-		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
+-		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
+-		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
++		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
++							     s_flex_groups, g);
++		stats->free_inodes = atomic_read(&fg->free_inodes);
++		stats->free_clusters = atomic64_read(&fg->free_clusters);
++		stats->used_dirs = atomic_read(&fg->used_dirs);
+ 		return;
+ 	}
+ 
+@@ -1056,7 +1059,8 @@ got:
+ 		if (sbi->s_log_groups_per_flex) {
+ 			ext4_group_t f = ext4_flex_group(sbi, group);
+ 
+-			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
++			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
++							f)->used_dirs);
+ 		}
+ 	}
+ 	if (ext4_has_group_desc_csum(sb)) {
+@@ -1079,7 +1083,8 @@ got:
+ 
+ 	if (sbi->s_log_groups_per_flex) {
+ 		flex_group = ext4_flex_group(sbi, group);
+-		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
++		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
++						flex_group)->free_inodes);
+ 	}
+ 
+ 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 8e535bb34d5f..23b4b1745a39 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2569,7 +2569,7 @@ update_disksize:
+ 	 * truncate are avoided by checking i_size under i_data_sem.
+ 	 */
+ 	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
+-	if (disksize > EXT4_I(inode)->i_disksize) {
++	if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
+ 		int err2;
+ 		loff_t i_size;
+ 
+@@ -2730,7 +2730,7 @@ static int ext4_writepages(struct address_space *mapping,
+ 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ 		return -EIO;
+ 
+-	percpu_down_read(&sbi->s_journal_flag_rwsem);
++	percpu_down_read(&sbi->s_writepages_rwsem);
+ 	trace_ext4_writepages(inode, wbc);
+ 
+ 	/*
+@@ -2950,7 +2950,7 @@ unplug:
+ out_writepages:
+ 	trace_ext4_writepages_result(inode, wbc, ret,
+ 				     nr_to_write - wbc->nr_to_write);
+-	percpu_up_read(&sbi->s_journal_flag_rwsem);
++	percpu_up_read(&sbi->s_writepages_rwsem);
+ 	return ret;
+ }
+ 
+@@ -2965,13 +2965,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
+ 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ 		return -EIO;
+ 
+-	percpu_down_read(&sbi->s_journal_flag_rwsem);
++	percpu_down_read(&sbi->s_writepages_rwsem);
+ 	trace_ext4_writepages(inode, wbc);
+ 
+ 	ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
+ 	trace_ext4_writepages_result(inode, wbc, ret,
+ 				     nr_to_write - wbc->nr_to_write);
+-	percpu_up_read(&sbi->s_journal_flag_rwsem);
++	percpu_up_read(&sbi->s_writepages_rwsem);
+ 	return ret;
+ }
+ 
+@@ -6207,7 +6207,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ 		}
+ 	}
+ 
+-	percpu_down_write(&sbi->s_journal_flag_rwsem);
++	percpu_down_write(&sbi->s_writepages_rwsem);
+ 	jbd2_journal_lock_updates(journal);
+ 
+ 	/*
+@@ -6224,7 +6224,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ 		err = jbd2_journal_flush(journal);
+ 		if (err < 0) {
+ 			jbd2_journal_unlock_updates(journal);
+-			percpu_up_write(&sbi->s_journal_flag_rwsem);
++			percpu_up_write(&sbi->s_writepages_rwsem);
+ 			return err;
+ 		}
+ 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
+@@ -6232,7 +6232,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ 	ext4_set_aops(inode);
+ 
+ 	jbd2_journal_unlock_updates(journal);
+-	percpu_up_write(&sbi->s_journal_flag_rwsem);
++	percpu_up_write(&sbi->s_writepages_rwsem);
+ 
+ 	if (val)
+ 		up_write(&EXT4_I(inode)->i_mmap_sem);
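
The i_disksize hunks above pair a WRITE_ONCE() under i_data_sem with a
READ_ONCE() on the lockless writepages path: the lock still serializes updates,
the annotations only guarantee that the unlocked peek is a single untorn access
the compiler cannot refetch. A rough userspace analogue, using C11 relaxed
atomics as the stand-in; the names below are illustrative, not ext4 code:

/*
 * Lockless peek pattern: the updater holds a lock, the reader only
 * needs one untorn load before deciding whether to take the lock
 * and recheck.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t data_sem = PTHREAD_MUTEX_INITIALIZER;
static _Atomic long long disksize;	/* like EXT4_I(inode)->i_disksize */

static void update_disksize(long long newsize)
{
	pthread_mutex_lock(&data_sem);
	if (newsize > atomic_load_explicit(&disksize, memory_order_relaxed))
		atomic_store_explicit(&disksize, newsize,
				      memory_order_relaxed); /* WRITE_ONCE */
	pthread_mutex_unlock(&data_sem);
}

static void maybe_extend(long long candidate)
{
	/* READ_ONCE: racy but untorn; the real check repeats under the lock */
	if (candidate > atomic_load_explicit(&disksize, memory_order_relaxed))
		update_disksize(candidate);
}

int main(void)
{
	update_disksize(4096);
	maybe_extend(8192);
	printf("%lld\n", atomic_load_explicit(&disksize, memory_order_relaxed));
	return 0;
}
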
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index cc229f3357f7..71121fcf9e8c 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	unsigned size;
+-	struct ext4_group_info ***new_groupinfo;
++	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
+ 
+ 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
+ 		EXT4_DESC_PER_BLOCK_BITS(sb);
+@@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
+ 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
+ 		return -ENOMEM;
+ 	}
+-	if (sbi->s_group_info) {
+-		memcpy(new_groupinfo, sbi->s_group_info,
++	rcu_read_lock();
++	old_groupinfo = rcu_dereference(sbi->s_group_info);
++	if (old_groupinfo)
++		memcpy(new_groupinfo, old_groupinfo,
+ 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
+-		kvfree(sbi->s_group_info);
+-	}
+-	sbi->s_group_info = new_groupinfo;
++	rcu_read_unlock();
++	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
+ 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
++	if (old_groupinfo)
++		ext4_kvfree_array_rcu(old_groupinfo);
+ 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 
+ 		   sbi->s_group_info_size);
+ 	return 0;
+@@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ {
+ 	int i;
+ 	int metalen = 0;
++	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_group_info **meta_group_info;
+ 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+@@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ 				 "for a buddy group");
+ 			goto exit_meta_group_info;
+ 		}
+-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
+-			meta_group_info;
++		rcu_read_lock();
++		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
++		rcu_read_unlock();
+ 	}
+ 
+-	meta_group_info =
+-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
++	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
+ 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+ 
+ 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
+@@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+ exit_group_info:
+ 	/* If a meta_group_info table has been allocated, release it now */
+ 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
+-		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
+-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
++		struct ext4_group_info ***group_info;
++
++		rcu_read_lock();
++		group_info = rcu_dereference(sbi->s_group_info);
++		kfree(group_info[idx]);
++		group_info[idx] = NULL;
++		rcu_read_unlock();
+ 	}
+ exit_meta_group_info:
+ 	return -ENOMEM;
+@@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	int err;
+ 	struct ext4_group_desc *desc;
++	struct ext4_group_info ***group_info;
+ 	struct kmem_cache *cachep;
+ 
+ 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
+@@ -2506,11 +2516,16 @@ err_freebuddy:
+ 	while (i-- > 0)
+ 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
+ 	i = sbi->s_group_info_size;
++	rcu_read_lock();
++	group_info = rcu_dereference(sbi->s_group_info);
+ 	while (i-- > 0)
+-		kfree(sbi->s_group_info[i]);
++		kfree(group_info[i]);
++	rcu_read_unlock();
+ 	iput(sbi->s_buddy_cache);
+ err_freesgi:
+-	kvfree(sbi->s_group_info);
++	rcu_read_lock();
++	kvfree(rcu_dereference(sbi->s_group_info));
++	rcu_read_unlock();
+ 	return -ENOMEM;
+ }
+ 
+@@ -2699,7 +2714,7 @@ int ext4_mb_release(struct super_block *sb)
+ 	ext4_group_t ngroups = ext4_get_groups_count(sb);
+ 	ext4_group_t i;
+ 	int num_meta_group_infos;
+-	struct ext4_group_info *grinfo;
++	struct ext4_group_info *grinfo, ***group_info;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+ 
+@@ -2717,9 +2732,12 @@ int ext4_mb_release(struct super_block *sb)
+ 		num_meta_group_infos = (ngroups +
+ 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
+ 			EXT4_DESC_PER_BLOCK_BITS(sb);
++		rcu_read_lock();
++		group_info = rcu_dereference(sbi->s_group_info);
+ 		for (i = 0; i < num_meta_group_infos; i++)
+-			kfree(sbi->s_group_info[i]);
+-		kvfree(sbi->s_group_info);
++			kfree(group_info[i]);
++		kvfree(group_info);
++		rcu_read_unlock();
+ 	}
+ 	kfree(sbi->s_mb_offsets);
+ 	kfree(sbi->s_mb_maxs);
+@@ -3018,7 +3036,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+ 		ext4_group_t flex_group = ext4_flex_group(sbi,
+ 							  ac->ac_b_ex.fe_group);
+ 		atomic64_sub(ac->ac_b_ex.fe_len,
+-			     &sbi->s_flex_groups[flex_group].free_clusters);
++			     &sbi_array_rcu_deref(sbi, s_flex_groups,
++						  flex_group)->free_clusters);
+ 	}
+ 
+ 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+@@ -4912,7 +4931,8 @@ do_more:
+ 	if (sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+ 		atomic64_add(count_clusters,
+-			     &sbi->s_flex_groups[flex_group].free_clusters);
++			     &sbi_array_rcu_deref(sbi, s_flex_groups,
++						  flex_group)->free_clusters);
+ 	}
+ 
+ 	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+@@ -5061,7 +5081,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
+ 	if (sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+ 		atomic64_add(clusters_freed,
+-			     &sbi->s_flex_groups[flex_group].free_clusters);
++			     &sbi_array_rcu_deref(sbi, s_flex_groups,
++						  flex_group)->free_clusters);
+ 	}
+ 
+ 	ext4_mb_unload_buddy(&e4b);
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index a98bfca9c463..bec4ad787c7d 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -427,6 +427,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
+ 
+ int ext4_ext_migrate(struct inode *inode)
+ {
++	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	handle_t *handle;
+ 	int retval = 0, i;
+ 	__le32 *i_data;
+@@ -451,6 +452,8 @@ int ext4_ext_migrate(struct inode *inode)
+ 		 */
+ 		return retval;
+ 
++	percpu_down_write(&sbi->s_writepages_rwsem);
++
+ 	/*
+ 	 * Worst case we can touch the allocation bitmaps, a bgd
+ 	 * block, and a block to link in the orphan list.  We do need
+@@ -461,7 +464,7 @@ int ext4_ext_migrate(struct inode *inode)
+ 
+ 	if (IS_ERR(handle)) {
+ 		retval = PTR_ERR(handle);
+-		return retval;
++		goto out_unlock;
+ 	}
+ 	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
+ 		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
+@@ -472,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode)
+ 	if (IS_ERR(tmp_inode)) {
+ 		retval = PTR_ERR(tmp_inode);
+ 		ext4_journal_stop(handle);
+-		return retval;
++		goto out_unlock;
+ 	}
+ 	i_size_write(tmp_inode, i_size_read(inode));
+ 	/*
+@@ -514,7 +517,7 @@ int ext4_ext_migrate(struct inode *inode)
+ 		 */
+ 		ext4_orphan_del(NULL, tmp_inode);
+ 		retval = PTR_ERR(handle);
+-		goto out;
++		goto out_tmp_inode;
+ 	}
+ 
+ 	ei = EXT4_I(inode);
+@@ -595,10 +598,11 @@ err_out:
+ 	/* Reset the extent details */
+ 	ext4_ext_tree_init(handle, tmp_inode);
+ 	ext4_journal_stop(handle);
+-out:
++out_tmp_inode:
+ 	unlock_new_inode(tmp_inode);
+ 	iput(tmp_inode);
+-
++out_unlock:
++	percpu_up_write(&sbi->s_writepages_rwsem);
+ 	return retval;
+ }
+ 
+@@ -608,7 +612,8 @@ out:
+ int ext4_ind_migrate(struct inode *inode)
+ {
+ 	struct ext4_extent_header	*eh;
+-	struct ext4_super_block		*es = EXT4_SB(inode->i_sb)->s_es;
++	struct ext4_sb_info		*sbi = EXT4_SB(inode->i_sb);
++	struct ext4_super_block		*es = sbi->s_es;
+ 	struct ext4_inode_info		*ei = EXT4_I(inode);
+ 	struct ext4_extent		*ex;
+ 	unsigned int			i, len;
+@@ -632,9 +637,13 @@ int ext4_ind_migrate(struct inode *inode)
+ 	if (test_opt(inode->i_sb, DELALLOC))
+ 		ext4_alloc_da_blocks(inode);
+ 
++	percpu_down_write(&sbi->s_writepages_rwsem);
++
+ 	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
+-	if (IS_ERR(handle))
+-		return PTR_ERR(handle);
++	if (IS_ERR(handle)) {
++		ret = PTR_ERR(handle);
++		goto out_unlock;
++	}
+ 
+ 	down_write(&EXT4_I(inode)->i_data_sem);
+ 	ret = ext4_ext_check_inode(inode);
+@@ -669,5 +678,7 @@ int ext4_ind_migrate(struct inode *inode)
+ errout:
+ 	ext4_journal_stop(handle);
+ 	up_write(&EXT4_I(inode)->i_data_sem);
++out_unlock:
++	percpu_up_write(&sbi->s_writepages_rwsem);
+ 	return ret;
+ }
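
With the rename to s_writepages_rwsem, the two migration entry points above now
take the write side around the whole format switch while ext4_writepages()
holds the read side, so writeback can never run against a half-converted inode.
The pthread sketch below shows only that locking shape, with an ordinary rwlock
standing in for the percpu rw_semaphore; it is an analogy, not ext4 code:

/*
 * Many concurrent writepages() readers, one exclusive migration
 * writer: readers never observe the flag mid-flip.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t writepages_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static int inode_uses_extents = 1;	/* the flag migration flips */

static void writepages(void)
{
	pthread_rwlock_rdlock(&writepages_rwsem);
	/* safe: the flag cannot change while we hold the read side */
	printf("writeback sees extents=%d\n", inode_uses_extents);
	pthread_rwlock_unlock(&writepages_rwsem);
}

static void migrate(void)
{
	pthread_rwlock_wrlock(&writepages_rwsem);
	inode_uses_extents = !inode_uses_extents;	/* the format change */
	pthread_rwlock_unlock(&writepages_rwsem);
}

int main(void)
{
	writepages();
	migrate();
	writepages();
	return 0;
}
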
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 4608d0d3b7f9..a8f2e3549bb9 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1431,6 +1431,7 @@ restart:
+ 		/*
+ 		 * We deal with the read-ahead logic here.
+ 		 */
++		cond_resched();
+ 		if (ra_ptr >= ra_max) {
+ 			/* Refill the readahead buffer */
+ 			ra_ptr = 0;
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 4d5c0fc9d23a..ef552d93708e 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -17,6 +17,33 @@
+ 
+ #include "ext4_jbd2.h"
+ 
++struct ext4_rcu_ptr {
++	struct rcu_head rcu;
++	void *ptr;
++};
++
++static void ext4_rcu_ptr_callback(struct rcu_head *head)
++{
++	struct ext4_rcu_ptr *ptr;
++
++	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
++	kvfree(ptr->ptr);
++	kfree(ptr);
++}
++
++void ext4_kvfree_array_rcu(void *to_free)
++{
++	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
++
++	if (ptr) {
++		ptr->ptr = to_free;
++		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
++		return;
++	}
++	synchronize_rcu();
++	kvfree(to_free);
++}
++
+ int ext4_resize_begin(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -560,8 +587,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
+ 				brelse(gdb);
+ 				goto out;
+ 			}
+-			memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
+-			       gdb->b_size);
++			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
++				s_group_desc, j)->b_data, gdb->b_size);
+ 			set_buffer_uptodate(gdb);
+ 
+ 			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
+@@ -879,13 +906,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+ 	}
+ 	brelse(dind);
+ 
+-	o_group_desc = EXT4_SB(sb)->s_group_desc;
++	rcu_read_lock();
++	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
+ 	memcpy(n_group_desc, o_group_desc,
+ 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
++	rcu_read_unlock();
+ 	n_group_desc[gdb_num] = gdb_bh;
+-	EXT4_SB(sb)->s_group_desc = n_group_desc;
++	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
+ 	EXT4_SB(sb)->s_gdb_count++;
+-	kvfree(o_group_desc);
++	ext4_kvfree_array_rcu(o_group_desc);
+ 
+ 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
+ 	err = ext4_handle_dirty_super(handle, sb);
+@@ -929,9 +958,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+ 		return err;
+ 	}
+ 
+-	o_group_desc = EXT4_SB(sb)->s_group_desc;
++	rcu_read_lock();
++	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
+ 	memcpy(n_group_desc, o_group_desc,
+ 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
++	rcu_read_unlock();
+ 	n_group_desc[gdb_num] = gdb_bh;
+ 
+ 	BUFFER_TRACE(gdb_bh, "get_write_access");
+@@ -942,9 +973,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+ 		return err;
+ 	}
+ 
+-	EXT4_SB(sb)->s_group_desc = n_group_desc;
++	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
+ 	EXT4_SB(sb)->s_gdb_count++;
+-	kvfree(o_group_desc);
++	ext4_kvfree_array_rcu(o_group_desc);
+ 	return err;
+ }
+ 
+@@ -1210,7 +1241,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
+ 		 * use non-sparse filesystems anymore.  This is already checked above.
+ 		 */
+ 		if (gdb_off) {
+-			gdb_bh = sbi->s_group_desc[gdb_num];
++			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
++						     gdb_num);
+ 			BUFFER_TRACE(gdb_bh, "get_write_access");
+ 			err = ext4_journal_get_write_access(handle, gdb_bh);
+ 
+@@ -1292,7 +1324,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
+ 		/*
+ 		 * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
+ 		 */
+-		gdb_bh = sbi->s_group_desc[gdb_num];
++		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
+ 		/* Update group descriptor block for new group */
+ 		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
+ 						 gdb_off * EXT4_DESC_SIZE(sb));
+@@ -1420,11 +1452,14 @@ static void ext4_update_super(struct super_block *sb,
+ 		   percpu_counter_read(&sbi->s_freeclusters_counter));
+ 	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group;
++		struct flex_groups *fg;
++
+ 		flex_group = ext4_flex_group(sbi, group_data[0].group);
++		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
+ 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
+-			     &sbi->s_flex_groups[flex_group].free_clusters);
++			     &fg->free_clusters);
+ 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
+-			   &sbi->s_flex_groups[flex_group].free_inodes);
++			   &fg->free_inodes);
+ 	}
+ 
+ 	/*
+@@ -1519,7 +1554,8 @@ exit_journal:
+ 		for (; gdb_num <= gdb_num_end; gdb_num++) {
+ 			struct buffer_head *gdb_bh;
+ 
+-			gdb_bh = sbi->s_group_desc[gdb_num];
++			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
++						     gdb_num);
+ 			if (old_gdb == gdb_bh->b_blocknr)
+ 				continue;
+ 			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
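
ext4_kvfree_array_rcu() above tries to defer the kvfree() past the RCU grace
period by allocating a small callback record, and only falls back to a
synchronous synchronize_rcu() plus kvfree() when even that tiny allocation
fails. The sketch below reproduces just that allocate-or-fall-back structure in
userspace; fake_call_rcu() and fake_synchronize_rcu() are stubs standing in for
the real primitives and run immediately here:

/*
 * Deferred-free-with-fallback pattern: prefer an asynchronous
 * callback, degrade gracefully to a blocking path under memory
 * pressure.
 */
#include <stdio.h>
#include <stdlib.h>

struct rcu_ptr {
	void (*cb)(struct rcu_ptr *);
	void *ptr;
};

static void free_cb(struct rcu_ptr *p)
{
	free(p->ptr);
	free(p);
}

/* stand-in for call_rcu(): the real one runs cb after a grace period */
static void fake_call_rcu(struct rcu_ptr *p)
{
	p->cb(p);
}

/* stand-in for synchronize_rcu(): block until all readers are done */
static void fake_synchronize_rcu(void)
{
}

static void kvfree_array_rcu(void *to_free)
{
	struct rcu_ptr *p = calloc(1, sizeof(*p));

	if (p) {
		p->cb = free_cb;
		p->ptr = to_free;
		fake_call_rcu(p);
		return;
	}
	/* no memory for deferral: wait for readers, then free directly */
	fake_synchronize_rcu();
	free(to_free);
}

int main(void)
{
	kvfree_array_rcu(malloc(64));
	puts("ok");
	return 0;
}
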
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index e080e90178a0..cb797489b2d8 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -969,6 +969,8 @@ static void ext4_put_super(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_super_block *es = sbi->s_es;
++	struct buffer_head **group_desc;
++	struct flex_groups **flex_groups;
+ 	int aborted = 0;
+ 	int i, err;
+ 
+@@ -999,15 +1001,23 @@ static void ext4_put_super(struct super_block *sb)
+ 	if (!sb_rdonly(sb))
+ 		ext4_commit_super(sb, 1);
+ 
++	rcu_read_lock();
++	group_desc = rcu_dereference(sbi->s_group_desc);
+ 	for (i = 0; i < sbi->s_gdb_count; i++)
+-		brelse(sbi->s_group_desc[i]);
+-	kvfree(sbi->s_group_desc);
+-	kvfree(sbi->s_flex_groups);
++		brelse(group_desc[i]);
++	kvfree(group_desc);
++	flex_groups = rcu_dereference(sbi->s_flex_groups);
++	if (flex_groups) {
++		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
++			kvfree(flex_groups[i]);
++		kvfree(flex_groups);
++	}
++	rcu_read_unlock();
+ 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
+ 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ 	percpu_counter_destroy(&sbi->s_dirs_counter);
+ 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+-	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
++	percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ #ifdef CONFIG_QUOTA
+ 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ 		kfree(get_qf_name(sb, sbi, i));
+@@ -2287,8 +2297,8 @@ done:
+ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	struct flex_groups *new_groups;
+-	int size;
++	struct flex_groups **old_groups, **new_groups;
++	int size, i;
+ 
+ 	if (!sbi->s_log_groups_per_flex)
+ 		return 0;
+@@ -2297,22 +2307,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+ 	if (size <= sbi->s_flex_groups_allocated)
+ 		return 0;
+ 
+-	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
+-	new_groups = kvzalloc(size, GFP_KERNEL);
++	new_groups = kvzalloc(roundup_pow_of_two(size *
++			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
+ 	if (!new_groups) {
+-		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
+-			 size / (int) sizeof(struct flex_groups));
++		ext4_msg(sb, KERN_ERR,
++			 "not enough memory for %d flex group pointers", size);
+ 		return -ENOMEM;
+ 	}
+-
+-	if (sbi->s_flex_groups) {
+-		memcpy(new_groups, sbi->s_flex_groups,
+-		       (sbi->s_flex_groups_allocated *
+-			sizeof(struct flex_groups)));
+-		kvfree(sbi->s_flex_groups);
++	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
++		new_groups[i] = kvzalloc(roundup_pow_of_two(
++					 sizeof(struct flex_groups)),
++					 GFP_KERNEL);
++		if (!new_groups[i]) {
++			for (i--; i >= sbi->s_flex_groups_allocated; i--)
++				kvfree(new_groups[i]);
++			kvfree(new_groups);
++			ext4_msg(sb, KERN_ERR,
++				 "not enough memory for %d flex groups", size);
++			return -ENOMEM;
++		}
+ 	}
+-	sbi->s_flex_groups = new_groups;
+-	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
++	rcu_read_lock();
++	old_groups = rcu_dereference(sbi->s_flex_groups);
++	if (old_groups)
++		memcpy(new_groups, old_groups,
++		       (sbi->s_flex_groups_allocated *
++			sizeof(struct flex_groups *)));
++	rcu_read_unlock();
++	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
++	sbi->s_flex_groups_allocated = size;
++	if (old_groups)
++		ext4_kvfree_array_rcu(old_groups);
+ 	return 0;
+ }
+ 
+@@ -2320,6 +2345,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_group_desc *gdp = NULL;
++	struct flex_groups *fg;
+ 	ext4_group_t flex_group;
+ 	int i, err;
+ 
+@@ -2337,12 +2363,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ 		gdp = ext4_get_group_desc(sb, i, NULL);
+ 
+ 		flex_group = ext4_flex_group(sbi, i);
+-		atomic_add(ext4_free_inodes_count(sb, gdp),
+-			   &sbi->s_flex_groups[flex_group].free_inodes);
++		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
++		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
+ 		atomic64_add(ext4_free_group_clusters(sb, gdp),
+-			     &sbi->s_flex_groups[flex_group].free_clusters);
+-		atomic_add(ext4_used_dirs_count(sb, gdp),
+-			   &sbi->s_flex_groups[flex_group].used_dirs);
++			     &fg->free_clusters);
++		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
+ 	}
+ 
+ 	return 1;
+@@ -2923,7 +2948,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
+ 		return 0;
+ 	}
+ 
+-#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
++#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
+ 	if (!readonly && (ext4_has_feature_quota(sb) ||
+ 			  ext4_has_feature_project(sb))) {
+ 		ext4_msg(sb, KERN_ERR,
+@@ -3548,9 +3573,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ 	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
+ 	char *orig_data = kstrdup(data, GFP_KERNEL);
+-	struct buffer_head *bh;
++	struct buffer_head *bh, **group_desc;
+ 	struct ext4_super_block *es = NULL;
+ 	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
++	struct flex_groups **flex_groups;
+ 	ext4_fsblk_t block;
+ 	ext4_fsblk_t sb_block = get_sb_block(&data);
+ 	ext4_fsblk_t logical_sb_block;
+@@ -4166,9 +4192,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 			goto failed_mount;
+ 		}
+ 	}
+-	sbi->s_group_desc = kvmalloc_array(db_count,
+-					   sizeof(struct buffer_head *),
+-					   GFP_KERNEL);
++	rcu_assign_pointer(sbi->s_group_desc,
++			   kvmalloc_array(db_count,
++					  sizeof(struct buffer_head *),
++					  GFP_KERNEL));
+ 	if (sbi->s_group_desc == NULL) {
+ 		ext4_msg(sb, KERN_ERR, "not enough memory");
+ 		ret = -ENOMEM;
+@@ -4184,14 +4211,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	}
+ 
+ 	for (i = 0; i < db_count; i++) {
++		struct buffer_head *bh;
++
+ 		block = descriptor_loc(sb, logical_sb_block, i);
+-		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
+-		if (!sbi->s_group_desc[i]) {
++		bh = sb_bread_unmovable(sb, block);
++		if (!bh) {
+ 			ext4_msg(sb, KERN_ERR,
+ 			       "can't read group descriptor %d", i);
+ 			db_count = i;
+ 			goto failed_mount2;
+ 		}
++		rcu_read_lock();
++		rcu_dereference(sbi->s_group_desc)[i] = bh;
++		rcu_read_unlock();
+ 	}
+ 	sbi->s_gdb_count = db_count;
+ 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+@@ -4463,7 +4495,7 @@ no_journal:
+ 		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
+ 					  GFP_KERNEL);
+ 	if (!err)
+-		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
++		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
+ 
+ 	if (err) {
+ 		ext4_msg(sb, KERN_ERR, "insufficient memory");
+@@ -4551,13 +4583,19 @@ failed_mount7:
+ 	ext4_unregister_li_request(sb);
+ failed_mount6:
+ 	ext4_mb_release(sb);
+-	if (sbi->s_flex_groups)
+-		kvfree(sbi->s_flex_groups);
++	rcu_read_lock();
++	flex_groups = rcu_dereference(sbi->s_flex_groups);
++	if (flex_groups) {
++		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
++			kvfree(flex_groups[i]);
++		kvfree(flex_groups);
++	}
++	rcu_read_unlock();
+ 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
+ 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ 	percpu_counter_destroy(&sbi->s_dirs_counter);
+ 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+-	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
++	percpu_free_rwsem(&sbi->s_writepages_rwsem);
+ failed_mount5:
+ 	ext4_ext_release(sb);
+ 	ext4_release_system_zone(sb);
+@@ -4588,9 +4626,12 @@ failed_mount3:
+ 	if (sbi->s_mmp_tsk)
+ 		kthread_stop(sbi->s_mmp_tsk);
+ failed_mount2:
++	rcu_read_lock();
++	group_desc = rcu_dereference(sbi->s_group_desc);
+ 	for (i = 0; i < db_count; i++)
+-		brelse(sbi->s_group_desc[i]);
+-	kvfree(sbi->s_group_desc);
++		brelse(group_desc[i]);
++	kvfree(group_desc);
++	rcu_read_unlock();
+ failed_mount:
+ 	if (sbi->s_chksum_driver)
+ 		crypto_free_shash(sbi->s_chksum_driver);
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 97ffe12a2262..04ffef9cea8c 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -831,8 +831,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
+ 	char *frozen_buffer = NULL;
+ 	unsigned long start_lock, time_lock;
+ 
+-	if (is_handle_aborted(handle))
+-		return -EROFS;
+ 	journal = transaction->t_journal;
+ 
+ 	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
+@@ -1084,6 +1082,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
+ 	struct journal_head *jh;
+ 	int rc;
+ 
++	if (is_handle_aborted(handle))
++		return -EROFS;
++
+ 	if (jbd2_write_access_granted(handle, bh, false))
+ 		return 0;
+ 
+@@ -1221,6 +1222,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
+ 	struct journal_head *jh;
+ 	char *committed_data = NULL;
+ 
++	if (is_handle_aborted(handle))
++		return -EROFS;
++
+ 	if (jbd2_write_access_granted(handle, bh, true))
+ 		return 0;
+ 
+diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
+index 99bc5b3ae26e..733eaf95e207 100644
+--- a/include/linux/intel-svm.h
++++ b/include/linux/intel-svm.h
+@@ -130,7 +130,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
+ 	BUG();
+ }
+ 
+-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
++static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
+ {
+ 	return -EINVAL;
+ }
+diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
+index 8301f1df0682..092445543258 100644
+--- a/include/linux/irqdomain.h
++++ b/include/linux/irqdomain.h
+@@ -188,7 +188,7 @@ enum {
+ 	IRQ_DOMAIN_FLAG_HIERARCHY	= (1 << 0),
+ 
+ 	/* Irq domain name was allocated in __irq_domain_add() */
+-	IRQ_DOMAIN_NAME_ALLOCATED	= (1 << 6),
++	IRQ_DOMAIN_NAME_ALLOCATED	= (1 << 1),
+ 
+ 	/* Irq domain is an IPI domain with virq per cpu */
+ 	IRQ_DOMAIN_FLAG_IPI_PER_CPU	= (1 << 2),
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index aff09d0b3545..75a916d7ab2a 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -1236,6 +1236,7 @@ struct pci_bits {
+ };
+ 
+ extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
++extern void ata_pci_shutdown_one(struct pci_dev *pdev);
+ extern void ata_pci_remove_one(struct pci_dev *pdev);
+ 
+ #ifdef CONFIG_PM
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 76db046f09ab..248a137112e8 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -225,6 +225,8 @@ struct tty_port_client_operations {
+ 	void (*write_wakeup)(struct tty_port *port);
+ };
+ 
++extern const struct tty_port_client_operations tty_port_default_client_ops;
++
+ struct tty_port {
+ 	struct tty_bufhead	buf;		/* Locked internally */
+ 	struct tty_struct	*tty;		/* Back pointer */
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index a1be64c9940f..22c1f579afe3 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -69,4 +69,7 @@
+ /* Hub needs extra delay after resetting its port. */
+ #define USB_QUIRK_HUB_SLOW_RESET		BIT(14)
+ 
++/* device has blacklisted endpoints */
++#define USB_QUIRK_ENDPOINT_BLACKLIST		BIT(15)
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
+index f0a01a54bd15..df156f1d50b2 100644
+--- a/include/scsi/iscsi_proto.h
++++ b/include/scsi/iscsi_proto.h
+@@ -638,7 +638,6 @@ struct iscsi_reject {
+ #define ISCSI_REASON_BOOKMARK_INVALID	9
+ #define ISCSI_REASON_BOOKMARK_NO_RESOURCES	10
+ #define ISCSI_REASON_NEGOTIATION_RESET	11
+-#define ISCSI_REASON_WAITING_FOR_LOGOUT	12
+ 
+ /* Max. number of Key=Value pairs in a text message */
+ #define MAX_KEY_VALUE_PAIRS	8192
+diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
+index 6665cb29e1a2..c2a71fd8dfaf 100644
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -92,9 +92,9 @@ struct snd_rawmidi_substream {
+ 	struct list_head list;		/* list of all substream for given stream */
+ 	int stream;			/* direction */
+ 	int number;			/* substream number */
+-	unsigned int opened: 1,		/* open flag */
+-		     append: 1,		/* append flag (merge more streams) */
+-		     active_sensing: 1; /* send active sensing when close */
++	bool opened;			/* open flag */
++	bool append;			/* append flag (merge more streams) */
++	bool active_sensing;		/* send active sensing when close */
+ 	int use_count;			/* use counter (for output) */
+ 	size_t bytes;
+ 	struct snd_rawmidi *rmidi;
+diff --git a/ipc/sem.c b/ipc/sem.c
+index 26f8e37fcdcb..2bf535dd0b93 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -2345,11 +2345,9 @@ void exit_sem(struct task_struct *tsk)
+ 		ipc_assert_locked_object(&sma->sem_perm);
+ 		list_del(&un->list_id);
+ 
+-		/* we are the last process using this ulp, acquiring ulp->lock
+-		 * isn't required. Besides that, we are also protected against
+-		 * IPC_RMID as we hold sma->sem_perm lock now
+-		 */
++		spin_lock(&ulp->lock);
+ 		list_del_rcu(&un->list_proc);
++		spin_unlock(&ulp->lock);
+ 
+ 		/* perform adjustments registered in un */
+ 		for (i = 0; i < sma->sem_nsems; i++) {
+diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
+index 86477f3894e5..66e13aace241 100644
+--- a/kernel/bpf/offload.c
++++ b/kernel/bpf/offload.c
+@@ -289,7 +289,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+ 
+ 	ulen = info->jited_prog_len;
+ 	info->jited_prog_len = aux->offload->jited_len;
+-	if (info->jited_prog_len & ulen) {
++	if (info->jited_prog_len && ulen) {
+ 		uinsns = u64_to_user_ptr(info->jited_prog_insns);
+ 		ulen = min_t(u32, info->jited_prog_len, ulen);
+ 		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
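
The one-character bpf/offload.c change above is easy to gloss over: with the
bitwise `&`, two perfectly valid nonzero lengths whose set bits happen not to
overlap evaluate to 0, and the JITed image is silently never copied out. A
two-line demonstration:

/* Why "len & ulen" is wrong as a "both nonzero" test: 4 & 3 == 0. */
#include <stdio.h>

int main(void)
{
	unsigned int jited_len = 4, ulen = 3;

	printf("bitwise : %u\n", jited_len & ulen);	/* 0 - copy skipped */
	printf("logical : %d\n", jited_len && ulen);	/* 1 - copy runs   */
	return 0;
}
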
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index ea57f3d397fe..3f4618510d05 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -126,8 +126,6 @@ static inline void unregister_handler_proc(unsigned int irq,
+ 
+ extern bool irq_can_set_affinity_usr(unsigned int irq);
+ 
+-extern int irq_select_affinity_usr(unsigned int irq);
+-
+ extern void irq_set_thread_affinity(struct irq_desc *desc);
+ 
+ extern int irq_do_set_affinity(struct irq_data *data,
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 23bcfa71077f..eb69b805f908 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -441,23 +441,9 @@ int irq_setup_affinity(struct irq_desc *desc)
+ {
+ 	return irq_select_affinity(irq_desc_get_irq(desc));
+ }
+-#endif
++#endif /* CONFIG_AUTO_IRQ_AFFINITY */
++#endif /* CONFIG_SMP */
+ 
+-/*
+- * Called when a bogus affinity is set via /proc/irq
+- */
+-int irq_select_affinity_usr(unsigned int irq)
+-{
+-	struct irq_desc *desc = irq_to_desc(irq);
+-	unsigned long flags;
+-	int ret;
+-
+-	raw_spin_lock_irqsave(&desc->lock, flags);
+-	ret = irq_setup_affinity(desc);
+-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+-	return ret;
+-}
+-#endif
+ 
+ /**
+  *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
+diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
+index da9addb8d655..e8c655b7a430 100644
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -115,6 +115,28 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
+ 	return show_irq_affinity(AFFINITY_LIST, m);
+ }
+ 
++#ifndef CONFIG_AUTO_IRQ_AFFINITY
++static inline int irq_select_affinity_usr(unsigned int irq)
++{
++	/*
++	 * If the interrupt is started up already then this fails. The
++	 * interrupt is assigned to an online CPU already. There is no
++	 * point to move it around randomly. Tell user space that the
++	 * selected mask is bogus.
++	 *
++	 * If not then any change to the affinity is pointless because the
++	 * startup code invokes irq_setup_affinity() which will select
++	 * a online CPU anyway.
++	 */
++	return -EINVAL;
++}
++#else
++/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
++static inline int irq_select_affinity_usr(unsigned int irq)
++{
++	return irq_select_affinity(irq);
++}
++#endif
+ 
+ static ssize_t write_irq_affinity(int type, struct file *file,
+ 		const char __user *buffer, size_t count, loff_t *pos)
+diff --git a/lib/stackdepot.c b/lib/stackdepot.c
+index e513459a5601..3376a3291186 100644
+--- a/lib/stackdepot.c
++++ b/lib/stackdepot.c
+@@ -92,15 +92,19 @@ static bool init_stack_slab(void **prealloc)
+ 		return true;
+ 	if (stack_slabs[depot_index] == NULL) {
+ 		stack_slabs[depot_index] = *prealloc;
++		*prealloc = NULL;
+ 	} else {
+-		stack_slabs[depot_index + 1] = *prealloc;
++		/* If this is the last depot slab, do not touch the next one. */
++		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
++			stack_slabs[depot_index + 1] = *prealloc;
++			*prealloc = NULL;
++		}
+ 		/*
+ 		 * This smp_store_release pairs with smp_load_acquire() from
+ 		 * |next_slab_inited| above and in depot_save_stack().
+ 		 */
+ 		smp_store_release(&next_slab_inited, 1);
+ 	}
+-	*prealloc = NULL;
+ 	return true;
+ }
+ 
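
The stackdepot fix above repairs two things at once: writing the preallocated
slab into stack_slabs[depot_index + 1] without checking that the slot exists,
and clearing *prealloc even when the slab was never consumed. A condensed
userspace model of the corrected logic follows; the names (MAX_SLABS,
init_slab, slabs) are simplified stand-ins for the real ones, and the
smp_store_release() publication step is omitted:

/* Bounds-check the next slot, and consume *prealloc only if installed. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_SLABS 4

static void *slabs[MAX_SLABS];
static int idx;

static void init_slab(void **prealloc)
{
	if (!slabs[idx]) {
		slabs[idx] = *prealloc;
		*prealloc = NULL;
	} else if (idx + 1 < MAX_SLABS) {	/* the missing check */
		slabs[idx + 1] = *prealloc;
		*prealloc = NULL;
	}
	/* if the table is full, the caller keeps (and frees) prealloc */
}

int main(void)
{
	void *p = malloc(32);

	idx = MAX_SLABS - 1;
	slabs[idx] = malloc(32);
	init_slab(&p);	/* would have written slabs[MAX_SLABS] before the fix */
	printf("prealloc %s\n", p ? "kept by caller" : "consumed");
	free(p);
	free(slabs[idx]);
	return 0;
}
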
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 3a3d109dce21..0f7ff204083e 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -419,8 +419,10 @@ int memcg_expand_shrinker_maps(int new_id)
+ 		if (mem_cgroup_is_root(memcg))
+ 			continue;
+ 		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
+-		if (ret)
++		if (ret) {
++			mem_cgroup_iter_break(NULL, memcg);
+ 			goto unlock;
++		}
+ 	}
+ unlock:
+ 	if (!ret)
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index b37610c0eac6..bc2ecd43251a 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2446,10 +2446,13 @@ out:
+ 			/*
+ 			 * Scan types proportional to swappiness and
+ 			 * their relative recent reclaim efficiency.
+-			 * Make sure we don't miss the last page
+-			 * because of a round-off error.
++			 * Make sure we don't miss the last page on
++			 * the offlined memory cgroups because of a
++			 * round-off error.
+ 			 */
+-			scan = DIV64_U64_ROUND_UP(scan * fraction[file],
++			scan = mem_cgroup_online(memcg) ?
++			       div64_u64(scan * fraction[file], denominator) :
++			       DIV64_U64_ROUND_UP(scan * fraction[file],
+ 						  denominator);
+ 			break;
+ 		case SCAN_FILE:
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index 1ad4017f9b73..0c2dc6def86d 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -845,6 +845,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ 	return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
+ }
+ 
++#define HASHLIMIT_MAX_SIZE 1048576
++
+ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
+ 				     struct xt_hashlimit_htable **hinfo,
+ 				     struct hashlimit_cfg3 *cfg,
+@@ -855,6 +857,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
+ 
+ 	if (cfg->gc_interval == 0 || cfg->expire == 0)
+ 		return -EINVAL;
++	if (cfg->size > HASHLIMIT_MAX_SIZE) {
++		cfg->size = HASHLIMIT_MAX_SIZE;
++		pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
++	}
++	if (cfg->max > HASHLIMIT_MAX_SIZE) {
++		cfg->max = HASHLIMIT_MAX_SIZE;
++		pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
++	}
+ 	if (par->family == NFPROTO_IPV4) {
+ 		if (cfg->srcmask > 32 || cfg->dstmask > 32)
+ 			return -EINVAL;
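
The hashlimit hunk above is plain input validation: both user-supplied table
parameters are clamped to HASHLIMIT_MAX_SIZE with a rate-limited warning
instead of letting an arbitrarily large value drive the allocation. A minimal
userspace rendering of that clamp-and-warn shape; the helper name clamp_cfg is
made up for the example:

/* Clamp an untrusted configuration value and tell the user about it. */
#include <stdio.h>

#define HASHLIMIT_MAX_SIZE 1048576

static unsigned int clamp_cfg(const char *name, unsigned int val)
{
	if (val > HASHLIMIT_MAX_SIZE) {
		fprintf(stderr, "%s too large, truncated to %u\n",
			name, HASHLIMIT_MAX_SIZE);
		return HASHLIMIT_MAX_SIZE;
	}
	return val;
}

int main(void)
{
	printf("size=%u\n", clamp_cfg("size", 5000000u));
	printf("max=%u\n", clamp_cfg("max", 4096u));
	return 0;
}
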
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index 17fdfce1625f..964c4e45de11 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -647,11 +647,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
+ }
+ 
+ /*
+- * Final call destruction under RCU.
++ * Final call destruction - but must be done in process context.
+  */
+-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
++static void rxrpc_destroy_call(struct work_struct *work)
+ {
+-	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
++	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
+ 	struct rxrpc_net *rxnet = call->rxnet;
+ 
+ 	rxrpc_put_connection(call->conn);
+@@ -663,6 +663,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
+ 		wake_up_var(&rxnet->nr_calls);
+ }
+ 
++/*
++ * Final call destruction under RCU.
++ */
++static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
++{
++	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
++
++	if (in_softirq()) {
++		INIT_WORK(&call->processor, rxrpc_destroy_call);
++		if (!rxrpc_queue_work(&call->processor))
++			BUG();
++	} else {
++		rxrpc_destroy_call(&call->processor);
++	}
++}
++
+ /*
+  * clean up a call
+  */
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index bd3d68e0489d..aaf9c419c3dd 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -563,7 +563,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event,
+ 	event->queue = queue;
+ 	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
+ 	if (real_time) {
+-		event->time.time = snd_seq_timer_get_cur_time(q->timer);
++		event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
+ 		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
+ 	} else {
+ 		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
+@@ -1642,7 +1642,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
+ 	tmr = queue->timer;
+ 	status->events = queue->tickq->cells + queue->timeq->cells;
+ 
+-	status->time = snd_seq_timer_get_cur_time(tmr);
++	status->time = snd_seq_timer_get_cur_time(tmr, true);
+ 	status->tick = snd_seq_timer_get_cur_tick(tmr);
+ 
+ 	status->running = tmr->running;
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index 3b3ac96f1f5f..28b4dd45b8d1 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -251,6 +251,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
+ {
+ 	unsigned long flags;
+ 	struct snd_seq_event_cell *cell;
++	snd_seq_tick_time_t cur_tick;
++	snd_seq_real_time_t cur_time;
+ 
+ 	if (q == NULL)
+ 		return;
+@@ -267,17 +269,18 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
+ 
+       __again:
+ 	/* Process tick queue... */
++	cur_tick = snd_seq_timer_get_cur_tick(q->timer);
+ 	for (;;) {
+-		cell = snd_seq_prioq_cell_out(q->tickq,
+-					      &q->timer->tick.cur_tick);
++		cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
+ 		if (!cell)
+ 			break;
+ 		snd_seq_dispatch_event(cell, atomic, hop);
+ 	}
+ 
+ 	/* Process time queue... */
++	cur_time = snd_seq_timer_get_cur_time(q->timer, false);
+ 	for (;;) {
+-		cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
++		cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
+ 		if (!cell)
+ 			break;
+ 		snd_seq_dispatch_event(cell, atomic, hop);
+@@ -405,6 +408,7 @@ int snd_seq_queue_check_access(int queueid, int client)
+ int snd_seq_queue_set_owner(int queueid, int client, int locked)
+ {
+ 	struct snd_seq_queue *q = queueptr(queueid);
++	unsigned long flags;
+ 
+ 	if (q == NULL)
+ 		return -EINVAL;
+@@ -414,8 +418,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked)
+ 		return -EPERM;
+ 	}
+ 
++	spin_lock_irqsave(&q->owner_lock, flags);
+ 	q->locked = locked ? 1 : 0;
+ 	q->owner = client;
++	spin_unlock_irqrestore(&q->owner_lock, flags);
+ 	queue_access_unlock(q);
+ 	queuefree(q);
+ 
+@@ -552,15 +558,17 @@ void snd_seq_queue_client_termination(int client)
+ 	unsigned long flags;
+ 	int i;
+ 	struct snd_seq_queue *q;
++	bool matched;
+ 
+ 	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+ 		if ((q = queueptr(i)) == NULL)
+ 			continue;
+ 		spin_lock_irqsave(&q->owner_lock, flags);
+-		if (q->owner == client)
++		matched = (q->owner == client);
++		if (matched)
+ 			q->klocked = 1;
+ 		spin_unlock_irqrestore(&q->owner_lock, flags);
+-		if (q->owner == client) {
++		if (matched) {
+ 			if (q->timer->running)
+ 				snd_seq_timer_stop(q->timer);
+ 			snd_seq_timer_reset(q->timer);
+@@ -752,6 +760,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
+ 	int i, bpm;
+ 	struct snd_seq_queue *q;
+ 	struct snd_seq_timer *tmr;
++	bool locked;
++	int owner;
+ 
+ 	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+ 		if ((q = queueptr(i)) == NULL)
+@@ -763,9 +773,14 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
+ 		else
+ 			bpm = 0;
+ 
++		spin_lock_irq(&q->owner_lock);
++		locked = q->locked;
++		owner = q->owner;
++		spin_unlock_irq(&q->owner_lock);
++
+ 		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
+-		snd_iprintf(buffer, "owned by client    : %d\n", q->owner);
+-		snd_iprintf(buffer, "lock status        : %s\n", q->locked ? "Locked" : "Free");
++		snd_iprintf(buffer, "owned by client    : %d\n", owner);
++		snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
+ 		snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
+ 		snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
+ 		snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index aed8e1c1f02f..3da44a4f9257 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -437,14 +437,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr)
+ }
+ 
+ /* return current 'real' time. use timeofday() to get better granularity. */
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++					       bool adjust_ktime)
+ {
+ 	snd_seq_real_time_t cur_time;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&tmr->lock, flags);
+ 	cur_time = tmr->cur_time;
+-	if (tmr->running) { 
++	if (adjust_ktime && tmr->running) {
+ 		struct timespec64 tm;
+ 
+ 		ktime_get_ts64(&tm);
+@@ -461,7 +462,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+  high PPQ values) */
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
+ {
+-	return tmr->tick.cur_tick;
++	snd_seq_tick_time_t cur_tick;
++	unsigned long flags;
++
++	spin_lock_irqsave(&tmr->lock, flags);
++	cur_tick = tmr->tick.cur_tick;
++	spin_unlock_irqrestore(&tmr->lock, flags);
++	return cur_tick;
+ }
+ 
+ 
+diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
+index 62f390671096..44f52f5963db 100644
+--- a/sound/core/seq/seq_timer.h
++++ b/sound/core/seq/seq_timer.h
+@@ -135,7 +135,8 @@ int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
+ int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
+ int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
+ int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++					       bool adjust_ktime);
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
+ 
+ extern int seq_default_timer_class;
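
All of the sequencer hunks above apply one idiom: take the queue or timer lock,
copy the racy fields into locals (cur_tick, cur_time, matched, locked, owner),
drop the lock, and act only on the snapshot so two reads cannot disagree. A
minimal pthread version of that snapshot-under-lock pattern, with a cut-down
struct that only borrows the kernel field names:

/*
 * Snapshot-under-lock: copy shared state into locals while holding
 * the lock, then use only the locals afterwards.
 */
#include <pthread.h>
#include <stdio.h>

struct seq_timer {
	pthread_mutex_t lock;
	unsigned long long cur_tick;
};

static unsigned long long timer_get_cur_tick(struct seq_timer *tmr)
{
	unsigned long long cur_tick;

	pthread_mutex_lock(&tmr->lock);
	cur_tick = tmr->cur_tick;	/* one coherent copy */
	pthread_mutex_unlock(&tmr->lock);
	return cur_tick;
}

int main(void)
{
	struct seq_timer tmr = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cur_tick = 96,
	};

	printf("tick=%llu\n", timer_get_cur_tick(&tmr));
	return 0;
}
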
+diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
+index f21633cd9b38..acbe61b8db7b 100644
+--- a/sound/hda/hdmi_chmap.c
++++ b/sound/hda/hdmi_chmap.c
+@@ -249,7 +249,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen)
+ 
+ 	for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) {
+ 		if (spk_alloc & (1 << i))
+-			j += snprintf(buf + j, buflen - j,  " %s",
++			j += scnprintf(buf + j, buflen - j,  " %s",
+ 					cea_speaker_allocation_names[i]);
+ 	}
+ 	buf[j] = '\0';	/* necessary when j == 0 */
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 82b0dc9f528f..f3a6b1d869d8 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -4019,7 +4019,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen)
+ 
+ 	for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++)
+ 		if (pcm & (AC_SUPPCM_BITS_8 << i))
+-			j += snprintf(buf + j, buflen - j,  " %d", bits[i]);
++			j += scnprintf(buf + j, buflen - j,  " %d", bits[i]);
+ 
+ 	buf[j] = '\0'; /* necessary when j == 0 */
+ }
+diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
+index ba7fe9b6655c..864cc8c9ada0 100644
+--- a/sound/pci/hda/hda_eld.c
++++ b/sound/pci/hda/hda_eld.c
+@@ -373,7 +373,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
+ 
+ 	for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++)
+ 		if (pcm & (1 << i))
+-			j += snprintf(buf + j, buflen - j,  " %d",
++			j += scnprintf(buf + j, buflen - j,  " %d",
+ 				alsa_rates[i]);
+ 
+ 	buf[j] = '\0'; /* necessary when j == 0 */
+diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
+index 6ec79c58d48d..6535155e992d 100644
+--- a/sound/pci/hda/hda_sysfs.c
++++ b/sound/pci/hda/hda_sysfs.c
+@@ -221,7 +221,7 @@ static ssize_t init_verbs_show(struct device *dev,
+ 	int i, len = 0;
+ 	mutex_lock(&codec->user_mutex);
+ 	snd_array_for_each(&codec->init_verbs, i, v) {
+-		len += snprintf(buf + len, PAGE_SIZE - len,
++		len += scnprintf(buf + len, PAGE_SIZE - len,
+ 				"0x%02x 0x%03x 0x%04x\n",
+ 				v->nid, v->verb, v->param);
+ 	}
+@@ -271,7 +271,7 @@ static ssize_t hints_show(struct device *dev,
+ 	int i, len = 0;
+ 	mutex_lock(&codec->user_mutex);
+ 	snd_array_for_each(&codec->hints, i, hint) {
+-		len += snprintf(buf + len, PAGE_SIZE - len,
++		len += scnprintf(buf + len, PAGE_SIZE - len,
+ 				"%s = %s\n", hint->key, hint->val);
+ 	}
+ 	mutex_unlock(&codec->user_mutex);
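
The four sound/ hunks above all fix the same accumulation bug: snprintf()
returns the length the output would have needed, so `j += snprintf(buf + j,
buflen - j, ...)` marches j past buflen once the buffer fills, and the next
iteration computes a bogus huge remaining size. scnprintf() returns only what
was actually stored. The userspace reproduction below defines a minimal
scnprintf() stand-in, since the real helper exists only in the kernel:

/* snprintf returns the would-be length; scnprintf returns what fit. */
#include <stdarg.h>
#include <stdio.h>

static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (!size)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	/* clamp to what actually fit, excluding the terminating NUL */
	return i >= (int)size ? (int)size - 1 : i;
}

int main(void)
{
	char buf[8];
	int j;

	/* buggy pattern: j overshoots the buffer once output is truncated */
	j = snprintf(buf, sizeof(buf), "%s", "0123456789");
	printf("snprintf  advanced j to %d (buflen %zu)\n", j, sizeof(buf));

	/* fixed pattern: j never exceeds what was really written */
	j = scnprintf(buf, sizeof(buf), "%s", "0123456789");
	printf("scnprintf advanced j to %d\n", j);
	return 0;
}
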
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a8a47e1596dd..94fffc0675a7 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2442,7 +2442,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
++	SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+ 	SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
+index a3db6a68dfe6..8bcdeb281770 100644
+--- a/sound/soc/sunxi/sun8i-codec.c
++++ b/sound/soc/sunxi/sun8i-codec.c
+@@ -89,6 +89,7 @@
+ 
+ #define SUN8I_SYS_SR_CTRL_AIF1_FS_MASK		GENMASK(15, 12)
+ #define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK		GENMASK(11, 8)
++#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK	GENMASK(3, 2)
+ #define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK	GENMASK(5, 4)
+ #define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK	GENMASK(8, 6)
+ #define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK	GENMASK(12, 9)
+@@ -250,7 +251,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ 		return -EINVAL;
+ 	}
+ 	regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
+-			   BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT),
++			   SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK,
+ 			   value << SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT);
+ 
+ 	return 0;


Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the raw message as an mbox file, import it into your mail
  client, and reply-to-all from there (one way to fetch the raw
  message is sketched after this list).

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

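  # Threads the reply under this message; --to addresses the
  # committer and --cc the mailing lists.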
  git send-email \
    --in-reply-to=1582907884.95211ef5ab2b6b97467a0a274eeb89815029df2e.mpagano@gentoo \
    --to=mpagano@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, use the mailto: link for this message on the
  archive page.
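
For the mbox method, public-inbox archives conventionally serve each
message in mbox form at its message URL followed by /raw. A minimal
sketch, assuming a hypothetical archive host; inbox.example.org is a
placeholder, so substitute the real base URL for this list:

  # Placeholder host, not the real archive for this list. The
  # Message-ID path component is the same value passed to
  # git send-email's --in-reply-to switch above.
  curl -fsS -o reply.mbox \
    'https://inbox.example.org/gentoo-commits/1582907884.95211ef5ab2b6b97467a0a274eeb89815029df2e.mpagano@gentoo/raw'

Import reply.mbox into your mail client and reply-to-all from there.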
Be sure your reply has a Subject: header at the top and a blank line before the message body.

This is a public inbox; see the archive's mirroring instructions
for how to clone and mirror all data and code used for this inbox.
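
A public-inbox v2 archive is stored as one or more git "epoch"
repositories, conventionally cloneable at the inbox URL followed by
/0, /1, and so on. A hedged sketch with the same placeholder host;
the real clone URLs appear on the archive's mirroring page:

  # inbox.example.org is a placeholder host; epoch 0 is the oldest
  # segment of the archive.
  git clone --mirror https://inbox.example.org/gentoo-commits/0 \
    gentoo-commits-0.git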