From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Fri, 21 Dec 2018 14:46:24 +0000 (UTC)
Message-ID: <1545403562.1e235dfa67bf6924c75d560b809b40f93a3460c5.mpagano@gentoo>

commit:     1e235dfa67bf6924c75d560b809b40f93a3460c5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec 21 14:46:02 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec 21 14:46:02 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1e235dfa

linux-patches: Linux patch 4.14.90

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1089_linux-4.14.90.patch | 2839 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2843 insertions(+)

diff --git a/0000_README b/0000_README
index 1e80ac5..dc7f560 100644
--- a/0000_README
+++ b/0000_README
@@ -399,6 +399,10 @@ Patch:  1088_4.14.89.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.89
 
+Patch:  1089_4.14.90.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.90
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1089_linux-4.14.90.patch b/1089_linux-4.14.90.patch
new file mode 100644
index 0000000..f62945f
--- /dev/null
+++ b/1089_linux-4.14.90.patch
@@ -0,0 +1,2839 @@
+diff --git a/Makefile b/Makefile
+index b83477be8d0c..280c7193e246 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 89
++SUBLEVEL = 90
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
+index c22b181e8206..2f39d9b3886e 100644
+--- a/arch/arc/include/asm/io.h
++++ b/arch/arc/include/asm/io.h
+@@ -12,6 +12,7 @@
+ #include <linux/types.h>
+ #include <asm/byteorder.h>
+ #include <asm/page.h>
++#include <asm/unaligned.h>
+ 
+ #ifdef CONFIG_ISA_ARCV2
+ #include <asm/barrier.h>
+@@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
+ 	return w;
+ }
+ 
++/*
++ * {read,write}s{b,w,l}() repeatedly access the same IO address in
++ * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
++ * @count times
++ */
++#define __raw_readsx(t,f) \
++static inline void __raw_reads##f(const volatile void __iomem *addr,	\
++				  void *ptr, unsigned int count)	\
++{									\
++	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
++	u##t *buf = ptr;						\
++									\
++	if (!count)							\
++		return;							\
++									\
++	/* Some ARC CPU's don't support unaligned accesses */		\
++	if (is_aligned) {						\
++		do {							\
++			u##t x = __raw_read##f(addr);			\
++			*buf++ = x;					\
++		} while (--count);					\
++	} else {							\
++		do {							\
++			u##t x = __raw_read##f(addr);			\
++			put_unaligned(x, buf++);			\
++		} while (--count);					\
++	}								\
++}
++
++#define __raw_readsb __raw_readsb
++__raw_readsx(8, b)
++#define __raw_readsw __raw_readsw
++__raw_readsx(16, w)
++#define __raw_readsl __raw_readsl
++__raw_readsx(32, l)
++
+ #define __raw_writeb __raw_writeb
+ static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+ {
+@@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+ 
+ }
+ 
++#define __raw_writesx(t,f)						\
++static inline void __raw_writes##f(volatile void __iomem *addr, 	\
++				   const void *ptr, unsigned int count)	\
++{									\
++	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
++	const u##t *buf = ptr;						\
++									\
++	if (!count)							\
++		return;							\
++									\
++	/* Some ARC CPU's don't support unaligned accesses */		\
++	if (is_aligned) {						\
++		do {							\
++			__raw_write##f(*buf++, addr);			\
++		} while (--count);					\
++	} else {							\
++		do {							\
++			__raw_write##f(get_unaligned(buf++), addr);	\
++		} while (--count);					\
++	}								\
++}
++
++#define __raw_writesb __raw_writesb
++__raw_writesx(8, b)
++#define __raw_writesw __raw_writesw
++__raw_writesx(16, w)
++#define __raw_writesl __raw_writesl
++__raw_writesx(32, l)
++
+ /*
+  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
+  * Based on ARM model for the typical use case
+@@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+ #define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
+ #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+ #define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
++#define readsb(p,d,l)		({ __raw_readsb(p,d,l); __iormb(); })
++#define readsw(p,d,l)		({ __raw_readsw(p,d,l); __iormb(); })
++#define readsl(p,d,l)		({ __raw_readsl(p,d,l); __iormb(); })
+ 
+ #define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
+ #define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
+ #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
++#define writesb(p,d,l)		({ __iowmb(); __raw_writesb(p,d,l); })
++#define writesw(p,d,l)		({ __iowmb(); __raw_writesw(p,d,l); })
++#define writesl(p,d,l)		({ __iowmb(); __raw_writesl(p,d,l); })
+ 
+ /*
+  * Relaxed API for drivers which can handle barrier ordering themselves
+diff --git a/arch/arm/mach-mmp/cputype.h b/arch/arm/mach-mmp/cputype.h
+index 446edaeb78a7..a96abcf521b4 100644
+--- a/arch/arm/mach-mmp/cputype.h
++++ b/arch/arm/mach-mmp/cputype.h
+@@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void)
+ #define cpu_is_pxa910()	(0)
+ #endif
+ 
+-#ifdef CONFIG_CPU_MMP2
++#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
+ static inline int cpu_is_mmp2(void)
+ {
+-	return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
++	return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
++		(((mmp_chip_id & 0xfff) == 0x410) ||
++		 ((mmp_chip_id & 0xfff) == 0x610));
+ }
+ #else
+ #define cpu_is_mmp2()	(0)
+diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
+index de78109d002d..50a70edbc863 100644
+--- a/arch/arm/mm/cache-v7.S
++++ b/arch/arm/mm/cache-v7.S
+@@ -359,14 +359,16 @@ v7_dma_inv_range:
+ 	ALT_UP(W(nop))
+ #endif
+ 	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
++	addne	r0, r0, r2
+ 
+ 	tst	r1, r3
+ 	bic	r1, r1, r3
+ 	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D / U line
+-1:
+-	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
+-	add	r0, r0, r2
+ 	cmp	r0, r1
++1:
++	mcrlo	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
++	addlo	r0, r0, r2
++	cmplo	r0, r1
+ 	blo	1b
+ 	dsb	st
+ 	ret	lr
+diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
+index 788486e830d3..32aa2a2aa260 100644
+--- a/arch/arm/mm/cache-v7m.S
++++ b/arch/arm/mm/cache-v7m.S
+@@ -73,9 +73,11 @@
+ /*
+  * dcimvac: Invalidate data cache line by MVA to PoC
+  */
+-.macro dcimvac, rt, tmp
+-	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
++.irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
++.macro dcimvac\c, rt, tmp
++	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
+ .endm
++.endr
+ 
+ /*
+  * dccmvau: Clean data cache line by MVA to PoU
+@@ -369,14 +371,16 @@ v7m_dma_inv_range:
+ 	tst	r0, r3
+ 	bic	r0, r0, r3
+ 	dccimvacne r0, r3
++	addne	r0, r0, r2
+ 	subne	r3, r2, #1	@ restore r3, corrupted by v7m's dccimvac
+ 	tst	r1, r3
+ 	bic	r1, r1, r3
+ 	dccimvacne r1, r3
+-1:
+-	dcimvac r0, r3
+-	add	r0, r0, r2
+ 	cmp	r0, r1
++1:
++	dcimvaclo r0, r3
++	addlo	r0, r0, r2
++	cmplo	r0, r1
+ 	blo	1b
+ 	dsb	st
+ 	ret	lr
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index ece2d1d43724..dafeb5f81353 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -915,7 +915,7 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
+ /* dst = *(size*)(src + off) */
+ static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
+ 			      s32 off, struct jit_ctx *ctx, const u8 sz){
+-	const u8 *tmp = bpf2a32[TMP_REG_1];
++	const u8 *tmp = bpf2a32[TMP_REG_2];
+ 	const u8 *rd = dstk ? tmp : dst;
+ 	u8 rm = src;
+ 	s32 off_max;
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index 58470b151bc3..ba88b5b68db6 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -633,9 +633,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
+ 						   prot,
+ 						   __builtin_return_address(0));
+ 		if (addr) {
+-			memset(addr, 0, size);
+ 			if (!coherent)
+ 				__dma_flush_area(page_to_virt(page), iosize);
++			memset(addr, 0, size);
+ 		} else {
+ 			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+ 			dma_release_from_contiguous(dev, page,
+diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
+index dab616a33b8d..f2197654be07 100644
+--- a/arch/powerpc/kernel/msi.c
++++ b/arch/powerpc/kernel/msi.c
+@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
+ {
+ 	struct pci_controller *phb = pci_bus_to_host(dev->bus);
+ 
+-	phb->controller_ops.teardown_msi_irqs(dev);
++	/*
++	 * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
++	 * so check the pointer again.
++	 */
++	if (phb->controller_ops.teardown_msi_irqs)
++		phb->controller_ops.teardown_msi_irqs(dev);
+ }
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index ce3658dd98e8..c5290aecdf06 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -241,9 +241,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+ 
+ # Avoid indirect branches in kernel to deal with Spectre
+ ifdef CONFIG_RETPOLINE
+-ifeq ($(RETPOLINE_CFLAGS),)
+-  $(error You are building kernel with non-retpoline compiler, please update your compiler.)
+-endif
+   KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
+ endif
+ 
+@@ -260,6 +257,13 @@ archprepare:
+ ifeq ($(CONFIG_KEXEC_FILE),y)
+ 	$(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
+ endif
++ifdef CONFIG_RETPOLINE
++ifeq ($(RETPOLINE_CFLAGS),)
++	@echo "You are building kernel with non-retpoline compiler." >&2
++	@echo "Please update your compiler." >&2
++	@false
++endif
++endif
+ 
+ ###
+ # Kernel objects
+diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
+index 9982dd96f093..f784b95e44df 100644
+--- a/arch/x86/include/asm/qspinlock.h
++++ b/arch/x86/include/asm/qspinlock.h
+@@ -5,6 +5,29 @@
+ #include <asm/cpufeature.h>
+ #include <asm-generic/qspinlock_types.h>
+ #include <asm/paravirt.h>
++#include <asm/rmwcc.h>
++
++#define _Q_PENDING_LOOPS	(1 << 9)
++
++#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
++
++static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
++{
++	GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
++			 "I", _Q_PENDING_OFFSET, "%0", c);
++}
++
++static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
++{
++	u32 val = 0;
++
++	if (__queued_RMW_btsl(lock))
++		val |= _Q_PENDING_VAL;
++
++	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
++
++	return val;
++}
+ 
+ #define	queued_spin_unlock queued_spin_unlock
+ /**
+@@ -15,7 +38,7 @@
+  */
+ static inline void native_queued_spin_unlock(struct qspinlock *lock)
+ {
+-	smp_store_release((u8 *)lock, 0);
++	smp_store_release(&lock->locked, 0);
+ }
+ 
+ #ifdef CONFIG_PARAVIRT_SPINLOCKS
+diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
+index 923307ea11c7..9ef5ee03d2d7 100644
+--- a/arch/x86/include/asm/qspinlock_paravirt.h
++++ b/arch/x86/include/asm/qspinlock_paravirt.h
+@@ -22,8 +22,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
+  *
+  * void __pv_queued_spin_unlock(struct qspinlock *lock)
+  * {
+- *	struct __qspinlock *l = (void *)lock;
+- *	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
++ *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
+  *
+  *	if (likely(lockval == _Q_LOCKED_VAL))
+  *		return;
+diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
+index 5fdacb322ceb..c3e6be110b7d 100644
+--- a/arch/x86/platform/efi/early_printk.c
++++ b/arch/x86/platform/efi/early_printk.c
+@@ -179,7 +179,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
+ 			num--;
+ 		}
+ 
+-		if (efi_x >= si->lfb_width) {
++		if (efi_x + font->width > si->lfb_width) {
+ 			efi_x = 0;
+ 			efi_y += font->height;
+ 		}
+diff --git a/block/elevator.c b/block/elevator.c
+index 153926a90901..8320d97240be 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -83,12 +83,15 @@ bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
+ }
+ EXPORT_SYMBOL(elv_bio_merge_ok);
+ 
+-static struct elevator_type *elevator_find(const char *name)
++/*
++ * Return scheduler with name 'name' and with matching 'mq capability
++ */
++static struct elevator_type *elevator_find(const char *name, bool mq)
+ {
+ 	struct elevator_type *e;
+ 
+ 	list_for_each_entry(e, &elv_list, list) {
+-		if (!strcmp(e->elevator_name, name))
++		if (!strcmp(e->elevator_name, name) && (mq == e->uses_mq))
+ 			return e;
+ 	}
+ 
+@@ -100,25 +103,25 @@ static void elevator_put(struct elevator_type *e)
+ 	module_put(e->elevator_owner);
+ }
+ 
+-static struct elevator_type *elevator_get(const char *name, bool try_loading)
++static struct elevator_type *elevator_get(struct request_queue *q,
++					  const char *name, bool try_loading)
+ {
+ 	struct elevator_type *e;
+ 
+ 	spin_lock(&elv_list_lock);
+ 
+-	e = elevator_find(name);
++	e = elevator_find(name, q->mq_ops != NULL);
+ 	if (!e && try_loading) {
+ 		spin_unlock(&elv_list_lock);
+ 		request_module("%s-iosched", name);
+ 		spin_lock(&elv_list_lock);
+-		e = elevator_find(name);
++		e = elevator_find(name, q->mq_ops != NULL);
+ 	}
+ 
+ 	if (e && !try_module_get(e->elevator_owner))
+ 		e = NULL;
+ 
+ 	spin_unlock(&elv_list_lock);
+-
+ 	return e;
+ }
+ 
+@@ -144,8 +147,12 @@ void __init load_default_elevator_module(void)
+ 	if (!chosen_elevator[0])
+ 		return;
+ 
++	/*
++	 * Boot parameter is deprecated, we haven't supported that for MQ.
++	 * Only look for non-mq schedulers from here.
++	 */
+ 	spin_lock(&elv_list_lock);
+-	e = elevator_find(chosen_elevator);
++	e = elevator_find(chosen_elevator, false);
+ 	spin_unlock(&elv_list_lock);
+ 
+ 	if (!e)
+@@ -202,7 +209,7 @@ int elevator_init(struct request_queue *q, char *name)
+ 	q->boundary_rq = NULL;
+ 
+ 	if (name) {
+-		e = elevator_get(name, true);
++		e = elevator_get(q, name, true);
+ 		if (!e)
+ 			return -EINVAL;
+ 	}
+@@ -214,7 +221,7 @@ int elevator_init(struct request_queue *q, char *name)
+ 	 * allowed from async.
+ 	 */
+ 	if (!e && !q->mq_ops && *chosen_elevator) {
+-		e = elevator_get(chosen_elevator, false);
++		e = elevator_get(q, chosen_elevator, false);
+ 		if (!e)
+ 			printk(KERN_ERR "I/O scheduler %s not found\n",
+ 							chosen_elevator);
+@@ -229,17 +236,17 @@ int elevator_init(struct request_queue *q, char *name)
+ 		 */
+ 		if (q->mq_ops) {
+ 			if (q->nr_hw_queues == 1)
+-				e = elevator_get("mq-deadline", false);
++				e = elevator_get(q, "mq-deadline", false);
+ 			if (!e)
+ 				return 0;
+ 		} else
+-			e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
++			e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
+ 
+ 		if (!e) {
+ 			printk(KERN_ERR
+ 				"Default I/O scheduler not found. " \
+ 				"Using noop.\n");
+-			e = elevator_get("noop", false);
++			e = elevator_get(q, "noop", false);
+ 		}
+ 	}
+ 
+@@ -905,7 +912,7 @@ int elv_register(struct elevator_type *e)
+ 
+ 	/* register, don't allow duplicate names */
+ 	spin_lock(&elv_list_lock);
+-	if (elevator_find(e->elevator_name)) {
++	if (elevator_find(e->elevator_name, e->uses_mq)) {
+ 		spin_unlock(&elv_list_lock);
+ 		if (e->icq_cache)
+ 			kmem_cache_destroy(e->icq_cache);
+@@ -1066,7 +1073,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
+ 		return elevator_switch(q, NULL);
+ 
+ 	strlcpy(elevator_name, name, sizeof(elevator_name));
+-	e = elevator_get(strstrip(elevator_name), true);
++	e = elevator_get(q, strstrip(elevator_name), true);
+ 	if (!e)
+ 		return -EINVAL;
+ 
+@@ -1076,15 +1083,6 @@ static int __elevator_change(struct request_queue *q, const char *name)
+ 		return 0;
+ 	}
+ 
+-	if (!e->uses_mq && q->mq_ops) {
+-		elevator_put(e);
+-		return -EINVAL;
+-	}
+-	if (e->uses_mq && !q->mq_ops) {
+-		elevator_put(e);
+-		return -EINVAL;
+-	}
+-
+ 	return elevator_switch(q, e);
+ }
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 6938bd86ff1c..04f406d7e973 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4593,6 +4593,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 
+ 	/*
+diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
+index ad8d483a35cd..ca7d37e2c7be 100644
+--- a/drivers/clk/mmp/clk.c
++++ b/drivers/clk/mmp/clk.c
+@@ -183,7 +183,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
+ 		pr_err("CLK %d has invalid pointer %p\n", id, clk);
+ 		return;
+ 	}
+-	if (id > unit->nr_clks) {
++	if (id >= unit->nr_clks) {
+ 		pr_err("CLK %d is invalid\n", id);
+ 		return;
+ 	}
+diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
+index ca9a0a536174..05c127cafa46 100644
+--- a/drivers/clk/mvebu/cp110-system-controller.c
++++ b/drivers/clk/mvebu/cp110-system-controller.c
+@@ -203,11 +203,11 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec,
+ 	unsigned int idx = clkspec->args[1];
+ 
+ 	if (type == CP110_CLK_TYPE_CORE) {
+-		if (idx > CP110_MAX_CORE_CLOCKS)
++		if (idx >= CP110_MAX_CORE_CLOCKS)
+ 			return ERR_PTR(-EINVAL);
+ 		return clk_data->hws[idx];
+ 	} else if (type == CP110_CLK_TYPE_GATABLE) {
+-		if (idx > CP110_MAX_GATABLE_CLOCKS)
++		if (idx >= CP110_MAX_GATABLE_CLOCKS)
+ 			return ERR_PTR(-EINVAL);
+ 		return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx];
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index e8d9479615c9..bb4b804255a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -723,7 +723,8 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ 					      (adev->pdev->revision == 0xe7) ||
+ 					      (adev->pdev->revision == 0xef))) ||
+ 					    ((adev->pdev->device == 0x6fdf) &&
+-					     (adev->pdev->revision == 0xef))) {
++					     ((adev->pdev->revision == 0xef) ||
++					      (adev->pdev->revision == 0xff)))) {
+ 						info->is_kicker = true;
+ 						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+ 					} else
+diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
+index 0cd827e11fa2..de26df0c6044 100644
+--- a/drivers/gpu/drm/ast/ast_fb.c
++++ b/drivers/gpu/drm/ast/ast_fb.c
+@@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
+ {
+ 	struct ast_framebuffer *afb = &afbdev->afb;
+ 
++	drm_crtc_force_disable_all(dev);
+ 	drm_fb_helper_unregister_fbi(&afbdev->helper);
+ 
+ 	if (afb->obj) {
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 0775e71ea95b..e0483c068d23 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -343,8 +343,13 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
+ 	 * may not be visible to the HW prior to the completion of the UC
+ 	 * register write and that we may begin execution from the context
+ 	 * before its image is complete leading to invalid PD chasing.
++	 *
++	 * Furthermore, Braswell, at least, wants a full mb to be sure that
++	 * the writes are coherent in memory (visible to the GPU) prior to
++	 * execution, and not just visible to other CPUs (as is the result of
++	 * wmb).
+ 	 */
+-	wmb();
++	mb();
+ 	return ce->lrc_desc;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
+index 025d454163b0..8f77047a226d 100644
+--- a/drivers/gpu/drm/msm/msm_atomic.c
++++ b/drivers/gpu/drm/msm/msm_atomic.c
+@@ -93,7 +93,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
+ 		if (!new_crtc_state->active)
+ 			continue;
+ 
++		if (drm_crtc_vblank_get(crtc))
++			continue;
++
+ 		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
++
++		drm_crtc_vblank_put(crtc);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
+index b23d33622f37..2a90aa4caec0 100644
+--- a/drivers/gpu/drm/msm/msm_iommu.c
++++ b/drivers/gpu/drm/msm/msm_iommu.c
+@@ -66,7 +66,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
+ //	pm_runtime_get_sync(mmu->dev);
+ 	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+ //	pm_runtime_put_sync(mmu->dev);
+-	WARN_ON(ret < 0);
++	WARN_ON(!ret);
+ 
+ 	return (ret == len) ? 0 : -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index 926ec51ba5be..25f6a1f6ce20 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -3378,6 +3378,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
+ {
+ 	struct nv50_mstm *mstm = *pmstm;
+ 	if (mstm) {
++		drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
+ 		kfree(*pmstm);
+ 		*pmstm = NULL;
+ 	}
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+index 4cacb03f6733..ff3d0f5efbb1 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+@@ -425,11 +425,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
+-{
+-	rockchip_drm_platform_remove(pdev);
+-}
+-
+ static const struct of_device_id rockchip_drm_dt_ids[] = {
+ 	{ .compatible = "rockchip,display-subsystem", },
+ 	{ /* sentinel */ },
+@@ -439,7 +434,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
+ static struct platform_driver rockchip_drm_platform_driver = {
+ 	.probe = rockchip_drm_platform_probe,
+ 	.remove = rockchip_drm_platform_remove,
+-	.shutdown = rockchip_drm_platform_shutdown,
+ 	.driver = {
+ 		.name = "rockchip-drm",
+ 		.of_match_table = rockchip_drm_dt_ids,
+diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
+index 6039f071fab1..5f1de24206ab 100644
+--- a/drivers/hid/hid-hyperv.c
++++ b/drivers/hid/hid-hyperv.c
+@@ -309,7 +309,7 @@ static void mousevsc_on_receive(struct hv_device *device,
+ 		hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
+ 				 input_dev->input_buf, len, 1);
+ 
+-		pm_wakeup_event(&input_dev->device->device, 0);
++		pm_wakeup_hard_event(&input_dev->device->device);
+ 
+ 		break;
+ 	default:
+diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
+index 13f07482ec68..deea13838648 100644
+--- a/drivers/i2c/busses/i2c-axxia.c
++++ b/drivers/i2c/busses/i2c-axxia.c
+@@ -74,8 +74,7 @@
+ 				 MST_STATUS_ND)
+ #define   MST_STATUS_ERR	(MST_STATUS_NAK | \
+ 				 MST_STATUS_AL  | \
+-				 MST_STATUS_IP  | \
+-				 MST_STATUS_TSS)
++				 MST_STATUS_IP)
+ #define MST_TX_BYTES_XFRD	0x50
+ #define MST_RX_BYTES_XFRD	0x54
+ #define SCL_HIGH_PERIOD		0x80
+@@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
+ 			 */
+ 			if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) {
+ 				idev->msg_err = -EPROTO;
+-				i2c_int_disable(idev, ~0);
++				i2c_int_disable(idev, ~MST_STATUS_TSS);
+ 				complete(&idev->msg_complete);
+ 				break;
+ 			}
+@@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
+ 
+ 	if (status & MST_STATUS_SCC) {
+ 		/* Stop completed */
+-		i2c_int_disable(idev, ~0);
++		i2c_int_disable(idev, ~MST_STATUS_TSS);
+ 		complete(&idev->msg_complete);
+ 	} else if (status & MST_STATUS_SNS) {
+ 		/* Transfer done */
+-		i2c_int_disable(idev, ~0);
++		i2c_int_disable(idev, ~MST_STATUS_TSS);
+ 		if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
+ 			axxia_i2c_empty_rx_fifo(idev);
+ 		complete(&idev->msg_complete);
++	} else if (status & MST_STATUS_TSS) {
++		/* Transfer timeout */
++		idev->msg_err = -ETIMEDOUT;
++		i2c_int_disable(idev, ~MST_STATUS_TSS);
++		complete(&idev->msg_complete);
+ 	} else if (unlikely(status & MST_STATUS_ERR)) {
+ 		/* Transfer error */
+ 		i2c_int_disable(idev, ~0);
+@@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+ 	u32 rx_xfer, tx_xfer;
+ 	u32 addr_1, addr_2;
+ 	unsigned long time_left;
++	unsigned int wt_value;
+ 
+ 	idev->msg = msg;
+ 	idev->msg_xfrd = 0;
+-	idev->msg_err = 0;
+ 	reinit_completion(&idev->msg_complete);
+ 
+ 	if (i2c_m_ten(msg)) {
+@@ -382,9 +386,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+ 	else if (axxia_i2c_fill_tx_fifo(idev) != 0)
+ 		int_mask |= MST_STATUS_TFL;
+ 
++	wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL));
++	/* Disable wait timer temporarly */
++	writel(wt_value, idev->base + WAIT_TIMER_CONTROL);
++	/* Check if timeout error happened */
++	if (idev->msg_err)
++		goto out;
++
+ 	/* Start manual mode */
+ 	writel(CMD_MANUAL, idev->base + MST_COMMAND);
+ 
++	writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
++
+ 	i2c_int_enable(idev, int_mask);
+ 
+ 	time_left = wait_for_completion_timeout(&idev->msg_complete,
+@@ -395,13 +408,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+ 	if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
+ 		dev_warn(idev->dev, "busy after xfer\n");
+ 
+-	if (time_left == 0)
++	if (time_left == 0) {
+ 		idev->msg_err = -ETIMEDOUT;
+-
+-	if (idev->msg_err == -ETIMEDOUT)
+ 		i2c_recover_bus(&idev->adapter);
++		axxia_i2c_init(idev);
++	}
+ 
+-	if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
++out:
++	if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO &&
++			idev->msg_err != -ETIMEDOUT)
+ 		axxia_i2c_init(idev);
+ 
+ 	return idev->msg_err;
+@@ -409,7 +424,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+ 
+ static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
+ {
+-	u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
++	u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
+ 	unsigned long time_left;
+ 
+ 	reinit_completion(&idev->msg_complete);
+@@ -436,6 +451,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+ 	int i;
+ 	int ret = 0;
+ 
++	idev->msg_err = 0;
++	i2c_int_enable(idev, MST_STATUS_TSS);
++
+ 	for (i = 0; ret == 0 && i < num; ++i)
+ 		ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
+ 
+diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
+index efefcfa24a4c..d2178f701b41 100644
+--- a/drivers/i2c/busses/i2c-scmi.c
++++ b/drivers/i2c/busses/i2c-scmi.c
+@@ -364,6 +364,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
+ {
+ 	struct acpi_smbus_cmi *smbus_cmi;
+ 	const struct acpi_device_id *id;
++	int ret;
+ 
+ 	smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
+ 	if (!smbus_cmi)
+@@ -385,8 +386,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
+ 	acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
+ 			    acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
+ 
+-	if (smbus_cmi->cap_info == 0)
++	if (smbus_cmi->cap_info == 0) {
++		ret = -ENODEV;
+ 		goto err;
++	}
+ 
+ 	snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
+ 		"SMBus CMI adapter %s",
+@@ -397,7 +400,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
+ 	smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ 	smbus_cmi->adapter.dev.parent = &device->dev;
+ 
+-	if (i2c_add_adapter(&smbus_cmi->adapter)) {
++	ret = i2c_add_adapter(&smbus_cmi->adapter);
++	if (ret) {
+ 		dev_err(&device->dev, "Couldn't register adapter!\n");
+ 		goto err;
+ 	}
+@@ -407,7 +411,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
+ err:
+ 	kfree(smbus_cmi);
+ 	device->driver_data = NULL;
+-	return -EIO;
++	return ret;
+ }
+ 
+ static int acpi_smbus_cmi_remove(struct acpi_device *device)
+diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
+index a403e8579b65..bc26ec822e26 100644
+--- a/drivers/i2c/busses/i2c-uniphier-f.c
++++ b/drivers/i2c/busses/i2c-uniphier-f.c
+@@ -470,9 +470,26 @@ static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv)
+ 
+ 	uniphier_fi2c_reset(priv);
+ 
++	/*
++	 *  Standard-mode: tLOW + tHIGH = 10 us
++	 *  Fast-mode:     tLOW + tHIGH = 2.5 us
++	 */
+ 	writel(cyc, priv->membase + UNIPHIER_FI2C_CYC);
+-	writel(cyc / 2, priv->membase + UNIPHIER_FI2C_LCTL);
++	/*
++	 *  Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us, tBUF = 4.7 us
++	 *  Fast-mode:     tLOW = 1.3 us, tHIGH = 0.6 us, tBUF = 1.3 us
++	 * "tLow/tHIGH = 5/4" meets both.
++	 */
++	writel(cyc * 5 / 9, priv->membase + UNIPHIER_FI2C_LCTL);
++	/*
++	 *  Standard-mode: tHD;STA = 4.0 us, tSU;STA = 4.7 us, tSU;STO = 4.0 us
++	 *  Fast-mode:     tHD;STA = 0.6 us, tSU;STA = 0.6 us, tSU;STO = 0.6 us
++	 */
+ 	writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT);
++	/*
++	 *  Standard-mode: tSU;DAT = 250 ns
++	 *  Fast-mode:     tSU;DAT = 100 ns
++	 */
+ 	writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT);
+ 
+ 	uniphier_fi2c_prepare_operation(priv);
+diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
+index 454f914ae66d..c488e558aef7 100644
+--- a/drivers/i2c/busses/i2c-uniphier.c
++++ b/drivers/i2c/busses/i2c-uniphier.c
+@@ -320,7 +320,13 @@ static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv)
+ 
+ 	uniphier_i2c_reset(priv, true);
+ 
+-	writel((cyc / 2 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
++	/*
++	 * Bit30-16: clock cycles of tLOW.
++	 *  Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us
++	 *  Fast-mode:     tLOW = 1.3 us, tHIGH = 0.6 us
++	 * "tLow/tHIGH = 5/4" meets both.
++	 */
++	writel((cyc * 5 / 9 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
+ 
+ 	uniphier_i2c_reset(priv, false);
+ }
+diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
+index c5b902b86b44..203ed4adc04a 100644
+--- a/drivers/ide/pmac.c
++++ b/drivers/ide/pmac.c
+@@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
+ 	struct device_node *root = of_find_node_by_path("/");
+ 	const char *model = of_get_property(root, "model", NULL);
+ 
++	of_node_put(root);
+ 	/* Get cable type from device-tree. */
+ 	if (cable && !strncmp(cable, "80-", 3)) {
+ 		/* Some drives fail to detect 80c cable in PowerBook */
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index c14ec04f2a89..cbe5ab26d95b 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -187,7 +187,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
+ 	pq->ctxt = uctxt->ctxt;
+ 	pq->subctxt = fd->subctxt;
+ 	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
+-	pq->state = SDMA_PKT_Q_INACTIVE;
+ 	atomic_set(&pq->n_reqs, 0);
+ 	init_waitqueue_head(&pq->wait);
+ 	atomic_set(&pq->n_locked, 0);
+@@ -276,7 +275,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+ 		/* Wait until all requests have been freed. */
+ 		wait_event_interruptible(
+ 			pq->wait,
+-			(ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
++			!atomic_read(&pq->n_reqs));
+ 		kfree(pq->reqs);
+ 		kfree(pq->req_in_use);
+ 		kmem_cache_destroy(pq->txreq_cache);
+@@ -312,6 +311,13 @@ static u8 dlid_to_selector(u16 dlid)
+ 	return mapping[hash];
+ }
+ 
++/**
++ * hfi1_user_sdma_process_request() - Process and start a user sdma request
++ * @fd: valid file descriptor
++ * @iovec: array of io vectors to process
++ * @dim: overall iovec array size
++ * @count: number of io vector array entries processed
++ */
+ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ 				   struct iovec *iovec, unsigned long dim,
+ 				   unsigned long *count)
+@@ -560,20 +566,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ 		req->ahg_idx = sdma_ahg_alloc(req->sde);
+ 
+ 	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
++	pq->state = SDMA_PKT_Q_ACTIVE;
+ 	/* Send the first N packets in the request to buy us some time */
+ 	ret = user_sdma_send_pkts(req, pcount);
+ 	if (unlikely(ret < 0 && ret != -EBUSY))
+ 		goto free_req;
+ 
+-	/*
+-	 * It is possible that the SDMA engine would have processed all the
+-	 * submitted packets by the time we get here. Therefore, only set
+-	 * packet queue state to ACTIVE if there are still uncompleted
+-	 * requests.
+-	 */
+-	if (atomic_read(&pq->n_reqs))
+-		xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
+-
+ 	/*
+ 	 * This is a somewhat blocking send implementation.
+ 	 * The driver will block the caller until all packets of the
+@@ -1391,10 +1389,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
+ 
+ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
+ {
+-	if (atomic_dec_and_test(&pq->n_reqs)) {
+-		xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
++	if (atomic_dec_and_test(&pq->n_reqs))
+ 		wake_up(&pq->wait);
+-	}
+ }
+ 
+ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
+index 5af52334b7dc..2b5326d6db53 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -94,9 +94,10 @@
+ #define TXREQ_FLAGS_REQ_ACK   BIT(0)      /* Set the ACK bit in the header */
+ #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
+ 
+-#define SDMA_PKT_Q_INACTIVE BIT(0)
+-#define SDMA_PKT_Q_ACTIVE   BIT(1)
+-#define SDMA_PKT_Q_DEFERRED BIT(2)
++enum pkt_q_sdma_state {
++	SDMA_PKT_Q_ACTIVE,
++	SDMA_PKT_Q_DEFERRED,
++};
+ 
+ /*
+  * Maximum retry attempts to submit a TX request
+@@ -124,7 +125,7 @@ struct hfi1_user_sdma_pkt_q {
+ 	struct user_sdma_request *reqs;
+ 	unsigned long *req_in_use;
+ 	struct iowait busy;
+-	unsigned state;
++	enum pkt_q_sdma_state state;
+ 	wait_queue_head_t wait;
+ 	unsigned long unpinned;
+ 	struct mmu_rb_handler *handler;
+diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
+index 940d38b08e6b..ce8e2baf31bb 100644
+--- a/drivers/input/keyboard/omap4-keypad.c
++++ b/drivers/input/keyboard/omap4-keypad.c
+@@ -60,8 +60,18 @@
+ 
+ /* OMAP4 values */
+ #define OMAP4_VAL_IRQDISABLE		0x0
+-#define OMAP4_VAL_DEBOUNCINGTIME	0x7
+-#define OMAP4_VAL_PVT			0x7
++
++/*
++ * Errata i689: If a key is released for a time shorter than debounce time,
++ * the keyboard will idle and never detect the key release. The workaround
++ * is to use at least a 12ms debounce time. See omap5432 TRM chapter
++ * "26.4.6.2 Keyboard Controller Timer" for more information.
++ */
++#define OMAP4_KEYPAD_PTV_DIV_128        0x6
++#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv)     \
++	((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
++#define OMAP4_VAL_DEBOUNCINGTIME_16MS					\
++	OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
+ 
+ enum {
+ 	KBD_REVISION_OMAP4 = 0,
+@@ -181,9 +191,9 @@ static int omap4_keypad_open(struct input_dev *input)
+ 
+ 	kbd_writel(keypad_data, OMAP4_KBD_CTRL,
+ 			OMAP4_DEF_CTRL_NOSOFTMODE |
+-			(OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT));
++			(OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
+ 	kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
+-			OMAP4_VAL_DEBOUNCINGTIME);
++			OMAP4_VAL_DEBOUNCINGTIME_16MS);
+ 	/* clear pending interrupts */
+ 	kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
+ 			 kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 65c9095eb517..54f0d037b5b6 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -178,6 +178,7 @@ static const char * const smbus_pnp_ids[] = {
+ 	"LEN0096", /* X280 */
+ 	"LEN0097", /* X280 -> ALPS trackpoint */
+ 	"LEN200f", /* T450s */
++	"SYN3221", /* HP 15-ay000 */
+ 	NULL
+ };
+ 
+diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
+index 25151d9214e0..55288a026e4e 100644
+--- a/drivers/input/serio/hyperv-keyboard.c
++++ b/drivers/input/serio/hyperv-keyboard.c
+@@ -177,7 +177,7 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev,
+ 		 * state because the Enter-UP can trigger a wakeup at once.
+ 		 */
+ 		if (!(info & IS_BREAK))
+-			pm_wakeup_event(&hv_dev->device, 0);
++			pm_wakeup_hard_event(&hv_dev->device);
+ 
+ 		break;
+ 
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 7f1c64c4ad24..bc60db87e6f1 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -929,6 +929,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
+ 	bool dirty_flag;
+ 	*result = true;
+ 
++	if (from_cblock(cmd->cache_blocks) == 0)
++		/* Nothing to do */
++		return 0;
++
+ 	r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
+ 				   from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
+ 	if (r) {
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 699c40c7fe60..da98fc7b995c 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t)
+ struct dm_thin_new_mapping;
+ 
+ /*
+- * The pool runs in 4 modes.  Ordered in degraded order for comparisons.
++ * The pool runs in various modes.  Ordered in degraded order for comparisons.
+  */
+ enum pool_mode {
+ 	PM_WRITE,		/* metadata may be changed */
+@@ -281,9 +281,38 @@ struct pool {
+ 	struct dm_bio_prison_cell **cell_sort_array;
+ };
+ 
+-static enum pool_mode get_pool_mode(struct pool *pool);
+ static void metadata_operation_failed(struct pool *pool, const char *op, int r);
+ 
++static enum pool_mode get_pool_mode(struct pool *pool)
++{
++	return pool->pf.mode;
++}
++
++static void notify_of_pool_mode_change(struct pool *pool)
++{
++	const char *descs[] = {
++		"write",
++		"out-of-data-space",
++		"read-only",
++		"read-only",
++		"fail"
++	};
++	const char *extra_desc = NULL;
++	enum pool_mode mode = get_pool_mode(pool);
++
++	if (mode == PM_OUT_OF_DATA_SPACE) {
++		if (!pool->pf.error_if_no_space)
++			extra_desc = " (queue IO)";
++		else
++			extra_desc = " (error IO)";
++	}
++
++	dm_table_event(pool->ti->table);
++	DMINFO("%s: switching pool to %s%s mode",
++	       dm_device_name(pool->pool_md),
++	       descs[(int)mode], extra_desc ? : "");
++}
++
+ /*
+  * Target context for a pool.
+  */
+@@ -2362,8 +2391,6 @@ static void do_waker(struct work_struct *ws)
+ 	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
+ }
+ 
+-static void notify_of_pool_mode_change_to_oods(struct pool *pool);
+-
+ /*
+  * We're holding onto IO to allow userland time to react.  After the
+  * timeout either the pool will have been resized (and thus back in
+@@ -2376,7 +2403,7 @@ static void do_no_space_timeout(struct work_struct *ws)
+ 
+ 	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
+ 		pool->pf.error_if_no_space = true;
+-		notify_of_pool_mode_change_to_oods(pool);
++		notify_of_pool_mode_change(pool);
+ 		error_retry_list_with_code(pool, BLK_STS_NOSPC);
+ 	}
+ }
+@@ -2444,26 +2471,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
+ 
+ /*----------------------------------------------------------------*/
+ 
+-static enum pool_mode get_pool_mode(struct pool *pool)
+-{
+-	return pool->pf.mode;
+-}
+-
+-static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
+-{
+-	dm_table_event(pool->ti->table);
+-	DMINFO("%s: switching pool to %s mode",
+-	       dm_device_name(pool->pool_md), new_mode);
+-}
+-
+-static void notify_of_pool_mode_change_to_oods(struct pool *pool)
+-{
+-	if (!pool->pf.error_if_no_space)
+-		notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
+-	else
+-		notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
+-}
+-
+ static bool passdown_enabled(struct pool_c *pt)
+ {
+ 	return pt->adjusted_pf.discard_passdown;
+@@ -2512,8 +2519,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 
+ 	switch (new_mode) {
+ 	case PM_FAIL:
+-		if (old_mode != new_mode)
+-			notify_of_pool_mode_change(pool, "failure");
+ 		dm_pool_metadata_read_only(pool->pmd);
+ 		pool->process_bio = process_bio_fail;
+ 		pool->process_discard = process_bio_fail;
+@@ -2527,8 +2532,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 
+ 	case PM_OUT_OF_METADATA_SPACE:
+ 	case PM_READ_ONLY:
+-		if (!is_read_only_pool_mode(old_mode))
+-			notify_of_pool_mode_change(pool, "read-only");
+ 		dm_pool_metadata_read_only(pool->pmd);
+ 		pool->process_bio = process_bio_read_only;
+ 		pool->process_discard = process_bio_success;
+@@ -2549,8 +2552,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 		 * alarming rate.  Adjust your low water mark if you're
+ 		 * frequently seeing this mode.
+ 		 */
+-		if (old_mode != new_mode)
+-			notify_of_pool_mode_change_to_oods(pool);
+ 		pool->out_of_data_space = true;
+ 		pool->process_bio = process_bio_read_only;
+ 		pool->process_discard = process_discard_bio;
+@@ -2563,8 +2564,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 		break;
+ 
+ 	case PM_WRITE:
+-		if (old_mode != new_mode)
+-			notify_of_pool_mode_change(pool, "write");
+ 		if (old_mode == PM_OUT_OF_DATA_SPACE)
+ 			cancel_delayed_work_sync(&pool->no_space_timeout);
+ 		pool->out_of_data_space = false;
+@@ -2584,6 +2583,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 	 * doesn't cause an unexpected mode transition on resume.
+ 	 */
+ 	pt->adjusted_pf.mode = new_mode;
++
++	if (old_mode != new_mode)
++		notify_of_pool_mode_change(pool);
+ }
+ 
+ static void abort_transaction(struct pool *pool)
+diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
+index bd49f34d7654..c28c51ad650f 100644
+--- a/drivers/mmc/host/omap.c
++++ b/drivers/mmc/host/omap.c
+@@ -104,6 +104,7 @@ struct mmc_omap_slot {
+ 	unsigned int		vdd;
+ 	u16			saved_con;
+ 	u16			bus_mode;
++	u16			power_mode;
+ 	unsigned int		fclk_freq;
+ 
+ 	struct tasklet_struct	cover_tasklet;
+@@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 	struct mmc_omap_slot *slot = mmc_priv(mmc);
+ 	struct mmc_omap_host *host = slot->host;
+ 	int i, dsor;
+-	int clk_enabled;
++	int clk_enabled, init_stream;
+ 
+ 	mmc_omap_select_slot(slot, 0);
+ 
+@@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 		slot->vdd = ios->vdd;
+ 
+ 	clk_enabled = 0;
++	init_stream = 0;
+ 	switch (ios->power_mode) {
+ 	case MMC_POWER_OFF:
+ 		mmc_omap_set_power(slot, 0, ios->vdd);
+@@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 	case MMC_POWER_UP:
+ 		/* Cannot touch dsor yet, just power up MMC */
+ 		mmc_omap_set_power(slot, 1, ios->vdd);
++		slot->power_mode = ios->power_mode;
+ 		goto exit;
+ 	case MMC_POWER_ON:
+ 		mmc_omap_fclk_enable(host, 1);
+ 		clk_enabled = 1;
+ 		dsor |= 1 << 11;
++		if (slot->power_mode != MMC_POWER_ON)
++			init_stream = 1;
+ 		break;
+ 	}
++	slot->power_mode = ios->power_mode;
+ 
+ 	if (slot->bus_mode != ios->bus_mode) {
+ 		if (slot->pdata->set_bus_mode != NULL)
+@@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 	for (i = 0; i < 2; i++)
+ 		OMAP_MMC_WRITE(host, CON, dsor);
+ 	slot->saved_con = dsor;
+-	if (ios->power_mode == MMC_POWER_ON) {
++	if (init_stream) {
+ 		/* worst case at 400kHz, 80 cycles makes 200 microsecs */
+ 		int usecs = 250;
+ 
+@@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+ 	slot->host = host;
+ 	slot->mmc = mmc;
+ 	slot->id = id;
++	slot->power_mode = MMC_POWER_UNDEFINED;
+ 	slot->pdata = &host->pdata->slots[id];
+ 
+ 	host->slots[id] = slot;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index f063fe569339..0edcc2763f3c 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -193,8 +193,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
+ 	timeout = ktime_add_ms(ktime_get(), 100);
+ 
+ 	/* hw clears the bit when it's done */
+-	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
+-		if (ktime_after(ktime_get(), timeout)) {
++	while (1) {
++		bool timedout = ktime_after(ktime_get(), timeout);
++
++		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
++			break;
++		if (timedout) {
+ 			pr_err("%s: Reset 0x%x never completed.\n",
+ 				mmc_hostname(host->mmc), (int)mask);
+ 			sdhci_dumpregs(host);
+@@ -1417,9 +1421,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
+ 
+ 	/* Wait max 20 ms */
+ 	timeout = ktime_add_ms(ktime_get(), 20);
+-	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+-		& SDHCI_CLOCK_INT_STABLE)) {
+-		if (ktime_after(ktime_get(), timeout)) {
++	while (1) {
++		bool timedout = ktime_after(ktime_get(), timeout);
++
++		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++		if (clk & SDHCI_CLOCK_INT_STABLE)
++			break;
++		if (timedout) {
+ 			pr_err("%s: Internal clock never stabilised.\n",
+ 			       mmc_hostname(host->mmc));
+ 			sdhci_dumpregs(host);
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index f43fb2f958a5..93dfcef8afc4 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2086,6 +2086,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
+ 		   aggregator->aggregator_identifier);
+ 
+ 	/* Tell the partner that this port is not suitable for aggregation */
++	port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
++	port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
++	port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
+ 	port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
+ 	__update_lacpdu_from_port(port);
+ 	ad_lacpdu_send(port);
+diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
+index f123ed57630d..86b41840f41a 100644
+--- a/drivers/net/dsa/mv88e6060.c
++++ b/drivers/net/dsa/mv88e6060.c
+@@ -114,8 +114,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
+ 	/* Reset the switch. */
+ 	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ 		  GLOBAL_ATU_CONTROL_SWRESET |
+-		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+-		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
++		  GLOBAL_ATU_CONTROL_LEARNDIS);
+ 
+ 	/* Wait up to one second for reset to complete. */
+ 	timeout = jiffies + 1 * HZ;
+@@ -140,13 +139,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
+ 	 */
+ 	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
+ 
+-	/* Enable automatic address learning, set the address
+-	 * database size to 1024 entries, and set the default aging
+-	 * time to 5 minutes.
++	/* Disable automatic address learning.
+ 	 */
+ 	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+-		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+-		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
++		  GLOBAL_ATU_CONTROL_LEARNDIS);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
+index 9530405030a7..97425d94e280 100644
+--- a/drivers/net/ethernet/freescale/fman/fman.c
++++ b/drivers/net/ethernet/freescale/fman/fman.c
+@@ -2786,7 +2786,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
+ 	if (!muram_node) {
+ 		dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
+ 			__func__);
+-		goto fman_node_put;
++		goto fman_free;
+ 	}
+ 
+ 	err = of_address_to_resource(muram_node, 0,
+@@ -2795,11 +2795,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
+ 		of_node_put(muram_node);
+ 		dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
+ 			__func__, err);
+-		goto fman_node_put;
++		goto fman_free;
+ 	}
+ 
+ 	of_node_put(muram_node);
+-	of_node_put(fm_node);
+ 
+ 	err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
+ 	if (err < 0) {
+diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
+index 22b1cc012bc9..c1a39be0dbe7 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
++++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
+@@ -5,7 +5,7 @@
+ config MLX4_EN
+ 	tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
+ 	depends on MAY_USE_DEVLINK
+-	depends on PCI
++	depends on PCI && NETDEVICES && ETHERNET && INET
+ 	select MLX4_CORE
+ 	imply PTP_1588_CLOCK
+ 	---help---
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 32c25772f755..21611613f44c 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -295,7 +295,13 @@ static bool
+ mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
+ 				    bridge_port)
+ {
+-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
++	struct net_device *dev = bridge_port->dev;
++	struct mlxsw_sp *mlxsw_sp;
++
++	if (is_vlan_dev(dev))
++		mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
++	else
++		mlxsw_sp = mlxsw_sp_lower_get(dev);
+ 
+ 	/* In case ports were pulled from out of a bridged LAG, then
+ 	 * it's possible the reference count isn't zero, yet the bridge
+@@ -1646,7 +1652,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ 	u16 vid = vlan_dev_vlan_id(bridge_port->dev);
+ 
+ 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+-	if (WARN_ON(!mlxsw_sp_port_vlan))
++	if (!mlxsw_sp_port_vlan)
+ 		return;
+ 
+ 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 670224be3c8b..8f57ca969c9f 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3472,16 +3472,16 @@ static int __init init_mac80211_hwsim(void)
+ 	if (err)
+ 		goto out_unregister_pernet;
+ 
++	err = hwsim_init_netlink();
++	if (err)
++		goto out_unregister_driver;
++
+ 	hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
+ 	if (IS_ERR(hwsim_class)) {
+ 		err = PTR_ERR(hwsim_class);
+-		goto out_unregister_driver;
++		goto out_exit_netlink;
+ 	}
+ 
+-	err = hwsim_init_netlink();
+-	if (err < 0)
+-		goto out_unregister_driver;
+-
+ 	for (i = 0; i < radios; i++) {
+ 		struct hwsim_new_radio_params param = { 0 };
+ 
+@@ -3587,6 +3587,8 @@ out_free_mon:
+ 	free_netdev(hwsim_mon);
+ out_free_radios:
+ 	mac80211_hwsim_free();
++out_exit_netlink:
++	hwsim_exit_netlink();
+ out_unregister_driver:
+ 	platform_driver_unregister(&mac80211_hwsim_driver);
+ out_unregister_pernet:
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index a70b3d24936d..5d8140e58f6f 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -524,6 +524,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+ 	struct nvmet_rdma_rsp *rsp =
+ 		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
++	struct nvmet_rdma_queue *queue = cq->cq_context;
+ 
+ 	nvmet_rdma_release_rsp(rsp);
+ 
+@@ -531,7 +532,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+ 		     wc->status != IB_WC_WR_FLUSH_ERR)) {
+ 		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
+ 			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
+-		nvmet_rdma_error_comp(rsp->queue);
++		nvmet_rdma_error_comp(queue);
+ 	}
+ }
+ 
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+index 6624499eae72..4ada80317a3b 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
+ 	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
+ 		  SUNXI_FUNCTION(0x0, "gpio_in"),
+ 		  SUNXI_FUNCTION(0x1, "gpio_out"),
+-		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),	/* PH_EINT11 */
++		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)),	/* PH_EINT11 */
+ };
+ 
+ static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index 9af591d5223c..71eee39520f0 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -47,49 +47,83 @@ struct snvs_rtc_data {
+ 	struct clk *clk;
+ };
+ 
++/* Read 64 bit timer register, which could be in inconsistent state */
++static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
++{
++	u32 msb, lsb;
++
++	regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &msb);
++	regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &lsb);
++	return (u64)msb << 32 | lsb;
++}
++
++/* Read the secure real time counter, taking care to deal with the cases of the
++ * counter updating while being read.
++ */
+ static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
+ {
+ 	u64 read1, read2;
+-	u32 val;
++	unsigned int timeout = 100;
+ 
++	/* As expected, the registers might update between the read of the LSB
++	 * reg and the MSB reg.  It's also possible that one register might be
++	 * in partially modified state as well.
++	 */
++	read1 = rtc_read_lpsrt(data);
+ 	do {
+-		regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
+-		read1 = val;
+-		read1 <<= 32;
+-		regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
+-		read1 |= val;
+-
+-		regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
+-		read2 = val;
+-		read2 <<= 32;
+-		regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
+-		read2 |= val;
+-	} while (read1 != read2);
++		read2 = read1;
++		read1 = rtc_read_lpsrt(data);
++	} while (read1 != read2 && --timeout);
++	if (!timeout)
++		dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+ 
+ 	/* Convert 47-bit counter to 32-bit raw second count */
+ 	return (u32) (read1 >> CNTR_TO_SECS_SH);
+ }
+ 
+-static void rtc_write_sync_lp(struct snvs_rtc_data *data)
++/* Just read the lsb from the counter, dealing with inconsistent state */
++static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
+ {
+-	u32 count1, count2, count3;
+-	int i;
+-
+-	/* Wait for 3 CKIL cycles */
+-	for (i = 0; i < 3; i++) {
+-		do {
+-			regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+-			regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
+-		} while (count1 != count2);
+-
+-		/* Now wait until counter value changes */
+-		do {
+-			do {
+-				regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
+-				regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count3);
+-			} while (count2 != count3);
+-		} while (count3 == count1);
++	u32 count1, count2;
++	unsigned int timeout = 100;
++
++	regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
++	do {
++		count2 = count1;
++		regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
++	} while (count1 != count2 && --timeout);
++	if (!timeout) {
++		dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
++		return -ETIMEDOUT;
+ 	}
++
++	*lsb = count1;
++	return 0;
++}
++
++static int rtc_write_sync_lp(struct snvs_rtc_data *data)
++{
++	u32 count1, count2;
++	u32 elapsed;
++	unsigned int timeout = 1000;
++	int ret;
++
++	ret = rtc_read_lp_counter_lsb(data, &count1);
++	if (ret)
++		return ret;
++
++	/* Wait for 3 CKIL cycles, about 61.0-91.5 µs */
++	do {
++		ret = rtc_read_lp_counter_lsb(data, &count2);
++		if (ret)
++			return ret;
++		elapsed = count2 - count1; /* wrap around _is_ handled! */
++	} while (elapsed < 3 && --timeout);
++	if (!timeout) {
++		dev_err(&data->rtc->dev, "Timeout waiting for LPSRT Counter to change\n");
++		return -ETIMEDOUT;
++	}
++	return 0;
+ }
+ 
+ static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
+@@ -173,9 +207,7 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+ 			   (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
+ 			   enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
+ 
+-	rtc_write_sync_lp(data);
+-
+-	return 0;
++	return rtc_write_sync_lp(data);
+ }
+ 
+ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+@@ -183,11 +215,14 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ 	struct snvs_rtc_data *data = dev_get_drvdata(dev);
+ 	struct rtc_time *alrm_tm = &alrm->time;
+ 	unsigned long time;
++	int ret;
+ 
+ 	rtc_tm_to_time(alrm_tm, &time);
+ 
+ 	regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
+-	rtc_write_sync_lp(data);
++	ret = rtc_write_sync_lp(data);
++	if (ret)
++		return ret;
+ 	regmap_write(data->regmap, data->offset + SNVS_LPTAR, time);
+ 
+ 	/* Clear alarm interrupt status bit */
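
The rtc-snvs change above waits for the 32 kHz counter to advance by computing elapsed = count2 - count1 on u32 values, which stays correct even when the counter wraps between the two reads. A minimal userspace sketch of the same bounded polling pattern; read_lsb() here is a hypothetical stand-in for the regmap read of SNVS_LPSRTCLR:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the SNVS LPSRTCLR regmap read. */
static uint32_t fake_counter = 0xfffffffe;	/* start close to the wrap on purpose */
static uint32_t read_lsb(void)
{
	return fake_counter++;			/* counter advances on every call */
}

int main(void)
{
	uint32_t start = read_lsb();
	uint32_t now, elapsed;
	unsigned int timeout = 1000;

	/* Wait until at least 3 ticks have passed since 'start'. */
	do {
		now = read_lsb();
		/* Unsigned subtraction: correct even after 'now' wrapped past 0. */
		elapsed = now - start;
	} while (elapsed < 3 && --timeout);

	if (!timeout) {
		fprintf(stderr, "timed out waiting for counter to advance\n");
		return 1;
	}
	printf("start=0x%08x now=0x%08x elapsed=%u\n", start, now, elapsed);
	return 0;
}

Because start is 0xfffffffe, the loop crosses the 32-bit wrap and still reports elapsed = 3, which is the property the "wrap around _is_ handled!" comment in the patch relies on.
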
+diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
+index f32765d3cbd8..db761aca8667 100644
+--- a/drivers/sbus/char/display7seg.c
++++ b/drivers/sbus/char/display7seg.c
+@@ -221,6 +221,7 @@ static int d7s_probe(struct platform_device *op)
+ 	dev_set_drvdata(&op->dev, p);
+ 	d7s_device = p;
+ 	err = 0;
++	of_node_put(opts);
+ 
+ out:
+ 	return err;
+diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
+index 56e962a01493..b8481927bfe4 100644
+--- a/drivers/sbus/char/envctrl.c
++++ b/drivers/sbus/char/envctrl.c
+@@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp,
+ 			for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
+ 				pchild->mon_type[len] = ENVCTRL_NOMON;
+ 			}
++			of_node_put(root_node);
+ 			return;
+ 		}
++		of_node_put(root_node);
+ 	}
+ 
+ 	/* Get the monitor channels. */
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index cf8a15e54d83..3ff536b350a1 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -2416,8 +2416,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
+ failed:
+ 		ISCSI_DBG_EH(session,
+ 			     "failing session reset: Could not log back into "
+-			     "%s, %s [age %d]\n", session->targetname,
+-			     conn->persistent_address, session->age);
++			     "%s [age %d]\n", session->targetname,
++			     session->age);
+ 		spin_unlock_bh(&session->frwd_lock);
+ 		mutex_unlock(&session->eh_mutex);
+ 		return FAILED;
+diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
+index 0cd947f78b5b..890b8aaf95e1 100644
+--- a/drivers/scsi/vmw_pvscsi.c
++++ b/drivers/scsi/vmw_pvscsi.c
+@@ -1202,8 +1202,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
+ 
+ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
+ {
+-	pvscsi_shutdown_intr(adapter);
+-
+ 	if (adapter->workqueue)
+ 		destroy_workqueue(adapter->workqueue);
+ 
+@@ -1535,6 +1533,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ out_reset_adapter:
+ 	ll_adapter_reset(adapter);
+ out_release_resources:
++	pvscsi_shutdown_intr(adapter);
+ 	pvscsi_release_resources(adapter);
+ 	scsi_host_put(host);
+ out_disable_device:
+@@ -1543,6 +1542,7 @@ out_disable_device:
+ 	return error;
+ 
+ out_release_resources_and_disable:
++	pvscsi_shutdown_intr(adapter);
+ 	pvscsi_release_resources(adapter);
+ 	goto out_disable_device;
+ }
+diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
+index 127472bd6a7c..209f314745ab 100644
+--- a/drivers/tty/serial/suncore.c
++++ b/drivers/tty/serial/suncore.c
+@@ -111,6 +111,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
+ 		mode = of_get_property(dp, mode_prop, NULL);
+ 		if (!mode)
+ 			mode = "9600,8,n,1,-";
++		of_node_put(dp);
+ 	}
+ 
+ 	cflag = CREAD | HUPCL | CLOCAL;
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index b044a0800805..248533c0f9ac 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -561,13 +561,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
+ 	 * executing.
+ 	 */
+ 
+-	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
+-		sock_set_flag(sk, SOCK_DONE);
+-		vsk->peer_shutdown = SHUTDOWN_MASK;
+-		sk->sk_state = SS_UNCONNECTED;
+-		sk->sk_err = ECONNRESET;
+-		sk->sk_error_report(sk);
+-	}
++	/* If the peer is still valid, no need to reset connection */
++	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
++		return;
++
++	/* If the close timeout is pending, let it expire.  This avoids races
++	 * with the timeout callback.
++	 */
++	if (vsk->close_work_scheduled)
++		return;
++
++	sock_set_flag(sk, SOCK_DONE);
++	vsk->peer_shutdown = SHUTDOWN_MASK;
++	sk->sk_state = SS_UNCONNECTED;
++	sk->sk_err = ECONNRESET;
++	sk->sk_error_report(sk);
+ }
+ 
+ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+diff --git a/fs/aio.c b/fs/aio.c
+index 3a749c3a92e3..a2de58f77338 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -43,6 +43,7 @@
+ 
+ #include <asm/kmap_types.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+ 
+ #include "internal.h"
+ 
+@@ -1084,6 +1085,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
+ 	if (!table || id >= table->nr)
+ 		goto out;
+ 
++	id = array_index_nospec(id, table->nr);
+ 	ctx = rcu_dereference(table->table[id]);
+ 	if (ctx && ctx->user_id == ctx_id) {
+ 		if (percpu_ref_tryget_live(&ctx->users))
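
The fs/aio.c change clamps the ioctx table index with array_index_nospec() after the bounds check, so a mispredicted branch cannot be steered into a speculative out-of-bounds table read. A rough userspace sketch of the underlying masking idea; mask_index() is a simplified stand-in and not the kernel's implementation:

#include <stddef.h>
#include <stdio.h>

/*
 * Simplified stand-in for the kernel's array_index_nospec():
 * returns 'index' when index < size and 0 otherwise, using only
 * arithmetic and masking rather than a second branch.
 */
static size_t mask_index(size_t index, size_t size)
{
	/* All-ones mask when index < size, all-zeroes otherwise. */
	size_t mask = (size_t)0 - (size_t)(index < size);
	return index & mask;
}

int main(void)
{
	int table[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	size_t ids[] = { 3, 7, 8, 100 };

	for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		size_t id = ids[i];

		if (id >= 8) {			/* architectural bounds check */
			printf("id %zu rejected\n", id);
			continue;
		}
		/* Clamp again so even a speculated path stays in bounds. */
		id = mask_index(id, 8);
		printf("table[%zu] = %d\n", id, table[id]);
	}
	return 0;
}
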
+diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
+index cb0f1fbe836d..7b95e7971d18 100644
+--- a/fs/cifs/Kconfig
++++ b/fs/cifs/Kconfig
+@@ -121,7 +121,7 @@ config CIFS_XATTR
+ 
+ config CIFS_POSIX
+         bool "CIFS POSIX Extensions"
+-        depends on CIFS_XATTR
++        depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
+         help
+           Enabling this option will cause the cifs client to attempt to
+ 	  negotiate a newer dialect with servers, such as Samba 3.0.5
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 29868c35c19a..d933ecb7a08c 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1424,7 +1424,7 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
+ 
+ static int fuse_dir_release(struct inode *inode, struct file *file)
+ {
+-	fuse_release_common(file, FUSE_RELEASEDIR);
++	fuse_release_common(file, true);
+ 
+ 	return 0;
+ }
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 47d7a510be5b..52514a64dcd6 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -86,12 +86,12 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
+ 	iput(req->misc.release.inode);
+ }
+ 
+-static void fuse_file_put(struct fuse_file *ff, bool sync)
++static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
+ {
+ 	if (refcount_dec_and_test(&ff->count)) {
+ 		struct fuse_req *req = ff->reserved_req;
+ 
+-		if (ff->fc->no_open) {
++		if (ff->fc->no_open && !isdir) {
+ 			/*
+ 			 * Drop the release request when client does not
+ 			 * implement 'open'
+@@ -244,10 +244,11 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
+ 	req->in.args[0].value = inarg;
+ }
+ 
+-void fuse_release_common(struct file *file, int opcode)
++void fuse_release_common(struct file *file, bool isdir)
+ {
+ 	struct fuse_file *ff = file->private_data;
+ 	struct fuse_req *req = ff->reserved_req;
++	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
+ 
+ 	fuse_prepare_release(ff, file->f_flags, opcode);
+ 
+@@ -269,7 +270,7 @@ void fuse_release_common(struct file *file, int opcode)
+ 	 * synchronous RELEASE is allowed (and desirable) in this case
+ 	 * because the server can be trusted not to screw up.
+ 	 */
+-	fuse_file_put(ff, ff->fc->destroy_req != NULL);
++	fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir);
+ }
+ 
+ static int fuse_open(struct inode *inode, struct file *file)
+@@ -285,7 +286,7 @@ static int fuse_release(struct inode *inode, struct file *file)
+ 	if (fc->writeback_cache)
+ 		write_inode_now(inode, 1);
+ 
+-	fuse_release_common(file, FUSE_RELEASE);
++	fuse_release_common(file, false);
+ 
+ 	/* return value is ignored by VFS */
+ 	return 0;
+@@ -299,7 +300,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
+ 	 * iput(NULL) is a no-op and since the refcount is 1 and everything's
+ 	 * synchronous, we are fine with not doing igrab() here"
+ 	 */
+-	fuse_file_put(ff, true);
++	fuse_file_put(ff, true, false);
+ }
+ EXPORT_SYMBOL_GPL(fuse_sync_release);
+ 
+@@ -804,7 +805,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
+ 		put_page(page);
+ 	}
+ 	if (req->ff)
+-		fuse_file_put(req->ff, false);
++		fuse_file_put(req->ff, false, false);
+ }
+ 
+ static void fuse_send_readpages(struct fuse_req *req, struct file *file)
+@@ -1458,7 +1459,7 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
+ 		__free_page(req->pages[i]);
+ 
+ 	if (req->ff)
+-		fuse_file_put(req->ff, false);
++		fuse_file_put(req->ff, false, false);
+ }
+ 
+ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
+@@ -1615,7 +1616,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
+ 	ff = __fuse_write_file_get(fc, fi);
+ 	err = fuse_flush_times(inode, ff);
+ 	if (ff)
+-		fuse_file_put(ff, 0);
++		fuse_file_put(ff, false, false);
+ 
+ 	return err;
+ }
+@@ -1929,7 +1930,7 @@ static int fuse_writepages(struct address_space *mapping,
+ 		err = 0;
+ 	}
+ 	if (data.ff)
+-		fuse_file_put(data.ff, false);
++		fuse_file_put(data.ff, false, false);
+ 
+ 	kfree(data.orig_pages);
+ out:
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index e105640153ce..e682f2eff6c0 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -739,7 +739,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags);
+ /**
+  * Send RELEASE or RELEASEDIR request
+  */
+-void fuse_release_common(struct file *file, int opcode);
++void fuse_release_common(struct file *file, bool isdir);
+ 
+ /**
+  * Send FSYNC or FSYNCDIR request
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 621c517b325c..89c03a507dd9 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -98,8 +98,11 @@ struct nfs_direct_req {
+ 	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
+ 	struct work_struct	work;
+ 	int			flags;
++	/* for write */
+ #define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
+ #define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
++	/* for read */
++#define NFS_ODIRECT_SHOULD_DIRTY	(3)	/* dirty user-space page after read */
+ 	struct nfs_writeverf	verf;		/* unstable write verifier */
+ };
+ 
+@@ -412,7 +415,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
+ 		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ 		struct page *page = req->wb_page;
+ 
+-		if (!PageCompound(page) && bytes < hdr->good_bytes)
++		if (!PageCompound(page) && bytes < hdr->good_bytes &&
++		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
+ 			set_page_dirty(page);
+ 		bytes += req->wb_bytes;
+ 		nfs_list_remove_request(req);
+@@ -587,6 +591,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
+ 	if (!is_sync_kiocb(iocb))
+ 		dreq->iocb = iocb;
+ 
++	if (iter_is_iovec(iter))
++		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
++
+ 	nfs_start_io_direct(inode);
+ 
+ 	NFS_I(inode)->read_io += count;
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index f92e1f2fc846..5f10052d2671 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1567,7 +1567,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
+ 		cond_resched();
+ 
+ 		BUG_ON(!vma_can_userfault(vma));
+-		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+ 
+ 		/*
+ 		 * Nothing to do: this vma is already registered into this
+@@ -1576,6 +1575,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
+ 		if (!vma->vm_userfaultfd_ctx.ctx)
+ 			goto skip;
+ 
++		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
++
+ 		if (vma->vm_start > start)
+ 			start = vma->vm_start;
+ 		vma_end = min(end, vma->vm_end);
+diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
+index 034acd0c4956..d10f1e7d6ba8 100644
+--- a/include/asm-generic/qspinlock_types.h
++++ b/include/asm-generic/qspinlock_types.h
+@@ -29,13 +29,41 @@
+ #endif
+ 
+ typedef struct qspinlock {
+-	atomic_t	val;
++	union {
++		atomic_t val;
++
++		/*
++		 * By using the whole 2nd least significant byte for the
++		 * pending bit, we can allow better optimization of the lock
++		 * acquisition for the pending bit holder.
++		 */
++#ifdef __LITTLE_ENDIAN
++		struct {
++			u8	locked;
++			u8	pending;
++		};
++		struct {
++			u16	locked_pending;
++			u16	tail;
++		};
++#else
++		struct {
++			u16	tail;
++			u16	locked_pending;
++		};
++		struct {
++			u8	reserved[2];
++			u8	pending;
++			u8	locked;
++		};
++#endif
++	};
+ } arch_spinlock_t;
+ 
+ /*
+  * Initializier
+  */
+-#define	__ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }
++#define	__ARCH_SPIN_LOCK_UNLOCKED	{ { .val = ATOMIC_INIT(0) } }
+ 
+ /*
+  * Bitfields in the atomic value:
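
The union added above overlays the 32-bit lock word with byte and halfword views, so the locked and pending fields can be updated with plain stores; the two struct orderings keep the overlay correct on either endianness. A small userspace sketch, assuming a little-endian host, showing how the overlaid fields land inside the lock word:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace mock-up of the qspinlock layout (little-endian ordering only). */
struct mock_qspinlock {
	union {
		uint32_t val;
		struct {
			uint8_t  locked;		/* bits  0-7  */
			uint8_t  pending;		/* bits  8-15 */
		};
		struct {
			uint16_t locked_pending;	/* bits  0-15 */
			uint16_t tail;			/* bits 16-31 */
		};
	};
};

int main(void)
{
	struct mock_qspinlock lock;

	memset(&lock, 0, sizeof(lock));

	lock.pending = 1;		/* set only the pending byte */
	printf("after pending=1:  val=0x%08x\n", lock.val);	/* 0x00000100 */

	lock.locked_pending = 0x0001;	/* clear pending, set locked in one store */
	printf("after handover:   val=0x%08x\n", lock.val);	/* 0x00000001 */

	lock.tail = 0xabcd;		/* tail occupies the upper halfword */
	printf("after tail write: val=0x%08x\n", lock.val);	/* 0xabcd0001 */
	return 0;
}
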
+diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
+index 50dc42aeaa56..1011a1b292ac 100644
+--- a/kernel/locking/qspinlock.c
++++ b/kernel/locking/qspinlock.c
+@@ -76,6 +76,18 @@
+ #define MAX_NODES	4
+ #endif
+ 
++/*
++ * The pending bit spinning loop count.
++ * This heuristic is used to limit the number of lockword accesses
++ * made by atomic_cond_read_relaxed when waiting for the lock to
++ * transition out of the "== _Q_PENDING_VAL" state. We don't spin
++ * indefinitely because there's no guarantee that we'll make forward
++ * progress.
++ */
++#ifndef _Q_PENDING_LOOPS
++#define _Q_PENDING_LOOPS	1
++#endif
++
+ /*
+  * Per-CPU queue node structures; we can never have more than 4 nested
+  * contexts: task, softirq, hardirq, nmi.
+@@ -114,41 +126,18 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
+ 
+ #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
+ 
+-/*
+- * By using the whole 2nd least significant byte for the pending bit, we
+- * can allow better optimization of the lock acquisition for the pending
+- * bit holder.
++#if _Q_PENDING_BITS == 8
++/**
++ * clear_pending - clear the pending bit.
++ * @lock: Pointer to queued spinlock structure
+  *
+- * This internal structure is also used by the set_locked function which
+- * is not restricted to _Q_PENDING_BITS == 8.
++ * *,1,* -> *,0,*
+  */
+-struct __qspinlock {
+-	union {
+-		atomic_t val;
+-#ifdef __LITTLE_ENDIAN
+-		struct {
+-			u8	locked;
+-			u8	pending;
+-		};
+-		struct {
+-			u16	locked_pending;
+-			u16	tail;
+-		};
+-#else
+-		struct {
+-			u16	tail;
+-			u16	locked_pending;
+-		};
+-		struct {
+-			u8	reserved[2];
+-			u8	pending;
+-			u8	locked;
+-		};
+-#endif
+-	};
+-};
++static __always_inline void clear_pending(struct qspinlock *lock)
++{
++	WRITE_ONCE(lock->pending, 0);
++}
+ 
+-#if _Q_PENDING_BITS == 8
+ /**
+  * clear_pending_set_locked - take ownership and clear the pending bit.
+  * @lock: Pointer to queued spinlock structure
+@@ -159,9 +148,7 @@ struct __qspinlock {
+  */
+ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+ {
+-	struct __qspinlock *l = (void *)lock;
+-
+-	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
++	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
+ }
+ 
+ /*
+@@ -170,24 +157,33 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+  * @tail : The new queue tail code word
+  * Return: The previous queue tail code word
+  *
+- * xchg(lock, tail)
++ * xchg(lock, tail), which heads an address dependency
+  *
+  * p,*,* -> n,*,* ; prev = xchg(lock, node)
+  */
+ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+ {
+-	struct __qspinlock *l = (void *)lock;
+-
+ 	/*
+ 	 * Use release semantics to make sure that the MCS node is properly
+ 	 * initialized before changing the tail code.
+ 	 */
+-	return (u32)xchg_release(&l->tail,
++	return (u32)xchg_release(&lock->tail,
+ 				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+ }
+ 
+ #else /* _Q_PENDING_BITS == 8 */
+ 
++/**
++ * clear_pending - clear the pending bit.
++ * @lock: Pointer to queued spinlock structure
++ *
++ * *,1,* -> *,0,*
++ */
++static __always_inline void clear_pending(struct qspinlock *lock)
++{
++	atomic_andnot(_Q_PENDING_VAL, &lock->val);
++}
++
+ /**
+  * clear_pending_set_locked - take ownership and clear the pending bit.
+  * @lock: Pointer to queued spinlock structure
+@@ -229,6 +225,20 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+ }
+ #endif /* _Q_PENDING_BITS == 8 */
+ 
++/**
++ * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
++ * @lock : Pointer to queued spinlock structure
++ * Return: The previous lock value
++ *
++ * *,*,* -> *,1,*
++ */
++#ifndef queued_fetch_set_pending_acquire
++static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
++{
++	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
++}
++#endif
++
+ /**
+  * set_locked - Set the lock bit and own the lock
+  * @lock: Pointer to queued spinlock structure
+@@ -237,9 +247,7 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+  */
+ static __always_inline void set_locked(struct qspinlock *lock)
+ {
+-	struct __qspinlock *l = (void *)lock;
+-
+-	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
++	WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
+ }
+ 
+ 
+@@ -294,7 +302,7 @@ static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
+ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+ {
+ 	struct mcs_spinlock *prev, *next, *node;
+-	u32 new, old, tail;
++	u32 old, tail;
+ 	int idx;
+ 
+ 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+@@ -306,65 +314,58 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+ 		return;
+ 
+ 	/*
+-	 * wait for in-progress pending->locked hand-overs
++	 * Wait for in-progress pending->locked hand-overs with a bounded
++	 * number of spins so that we guarantee forward progress.
+ 	 *
+ 	 * 0,1,0 -> 0,0,1
+ 	 */
+ 	if (val == _Q_PENDING_VAL) {
+-		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
+-			cpu_relax();
++		int cnt = _Q_PENDING_LOOPS;
++		val = smp_cond_load_acquire(&lock->val.counter,
++					       (VAL != _Q_PENDING_VAL) || !cnt--);
+ 	}
+ 
++	/*
++	 * If we observe any contention; queue.
++	 */
++	if (val & ~_Q_LOCKED_MASK)
++		goto queue;
++
+ 	/*
+ 	 * trylock || pending
+ 	 *
+ 	 * 0,0,0 -> 0,0,1 ; trylock
+ 	 * 0,0,1 -> 0,1,1 ; pending
+ 	 */
+-	for (;;) {
+-		/*
+-		 * If we observe any contention; queue.
+-		 */
+-		if (val & ~_Q_LOCKED_MASK)
+-			goto queue;
+-
+-		new = _Q_LOCKED_VAL;
+-		if (val == new)
+-			new |= _Q_PENDING_VAL;
+-
+-		/*
+-		 * Acquire semantic is required here as the function may
+-		 * return immediately if the lock was free.
+-		 */
+-		old = atomic_cmpxchg_acquire(&lock->val, val, new);
+-		if (old == val)
+-			break;
+-
+-		val = old;
+-	}
++	val = queued_fetch_set_pending_acquire(lock);
+ 
+ 	/*
+-	 * we won the trylock
++	 * If we observe any contention; undo and queue.
+ 	 */
+-	if (new == _Q_LOCKED_VAL)
+-		return;
++	if (unlikely(val & ~_Q_LOCKED_MASK)) {
++		if (!(val & _Q_PENDING_MASK))
++			clear_pending(lock);
++		goto queue;
++	}
+ 
+ 	/*
+-	 * we're pending, wait for the owner to go away.
++	 * We're pending, wait for the owner to go away.
+ 	 *
+-	 * *,1,1 -> *,1,0
++	 * 0,1,1 -> 0,1,0
+ 	 *
+ 	 * this wait loop must be a load-acquire such that we match the
+ 	 * store-release that clears the locked bit and create lock
+-	 * sequentiality; this is because not all clear_pending_set_locked()
+-	 * implementations imply full barriers.
++	 * sequentiality; this is because not all
++	 * clear_pending_set_locked() implementations imply full
++	 * barriers.
+ 	 */
+-	smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));
++	if (val & _Q_LOCKED_MASK)
++		smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));
+ 
+ 	/*
+ 	 * take ownership and clear the pending bit.
+ 	 *
+-	 * *,1,0 -> *,0,1
++	 * 0,1,0 -> 0,0,1
+ 	 */
+ 	clear_pending_set_locked(lock);
+ 	return;
+@@ -416,16 +417,15 @@ queue:
+ 	 */
+ 	if (old & _Q_TAIL_MASK) {
+ 		prev = decode_tail(old);
++
+ 		/*
+-		 * The above xchg_tail() is also a load of @lock which generates,
+-		 * through decode_tail(), a pointer.
+-		 *
+-		 * The address dependency matches the RELEASE of xchg_tail()
+-		 * such that the access to @prev must happen after.
++		 * We must ensure that the stores to @node are observed before
++		 * the write to prev->next. The address dependency from
++		 * xchg_tail is not sufficient to ensure this because the read
++		 * component of xchg_tail is unordered with respect to the
++		 * initialisation of @node.
+ 		 */
+-		smp_read_barrier_depends();
+-
+-		WRITE_ONCE(prev->next, node);
++		smp_store_release(&prev->next, node);
+ 
+ 		pv_wait_node(node, prev);
+ 		arch_mcs_spin_lock_contended(&node->locked);
+@@ -472,30 +472,27 @@ locked:
+ 	 * claim the lock:
+ 	 *
+ 	 * n,0,0 -> 0,0,1 : lock, uncontended
+-	 * *,0,0 -> *,0,1 : lock, contended
++	 * *,*,0 -> *,*,1 : lock, contended
+ 	 *
+-	 * If the queue head is the only one in the queue (lock value == tail),
+-	 * clear the tail code and grab the lock. Otherwise, we only need
+-	 * to grab the lock.
++	 * If the queue head is the only one in the queue (lock value == tail)
++	 * and nobody is pending, clear the tail code and grab the lock.
++	 * Otherwise, we only need to grab the lock.
+ 	 */
+-	for (;;) {
+-		/* In the PV case we might already have _Q_LOCKED_VAL set */
+-		if ((val & _Q_TAIL_MASK) != tail) {
+-			set_locked(lock);
+-			break;
+-		}
++
++	/* In the PV case we might already have _Q_LOCKED_VAL set */
++	if ((val & _Q_TAIL_MASK) == tail) {
+ 		/*
+ 		 * The smp_cond_load_acquire() call above has provided the
+-		 * necessary acquire semantics required for locking. At most
+-		 * two iterations of this loop may be ran.
++		 * necessary acquire semantics required for locking.
+ 		 */
+ 		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
+ 		if (old == val)
+-			goto release;	/* No contention */
+-
+-		val = old;
++			goto release; /* No contention */
+ 	}
+ 
++	/* Either somebody is queued behind us or _Q_PENDING_VAL is set */
++	set_locked(lock);
++
+ 	/*
+ 	 * contended path; wait for next if not observed yet, release.
+ 	 */
+diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
+index 15b6a39366c6..1e882dfc8b79 100644
+--- a/kernel/locking/qspinlock_paravirt.h
++++ b/kernel/locking/qspinlock_paravirt.h
+@@ -70,10 +70,8 @@ struct pv_node {
+ #define queued_spin_trylock(l)	pv_queued_spin_steal_lock(l)
+ static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
+ {
+-	struct __qspinlock *l = (void *)lock;
+-
+ 	if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
+-	    (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
++	    (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
+ 		qstat_inc(qstat_pv_lock_stealing, true);
+ 		return true;
+ 	}
+@@ -88,16 +86,7 @@ static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
+ #if _Q_PENDING_BITS == 8
+ static __always_inline void set_pending(struct qspinlock *lock)
+ {
+-	struct __qspinlock *l = (void *)lock;
+-
+-	WRITE_ONCE(l->pending, 1);
+-}
+-
+-static __always_inline void clear_pending(struct qspinlock *lock)
+-{
+-	struct __qspinlock *l = (void *)lock;
+-
+-	WRITE_ONCE(l->pending, 0);
++	WRITE_ONCE(lock->pending, 1);
+ }
+ 
+ /*
+@@ -107,10 +96,8 @@ static __always_inline void clear_pending(struct qspinlock *lock)
+  */
+ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
+ {
+-	struct __qspinlock *l = (void *)lock;
+-
+-	return !READ_ONCE(l->locked) &&
+-	       (cmpxchg_acquire(&l->locked_pending, _Q_PENDING_VAL,
++	return !READ_ONCE(lock->locked) &&
++	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
+ 				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
+ }
+ #else /* _Q_PENDING_BITS == 8 */
+@@ -119,11 +106,6 @@ static __always_inline void set_pending(struct qspinlock *lock)
+ 	atomic_or(_Q_PENDING_VAL, &lock->val);
+ }
+ 
+-static __always_inline void clear_pending(struct qspinlock *lock)
+-{
+-	atomic_andnot(_Q_PENDING_VAL, &lock->val);
+-}
+-
+ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
+ {
+ 	int val = atomic_read(&lock->val);
+@@ -355,7 +337,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
+ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
+ {
+ 	struct pv_node *pn = (struct pv_node *)node;
+-	struct __qspinlock *l = (void *)lock;
+ 
+ 	/*
+ 	 * If the vCPU is indeed halted, advance its state to match that of
+@@ -384,7 +365,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
+ 	 * the hash table later on at unlock time, no atomic instruction is
+ 	 * needed.
+ 	 */
+-	WRITE_ONCE(l->locked, _Q_SLOW_VAL);
++	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
+ 	(void)pv_hash(lock, pn);
+ }
+ 
+@@ -399,7 +380,6 @@ static u32
+ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
+ {
+ 	struct pv_node *pn = (struct pv_node *)node;
+-	struct __qspinlock *l = (void *)lock;
+ 	struct qspinlock **lp = NULL;
+ 	int waitcnt = 0;
+ 	int loop;
+@@ -450,13 +430,13 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
+ 			 *
+ 			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
+ 			 */
+-			if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
++			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
+ 				/*
+ 				 * The lock was free and now we own the lock.
+ 				 * Change the lock value back to _Q_LOCKED_VAL
+ 				 * and unhash the table.
+ 				 */
+-				WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
++				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
+ 				WRITE_ONCE(*lp, NULL);
+ 				goto gotlock;
+ 			}
+@@ -464,7 +444,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
+ 		WRITE_ONCE(pn->state, vcpu_hashed);
+ 		qstat_inc(qstat_pv_wait_head, true);
+ 		qstat_inc(qstat_pv_wait_again, waitcnt);
+-		pv_wait(&l->locked, _Q_SLOW_VAL);
++		pv_wait(&lock->locked, _Q_SLOW_VAL);
+ 
+ 		/*
+ 		 * Because of lock stealing, the queue head vCPU may not be
+@@ -489,7 +469,6 @@ gotlock:
+ __visible void
+ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
+ {
+-	struct __qspinlock *l = (void *)lock;
+ 	struct pv_node *node;
+ 
+ 	if (unlikely(locked != _Q_SLOW_VAL)) {
+@@ -518,7 +497,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
+ 	 * Now that we have a reference to the (likely) blocked pv_node,
+ 	 * release the lock.
+ 	 */
+-	smp_store_release(&l->locked, 0);
++	smp_store_release(&lock->locked, 0);
+ 
+ 	/*
+ 	 * At this point the memory pointed at by lock can be freed/reused,
+@@ -544,7 +523,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
+ #ifndef __pv_queued_spin_unlock
+ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
+ {
+-	struct __qspinlock *l = (void *)lock;
+ 	u8 locked;
+ 
+ 	/*
+@@ -552,7 +530,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
+ 	 * unhash. Otherwise it would be possible to have multiple @lock
+ 	 * entries, which would be BAD.
+ 	 */
+-	locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
++	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
+ 	if (likely(locked == _Q_LOCKED_VAL))
+ 		return;
+ 
+diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
+index 0e7f5428a148..0ed768b56c60 100644
+--- a/kernel/time/timer_list.c
++++ b/kernel/time/timer_list.c
+@@ -389,7 +389,7 @@ static int __init init_timer_list_procfs(void)
+ {
+ 	struct proc_dir_entry *pe;
+ 
+-	pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
++	pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
+ 	if (!pe)
+ 		return -ENOMEM;
+ 	return 0;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 7379bcf3baa0..9937d7cf2a64 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5534,6 +5534,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
+ 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
+ 		ftrace_shutdown(ops, 0);
+ 	ops->flags |= FTRACE_OPS_FL_DELETED;
++	ftrace_free_filter(ops);
+ 	mutex_unlock(&ftrace_lock);
+ }
+ 
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 43254c5e7e16..e2da180ca172 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -744,8 +744,10 @@ int set_trigger_filter(char *filter_str,
+ 
+ 	/* The filter is for the 'trigger' event, not the triggered event */
+ 	ret = create_event_filter(file->event_call, filter_str, false, &filter);
+-	if (ret)
+-		goto out;
++	/*
++	 * If create_event_filter() fails, filter still needs to be freed.
++	 * Which the calling code will do with data->filter.
++	 */
+  assign:
+ 	tmp = rcu_access_pointer(data->filter);
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 328ac10084e4..4c59b5507e7a 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1861,7 +1861,8 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
+ 		params[ac].acm = acm;
+ 		params[ac].uapsd = uapsd;
+ 
+-		if (params[ac].cw_min > params[ac].cw_max) {
++		if (params[ac].cw_min == 0 ||
++		    params[ac].cw_min > params[ac].cw_max) {
+ 			sdata_info(sdata,
+ 				   "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
+ 				   params[ac].cw_min, params[ac].cw_max, aci);
+diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
+index a2f19b9906e9..543518384aa7 100644
+--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
++++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
+@@ -168,7 +168,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ 	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
+-	u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
++	u32 ip2_from = 0, ip2_to = 0, ip2;
+ 	bool with_ports = false;
+ 	u8 cidr;
+ 	int ret;
+@@ -269,22 +269,21 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
+ 	}
+ 
+-	if (retried)
++	if (retried) {
+ 		ip = ntohl(h->next.ip);
++		p = ntohs(h->next.port);
++		ip2 = ntohl(h->next.ip2);
++	} else {
++		p = port;
++		ip2 = ip2_from;
++	}
+ 	for (; ip <= ip_to; ip++) {
+ 		e.ip = htonl(ip);
+-		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+-						       : port;
+ 		for (; p <= port_to; p++) {
+ 			e.port = htons(p);
+-			ip2 = retried &&
+-			      ip == ntohl(h->next.ip) &&
+-			      p == ntohs(h->next.port)
+-				? ntohl(h->next.ip2) : ip2_from;
+-			while (ip2 <= ip2_to) {
++			do {
+ 				e.ip2 = htonl(ip2);
+-				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
+-								&cidr);
++				ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr);
+ 				e.cidr = cidr - 1;
+ 				ret = adtfn(set, &e, &ext, &ext, flags);
+ 
+@@ -292,9 +291,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 					return ret;
+ 
+ 				ret = 0;
+-				ip2 = ip2_last + 1;
+-			}
++			} while (ip2++ < ip2_to);
++			ip2 = ip2_from;
+ 		}
++		p = port;
+ 	}
+ 	return ret;
+ }
+diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
+index 1c67a1761e45..5449e23af13a 100644
+--- a/net/netfilter/ipset/ip_set_hash_net.c
++++ b/net/netfilter/ipset/ip_set_hash_net.c
+@@ -143,7 +143,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_net4_elem e = { .cidr = HOST_MASK };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 ip = 0, ip_to = 0, last;
++	u32 ip = 0, ip_to = 0;
+ 	int ret;
+ 
+ 	if (tb[IPSET_ATTR_LINENO])
+@@ -193,16 +193,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	}
+ 	if (retried)
+ 		ip = ntohl(h->next.ip);
+-	while (ip <= ip_to) {
++	do {
+ 		e.ip = htonl(ip);
+-		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
++		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
+ 		ret = adtfn(set, &e, &ext, &ext, flags);
+ 		if (ret && !ip_set_eexist(ret, flags))
+ 			return ret;
+ 
+ 		ret = 0;
+-		ip = last + 1;
+-	}
++	} while (ip++ < ip_to);
+ 	return ret;
+ }
+ 
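
The loop rewrite above (repeated in the other ip_set hash types below) switches to a do/while where the comparison is done before the increment, so a range that ends at 255.255.255.255 terminates instead of wrapping ip back to 0 and iterating forever. A minimal sketch of the termination behaviour, stepping one address at a time rather than by CIDR blocks as ip_set_range_to_cidr() does:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Range ending at the largest possible address (255.255.255.255). */
	uint32_t ip_to = UINT32_MAX;
	uint32_t ip = UINT32_MAX - 2;
	unsigned int visited = 0;

	/*
	 * Post-increment in the loop condition: the comparison uses the
	 * current value, so the loop exits after processing ip_to even
	 * though ip wraps to 0 afterwards.  A "while (ip <= ip_to)" loop
	 * advancing with "ip = last + 1" would wrap and never terminate.
	 */
	do {
		visited++;
	} while (ip++ < ip_to);

	printf("visited %u addresses (expected 3)\n", visited);
	return 0;
}
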
+diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
+index d417074f1c1a..f5164c1efce2 100644
+--- a/net/netfilter/ipset/ip_set_hash_netiface.c
++++ b/net/netfilter/ipset/ip_set_hash_netiface.c
+@@ -200,7 +200,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 ip = 0, ip_to = 0, last;
++	u32 ip = 0, ip_to = 0;
+ 	int ret;
+ 
+ 	if (tb[IPSET_ATTR_LINENO])
+@@ -255,17 +255,16 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 
+ 	if (retried)
+ 		ip = ntohl(h->next.ip);
+-	while (ip <= ip_to) {
++	do {
+ 		e.ip = htonl(ip);
+-		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
++		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
+ 		ret = adtfn(set, &e, &ext, &ext, flags);
+ 
+ 		if (ret && !ip_set_eexist(ret, flags))
+ 			return ret;
+ 
+ 		ret = 0;
+-		ip = last + 1;
+-	}
++	} while (ip++ < ip_to);
+ 	return ret;
+ }
+ 
+diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
+index 7f9ae2e9645b..5a2b923bd81f 100644
+--- a/net/netfilter/ipset/ip_set_hash_netnet.c
++++ b/net/netfilter/ipset/ip_set_hash_netnet.c
+@@ -169,8 +169,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_netnet4_elem e = { };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 ip = 0, ip_to = 0, last;
+-	u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
++	u32 ip = 0, ip_to = 0;
++	u32 ip2 = 0, ip2_from = 0, ip2_to = 0;
+ 	int ret;
+ 
+ 	if (tb[IPSET_ATTR_LINENO])
+@@ -247,27 +247,27 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
+ 	}
+ 
+-	if (retried)
++	if (retried) {
+ 		ip = ntohl(h->next.ip[0]);
++		ip2 = ntohl(h->next.ip[1]);
++	} else {
++		ip2 = ip2_from;
++	}
+ 
+-	while (ip <= ip_to) {
++	do {
+ 		e.ip[0] = htonl(ip);
+-		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
+-		ip2 = (retried &&
+-		       ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
+-						   : ip2_from;
+-		while (ip2 <= ip2_to) {
++		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
++		do {
+ 			e.ip[1] = htonl(ip2);
+-			last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
++			ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
+ 			ret = adtfn(set, &e, &ext, &ext, flags);
+ 			if (ret && !ip_set_eexist(ret, flags))
+ 				return ret;
+ 
+ 			ret = 0;
+-			ip2 = last2 + 1;
+-		}
+-		ip = last + 1;
+-	}
++		} while (ip2++ < ip2_to);
++		ip2 = ip2_from;
++	} while (ip++ < ip_to);
+ 	return ret;
+ }
+ 
+diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
+index e6ef382febe4..1a187be9ebc8 100644
+--- a/net/netfilter/ipset/ip_set_hash_netport.c
++++ b/net/netfilter/ipset/ip_set_hash_netport.c
+@@ -161,7 +161,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 port, port_to, p = 0, ip = 0, ip_to = 0, last;
++	u32 port, port_to, p = 0, ip = 0, ip_to = 0;
+ 	bool with_ports = false;
+ 	u8 cidr;
+ 	int ret;
+@@ -239,25 +239,26 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
+ 	}
+ 
+-	if (retried)
++	if (retried) {
+ 		ip = ntohl(h->next.ip);
+-	while (ip <= ip_to) {
++		p = ntohs(h->next.port);
++	} else {
++		p = port;
++	}
++	do {
+ 		e.ip = htonl(ip);
+-		last = ip_set_range_to_cidr(ip, ip_to, &cidr);
++		ip = ip_set_range_to_cidr(ip, ip_to, &cidr);
+ 		e.cidr = cidr - 1;
+-		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
+-						       : port;
+ 		for (; p <= port_to; p++) {
+ 			e.port = htons(p);
+ 			ret = adtfn(set, &e, &ext, &ext, flags);
+-
+ 			if (ret && !ip_set_eexist(ret, flags))
+ 				return ret;
+ 
+ 			ret = 0;
+ 		}
+-		ip = last + 1;
+-	}
++		p = port;
++	} while (ip++ < ip_to);
+ 	return ret;
+ }
+ 
+diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
+index 0e6e40c6f652..613e18e720a4 100644
+--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
++++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
+@@ -184,8 +184,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 	ipset_adtfn adtfn = set->variant->adt[adt];
+ 	struct hash_netportnet4_elem e = { };
+ 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+-	u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
+-	u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
++	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
++	u32 ip2_from = 0, ip2_to = 0, ip2;
+ 	bool with_ports = false;
+ 	int ret;
+ 
+@@ -288,33 +288,34 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
+ 	}
+ 
+-	if (retried)
++	if (retried) {
+ 		ip = ntohl(h->next.ip[0]);
++		p = ntohs(h->next.port);
++		ip2 = ntohl(h->next.ip[1]);
++	} else {
++		p = port;
++		ip2 = ip2_from;
++	}
+ 
+-	while (ip <= ip_to) {
++	do {
+ 		e.ip[0] = htonl(ip);
+-		ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
+-		p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
+-							  : port;
++		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
+ 		for (; p <= port_to; p++) {
+ 			e.port = htons(p);
+-			ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
+-			       p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
+-							 : ip2_from;
+-			while (ip2 <= ip2_to) {
++			do {
+ 				e.ip[1] = htonl(ip2);
+-				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
+-								&e.cidr[1]);
++				ip2 = ip_set_range_to_cidr(ip2, ip2_to,
++							   &e.cidr[1]);
+ 				ret = adtfn(set, &e, &ext, &ext, flags);
+ 				if (ret && !ip_set_eexist(ret, flags))
+ 					return ret;
+ 
+ 				ret = 0;
+-				ip2 = ip2_last + 1;
+-			}
++			} while (ip2++ < ip2_to);
++			ip2 = ip2_from;
+ 		}
+-		ip = ip_last + 1;
+-	}
++		p = port;
++	} while (ip++ < ip_to);
+ 	return ret;
+ }
+ 
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 8eb0c4f3b3e9..d0282cc88b14 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -780,8 +780,15 @@ void xprt_connect(struct rpc_task *task)
+ 			return;
+ 		if (xprt_test_and_set_connecting(xprt))
+ 			return;
+-		xprt->stat.connect_start = jiffies;
+-		xprt->ops->connect(xprt, task);
++		/* Race breaker */
++		if (!xprt_connected(xprt)) {
++			xprt->stat.connect_start = jiffies;
++			xprt->ops->connect(xprt, task);
++		} else {
++			xprt_clear_connecting(xprt);
++			task->tk_status = 0;
++			rpc_wake_up_queued_task(&xprt->pending, task);
++		}
+ 	}
+ 	xprt_release_write(xprt, task);
+ }
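
The sunrpc change adds a race breaker: after winning the "connecting" flag, the connection state is checked again so that a connection established in the meantime is not disturbed by a redundant connect. A simplified single-threaded sketch of the check / test-and-set / re-check shape, using C11 atomics in place of the xprt locking; struct xprt and start_connect() here are hypothetical stand-ins:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct rpc_xprt and its connect path. */
struct xprt {
	atomic_bool connected;
	atomic_bool connecting;
};

static void start_connect(struct xprt *x)
{
	printf("issuing connect request\n");
	/* A real transport completes asynchronously, then sets 'connected'
	 * and clears 'connecting'. */
}

static void xprt_connect_sketch(struct xprt *x)
{
	if (atomic_load(&x->connected))
		return;				/* fast path: nothing to do */

	if (atomic_exchange(&x->connecting, true))
		return;				/* somebody else is connecting */

	/*
	 * Race breaker: the connection may have been established between
	 * the first check and winning the 'connecting' flag, so re-check
	 * before issuing a redundant connect.
	 */
	if (!atomic_load(&x->connected)) {
		start_connect(x);
	} else {
		atomic_store(&x->connecting, false);
		printf("connection raced in; skipping reconnect\n");
	}
}

int main(void)
{
	struct xprt x;

	atomic_init(&x.connected, false);
	atomic_init(&x.connecting, false);

	xprt_connect_sketch(&x);		/* not connected: issues connect */

	atomic_store(&x.connected, true);	/* pretend the connect finished */
	atomic_store(&x.connecting, false);
	xprt_connect_sketch(&x);		/* already connected: returns early */
	return 0;
}
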
+diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
+index 041dbbb30ff0..a0591d06c61b 100644
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -8070,7 +8070,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
+ 
+ 	reject_from_alignment = fd_prog < 0 &&
+ 				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
+-				strstr(bpf_vlog, "Unknown alignment.");
++				strstr(bpf_vlog, "misaligned");
+ #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ 	if (reject_from_alignment) {
+ 		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",

