From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sat, 22 Jun 2019 19:06:48 +0000 (UTC)
Message-ID: <1561230388.62a38b9ab69d2607601ef3a8fcdfaec29c1a5053.mpagano@gentoo>
commit: 62a38b9ab69d2607601ef3a8fcdfaec29c1a5053
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Sat Jun 22 19:06:28 2019 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Sat Jun 22 19:06:28 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=62a38b9a
Linux patches 4.19.54 and 4.19.55
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
0000_README | 8 +
1053_linux-4.19.54.patch | 1700 ++++++++++++++++++++++++++++++++++++++++++++++
1054_linux-4.19.55.patch | 27 +
3 files changed, 1735 insertions(+)
diff --git a/0000_README b/0000_README
index 76bab6e..545a5dd 100644
--- a/0000_README
+++ b/0000_README
@@ -255,6 +255,14 @@ Patch: 1052_linux-4.19.53.patch
From: https://www.kernel.org
Desc: Linux 4.19.53
+Patch: 1053_linux-4.19.54.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.54
+
+Patch: 1054_linux-4.19.55.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.55
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1053_linux-4.19.54.patch b/1053_linux-4.19.54.patch
new file mode 100644
index 0000000..e75cc51
--- /dev/null
+++ b/1053_linux-4.19.54.patch
@@ -0,0 +1,1700 @@
+diff --git a/Makefile b/Makefile
+index bedcb121dc3d..b234837e4d07 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 53
++SUBLEVEL = 54
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
+index ad8be16a39c9..58102652bf9e 100644
+--- a/arch/arm64/include/asm/syscall.h
++++ b/arch/arm64/include/asm/syscall.h
+@@ -20,7 +20,7 @@
+ #include <linux/compat.h>
+ #include <linux/err.h>
+
+-typedef long (*syscall_fn_t)(struct pt_regs *regs);
++typedef long (*syscall_fn_t)(const struct pt_regs *regs);
+
+ extern const syscall_fn_t sys_call_table[];
+
+diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
+index a4477e515b79..507d0ee6bc69 100644
+--- a/arch/arm64/include/asm/syscall_wrapper.h
++++ b/arch/arm64/include/asm/syscall_wrapper.h
+@@ -30,10 +30,10 @@
+ } \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+-#define COMPAT_SYSCALL_DEFINE0(sname) \
+- asmlinkage long __arm64_compat_sys_##sname(void); \
+- ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
+- asmlinkage long __arm64_compat_sys_##sname(void)
++#define COMPAT_SYSCALL_DEFINE0(sname) \
++ asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused); \
++ ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
++ asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused)
+
+ #define COND_SYSCALL_COMPAT(name) \
+ cond_syscall(__arm64_compat_sys_##name);
+@@ -62,11 +62,11 @@
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+ #ifndef SYSCALL_DEFINE0
+-#define SYSCALL_DEFINE0(sname) \
+- SYSCALL_METADATA(_##sname, 0); \
+- asmlinkage long __arm64_sys_##sname(void); \
+- ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
+- asmlinkage long __arm64_sys_##sname(void)
++#define SYSCALL_DEFINE0(sname) \
++ SYSCALL_METADATA(_##sname, 0); \
++ asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused); \
++ ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
++ asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused)
+ #endif
+
+ #ifndef COND_SYSCALL
+diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
+index 162a95ed0881..fe20c461582a 100644
+--- a/arch/arm64/kernel/sys.c
++++ b/arch/arm64/kernel/sys.c
+@@ -47,22 +47,26 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
+ return ksys_personality(personality);
+ }
+
++asmlinkage long sys_ni_syscall(void);
++
++asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused)
++{
++ return sys_ni_syscall();
++}
++
+ /*
+ * Wrappers to pass the pt_regs argument.
+ */
+ #define __arm64_sys_personality __arm64_sys_arm64_personality
+
+-asmlinkage long sys_ni_syscall(const struct pt_regs *);
+-#define __arm64_sys_ni_syscall sys_ni_syscall
+-
+ #undef __SYSCALL
+ #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
+ #include <asm/unistd.h>
+
+ #undef __SYSCALL
+-#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
++#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
+
+ const syscall_fn_t sys_call_table[__NR_syscalls] = {
+- [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
++ [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
+ #include <asm/unistd.h>
+ };
+diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
+index 0f8bcb7de700..3c80a40c1c9d 100644
+--- a/arch/arm64/kernel/sys32.c
++++ b/arch/arm64/kernel/sys32.c
+@@ -133,17 +133,14 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
+ return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
+ }
+
+-asmlinkage long sys_ni_syscall(const struct pt_regs *);
+-#define __arm64_sys_ni_syscall sys_ni_syscall
+-
+ #undef __SYSCALL
+ #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
+ #include <asm/unistd32.h>
+
+ #undef __SYSCALL
+-#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
++#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
+
+ const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
+- [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
++ [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
+ #include <asm/unistd32.h>
+ };
+diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
+index aa19b7ac8222..476c7b4be378 100644
+--- a/arch/ia64/mm/numa.c
++++ b/arch/ia64/mm/numa.c
+@@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)
+
+ return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
+ }
++EXPORT_SYMBOL(paddr_to_nid);
+
+ #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
+ /*
+diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
+index bccc5051249e..2b6049e83970 100644
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -299,6 +299,7 @@ struct kvm_arch {
+ #ifdef CONFIG_PPC_BOOK3S_64
+ struct list_head spapr_tce_tables;
+ struct list_head rtas_tokens;
++ struct mutex rtas_token_lock;
+ DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
+ #endif
+ #ifdef CONFIG_KVM_MPIC
+diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
+index 87348e498c89..281f074581a3 100644
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -840,6 +840,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
+ #ifdef CONFIG_PPC64
+ INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
+ INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
++ mutex_init(&kvm->arch.rtas_token_lock);
+ #endif
+
+ return kvm->arch.kvm_ops->init_vm(kvm);
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 3e3a71594e63..083dcedba11c 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -426,12 +426,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
+
+ static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
+ {
+- struct kvm_vcpu *ret;
+-
+- mutex_lock(&kvm->lock);
+- ret = kvm_get_vcpu_by_id(kvm, id);
+- mutex_unlock(&kvm->lock);
+- return ret;
++ return kvm_get_vcpu_by_id(kvm, id);
+ }
+
+ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
+@@ -1309,7 +1304,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 mask;
+
+- mutex_lock(&kvm->lock);
+ spin_lock(&vc->lock);
+ /*
+ * If ILE (interrupt little-endian) has changed, update the
+@@ -1349,7 +1343,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
+ mask &= 0xFFFFFFFF;
+ vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
+ spin_unlock(&vc->lock);
+- mutex_unlock(&kvm->lock);
+ }
+
+ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
+index 2d3b2b1cc272..8f2355138f80 100644
+--- a/arch/powerpc/kvm/book3s_rtas.c
++++ b/arch/powerpc/kvm/book3s_rtas.c
+@@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
+ {
+ struct rtas_token_definition *d, *tmp;
+
+- lockdep_assert_held(&kvm->lock);
++ lockdep_assert_held(&kvm->arch.rtas_token_lock);
+
+ list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
+ if (rtas_name_matches(d->handler->name, name)) {
+@@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
+ bool found;
+ int i;
+
+- lockdep_assert_held(&kvm->lock);
++ lockdep_assert_held(&kvm->arch.rtas_token_lock);
+
+ list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
+ if (d->token == token)
+@@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
+ if (copy_from_user(&args, argp, sizeof(args)))
+ return -EFAULT;
+
+- mutex_lock(&kvm->lock);
++ mutex_lock(&kvm->arch.rtas_token_lock);
+
+ if (args.token)
+ rc = rtas_token_define(kvm, args.name, args.token);
+ else
+ rc = rtas_token_undefine(kvm, args.name);
+
+- mutex_unlock(&kvm->lock);
++ mutex_unlock(&kvm->arch.rtas_token_lock);
+
+ return rc;
+ }
+@@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
+ orig_rets = args.rets;
+ args.rets = &args.args[be32_to_cpu(args.nargs)];
+
+- mutex_lock(&vcpu->kvm->lock);
++ mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
+
+ rc = -ENOENT;
+ list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
+@@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
+ }
+ }
+
+- mutex_unlock(&vcpu->kvm->lock);
++ mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
+
+ if (rc == 0) {
+ args.rets = orig_rets;
+@@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
+ {
+ struct rtas_token_definition *d, *tmp;
+
+- lockdep_assert_held(&kvm->lock);
+-
+ list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
+ list_del(&d->list);
+ kfree(d);
+diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
+index 3d27f02695e4..828f6656f8f7 100644
+--- a/arch/powerpc/platforms/powernv/opal-imc.c
++++ b/arch/powerpc/platforms/powernv/opal-imc.c
+@@ -161,6 +161,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+ struct imc_pmu *pmu_ptr;
+ u32 offset;
+
++ /* Return for unknown domain */
++ if (domain < 0)
++ return -EINVAL;
++
+ /* memory for pmu */
+ pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
+ if (!pmu_ptr)
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index b7b01d762d32..e91814d1a27f 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -684,7 +684,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -693,7 +693,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
+ EVENT_CONSTRAINT_END
+@@ -701,7 +701,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
+
+ struct event_constraint intel_slm_pebs_event_constraints[] = {
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
+ EVENT_CONSTRAINT_END
+@@ -726,7 +726,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -743,7 +743,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
+ EVENT_CONSTRAINT_END
+ };
+
+@@ -752,7 +752,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
+ INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
+ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+@@ -767,9 +767,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
+ INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
+ INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+@@ -783,9 +783,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
+@@ -806,9 +806,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+ INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
+ /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
+@@ -829,9 +829,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
+ struct event_constraint intel_skl_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
+ /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
+ /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
+- INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
+ INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 6a25278e0092..da1f5e78363e 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -819,8 +819,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
+ {
+ set_cpu_cap(c, X86_FEATURE_ZEN);
+
+- /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
+- if (!cpu_has(c, X86_FEATURE_CPB))
++ /*
++ * Fix erratum 1076: CPB feature bit not being set in CPUID.
++ * Always set it, except when running under a hypervisor.
++ */
++ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
+ set_cpu_cap(c, X86_FEATURE_CPB);
+ }
+
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index a7c2673ffd36..1806260938e8 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -948,8 +948,8 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
+ u32 sys_target = acpi_target_system_state();
+ int ret, state;
+
+- if (!pm_runtime_suspended(dev) || !adev ||
+- device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
++ if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid &&
++ device_may_wakeup(dev) != !!adev->wakeup.prepare_count))
+ return true;
+
+ if (sys_target == ACPI_STATE_S0)
+diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
+index 421b05392220..ca3218337fd7 100644
+--- a/drivers/clk/ti/clkctrl.c
++++ b/drivers/clk/ti/clkctrl.c
+@@ -137,9 +137,6 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
+ int ret;
+ union omap4_timeout timeout = { 0 };
+
+- if (!clk->enable_bit)
+- return 0;
+-
+ if (clk->clkdm) {
+ ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+ if (ret) {
+@@ -151,6 +148,9 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
+ }
+ }
+
++ if (!clk->enable_bit)
++ return 0;
++
+ val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+ val &= ~OMAP4_MODULEMODE_MASK;
+@@ -179,7 +179,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
+ union omap4_timeout timeout = { 0 };
+
+ if (!clk->enable_bit)
+- return;
++ goto exit;
+
+ val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 4f52c3a8ec99..ed51221621a5 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -784,6 +784,7 @@ config GPIO_ADP5588
+ config GPIO_ADP5588_IRQ
+ bool "Interrupt controller support for ADP5588"
+ depends on GPIO_ADP5588=y
++ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to enable the adp5588 to be used as an interrupt
+ controller. It requires the driver to be built in the kernel.
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+index 9146e30e24a6..468dff2f7904 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+@@ -124,6 +124,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
+ return;
+ etnaviv_dump_core = false;
+
++ mutex_lock(&gpu->mmu->lock);
++
+ mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
+
+ /* We always dump registers, mmu, ring and end marker */
+@@ -166,6 +168,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ PAGE_KERNEL);
+ if (!iter.start) {
++ mutex_unlock(&gpu->mmu->lock);
+ dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
+ return;
+ }
+@@ -233,6 +236,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
+ obj->base.size);
+ }
+
++ mutex_unlock(&gpu->mmu->lock);
++
+ etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
+
+ dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index ccd76c71af09..cb07651f4b46 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -283,6 +283,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
+ msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
+ msgs[i].len < msgs[i].buf[0] +
+ I2C_SMBUS_BLOCK_MAX) {
++ i++;
+ res = -EINVAL;
+ break;
+ }
+diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
+index b2abc44fa5cb..a73337b74f41 100644
+--- a/drivers/isdn/mISDN/socket.c
++++ b/drivers/isdn/mISDN/socket.c
+@@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ memcpy(di.channelmap, dev->channelmap,
+ sizeof(di.channelmap));
+ di.nrbchan = dev->nrbchan;
+- strcpy(di.name, dev_name(&dev->dev));
++ strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
+ if (copy_to_user((void __user *)arg, &di, sizeof(di)))
+ err = -EFAULT;
+ } else
+@@ -677,7 +677,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ memcpy(di.channelmap, dev->channelmap,
+ sizeof(di.channelmap));
+ di.nrbchan = dev->nrbchan;
+- strcpy(di.name, dev_name(&dev->dev));
++ strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
+ if (copy_to_user((void __user *)arg, &di, sizeof(di)))
+ err = -EFAULT;
+ } else
+@@ -691,6 +691,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ err = -EFAULT;
+ break;
+ }
++ dn.name[sizeof(dn.name) - 1] = '\0';
+ dev = get_mdevice(dn.id);
+ if (dev)
+ err = device_rename(&dev->dev, dn.name);
+diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
+index 6dedd43442cc..35b767baf21f 100644
+--- a/drivers/net/dsa/rtl8366.c
++++ b/drivers/net/dsa/rtl8366.c
+@@ -307,7 +307,8 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+
+- if (!smi->ops->is_vlan_valid(smi, port))
++ /* Use VLAN nr port + 1 since VLAN0 is not valid */
++ if (!smi->ops->is_vlan_valid(smi, port + 1))
+ return -EINVAL;
+
+ dev_info(smi->dev, "%s filtering on port %d\n",
+@@ -318,12 +319,12 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+ * The hardware support filter ID (FID) 0..7, I have no clue how to
+ * support this in the driver when the callback only says on/off.
+ */
+- ret = smi->ops->get_vlan_4k(smi, port, &vlan4k);
++ ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k);
+ if (ret)
+ return ret;
+
+ /* Just set the filter to FID 1 for now then */
+- ret = rtl8366_set_vlan(smi, port,
++ ret = rtl8366_set_vlan(smi, port + 1,
+ vlan4k.member,
+ vlan4k.untag,
+ 1);
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 6f3312350cac..b3c7994d73eb 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -139,10 +139,10 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
+ bool aq_ring_tx_clean(struct aq_ring_s *self)
+ {
+ struct device *dev = aq_nic_get_dev(self->aq_nic);
+- unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET;
++ unsigned int budget;
+
+- for (; self->sw_head != self->hw_head && budget--;
+- self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
++ for (budget = AQ_CFG_TX_CLEAN_BUDGET;
++ budget && self->sw_head != self->hw_head; budget--) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+
+ if (likely(buff->is_mapped)) {
+@@ -167,6 +167,7 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
+
+ buff->pa = 0U;
+ buff->eop_index = 0xffffU;
++ self->sw_head = aq_ring_next_dx(self, self->sw_head);
+ }
+
+ return !!budget;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+index 56363ff5c891..51cd1f98bcf0 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+@@ -695,38 +695,41 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
+ if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
+ /* MAC error or DMA error */
+ buff->is_error = 1U;
+- } else {
+- if (self->aq_nic_cfg->is_rss) {
+- /* last 4 byte */
+- u16 rss_type = rxd_wb->type & 0xFU;
+-
+- if (rss_type && rss_type < 0x8U) {
+- buff->is_hash_l4 = (rss_type == 0x4 ||
+- rss_type == 0x5);
+- buff->rss_hash = rxd_wb->rss_hash;
+- }
++ }
++ if (self->aq_nic_cfg->is_rss) {
++ /* last 4 byte */
++ u16 rss_type = rxd_wb->type & 0xFU;
++
++ if (rss_type && rss_type < 0x8U) {
++ buff->is_hash_l4 = (rss_type == 0x4 ||
++ rss_type == 0x5);
++ buff->rss_hash = rxd_wb->rss_hash;
+ }
++ }
+
+- if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+- buff->len = rxd_wb->pkt_len %
+- AQ_CFG_RX_FRAME_MAX;
+- buff->len = buff->len ?
+- buff->len : AQ_CFG_RX_FRAME_MAX;
+- buff->next = 0U;
+- buff->is_eop = 1U;
++ if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
++ buff->len = rxd_wb->pkt_len %
++ AQ_CFG_RX_FRAME_MAX;
++ buff->len = buff->len ?
++ buff->len : AQ_CFG_RX_FRAME_MAX;
++ buff->next = 0U;
++ buff->is_eop = 1U;
++ } else {
++ buff->len =
++ rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
++ AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
++
++ if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
++ rxd_wb->status) {
++ /* LRO */
++ buff->next = rxd_wb->next_desc_ptr;
++ ++ring->stats.rx.lro_packets;
+ } else {
+- if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
+- rxd_wb->status) {
+- /* LRO */
+- buff->next = rxd_wb->next_desc_ptr;
+- ++ring->stats.rx.lro_packets;
+- } else {
+- /* jumbo */
+- buff->next =
+- aq_ring_next_dx(ring,
+- ring->hw_head);
+- ++ring->stats.rx.jumbo_packets;
+- }
++ /* jumbo */
++ buff->next =
++ aq_ring_next_dx(ring,
++ ring->hw_head);
++ ++ring->stats.rx.jumbo_packets;
+ }
+ }
+ }
+diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
+index 66535d1653f6..f16853c3c851 100644
+--- a/drivers/net/ethernet/dec/tulip/de4x5.c
++++ b/drivers/net/ethernet/dec/tulip/de4x5.c
+@@ -2107,7 +2107,6 @@ static struct eisa_driver de4x5_eisa_driver = {
+ .remove = de4x5_eisa_remove,
+ }
+ };
+-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
+ #endif
+
+ #ifdef CONFIG_PCI
+diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+index 3f6749fc889f..bfb16a474490 100644
+--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+@@ -1105,7 +1105,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
+ break;
+ case ETHTOOL_GRXRINGS:
+- cmd->data = adapter->num_rx_qs - 1;
++ cmd->data = adapter->num_rx_qs;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+index 392fd895f278..ae2240074d8e 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+@@ -1905,8 +1905,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
+ }
+
+ /* Find tcam entry with matched pair <vid,port> */
+-static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
+- u16 mask)
++static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
+ {
+ unsigned char byte[2], enable[2];
+ struct mvpp2_prs_entry pe;
+@@ -1914,13 +1913,13 @@ static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
+ int tid;
+
+ /* Go through the all entries with MVPP2_PRS_LU_VID */
+- for (tid = MVPP2_PE_VID_FILT_RANGE_START;
+- tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
+- if (!priv->prs_shadow[tid].valid ||
+- priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
++ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
++ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
++ if (!port->priv->prs_shadow[tid].valid ||
++ port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
+ continue;
+
+- mvpp2_prs_init_from_hw(priv, &pe, tid);
++ mvpp2_prs_init_from_hw(port->priv, &pe, tid);
+
+ mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+ mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+@@ -1950,7 +1949,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
+ memset(&pe, 0, sizeof(pe));
+
+ /* Scan TCAM and see if entry with this <vid,port> already exist */
+- tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
++ tid = mvpp2_prs_vid_range_find(port, vid, mask);
+
+ reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
+ if (reg_val & MVPP2_DSA_EXTENDED)
+@@ -2008,7 +2007,7 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
+ int tid;
+
+ /* Scan TCAM and see if entry with this <vid,port> already exist */
+- tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
++ tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
+
+ /* No such entry */
+ if (tid < 0)
+@@ -2026,8 +2025,10 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
+
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+- if (priv->prs_shadow[tid].valid)
+- mvpp2_prs_vid_entry_remove(port, tid);
++ if (priv->prs_shadow[tid].valid) {
++ mvpp2_prs_hw_inv(priv, tid);
++ priv->prs_shadow[tid].valid = false;
++ }
+ }
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+index 37ba7c78859d..1c225be9c7db 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+@@ -342,11 +342,32 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
+ }
+ EXPORT_SYMBOL(mlx5_unregister_interface);
+
++/* Must be called with intf_mutex held */
++static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol)
++{
++ struct mlx5_device_context *dev_ctx;
++ struct mlx5_interface *intf;
++ bool found = false;
++
++ list_for_each_entry(intf, &intf_list, list) {
++ if (intf->protocol == protocol) {
++ dev_ctx = mlx5_get_device(intf, &mdev->priv);
++ if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
++ found = true;
++ break;
++ }
++ }
++
++ return found;
++}
++
+ void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
+ {
+ mutex_lock(&mlx5_intf_mutex);
+- mlx5_remove_dev_by_protocol(mdev, protocol);
+- mlx5_add_dev_by_protocol(mdev, protocol);
++ if (mlx5_has_added_dev_by_protocol(mdev, protocol)) {
++ mlx5_remove_dev_by_protocol(mdev, protocol);
++ mlx5_add_dev_by_protocol(mdev, protocol);
++ }
+ mutex_unlock(&mlx5_intf_mutex);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index c5b82e283d13..ff2f6b8e2fab 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2488,6 +2488,10 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
+ mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL);
+
+ autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
++ if (!autoneg && cmd->base.speed == SPEED_56000) {
++ netdev_err(dev, "56G not supported with autoneg off\n");
++ return -EINVAL;
++ }
+ eth_proto_new = autoneg ?
+ mlxsw_sp_to_ptys_advert_link(cmd) :
+ mlxsw_sp_to_ptys_speed(cmd->base.speed);
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index f27a0dc8c563..5e3e6e262ba3 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1588,6 +1588,10 @@ static void sh_eth_dev_exit(struct net_device *ndev)
+ sh_eth_get_stats(ndev);
+ mdp->cd->soft_reset(ndev);
+
++ /* Set the RMII mode again if required */
++ if (mdp->cd->rmiimode)
++ sh_eth_write(ndev, 0x1, RMIIMODE);
++
+ /* Set MAC address again */
+ update_mac_address(ndev);
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 50c00822b2d8..45e64d71a93f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3319,6 +3319,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
+ entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
+ }
+ rx_q->dirty_rx = entry;
++ stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+ }
+
+ /**
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 9d699bd5f715..cf6b9b1771f1 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2405,7 +2405,7 @@ static struct hv_driver netvsc_drv = {
+ .probe = netvsc_probe,
+ .remove = netvsc_remove,
+ .driver = {
+- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
++ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+ };
+
+diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
+index b3935778b19f..e4bf9e7d7583 100644
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -260,10 +260,8 @@ static int dp83867_config_init(struct phy_device *phydev)
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
+ if (ret)
+ return ret;
+- }
+
+- if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
+- (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
++ /* Set up RGMII delays */
+ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index f6e70f2dfd12..e029c7977a56 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -54,6 +54,10 @@ struct phylink {
+
+ /* The link configuration settings */
+ struct phylink_link_state link_config;
++
++ /* The current settings */
++ phy_interface_t cur_interface;
++
+ struct gpio_desc *link_gpio;
+ struct timer_list link_poll;
+ void (*get_fixed_state)(struct net_device *dev,
+@@ -477,12 +481,12 @@ static void phylink_resolve(struct work_struct *w)
+ if (!link_state.link) {
+ netif_carrier_off(ndev);
+ pl->ops->mac_link_down(ndev, pl->link_an_mode,
+- pl->phy_state.interface);
++ pl->cur_interface);
+ netdev_info(ndev, "Link is Down\n");
+ } else {
++ pl->cur_interface = link_state.interface;
+ pl->ops->mac_link_up(ndev, pl->link_an_mode,
+- pl->phy_state.interface,
+- pl->phydev);
++ pl->cur_interface, pl->phydev);
+
+ netif_carrier_on(ndev);
+
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index f8436d1c4d45..f7218c1673ce 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -625,7 +625,8 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
+ if (!adev || !acpi_device_power_manageable(adev))
+ return false;
+
+- if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
++ if (adev->wakeup.flags.valid &&
++ device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
+ return true;
+
+ if (acpi_target_system_state() == ACPI_STATE_S0)
+diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
+index f2c561ca731a..cd2c247d6d0c 100644
+--- a/drivers/scsi/cxgbi/libcxgbi.c
++++ b/drivers/scsi/cxgbi/libcxgbi.c
+@@ -641,6 +641,10 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
+
+ if (ndev->flags & IFF_LOOPBACK) {
+ ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
++ if (!ndev) {
++ err = -ENETUNREACH;
++ goto rel_neigh;
++ }
+ mtu = ndev->mtu;
+ pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
+ n->dev->name, ndev->name, mtu);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 12dc7100bb4c..d1154baa9436 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -1173,10 +1173,8 @@ static int __init alua_init(void)
+ int r;
+
+ kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
+- if (!kaluad_wq) {
+- /* Temporary failure, bypass */
+- return SCSI_DH_DEV_TEMP_BUSY;
+- }
++ if (!kaluad_wq)
++ return -ENOMEM;
+
+ r = scsi_register_device_handler(&alua_dh);
+ if (r != 0) {
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index 231eb79efa32..b141d1061f38 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -989,6 +989,8 @@ static struct domain_device *sas_ex_discover_expander(
+ list_del(&child->dev_list_node);
+ spin_unlock_irq(&parent->port->dev_list_lock);
+ sas_put_device(child);
++ sas_port_delete(phy->port);
++ phy->port = NULL;
+ return NULL;
+ }
+ list_add_tail(&child->siblings, &parent->ex_dev.children);
+diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
+index 3781e8109dd7..411d656f2530 100644
+--- a/drivers/scsi/smartpqi/smartpqi_init.c
++++ b/drivers/scsi/smartpqi/smartpqi_init.c
+@@ -6378,7 +6378,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
+ else
+ mask = DMA_BIT_MASK(32);
+
+- rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
++ rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
+ goto disable_device;
+diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
+index cff7b1e07153..b688ebc01740 100644
+--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
++++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
+@@ -576,7 +576,7 @@ exit:
+ dev->colourfx.enable ? "true" : "false",
+ dev->colourfx.u, dev->colourfx.v,
+ ret, (ret == 0 ? 0 : -EINVAL));
+- return (ret == 0 ? 0 : EINVAL);
++ return (ret == 0 ? 0 : -EINVAL);
+ }
+
+ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
+@@ -600,7 +600,7 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
+ "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
+ __func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
+ (ret == 0 ? 0 : -EINVAL));
+- return (ret == 0 ? 0 : EINVAL);
++ return (ret == 0 ? 0 : -EINVAL);
+ }
+
+ static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
+diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
+index 63e34d868de8..f8503f8fc44e 100644
+--- a/drivers/tty/serial/sunhv.c
++++ b/drivers/tty/serial/sunhv.c
+@@ -397,7 +397,7 @@ static const struct uart_ops sunhv_pops = {
+ static struct uart_driver sunhv_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "sunhv",
+- .dev_name = "ttyS",
++ .dev_name = "ttyHV",
+ .major = TTY_MAJOR,
+ };
+
+diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
+index cadc01336bf8..7ba6afc7ef23 100644
+--- a/drivers/usb/host/xhci-debugfs.c
++++ b/drivers/usb/host/xhci-debugfs.c
+@@ -440,6 +440,9 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
+ struct xhci_ep_priv *epriv;
+ struct xhci_slot_priv *spriv = dev->debugfs_private;
+
++ if (!spriv)
++ return;
++
+ if (spriv->eps[ep_index])
+ return;
+
+diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
+index 91da7e44d5d4..3a144eecb6a7 100644
+--- a/drivers/xen/pvcalls-front.c
++++ b/drivers/xen/pvcalls-front.c
+@@ -538,7 +538,6 @@ out:
+ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
+ {
+- struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ int sent, tot_sent = 0;
+ int count = 0, flags;
+@@ -550,7 +549,6 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+- bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ mutex_lock(&map->active.out_mutex);
+ if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
+@@ -633,7 +631,6 @@ out:
+ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
+ {
+- struct pvcalls_bedata *bedata;
+ int ret;
+ struct sock_mapping *map;
+
+@@ -643,7 +640,6 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+- bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ mutex_lock(&map->active.in_mutex);
+ if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
+diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
+index 092981171df1..d75a2385b37c 100644
+--- a/drivers/xen/xenbus/xenbus.h
++++ b/drivers/xen/xenbus/xenbus.h
+@@ -83,6 +83,7 @@ struct xb_req_data {
+ int num_vecs;
+ int err;
+ enum xb_req_state state;
++ bool user_req;
+ void (*cb)(struct xb_req_data *);
+ void *par;
+ };
+@@ -133,4 +134,6 @@ void xenbus_ring_ops_init(void);
+ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par);
+ void xenbus_dev_queue_reply(struct xb_req_data *req);
+
++extern unsigned int xb_dev_generation_id;
++
+ #endif
+diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
+index 0782ff3c2273..39c63152a358 100644
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -62,6 +62,8 @@
+
+ #include "xenbus.h"
+
++unsigned int xb_dev_generation_id;
++
+ /*
+ * An element of a list of outstanding transactions, for which we're
+ * still waiting a reply.
+@@ -69,6 +71,7 @@
+ struct xenbus_transaction_holder {
+ struct list_head list;
+ struct xenbus_transaction handle;
++ unsigned int generation_id;
+ };
+
+ /*
+@@ -441,6 +444,7 @@ static int xenbus_write_transaction(unsigned msg_type,
+ rc = -ENOMEM;
+ goto out;
+ }
++ trans->generation_id = xb_dev_generation_id;
+ list_add(&trans->list, &u->transactions);
+ } else if (msg->hdr.tx_id != 0 &&
+ !xenbus_get_transaction(u, msg->hdr.tx_id))
+@@ -449,6 +453,20 @@ static int xenbus_write_transaction(unsigned msg_type,
+ !(msg->hdr.len == 2 &&
+ (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
+ return xenbus_command_reply(u, XS_ERROR, "EINVAL");
++ else if (msg_type == XS_TRANSACTION_END) {
++ trans = xenbus_get_transaction(u, msg->hdr.tx_id);
++ if (trans && trans->generation_id != xb_dev_generation_id) {
++ list_del(&trans->list);
++ kfree(trans);
++ if (!strcmp(msg->body, "T"))
++ return xenbus_command_reply(u, XS_ERROR,
++ "EAGAIN");
++ else
++ return xenbus_command_reply(u,
++ XS_TRANSACTION_END,
++ "OK");
++ }
++ }
+
+ rc = xenbus_dev_request_and_reply(&msg->hdr, u);
+ if (rc && trans) {
+diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+index 49a3874ae6bb..ddc18da61834 100644
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -105,6 +105,7 @@ static void xs_suspend_enter(void)
+
+ static void xs_suspend_exit(void)
+ {
++ xb_dev_generation_id++;
+ spin_lock(&xs_state_lock);
+ xs_suspend_active--;
+ spin_unlock(&xs_state_lock);
+@@ -125,7 +126,7 @@ static uint32_t xs_request_enter(struct xb_req_data *req)
+ spin_lock(&xs_state_lock);
+ }
+
+- if (req->type == XS_TRANSACTION_START)
++ if (req->type == XS_TRANSACTION_START && !req->user_req)
+ xs_state_users++;
+ xs_state_users++;
+ rq_id = xs_request_id++;
+@@ -140,7 +141,7 @@ void xs_request_exit(struct xb_req_data *req)
+ spin_lock(&xs_state_lock);
+ xs_state_users--;
+ if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) ||
+- (req->type == XS_TRANSACTION_END &&
++ (req->type == XS_TRANSACTION_END && !req->user_req &&
+ !WARN_ON_ONCE(req->msg.type == XS_ERROR &&
+ !strcmp(req->body, "ENOENT"))))
+ xs_state_users--;
+@@ -286,6 +287,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
+ req->num_vecs = 1;
+ req->cb = xenbus_dev_queue_reply;
+ req->par = par;
++ req->user_req = true;
+
+ xs_send(req, msg);
+
+@@ -313,6 +315,7 @@ static void *xs_talkv(struct xenbus_transaction t,
+ req->vec = iovec;
+ req->num_vecs = num_vecs;
+ req->cb = xs_wake_up;
++ req->user_req = false;
+
+ msg.req_id = 0;
+ msg.tx_id = t.id;
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index 920d350df37b..809c1edffbaf 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry,
+ if (sd) {
+ /* Coordinate with configfs_readdir */
+ spin_lock(&configfs_dirent_lock);
+- /* Coordinate with configfs_attach_attr where will increase
+- * sd->s_count and update sd->s_dentry to new allocated one.
+- * Only set sd->dentry to null when this dentry is the only
+- * sd owner.
+- * If not do so, configfs_d_iput may run just after
+- * configfs_attach_attr and set sd->s_dentry to null
+- * even it's still in use.
++ /*
++ * Set sd->s_dentry to null only when this dentry is the one
++ * that is going to be killed. Otherwise configfs_d_iput may
++ * run just after configfs_attach_attr and set sd->s_dentry to
++ * NULL even it's still in use.
+ */
+- if (atomic_read(&sd->s_count) <= 2)
++ if (sd->s_dentry == dentry)
+ sd->s_dentry = NULL;
+
+ spin_unlock(&configfs_dirent_lock);
+diff --git a/fs/inode.c b/fs/inode.c
+index 42f6d25f32a5..5c63693326bb 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1817,8 +1817,13 @@ int file_remove_privs(struct file *file)
+ int kill;
+ int error = 0;
+
+- /* Fast path for nothing security related */
+- if (IS_NOSEC(inode))
++ /*
++ * Fast path for nothing security related.
++ * As well for non-regular files, e.g. blkdev inodes.
++ * For example, blkdev_write_iter() might get here
++ * trying to remove privs which it is not allowed to.
++ */
++ if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
+ return 0;
+
+ kill = dentry_needs_remove_privs(dentry);
+diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
+index f65f2b2f594d..1906cc962c4d 100644
+--- a/fs/ocfs2/filecheck.c
++++ b/fs/ocfs2/filecheck.c
+@@ -193,6 +193,7 @@ int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb)
+ ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck,
+ NULL, "filecheck");
+ if (ret) {
++ kobject_put(&entry->fs_kobj);
+ kfree(fcheck);
+ return ret;
+ }
+diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
+index cebb79fe2c72..0d10b7ce0da7 100644
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -54,6 +54,10 @@ static inline void mmdrop(struct mm_struct *mm)
+ * followed by taking the mmap_sem for writing before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
++ * It also has to be called when mmgrab() is used in the context of
++ * the process, but then the mm_count refcount is transferred outside
++ * the context of the process to run down_write() on that pinned mm.
++ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 99c7f199f2d4..12f351b253bb 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
+ unsigned long head;
+
+ again:
++ /*
++ * In order to avoid publishing a head value that goes backwards,
++ * we must ensure the load of @rb->head happens after we've
++ * incremented @rb->nest.
++ *
++ * Otherwise we can observe a @rb->head value before one published
++ * by an IRQ/NMI happening between the load and the increment.
++ */
++ barrier();
+ head = local_read(&rb->head);
+
+ /*
+- * IRQ/NMI can happen here, which means we can miss a head update.
++ * IRQ/NMI can happen here and advance @rb->head, causing our
++ * load above to be stale.
+ */
+
+- if (!local_dec_and_test(&rb->nest))
++ /*
++ * If this isn't the outermost nesting, we don't have to update
++ * @rb->user_page->data_head.
++ */
++ if (local_read(&rb->nest) > 1) {
++ local_dec(&rb->nest);
+ goto out;
++ }
+
+ /*
+ * Since the mmap() consumer (userspace) can run on a different CPU:
+@@ -85,12 +101,21 @@ again:
+ * See perf_output_begin().
+ */
+ smp_wmb(); /* B, matches C */
+- rb->user_page->data_head = head;
++ WRITE_ONCE(rb->user_page->data_head, head);
++
++ /*
++ * We must publish the head before decrementing the nest count,
++ * otherwise an IRQ/NMI can publish a more recent head value and our
++ * write will (temporarily) publish a stale value.
++ */
++ barrier();
++ local_set(&rb->nest, 0);
+
+ /*
+- * Now check if we missed an update -- rely on previous implied
+- * compiler barriers to force a re-read.
++ * Ensure we decrement @rb->nest before we validate the @rb->head.
++ * Otherwise we cannot be sure we caught the 'last' nested update.
+ */
++ barrier();
+ if (unlikely(head != local_read(&rb->head))) {
+ local_inc(&rb->nest);
+ goto again;
+@@ -465,7 +490,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
+ handle->aux_flags);
+ }
+
+- rb->user_page->aux_head = rb->aux_head;
++ WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
+ if (rb_need_aux_wakeup(rb))
+ wakeup = true;
+
+@@ -497,7 +522,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
+
+ rb->aux_head += size;
+
+- rb->user_page->aux_head = rb->aux_head;
++ WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
+ if (rb_need_aux_wakeup(rb)) {
+ perf_output_wakeup(handle);
+ handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index fde5820be24d..ecefdba4b0dd 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -1005,6 +1005,9 @@ static void collapse_huge_page(struct mm_struct *mm,
+ * handled by the anon_vma lock + PG_lock.
+ */
+ down_write(&mm->mmap_sem);
++ result = SCAN_ANY_PROCESS;
++ if (!mmget_still_valid(mm))
++ goto out;
+ result = hugepage_vma_revalidate(mm, address, &vma);
+ if (result)
+ goto out;
+diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
+index 66f74c85cf6b..66d54fc11831 100644
+--- a/net/ax25/ax25_route.c
++++ b/net/ax25/ax25_route.c
+@@ -429,9 +429,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
+ }
+
+ if (ax25->sk != NULL) {
++ local_bh_disable();
+ bh_lock_sock(ax25->sk);
+ sock_reset_flag(ax25->sk, SOCK_ZAPPED);
+ bh_unlock_sock(ax25->sk);
++ local_bh_enable();
+ }
+
+ put:
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 4e4ac77c6816..cd9e991f21d7 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2751,6 +2751,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
+ }
+
+ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
++ __acquires(tbl->lock)
+ __acquires(rcu_bh)
+ {
+ struct neigh_seq_state *state = seq->private;
+@@ -2761,6 +2762,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
+
+ rcu_read_lock_bh();
+ state->nht = rcu_dereference_bh(tbl->nht);
++ read_lock(&tbl->lock);
+
+ return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
+ }
+@@ -2794,8 +2796,13 @@ out:
+ EXPORT_SYMBOL(neigh_seq_next);
+
+ void neigh_seq_stop(struct seq_file *seq, void *v)
++ __releases(tbl->lock)
+ __releases(rcu_bh)
+ {
++ struct neigh_seq_state *state = seq->private;
++ struct neigh_table *tbl = state->tbl;
++
++ read_unlock(&tbl->lock);
+ rcu_read_unlock_bh();
+ }
+ EXPORT_SYMBOL(neigh_seq_stop);
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index be5f3d7ceb96..f994f50e1516 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -254,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
+ rcu_read_lock_bh();
+ for_each_sk_fl_rcu(np, sfl) {
+ struct ip6_flowlabel *fl = sfl->fl;
+- if (fl->label == label) {
++
++ if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
+ fl->lastuse = jiffies;
+- atomic_inc(&fl->users);
+ rcu_read_unlock_bh();
+ return fl;
+ }
+@@ -622,7 +622,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
+ goto done;
+ }
+ fl1 = sfl->fl;
+- atomic_inc(&fl1->users);
++ if (!atomic_inc_not_zero(&fl1->users))
++ fl1 = NULL;
+ break;
+ }
+ }
+diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
+index db6e0afe3a20..1740f852002e 100644
+--- a/net/lapb/lapb_iface.c
++++ b/net/lapb/lapb_iface.c
+@@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev)
+ lapb = __lapb_devtostruct(dev);
+ if (!lapb)
+ goto out;
++ lapb_put(lapb);
+
+ lapb_stop_t1timer(lapb);
+ lapb_stop_t2timer(lapb);
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index a42c1bc7c698..62c0e80dcd71 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -2280,7 +2280,6 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
+ {
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+- nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ ip_vs_service_net_cleanup(ipvs); /* ip_vs_flush() with locks */
+ ip_vs_conn_net_cleanup(ipvs);
+ ip_vs_app_net_cleanup(ipvs);
+@@ -2295,6 +2294,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
+ {
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ EnterFunction(2);
++ nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ ipvs->enable = 0; /* Disable packet reception */
+ smp_wmb();
+ ip_vs_sync_net_cleanup(ipvs);
+diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
+index d67a96a25a68..7569ba00e732 100644
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -238,6 +238,7 @@ static unsigned int nf_iterate(struct sk_buff *skb,
+ repeat:
+ verdict = nf_hook_entry_hookfn(hook, skb, state);
+ if (verdict != NF_ACCEPT) {
++ *index = i;
+ if (verdict != NF_REPEAT)
+ return verdict;
+ goto repeat;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 376181cc1def..9f2875efb4ac 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -922,7 +922,8 @@ static int nfc_genl_deactivate_target(struct sk_buff *skb,
+ u32 device_idx, target_idx;
+ int rc;
+
+- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
++ !info->attrs[NFC_ATTR_TARGET_INDEX])
+ return -EINVAL;
+
+ device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
+index bb95c43aae76..5a304cfc8423 100644
+--- a/net/openvswitch/vport-internal_dev.c
++++ b/net/openvswitch/vport-internal_dev.c
+@@ -169,7 +169,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
+ {
+ struct vport *vport;
+ struct internal_dev *internal_dev;
++ struct net_device *dev;
+ int err;
++ bool free_vport = true;
+
+ vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
+ if (IS_ERR(vport)) {
+@@ -177,8 +179,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
+ goto error;
+ }
+
+- vport->dev = alloc_netdev(sizeof(struct internal_dev),
+- parms->name, NET_NAME_USER, do_setup);
++ dev = alloc_netdev(sizeof(struct internal_dev),
++ parms->name, NET_NAME_USER, do_setup);
++ vport->dev = dev;
+ if (!vport->dev) {
+ err = -ENOMEM;
+ goto error_free_vport;
+@@ -199,8 +202,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
+
+ rtnl_lock();
+ err = register_netdevice(vport->dev);
+- if (err)
++ if (err) {
++ free_vport = false;
+ goto error_unlock;
++ }
+
+ dev_set_promiscuity(vport->dev, 1);
+ rtnl_unlock();
+@@ -210,11 +215,12 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
+
+ error_unlock:
+ rtnl_unlock();
+- free_percpu(vport->dev->tstats);
++ free_percpu(dev->tstats);
+ error_free_netdev:
+- free_netdev(vport->dev);
++ free_netdev(dev);
+ error_free_vport:
+- ovs_vport_free(vport);
++ if (free_vport)
++ ovs_vport_free(vport);
+ error:
+ return ERR_PTR(err);
+ }
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index ae65a1cfa596..fb546b2d67ca 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -2600,6 +2600,8 @@ do_addr_param:
+ case SCTP_PARAM_STATE_COOKIE:
+ asoc->peer.cookie_len =
+ ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
++ if (asoc->peer.cookie)
++ kfree(asoc->peer.cookie);
+ asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
+ if (!asoc->peer.cookie)
+ retval = 0;
+@@ -2664,6 +2666,8 @@ do_addr_param:
+ goto fall_through;
+
+ /* Save peer's random parameter */
++ if (asoc->peer.peer_random)
++ kfree(asoc->peer.peer_random);
+ asoc->peer.peer_random = kmemdup(param.p,
+ ntohs(param.p->length), gfp);
+ if (!asoc->peer.peer_random) {
+@@ -2677,6 +2681,8 @@ do_addr_param:
+ goto fall_through;
+
+ /* Save peer's HMAC list */
++ if (asoc->peer.peer_hmacs)
++ kfree(asoc->peer.peer_hmacs);
+ asoc->peer.peer_hmacs = kmemdup(param.p,
+ ntohs(param.p->length), gfp);
+ if (!asoc->peer.peer_hmacs) {
+@@ -2692,6 +2698,8 @@ do_addr_param:
+ if (!ep->auth_enable)
+ goto fall_through;
+
++ if (asoc->peer.peer_chunks)
++ kfree(asoc->peer.peer_chunks);
+ asoc->peer.peer_chunks = kmemdup(param.p,
+ ntohs(param.p->length), gfp);
+ if (!asoc->peer.peer_chunks)
+diff --git a/net/tipc/group.c b/net/tipc/group.c
+index 06fee142f09f..3ee93b5c19b6 100644
+--- a/net/tipc/group.c
++++ b/net/tipc/group.c
+@@ -218,6 +218,7 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp)
+
+ rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
+ tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq);
++ __skb_queue_purge(&m->deferredq);
+ list_del(&m->list);
+ kfree(m);
+ }
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index f3f3d06cb6d8..e30f53728725 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -871,8 +871,10 @@ virtio_transport_recv_connected(struct sock *sk,
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
+ vsk->peer_shutdown |= SEND_SHUTDOWN;
+ if (vsk->peer_shutdown == SHUTDOWN_MASK &&
+- vsock_stream_has_data(vsk) <= 0)
++ vsock_stream_has_data(vsk) <= 0) {
++ sock_set_flag(sk, SOCK_DONE);
+ sk->sk_state = TCP_CLOSING;
++ }
+ if (le32_to_cpu(pkt->hdr.flags))
+ sk->sk_state_change(sk);
+ break;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 45bf89ed31de..308ce76149cc 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -378,6 +378,7 @@ enum {
+
+ #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+ #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
++#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
+
+ static char *driver_short_names[] = {
+ [AZX_DRIVER_ICH] = "HDA Intel",
+@@ -1795,8 +1796,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
+ else
+ chip->bdl_pos_adj = bdl_pos_adj[dev];
+
+- /* Workaround for a communication error on CFL (bko#199007) */
+- if (IS_CFL(pci))
++ /* Workaround for a communication error on CFL (bko#199007) and CNL */
++ if (IS_CFL(pci) || IS_CNL(pci))
+ chip->polling_mode = 1;
+
+ err = azx_bus_init(chip, model[dev], &pci_hda_io_ops);
+diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
+index 0b2054007314..a19690a17291 100644
+--- a/tools/perf/arch/s390/util/machine.c
++++ b/tools/perf/arch/s390/util/machine.c
+@@ -5,16 +5,19 @@
+ #include "util.h"
+ #include "machine.h"
+ #include "api/fs/fs.h"
++#include "debug.h"
+
+ int arch__fix_module_text_start(u64 *start, const char *name)
+ {
++ u64 m_start = *start;
+ char path[PATH_MAX];
+
+ snprintf(path, PATH_MAX, "module/%.*s/sections/.text",
+ (int)strlen(name) - 2, name + 1);
+-
+- if (sysfs__read_ull(path, (unsigned long long *)start) < 0)
+- return -1;
++ if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
++ pr_debug2("Using module %s start:%#lx\n", path, m_start);
++ *start = m_start;
++ }
+
+ return 0;
+ }
+diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
+index abd38abf1d91..24f2a87cf91d 100644
+--- a/tools/perf/util/data-convert-bt.c
++++ b/tools/perf/util/data-convert-bt.c
+@@ -271,7 +271,7 @@ static int string_set_value(struct bt_ctf_field *field, const char *string)
+ if (i > 0)
+ strncpy(buffer, string, i);
+ }
+- strncat(buffer + p, numstr, 4);
++ memcpy(buffer + p, numstr, 4);
+ p += 3;
+ }
+ }
+diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
+index 2048d393ece6..56007a7e0b4d 100644
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -128,7 +128,7 @@ void thread__put(struct thread *thread)
+ }
+ }
+
+-struct namespaces *thread__namespaces(const struct thread *thread)
++static struct namespaces *__thread__namespaces(const struct thread *thread)
+ {
+ if (list_empty(&thread->namespaces_list))
+ return NULL;
+@@ -136,10 +136,21 @@ struct namespaces *thread__namespaces(const struct thread *thread)
+ return list_first_entry(&thread->namespaces_list, struct namespaces, list);
+ }
+
++struct namespaces *thread__namespaces(const struct thread *thread)
++{
++ struct namespaces *ns;
++
++ down_read((struct rw_semaphore *)&thread->namespaces_lock);
++ ns = __thread__namespaces(thread);
++ up_read((struct rw_semaphore *)&thread->namespaces_lock);
++
++ return ns;
++}
++
+ static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
+ struct namespaces_event *event)
+ {
+- struct namespaces *new, *curr = thread__namespaces(thread);
++ struct namespaces *new, *curr = __thread__namespaces(thread);
+
+ new = namespaces__new(event);
+ if (!new)
+diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
+index 8ec76681605c..f25f72a75cf3 100755
+--- a/tools/testing/selftests/netfilter/nft_nat.sh
++++ b/tools/testing/selftests/netfilter/nft_nat.sh
+@@ -23,7 +23,11 @@ ip netns add ns0
+ ip netns add ns1
+ ip netns add ns2
+
+-ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
++ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1
++if [ $? -ne 0 ];then
++ echo "SKIP: No virtual ethernet pair device support in kernel"
++ exit $ksft_skip
++fi
+ ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+
+ ip -net ns0 link set lo up
diff --git a/1054_linux-4.19.55.patch b/1054_linux-4.19.55.patch
new file mode 100644
index 0000000..ee0b997
--- /dev/null
+++ b/1054_linux-4.19.55.patch
@@ -0,0 +1,27 @@
+diff --git a/Makefile b/Makefile
+index b234837e4d07..3addd4c286fa 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 54
++SUBLEVEL = 55
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 147ed82b73d3..221d9b72423b 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1299,7 +1299,8 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
+ if (nsize < 0)
+ nsize = 0;
+
+- if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) {
++ if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf &&
++ tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+ return -ENOMEM;
+ }