From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 6 Jun 2018 18:04:10 +0000 (UTC)
Message-ID: <1528308243.cdf8ca221eb77267c919d87bc783e83c7ed6a1eb.mpagano@gentoo>
commit: cdf8ca221eb77267c919d87bc783e83c7ed6a1eb
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 6 18:04:03 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 6 18:04:03 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cdf8ca22
Linux patch 4.9.107
0000_README | 4 +
1106_linux-4.9.107.patch | 3059 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3063 insertions(+)
diff --git a/0000_README b/0000_README
index d5b0351..5798d3a 100644
--- a/0000_README
+++ b/0000_README
@@ -467,6 +467,10 @@ Patch: 1105_linux-4.9.106.patch
From: http://www.kernel.org
Desc: Linux 4.9.106
+Patch: 1106_linux-4.9.107.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.107
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1106_linux-4.9.107.patch b/1106_linux-4.9.107.patch
new file mode 100644
index 0000000..50f9eb8
--- /dev/null
+++ b/1106_linux-4.9.107.patch
@@ -0,0 +1,3059 @@
+diff --git a/Makefile b/Makefile
+index 48d87e3a36c1..ac30e448e0a5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 106
++SUBLEVEL = 107
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
+index 7457ce082b5f..d32a0160c89f 100644
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
+ /* LSE atomics */
+ " mvn %w[i], %w[i]\n"
+ " stclr %w[i], %[v]")
+- : [i] "+r" (w0), [v] "+Q" (v->counter)
++ : [i] "+&r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : __LL_SC_CLOBBERS);
+ }
+@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
+ /* LSE atomics */ \
+ " mvn %w[i], %w[i]\n" \
+ " ldclr" #mb " %w[i], %w[i], %[v]") \
+- : [i] "+r" (w0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
+ /* LSE atomics */
+ " neg %w[i], %w[i]\n"
+ " stadd %w[i], %[v]")
+- : [i] "+r" (w0), [v] "+Q" (v->counter)
++ : [i] "+&r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : __LL_SC_CLOBBERS);
+ }
+@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
+ " neg %w[i], %w[i]\n" \
+ " ldadd" #mb " %w[i], w30, %[v]\n" \
+ " add %w[i], %w[i], w30") \
+- : [i] "+r" (w0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS , ##cl); \
+ \
+@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
+ /* LSE atomics */ \
+ " neg %w[i], %w[i]\n" \
+ " ldadd" #mb " %w[i], %w[i], %[v]") \
+- : [i] "+r" (w0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (w0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
+ /* LSE atomics */
+ " mvn %[i], %[i]\n"
+ " stclr %[i], %[v]")
+- : [i] "+r" (x0), [v] "+Q" (v->counter)
++ : [i] "+&r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : __LL_SC_CLOBBERS);
+ }
+@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
+ /* LSE atomics */ \
+ " mvn %[i], %[i]\n" \
+ " ldclr" #mb " %[i], %[i], %[v]") \
+- : [i] "+r" (x0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
+ /* LSE atomics */
+ " neg %[i], %[i]\n"
+ " stadd %[i], %[v]")
+- : [i] "+r" (x0), [v] "+Q" (v->counter)
++ : [i] "+&r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : __LL_SC_CLOBBERS);
+ }
+@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
+ " neg %[i], %[i]\n" \
+ " ldadd" #mb " %[i], x30, %[v]\n" \
+ " add %[i], %[i], x30") \
+- : [i] "+r" (x0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
+ /* LSE atomics */ \
+ " neg %[i], %[i]\n" \
+ " ldadd" #mb " %[i], %[i], %[v]") \
+- : [i] "+r" (x0), [v] "+Q" (v->counter) \
++ : [i] "+&r" (x0), [v] "+Q" (v->counter) \
+ : "r" (x1) \
+ : __LL_SC_CLOBBERS, ##cl); \
+ \
+@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
+ " eor %[old1], %[old1], %[oldval1]\n" \
+ " eor %[old2], %[old2], %[oldval2]\n" \
+ " orr %[old1], %[old1], %[old2]") \
+- : [old1] "+r" (x0), [old2] "+r" (x1), \
++ : [old1] "+&r" (x0), [old2] "+&r" (x1), \
+ [v] "+Q" (*(unsigned long *)ptr) \
+ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
+ [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index 0bc0b1de90c4..4ea85ebdf4df 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -9,8 +9,6 @@
+ #ifndef __ASM_CPUFEATURE_H
+ #define __ASM_CPUFEATURE_H
+
+-#include <linux/jump_label.h>
+-
+ #include <asm/cpucaps.h>
+ #include <asm/hwcap.h>
+ #include <asm/sysreg.h>
+@@ -27,6 +25,8 @@
+
+ #ifndef __ASSEMBLY__
+
++#include <linux/bug.h>
++#include <linux/jump_label.h>
+ #include <linux/kernel.h>
+
+ /* CPU feature register tracking */
+@@ -96,6 +96,7 @@ struct arm64_cpu_capabilities {
+
+ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+ extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
++extern struct static_key_false arm64_const_caps_ready;
+
+ bool this_cpu_has_cap(unsigned int cap);
+
+@@ -104,14 +105,27 @@ static inline bool cpu_have_feature(unsigned int num)
+ return elf_hwcap & (1UL << num);
+ }
+
++/* System capability check for constant caps */
++static inline bool __cpus_have_const_cap(int num)
++{
++ if (num >= ARM64_NCAPS)
++ return false;
++ return static_branch_unlikely(&cpu_hwcap_keys[num]);
++}
++
+ static inline bool cpus_have_cap(unsigned int num)
+ {
+ if (num >= ARM64_NCAPS)
+ return false;
+- if (__builtin_constant_p(num))
+- return static_branch_unlikely(&cpu_hwcap_keys[num]);
++ return test_bit(num, cpu_hwcaps);
++}
++
++static inline bool cpus_have_const_cap(int num)
++{
++ if (static_branch_likely(&arm64_const_caps_ready))
++ return __cpus_have_const_cap(num);
+ else
+- return test_bit(num, cpu_hwcaps);
++ return cpus_have_cap(num);
+ }
+
+ static inline void cpus_set_cap(unsigned int num)
+@@ -121,7 +135,6 @@ static inline void cpus_set_cap(unsigned int num)
+ num, ARM64_NCAPS);
+ } else {
+ __set_bit(num, cpu_hwcaps);
+- static_branch_enable(&cpu_hwcap_keys[num]);
+ }
+ }
+
+@@ -200,7 +213,7 @@ static inline bool cpu_supports_mixed_endian_el0(void)
+
+ static inline bool system_supports_32bit_el0(void)
+ {
+- return cpus_have_cap(ARM64_HAS_32BIT_EL0);
++ return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
+ }
+
+ static inline bool system_supports_mixed_endian_el0(void)
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 0a33ea304e63..2abb4493f4f6 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -24,6 +24,7 @@
+
+ #include <linux/types.h>
+ #include <linux/kvm_types.h>
++#include <asm/cpufeature.h>
+ #include <asm/kvm.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_mmio.h>
+@@ -358,9 +359,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
+ unsigned long vector_ptr)
+ {
+ /*
+- * Call initialization code, and switch to the full blown
+- * HYP code.
++ * Call initialization code, and switch to the full blown HYP code.
++ * If the cpucaps haven't been finalized yet, something has gone very
++ * wrong, and hyp will crash and burn when it uses any
++ * cpus_have_const_cap() wrapper.
+ */
++ BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+ __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
+ }
+
+@@ -398,7 +402,7 @@ static inline void __cpu_init_stage2(void)
+
+ static inline bool kvm_arm_harden_branch_predictor(void)
+ {
+- return cpus_have_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
++ return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+ }
+
+ #endif /* __ARM64_KVM_HOST_H__ */
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index eac73a640ea7..824c83db9b47 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -341,7 +341,7 @@ static inline void *kvm_get_hyp_vector(void)
+ vect = __bp_harden_hyp_vecs_start +
+ data->hyp_vectors_slot * SZ_2K;
+
+- if (!cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
++ if (!cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ vect = lm_alias(vect);
+ }
+
+diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
+index d51158a61892..6ac34c75f4e1 100644
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -37,7 +37,7 @@ typedef struct {
+ static inline bool arm64_kernel_unmapped_at_el0(void)
+ {
+ return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+- cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0);
++ cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+ }
+
+ typedef void (*bp_hardening_cb_t)(void);
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index a0ee01202503..7959d2c92010 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
+ #endif
+
+ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
++EXPORT_SYMBOL(cpu_hwcaps);
+
+ DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
+ EXPORT_SYMBOL(cpu_hwcap_keys);
+@@ -762,7 +763,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ * ThunderX leads to apparent I-cache corruption of kernel text, which
+ * ends as well as you might imagine. Don't even try.
+ */
+- if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ str = "ARM64_WORKAROUND_CAVIUM_27456";
+ __kpti_forced = -1;
+ }
+@@ -1051,8 +1052,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+ */
+ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+ {
+- for (; caps->matches; caps++)
+- if (caps->enable && cpus_have_cap(caps->capability))
++ for (; caps->matches; caps++) {
++ unsigned int num = caps->capability;
++
++ if (!cpus_have_cap(num))
++ continue;
++
++ /* Ensure cpus_have_const_cap(num) works */
++ static_branch_enable(&cpu_hwcap_keys[num]);
++
++ if (caps->enable) {
+ /*
+ * Use stop_machine() as it schedules the work allowing
+ * us to modify PSTATE, instead of on_each_cpu() which
+@@ -1060,6 +1069,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+ * we return.
+ */
+ stop_machine(caps->enable, (void *)caps, cpu_online_mask);
++ }
++ }
+ }
+
+ /*
+@@ -1163,6 +1174,14 @@ static void __init setup_feature_capabilities(void)
+ enable_cpu_capabilities(arm64_features);
+ }
+
++DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
++EXPORT_SYMBOL(arm64_const_caps_ready);
++
++static void __init mark_const_caps_ready(void)
++{
++ static_branch_enable(&arm64_const_caps_ready);
++}
++
+ extern const struct arm64_cpu_capabilities arm64_errata[];
+
+ bool this_cpu_has_cap(unsigned int cap)
+@@ -1179,6 +1198,7 @@ void __init setup_cpu_features(void)
+ /* Set the CPU feature capabilies */
+ setup_feature_capabilities();
+ enable_errata_workarounds();
++ mark_const_caps_ready();
+ setup_elf_hwcaps(arm64_elf_hwcaps);
+
+ if (system_supports_32bit_el0())
+@@ -1203,5 +1223,5 @@ void __init setup_cpu_features(void)
+ static bool __maybe_unused
+ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
+ {
+- return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
++ return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
+ }
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 0972ce58316d..e917d119490c 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -291,7 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childregs->pstate = PSR_MODE_EL1h;
+ if (IS_ENABLED(CONFIG_ARM64_UAO) &&
+- cpus_have_cap(ARM64_HAS_UAO))
++ cpus_have_const_cap(ARM64_HAS_UAO))
+ childregs->pstate |= PSR_UAO_BIT;
+ p->thread.cpu_context.x19 = stack_start;
+ p->thread.cpu_context.x20 = stk_sz;
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 6e716a5f1173..ebb575c4231b 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -699,6 +699,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+ if (value & ~known_bits)
+ return -EOPNOTSUPP;
+
++ /* Setting FRE without FR is not supported. */
++ if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
++ return -EOPNOTSUPP;
++
+ /* Avoid inadvertently triggering emulation */
+ if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+ !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 8f7bf74d1c0b..4f64913b4b4c 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -838,7 +838,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ break;
+ }
+ #endif
+- tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
++ tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
+ break;
+ case PC:
+ tmp = regs->cp0_epc;
+diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
+index bc9afbabbe14..b1e945738138 100644
+--- a/arch/mips/kernel/ptrace32.c
++++ b/arch/mips/kernel/ptrace32.c
+@@ -107,7 +107,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ addr & 1);
+ break;
+ }
+- tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
++ tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
+ break;
+ case PC:
+ tmp = regs->cp0_epc;
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index 903e76a9f158..e2200100828d 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -51,6 +51,27 @@
+ #define EX_PPR 88 /* SMT thread status register (priority) */
+ #define EX_CTR 96
+
++#define STF_ENTRY_BARRIER_SLOT \
++ STF_ENTRY_BARRIER_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop
++
++#define STF_EXIT_BARRIER_SLOT \
++ STF_EXIT_BARRIER_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop; \
++ nop; \
++ nop; \
++ nop
++
++/*
++ * r10 must be free to use, r13 must be paca
++ */
++#define INTERRUPT_TO_KERNEL \
++ STF_ENTRY_BARRIER_SLOT
++
+ /*
+ * Macros for annotating the expected destination of (h)rfid
+ *
+@@ -67,16 +88,19 @@
+ rfid
+
+ #define RFI_TO_USER \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+ #define RFI_TO_USER_OR_KERNEL \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+
+ #define RFI_TO_GUEST \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ rfid; \
+ b rfi_flush_fallback
+@@ -85,21 +109,25 @@
+ hrfid
+
+ #define HRFI_TO_USER \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_USER_OR_KERNEL \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_GUEST \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+
+ #define HRFI_TO_UNKNOWN \
++ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ hrfid; \
+ b hrfi_flush_fallback
+@@ -225,6 +253,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+ #define __EXCEPTION_PROLOG_1(area, extra, vec) \
+ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
+ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
++ INTERRUPT_TO_KERNEL; \
+ SAVE_CTR(r10, area); \
+ mfcr r9; \
+ extra(vec); \
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index 7b332342071c..0bf8202feca6 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -189,6 +189,22 @@ void apply_feature_fixups(void);
+ void setup_feature_keys(void);
+ #endif
+
++#define STF_ENTRY_BARRIER_FIXUP_SECTION \
++953: \
++ .pushsection __stf_entry_barrier_fixup,"a"; \
++ .align 2; \
++954: \
++ FTR_ENTRY_OFFSET 953b-954b; \
++ .popsection;
++
++#define STF_EXIT_BARRIER_FIXUP_SECTION \
++955: \
++ .pushsection __stf_exit_barrier_fixup,"a"; \
++ .align 2; \
++956: \
++ FTR_ENTRY_OFFSET 955b-956b; \
++ .popsection;
++
+ #define RFI_FLUSH_FIXUP_SECTION \
+ 951: \
+ .pushsection __rfi_flush_fixup,"a"; \
+@@ -200,6 +216,9 @@ void setup_feature_keys(void);
+
+ #ifndef __ASSEMBLY__
+
++extern long stf_barrier_fallback;
++extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
++extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+
+ #endif
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index dc0996b9d75d..9d978102bf0d 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -313,6 +313,9 @@
+ #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
+ #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
+ #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
++#define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
++#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
++#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
+
+ #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
+ #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+new file mode 100644
+index 000000000000..44989b22383c
+--- /dev/null
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -0,0 +1,85 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Security related feature bit definitions.
++ *
++ * Copyright 2018, Michael Ellerman, IBM Corporation.
++ */
++
++#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
++#define _ASM_POWERPC_SECURITY_FEATURES_H
++
++
++extern unsigned long powerpc_security_features;
++extern bool rfi_flush;
++
++/* These are bit flags */
++enum stf_barrier_type {
++ STF_BARRIER_NONE = 0x1,
++ STF_BARRIER_FALLBACK = 0x2,
++ STF_BARRIER_EIEIO = 0x4,
++ STF_BARRIER_SYNC_ORI = 0x8,
++};
++
++void setup_stf_barrier(void);
++void do_stf_barrier_fixups(enum stf_barrier_type types);
++
++static inline void security_ftr_set(unsigned long feature)
++{
++ powerpc_security_features |= feature;
++}
++
++static inline void security_ftr_clear(unsigned long feature)
++{
++ powerpc_security_features &= ~feature;
++}
++
++static inline bool security_ftr_enabled(unsigned long feature)
++{
++ return !!(powerpc_security_features & feature);
++}
++
++
++// Features indicating support for Spectre/Meltdown mitigations
++
++// The L1-D cache can be flushed with ori r30,r30,0
++#define SEC_FTR_L1D_FLUSH_ORI30 0x0000000000000001ull
++
++// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
++#define SEC_FTR_L1D_FLUSH_TRIG2 0x0000000000000002ull
++
++// ori r31,r31,0 acts as a speculation barrier
++#define SEC_FTR_SPEC_BAR_ORI31 0x0000000000000004ull
++
++// Speculation past bctr is disabled
++#define SEC_FTR_BCCTRL_SERIALISED 0x0000000000000008ull
++
++// Entries in L1-D are private to a SMT thread
++#define SEC_FTR_L1D_THREAD_PRIV 0x0000000000000010ull
++
++// Indirect branch prediction cache disabled
++#define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
++
++
++// Features indicating need for Spectre/Meltdown mitigations
++
++// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
++#define SEC_FTR_L1D_FLUSH_HV 0x0000000000000040ull
++
++// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
++#define SEC_FTR_L1D_FLUSH_PR 0x0000000000000080ull
++
++// A speculation barrier should be used for bounds checks (Spectre variant 1)
++#define SEC_FTR_BNDS_CHK_SPEC_BAR 0x0000000000000100ull
++
++// Firmware configuration indicates user favours security over performance
++#define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
++
++
++// Features enabled by default
++#define SEC_FTR_DEFAULT \
++ (SEC_FTR_L1D_FLUSH_HV | \
++ SEC_FTR_L1D_FLUSH_PR | \
++ SEC_FTR_BNDS_CHK_SPEC_BAR | \
++ SEC_FTR_FAVOUR_SECURITY)
++
++#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index 6825a67cc3db..3f160cd20107 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -48,7 +48,7 @@ enum l1d_flush_type {
+ L1D_FLUSH_MTTRIG = 0x8,
+ };
+
+-void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
++void setup_rfi_flush(enum l1d_flush_type, bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index adb52d101133..13885786282b 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -44,7 +44,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
+ obj-$(CONFIG_VDSO32) += vdso32/
+ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
+-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
++obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
+ obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
+ obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
+ obj-$(CONFIG_PPC64) += vdso64/
+diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
+index 9e05c8828ee2..ff45d007d195 100644
+--- a/arch/powerpc/kernel/cpu_setup_power.S
++++ b/arch/powerpc/kernel/cpu_setup_power.S
+@@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7)
+ beqlr
+ li r0,0
+ mtspr SPRN_LPID,r0
++ mtspr SPRN_PCR,r0
+ mfspr r3,SPRN_LPCR
+ bl __init_LPCR
+ bl __init_tlb_power7
+@@ -41,6 +42,7 @@ _GLOBAL(__restore_cpu_power7)
+ beqlr
+ li r0,0
+ mtspr SPRN_LPID,r0
++ mtspr SPRN_PCR,r0
+ mfspr r3,SPRN_LPCR
+ bl __init_LPCR
+ bl __init_tlb_power7
+@@ -57,6 +59,7 @@ _GLOBAL(__setup_cpu_power8)
+ beqlr
+ li r0,0
+ mtspr SPRN_LPID,r0
++ mtspr SPRN_PCR,r0
+ mfspr r3,SPRN_LPCR
+ ori r3, r3, LPCR_PECEDH
+ bl __init_LPCR
+@@ -78,6 +81,7 @@ _GLOBAL(__restore_cpu_power8)
+ beqlr
+ li r0,0
+ mtspr SPRN_LPID,r0
++ mtspr SPRN_PCR,r0
+ mfspr r3,SPRN_LPCR
+ ori r3, r3, LPCR_PECEDH
+ bl __init_LPCR
+@@ -98,6 +102,7 @@ _GLOBAL(__setup_cpu_power9)
+ li r0,0
+ mtspr SPRN_LPID,r0
+ mtspr SPRN_PID,r0
++ mtspr SPRN_PCR,r0
+ mfspr r3,SPRN_LPCR
+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+ or r3, r3, r4
+@@ -121,6 +126,7 @@ _GLOBAL(__restore_cpu_power9)
+ li r0,0
+ mtspr SPRN_LPID,r0
+ mtspr SPRN_PID,r0
++ mtspr SPRN_PCR,r0
+ mfspr r3,SPRN_LPCR
+ LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+ or r3, r3, r4
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 94b5dfb087e9..d50cc9b38b80 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -846,7 +846,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+
+
+-EXC_REAL_MASKABLE(decrementer, 0x900, 0x980)
++EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x980)
+ EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900)
+ TRAMP_KVM(PACA_EXGEN, 0x900)
+ EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+@@ -884,6 +884,7 @@ BEGIN_FTR_SECTION \
+ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
+ mr r9,r13 ; \
+ GET_PACA(r13) ; \
++ INTERRUPT_TO_KERNEL ; \
+ mfspr r11,SPRN_SRR0 ; \
+ 0:
+
+@@ -1353,6 +1354,19 @@ masked_##_H##interrupt: \
+ ##_H##RFI_TO_KERNEL; \
+ b .
+
++TRAMP_REAL_BEGIN(stf_barrier_fallback)
++ std r9,PACA_EXRFI+EX_R9(r13)
++ std r10,PACA_EXRFI+EX_R10(r13)
++ sync
++ ld r9,PACA_EXRFI+EX_R9(r13)
++ ld r10,PACA_EXRFI+EX_R10(r13)
++ ori 31,31,0
++ .rept 14
++ b 1f
++1:
++ .endr
++ blr
++
+ /*
+ * Real mode exceptions actually use this too, but alternate
+ * instruction code patches (which end up in the common .text area)
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+new file mode 100644
+index 000000000000..2277df84ef6e
+--- /dev/null
++++ b/arch/powerpc/kernel/security.c
+@@ -0,0 +1,237 @@
++// SPDX-License-Identifier: GPL-2.0+
++//
++// Security related flags and so on.
++//
++// Copyright 2018, Michael Ellerman, IBM Corporation.
++
++#include <linux/kernel.h>
++#include <linux/debugfs.h>
++#include <linux/device.h>
++#include <linux/seq_buf.h>
++
++#include <asm/security_features.h>
++
++
++unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
++
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ bool thread_priv;
++
++ thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (rfi_flush || thread_priv) {
++ struct seq_buf s;
++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++ seq_buf_printf(&s, "Mitigation: ");
++
++ if (rfi_flush)
++ seq_buf_printf(&s, "RFI Flush");
++
++ if (rfi_flush && thread_priv)
++ seq_buf_printf(&s, ", ");
++
++ if (thread_priv)
++ seq_buf_printf(&s, "L1D private per thread");
++
++ seq_buf_printf(&s, "\n");
++
++ return s.len;
++ }
++
++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
++ return sprintf(buf, "Not affected\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
++ return sprintf(buf, "Not affected\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ bool bcs, ccd, ori;
++ struct seq_buf s;
++
++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++ bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
++ ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
++ ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (bcs || ccd) {
++ seq_buf_printf(&s, "Mitigation: ");
++
++ if (bcs)
++ seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
++
++ if (bcs && ccd)
++ seq_buf_printf(&s, ", ");
++
++ if (ccd)
++ seq_buf_printf(&s, "Indirect branch cache disabled");
++ } else
++ seq_buf_printf(&s, "Vulnerable");
++
++ if (ori)
++ seq_buf_printf(&s, ", ori31 speculation barrier enabled");
++
++ seq_buf_printf(&s, "\n");
++
++ return s.len;
++}
++
++/*
++ * Store-forwarding barrier support.
++ */
++
++static enum stf_barrier_type stf_enabled_flush_types;
++static bool no_stf_barrier;
++bool stf_barrier;
++
++static int __init handle_no_stf_barrier(char *p)
++{
++ pr_info("stf-barrier: disabled on command line.");
++ no_stf_barrier = true;
++ return 0;
++}
++
++early_param("no_stf_barrier", handle_no_stf_barrier);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_ssbd(char *p)
++{
++ if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
++ /* Until firmware tells us, we have the barrier with auto */
++ return 0;
++ } else if (strncmp(p, "off", 3) == 0) {
++ handle_no_stf_barrier(NULL);
++ return 0;
++ } else
++ return 1;
++
++ return 0;
++}
++early_param("spec_store_bypass_disable", handle_ssbd);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_no_ssbd(char *p)
++{
++ handle_no_stf_barrier(NULL);
++ return 0;
++}
++early_param("nospec_store_bypass_disable", handle_no_ssbd);
++
++static void stf_barrier_enable(bool enable)
++{
++ if (enable)
++ do_stf_barrier_fixups(stf_enabled_flush_types);
++ else
++ do_stf_barrier_fixups(STF_BARRIER_NONE);
++
++ stf_barrier = enable;
++}
++
++void setup_stf_barrier(void)
++{
++ enum stf_barrier_type type;
++ bool enable, hv;
++
++ hv = cpu_has_feature(CPU_FTR_HVMODE);
++
++ /* Default to fallback in case fw-features are not available */
++ if (cpu_has_feature(CPU_FTR_ARCH_300))
++ type = STF_BARRIER_EIEIO;
++ else if (cpu_has_feature(CPU_FTR_ARCH_207S))
++ type = STF_BARRIER_SYNC_ORI;
++ else if (cpu_has_feature(CPU_FTR_ARCH_206))
++ type = STF_BARRIER_FALLBACK;
++ else
++ type = STF_BARRIER_NONE;
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
++
++ if (type == STF_BARRIER_FALLBACK) {
++ pr_info("stf-barrier: fallback barrier available\n");
++ } else if (type == STF_BARRIER_SYNC_ORI) {
++ pr_info("stf-barrier: hwsync barrier available\n");
++ } else if (type == STF_BARRIER_EIEIO) {
++ pr_info("stf-barrier: eieio barrier available\n");
++ }
++
++ stf_enabled_flush_types = type;
++
++ if (!no_stf_barrier)
++ stf_barrier_enable(enable);
++}
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
++ const char *type;
++ switch (stf_enabled_flush_types) {
++ case STF_BARRIER_EIEIO:
++ type = "eieio";
++ break;
++ case STF_BARRIER_SYNC_ORI:
++ type = "hwsync";
++ break;
++ case STF_BARRIER_FALLBACK:
++ type = "fallback";
++ break;
++ default:
++ type = "unknown";
++ }
++ return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
++ }
++
++ if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
++ !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
++ return sprintf(buf, "Not affected\n");
++
++ return sprintf(buf, "Vulnerable\n");
++}
++
++#ifdef CONFIG_DEBUG_FS
++static int stf_barrier_set(void *data, u64 val)
++{
++ bool enable;
++
++ if (val == 1)
++ enable = true;
++ else if (val == 0)
++ enable = false;
++ else
++ return -EINVAL;
++
++ /* Only do anything if we're changing state */
++ if (enable != stf_barrier)
++ stf_barrier_enable(enable);
++
++ return 0;
++}
++
++static int stf_barrier_get(void *data, u64 *val)
++{
++ *val = stf_barrier ? 1 : 0;
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
++
++static __init int stf_barrier_debugfs_init(void)
++{
++ debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
++ return 0;
++}
++device_initcall(stf_barrier_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 5243501d95ef..fdba10695208 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -679,6 +679,7 @@ static int __init disable_hardlockup_detector(void)
+ return 0;
+ }
+ early_initcall(disable_hardlockup_detector);
++#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+
+ #ifdef CONFIG_PPC_BOOK3S_64
+ static enum l1d_flush_type enabled_flush_types;
+@@ -716,9 +717,6 @@ static void do_nothing(void *unused)
+
+ void rfi_flush_enable(bool enable)
+ {
+- if (rfi_flush == enable)
+- return;
+-
+ if (enable) {
+ do_rfi_flush_fixups(enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+@@ -728,11 +726,15 @@ void rfi_flush_enable(bool enable)
+ rfi_flush = enable;
+ }
+
+-static void init_fallback_flush(void)
++static void __ref init_fallback_flush(void)
+ {
+ u64 l1d_size, limit;
+ int cpu;
+
++ /* Only allocate the fallback flush area once (at boot time). */
++ if (l1d_flush_fallback_area)
++ return;
++
+ l1d_size = ppc64_caches.dsize;
+ limit = min(safe_stack_limit(), ppc64_rma_size);
+
+@@ -750,18 +752,18 @@ static void init_fallback_flush(void)
+ }
+ }
+
+-void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
++void setup_rfi_flush(enum l1d_flush_type types, bool enable)
+ {
+ if (types & L1D_FLUSH_FALLBACK) {
+- pr_info("rfi-flush: Using fallback displacement flush\n");
++ pr_info("rfi-flush: fallback displacement flush available\n");
+ init_fallback_flush();
+ }
+
+ if (types & L1D_FLUSH_ORI)
+- pr_info("rfi-flush: Using ori type flush\n");
++ pr_info("rfi-flush: ori type flush available\n");
+
+ if (types & L1D_FLUSH_MTTRIG)
+- pr_info("rfi-flush: Using mttrig type flush\n");
++ pr_info("rfi-flush: mttrig type flush available\n");
+
+ enabled_flush_types = types;
+
+@@ -772,13 +774,19 @@ void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
+ #ifdef CONFIG_DEBUG_FS
+ static int rfi_flush_set(void *data, u64 val)
+ {
++ bool enable;
++
+ if (val == 1)
+- rfi_flush_enable(true);
++ enable = true;
+ else if (val == 0)
+- rfi_flush_enable(false);
++ enable = false;
+ else
+ return -EINVAL;
+
++ /* Only do anything if we're changing state */
++ if (enable != rfi_flush)
++ rfi_flush_enable(enable);
++
+ return 0;
+ }
+
+@@ -797,13 +805,4 @@ static __init int rfi_flush_debugfs_init(void)
+ }
+ device_initcall(rfi_flush_debugfs_init);
+ #endif
+-
+-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- if (rfi_flush)
+- return sprintf(buf, "Mitigation: RFI Flush\n");
+-
+- return sprintf(buf, "Vulnerable\n");
+-}
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+-#endif
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index b61fb7902018..c16fddbb6ab8 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -133,6 +133,20 @@ SECTIONS
+ RODATA
+
+ #ifdef CONFIG_PPC64
++ . = ALIGN(8);
++ __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
++ __start___stf_entry_barrier_fixup = .;
++ *(__stf_entry_barrier_fixup)
++ __stop___stf_entry_barrier_fixup = .;
++ }
++
++ . = ALIGN(8);
++ __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
++ __start___stf_exit_barrier_fixup = .;
++ *(__stf_exit_barrier_fixup)
++ __stop___stf_exit_barrier_fixup = .;
++ }
++
+ . = ALIGN(8);
+ __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
+ __start___rfi_flush_fixup = .;
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 46c8338a61bc..cf1398e3c2e0 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -22,6 +22,7 @@
+ #include <asm/page.h>
+ #include <asm/sections.h>
+ #include <asm/setup.h>
++#include <asm/security_features.h>
+ #include <asm/firmware.h>
+ #include <asm/setup.h>
+
+@@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ }
+
+ #ifdef CONFIG_PPC_BOOK3S_64
++void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
++{
++ unsigned int instrs[3], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___stf_entry_barrier_fixup),
++ end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types & STF_BARRIER_FALLBACK) {
++ instrs[i++] = 0x7d4802a6; /* mflr r10 */
++ instrs[i++] = 0x60000000; /* branch patched below */
++ instrs[i++] = 0x7d4803a6; /* mtlr r10 */
++ } else if (types & STF_BARRIER_EIEIO) {
++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++ } else if (types & STF_BARRIER_SYNC_ORI) {
++ instrs[i++] = 0x7c0004ac; /* hwsync */
++ instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++
++ if (types & STF_BARRIER_FALLBACK)
++ patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
++ BRANCH_SET_LINK);
++ else
++ patch_instruction(dest + 1, instrs[1]);
++
++ patch_instruction(dest + 2, instrs[2]);
++ }
++
++ printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
++ (types == STF_BARRIER_NONE) ? "no" :
++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
++ (types == STF_BARRIER_EIEIO) ? "eieio" :
++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
++ : "unknown");
++}
++
++void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
++{
++ unsigned int instrs[6], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___stf_exit_barrier_fixup),
++ end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++ instrs[3] = 0x60000000; /* nop */
++ instrs[4] = 0x60000000; /* nop */
++ instrs[5] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
++ instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
++ instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
++ } else {
++ instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
++ instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
++ }
++ instrs[i++] = 0x7c0004ac; /* hwsync */
++ instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ if (cpu_has_feature(CPU_FTR_HVMODE)) {
++ instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
++ } else {
++ instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
++ }
++ } else if (types & STF_BARRIER_EIEIO) {
++ instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++ patch_instruction(dest + 1, instrs[1]);
++ patch_instruction(dest + 2, instrs[2]);
++ patch_instruction(dest + 3, instrs[3]);
++ patch_instruction(dest + 4, instrs[4]);
++ patch_instruction(dest + 5, instrs[5]);
++ }
++ printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
++ (types == STF_BARRIER_NONE) ? "no" :
++ (types == STF_BARRIER_FALLBACK) ? "fallback" :
++ (types == STF_BARRIER_EIEIO) ? "eieio" :
++ (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
++ : "unknown");
++}
++
++
++void do_stf_barrier_fixups(enum stf_barrier_type types)
++{
++ do_stf_entry_barrier_fixups(types);
++ do_stf_exit_barrier_fixups(types);
++}
++
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ {
+ unsigned int instrs[3], *dest;
+@@ -153,7 +268,14 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ patch_instruction(dest + 2, instrs[2]);
+ }
+
+- printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
++ printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
++ (types == L1D_FLUSH_NONE) ? "no" :
++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
++ ? "ori+mttrig type"
++ : "ori type" :
++ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
++ : "unknown");
+ }
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 6f8b4c19373a..17203abf38e8 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -37,53 +37,92 @@
+ #include <asm/smp.h>
+ #include <asm/tm.h>
+ #include <asm/setup.h>
++#include <asm/security_features.h>
+
+ #include "powernv.h"
+
++
++static bool fw_feature_is(const char *state, const char *name,
++ struct device_node *fw_features)
++{
++ struct device_node *np;
++ bool rc = false;
++
++ np = of_get_child_by_name(fw_features, name);
++ if (np) {
++ rc = of_property_read_bool(np, state);
++ of_node_put(np);
++ }
++
++ return rc;
++}
++
++static void init_fw_feat_flags(struct device_node *np)
++{
++ if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
++
++ if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
++
++ if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
++
++ if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
++
++ /*
++ * The features below are enabled by default, so we instead look to see
++ * if firmware has *disabled* them, and clear them if so.
++ */
++ if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
++
++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
++
++ if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
++
++ if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
++}
++
+ static void pnv_setup_rfi_flush(void)
+ {
+ struct device_node *np, *fw_features;
+ enum l1d_flush_type type;
+- int enable;
++ bool enable;
+
+ /* Default to fallback in case fw-features are not available */
+ type = L1D_FLUSH_FALLBACK;
+- enable = 1;
+
+ np = of_find_node_by_name(NULL, "ibm,opal");
+ fw_features = of_get_child_by_name(np, "fw-features");
+ of_node_put(np);
+
+ if (fw_features) {
+- np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+- if (np && of_property_read_bool(np, "enabled"))
+- type = L1D_FLUSH_MTTRIG;
++ init_fw_feat_flags(fw_features);
++ of_node_put(fw_features);
+
+- of_node_put(np);
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
++ type = L1D_FLUSH_MTTRIG;
+
+- np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
+- if (np && of_property_read_bool(np, "enabled"))
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
+ type = L1D_FLUSH_ORI;
+-
+- of_node_put(np);
+-
+- /* Enable unless firmware says NOT to */
+- enable = 2;
+- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable--;
+-
+- of_node_put(np);
+-
+- np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
+- if (np && of_property_read_bool(np, "disabled"))
+- enable--;
+-
+- of_node_put(np);
+- of_node_put(fw_features);
+ }
+
+- setup_rfi_flush(type, enable > 0);
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
++ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
++
++ setup_rfi_flush(type, enable);
+ }
+
+ static void __init pnv_setup_arch(void)
+@@ -91,6 +130,7 @@ static void __init pnv_setup_arch(void)
+ set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+
+ pnv_setup_rfi_flush();
++ setup_stf_barrier();
+
+ /* Initialize SMP */
+ pnv_smp_init();
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index 6a5e7467445c..3784a7abfcc8 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -314,6 +314,9 @@ void post_mobility_fixup(void)
+ printk(KERN_ERR "Post-mobility device tree update "
+ "failed: %d\n", rc);
+
++ /* Possibly switch to a new RFI flush type */
++ pseries_setup_rfi_flush();
++
+ return;
+ }
+
+diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
+index b1be7b713fe6..62ff57cf6c24 100644
+--- a/arch/powerpc/platforms/pseries/pseries.h
++++ b/arch/powerpc/platforms/pseries/pseries.h
+@@ -79,4 +79,6 @@ extern struct pci_controller_ops pseries_pci_controller_ops;
+
+ unsigned long pseries_memory_block_size(void);
+
++void pseries_setup_rfi_flush(void);
++
+ #endif /* _PSERIES_PSERIES_H */
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 1845fc611912..91ade7755823 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -66,6 +66,7 @@
+ #include <asm/reg.h>
+ #include <asm/plpar_wrappers.h>
+ #include <asm/kexec.h>
++#include <asm/security_features.h>
+
+ #include "pseries.h"
+
+@@ -450,35 +451,78 @@ static void __init find_and_init_phbs(void)
+ of_pci_check_probe_only();
+ }
+
+-static void pseries_setup_rfi_flush(void)
++static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
++{
++ /*
++ * The features below are disabled by default, so we instead look to see
++ * if firmware has *enabled* them, and set them if so.
++ */
++ if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
++ security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
++
++ if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
++ security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
++
++ if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
++ security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
++
++ if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
++ security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
++
++ if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
++ security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
++
++ if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
++ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
++
++ /*
++ * The features below are enabled by default, so we instead look to see
++ * if firmware has *disabled* them, and clear them if so.
++ */
++ if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
++ security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
++
++ if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
++
++ if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
++ security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
++}
++
++void pseries_setup_rfi_flush(void)
+ {
+ struct h_cpu_char_result result;
+ enum l1d_flush_type types;
+ bool enable;
+ long rc;
+
+- /* Enable by default */
+- enable = true;
++ /*
++ * Set features to the defaults assumed by init_cpu_char_feature_flags()
++ * so it can set/clear again any features that might have changed after
++ * migration, and in case the hypercall fails and it is not even called.
++ */
++ powerpc_security_features = SEC_FTR_DEFAULT;
+
+ rc = plpar_get_cpu_characteristics(&result);
+- if (rc == H_SUCCESS) {
+- types = L1D_FLUSH_NONE;
++ if (rc == H_SUCCESS)
++ init_cpu_char_feature_flags(&result);
+
+- if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+- types |= L1D_FLUSH_MTTRIG;
+- if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+- types |= L1D_FLUSH_ORI;
++ /*
++ * We're the guest so this doesn't apply to us, clear it to simplify
++ * handling of it elsewhere.
++ */
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
+
+- /* Use fallback if nothing set in hcall */
+- if (types == L1D_FLUSH_NONE)
+- types = L1D_FLUSH_FALLBACK;
++ types = L1D_FLUSH_FALLBACK;
+
+- if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+- enable = false;
+- } else {
+- /* Default to fallback if case hcall is not available */
+- types = L1D_FLUSH_FALLBACK;
+- }
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
++ types |= L1D_FLUSH_MTTRIG;
++
++ if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
++ types |= L1D_FLUSH_ORI;
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
+
+ setup_rfi_flush(types, enable);
+ }
+@@ -501,6 +545,7 @@ static void __init pSeries_setup_arch(void)
+ fwnmi_init();
+
+ pseries_setup_rfi_flush();
++ setup_stf_barrier();
+
+ /* By default, only probe PCI (can be overridden by rtas_pci) */
+ pci_add_flags(PCI_PROBE_ONLY);
+diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
+index f87a55d77094..9b3f2e212b37 100644
+--- a/arch/sparc/kernel/ds.c
++++ b/arch/sparc/kernel/ds.c
+@@ -908,7 +908,7 @@ static int register_services(struct ds_info *dp)
+ pbuf.req.handle = cp->handle;
+ pbuf.req.major = 1;
+ pbuf.req.minor = 0;
+- strcpy(pbuf.req.svc_id, cp->service_id);
++ strcpy(pbuf.id_buf, cp->service_id);
+
+ err = __ds_send(lp, &pbuf, msg_len);
+ if (err > 0)
+diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
+index d6b6c97fe3c7..703127aaf4a5 100644
+--- a/arch/sparc/lib/multi3.S
++++ b/arch/sparc/lib/multi3.S
+@@ -5,26 +5,26 @@
+ .align 4
+ ENTRY(__multi3) /* %o0 = u, %o1 = v */
+ mov %o1, %g1
+- srl %o3, 0, %g4
+- mulx %g4, %g1, %o1
++ srl %o3, 0, %o4
++ mulx %o4, %g1, %o1
+ srlx %g1, 0x20, %g3
+- mulx %g3, %g4, %g5
+- sllx %g5, 0x20, %o5
+- srl %g1, 0, %g4
++ mulx %g3, %o4, %g7
++ sllx %g7, 0x20, %o5
++ srl %g1, 0, %o4
+ sub %o1, %o5, %o5
+ srlx %o5, 0x20, %o5
+- addcc %g5, %o5, %g5
++ addcc %g7, %o5, %g7
+ srlx %o3, 0x20, %o5
+- mulx %g4, %o5, %g4
++ mulx %o4, %o5, %o4
+ mulx %g3, %o5, %o5
+ sethi %hi(0x80000000), %g3
+- addcc %g5, %g4, %g5
+- srlx %g5, 0x20, %g5
++ addcc %g7, %o4, %g7
++ srlx %g7, 0x20, %g7
+ add %g3, %g3, %g3
+ movcc %xcc, %g0, %g3
+- addcc %o5, %g5, %o5
+- sllx %g4, 0x20, %g4
+- add %o1, %g4, %o1
++ addcc %o5, %g7, %o5
++ sllx %o4, 0x20, %o4
++ add %o1, %o4, %o1
+ add %o5, %g3, %g2
+ mulx %g1, %o2, %g1
+ add %g1, %g2, %g1
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index e3a3f5a64884..2986a13b9786 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -472,6 +472,12 @@ static void __init xen_init_cpuid_mask(void)
+ cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
+ }
+
++static void __init xen_init_capabilities(void)
++{
++ if (xen_pv_domain())
++ setup_force_cpu_cap(X86_FEATURE_XENPV);
++}
++
+ static void xen_set_debugreg(int reg, unsigned long val)
+ {
+ HYPERVISOR_set_debugreg(reg, val);
+@@ -1634,6 +1640,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
+
+ xen_init_irq_ops();
+ xen_init_cpuid_mask();
++ xen_init_capabilities();
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ /*
+@@ -1978,12 +1985,6 @@ bool xen_hvm_need_lapic(void)
+ }
+ EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
+
+-static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+-{
+- if (xen_pv_domain())
+- set_cpu_cap(c, X86_FEATURE_XENPV);
+-}
+-
+ static void xen_pin_vcpu(int cpu)
+ {
+ static bool disable_pinning;
+@@ -2030,7 +2031,6 @@ const struct hypervisor_x86 x86_hyper_xen = {
+ .init_platform = xen_hvm_guest_init,
+ #endif
+ .x2apic_available = xen_x2apic_para_available,
+- .set_cpu_features = xen_set_cpu_features,
+ .pin_vcpu = xen_pin_vcpu,
+ };
+ EXPORT_SYMBOL(x86_hyper_xen);
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index 6b54e02da10c..e48140e76043 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -551,7 +551,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
+ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+ {
+- struct sg_table *sg_table = ERR_PTR(-EINVAL);
++ struct sg_table *sg_table;
+
+ might_sleep();
+
+diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
+index 3e6fe82c6d64..4d49fa0911c6 100644
+--- a/drivers/gpu/drm/drm_dp_helper.c
++++ b/drivers/gpu/drm/drm_dp_helper.c
+@@ -1065,6 +1065,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
+ static const u16 psr_setup_time_us[] = {
+ PSR_SETUP_TIME(330),
+ PSR_SETUP_TIME(275),
++ PSR_SETUP_TIME(220),
+ PSR_SETUP_TIME(165),
+ PSR_SETUP_TIME(110),
+ PSR_SETUP_TIME(55),
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 3517c0ed984a..479d64184da5 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -864,6 +864,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
+ },
+ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Radiant P845",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
+index 877a0ed76abf..c38645106783 100644
+--- a/drivers/hwtracing/stm/core.c
++++ b/drivers/hwtracing/stm/core.c
+@@ -27,6 +27,7 @@
+ #include <linux/stm.h>
+ #include <linux/fs.h>
+ #include <linux/mm.h>
++#include <linux/vmalloc.h>
+ #include "stm.h"
+
+ #include <uapi/linux/stm.h>
+@@ -682,7 +683,7 @@ static void stm_device_release(struct device *dev)
+ {
+ struct stm_device *stm = to_stm_device(dev);
+
+- kfree(stm);
++ vfree(stm);
+ }
+
+ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+@@ -699,7 +700,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ return -EINVAL;
+
+ nmasters = stm_data->sw_end - stm_data->sw_start + 1;
+- stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
++ stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
+ if (!stm)
+ return -ENOMEM;
+
+@@ -752,7 +753,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ /* matches device_initialize() above */
+ put_device(&stm->dev);
+ err_free:
+- kfree(stm);
++ vfree(stm);
+
+ return err;
+ }
+diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
+index c5b999f0c519..7ef9b13262a8 100644
+--- a/drivers/iio/buffer/kfifo_buf.c
++++ b/drivers/iio/buffer/kfifo_buf.c
+@@ -24,6 +24,13 @@ static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
+ if ((length == 0) || (bytes_per_datum == 0))
+ return -EINVAL;
+
++ /*
++ * Make sure we don't overflow an unsigned int after kfifo rounds up to
++ * the next power of 2.
++ */
++ if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
++ return -EINVAL;
++
+ return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
+ bytes_per_datum, GFP_KERNEL);
+ }
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index ae04826e82fc..a32dd851e712 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -437,7 +437,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
+ return -EINVAL;
+
+ if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
+- return -EAGAIN;
++ return -EINVAL;
+
+ memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
+ if (attr) {
+diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
+index e23b2495d52e..05b8695a6369 100644
+--- a/drivers/input/mouse/elan_i2c_smbus.c
++++ b/drivers/input/mouse/elan_i2c_smbus.c
+@@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
+ bool max_baseline, u8 *value)
+ {
+ int error;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ error = i2c_smbus_read_block_data(client,
+ max_baseline ?
+@@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
+ bool iap, u8 *version)
+ {
+ int error;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ error = i2c_smbus_read_block_data(client,
+ iap ? ETP_SMBUS_IAP_VERSION_CMD :
+@@ -169,7 +169,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
+ u8 *ic_type, u8 *version)
+ {
+ int error;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_SM_VERSION_CMD, val);
+@@ -186,7 +186,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
+ static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
+ {
+ int error;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ error = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_UNIQUEID_CMD, val);
+@@ -203,7 +203,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
+ bool iap, u16 *csum)
+ {
+ int error;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ error = i2c_smbus_read_block_data(client,
+ iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
+@@ -224,7 +224,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
+ {
+ int ret;
+ int error;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
+ if (ret != 3) {
+@@ -244,7 +244,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
+ {
+ int ret;
+ int error;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
+ if (ret != 3) {
+@@ -265,7 +265,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
+ {
+ int ret;
+ int error;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
+ if (ret != 3) {
+@@ -292,7 +292,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
+ {
+ int error;
+ u16 constant;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
+ if (error < 0) {
+@@ -343,7 +343,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
+ int len;
+ int error;
+ enum tp_mode mode;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
+ u16 password;
+
+@@ -417,7 +417,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
+ struct device *dev = &client->dev;
+ int error;
+ u16 result;
+- u8 val[3];
++ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ /*
+ * Due to the limitation of smbus protocol limiting
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 0b1d5bdd0862..f7b8681aed3f 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void)
+ }
+
+ #ifdef CONFIG_ARM64
+-static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
+
+ static u64 __maybe_unused gic_read_iar(void)
+ {
+- if (static_branch_unlikely(&is_cavium_thunderx))
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
+ return gic_read_iar_cavium_thunderx();
+ else
+ return gic_read_iar_common();
+@@ -908,14 +907,6 @@ static const struct irq_domain_ops partition_domain_ops = {
+ .select = gic_irq_domain_select,
+ };
+
+-static void gicv3_enable_quirks(void)
+-{
+-#ifdef CONFIG_ARM64
+- if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
+- static_branch_enable(&is_cavium_thunderx);
+-#endif
+-}
+-
+ static int __init gic_init_bases(void __iomem *dist_base,
+ struct redist_region *rdist_regs,
+ u32 nr_redist_regions,
+@@ -938,8 +929,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
+ gic_data.nr_redist_regions = nr_redist_regions;
+ gic_data.redist_stride = redist_stride;
+
+- gicv3_enable_quirks();
+-
+ /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index bcbb80ff86a7..1a92cd719e19 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -142,16 +142,17 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_alloc *frags,
+ int i)
+ {
+- const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+- u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+-
+-
+- if (next_frag_end > frags[i].page_size)
+- dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+- frag_info->dma_dir);
++ if (frags[i].page) {
++ const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
++ u32 next_frag_end = frags[i].page_offset +
++ 2 * frag_info->frag_stride;
+
+- if (frags[i].page)
++ if (next_frag_end > frags[i].page_size) {
++ dma_unmap_page(priv->ddev, frags[i].dma,
++ frags[i].page_size, frag_info->dma_dir);
++ }
+ put_page(frags[i].page);
++ }
+ }
+
+ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
+@@ -586,21 +587,28 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+ int length)
+ {
+ struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
+- struct mlx4_en_frag_info *frag_info;
+ int nr;
+ dma_addr_t dma;
+
+ /* Collect used fragments while replacing them in the HW descriptors */
+ for (nr = 0; nr < priv->num_frags; nr++) {
+- frag_info = &priv->frag_info[nr];
++ struct mlx4_en_frag_info *frag_info = &priv->frag_info[nr];
++ u32 next_frag_end = frags[nr].page_offset +
++ 2 * frag_info->frag_stride;
++
+ if (length <= frag_info->frag_prefix_size)
+ break;
+ if (unlikely(!frags[nr].page))
+ goto fail;
+
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+- dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
+- DMA_FROM_DEVICE);
++ if (next_frag_end > frags[nr].page_size)
++ dma_unmap_page(priv->ddev, frags[nr].dma,
++ frags[nr].page_size, frag_info->dma_dir);
++ else
++ dma_sync_single_for_cpu(priv->ddev, dma,
++ frag_info->frag_size,
++ DMA_FROM_DEVICE);
+
+ /* Save page reference in skb */
+ __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+index ec2ea56f7933..fdbd35954d15 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+@@ -304,9 +304,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ writeVal = 0x00000000;
+ if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+ writeVal = writeVal - 0x06060606;
+- else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+- TXHIGHPWRLEVEL_BT2)
+- writeVal = writeVal;
+ *(p_outwriteval + rf) = writeVal;
+ }
+ }
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index 056845bdf67b..bedce3453dd3 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -790,7 +790,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
+ return -EINVAL;
+
+ chip = &pctrl->chip;
+- chip->base = -1;
++ chip->base = 0;
+ chip->ngpio = ngpio;
+ chip->label = dev_name(pctrl->dev);
+ chip->parent = pctrl->dev;
+diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
+index f9a245465fd0..6a25bfd4541e 100644
+--- a/drivers/platform/chrome/cros_ec_lpc.c
++++ b/drivers/platform/chrome/cros_ec_lpc.c
+@@ -49,7 +49,6 @@ static int ec_response_timed_out(void)
+ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
+ struct cros_ec_command *msg)
+ {
+- struct ec_host_request *request;
+ struct ec_host_response response;
+ u8 sum = 0;
+ int i;
+@@ -62,8 +61,6 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
+ for (i = 0; i < ret; i++)
+ outb(ec->dout[i], EC_LPC_ADDR_HOST_PACKET + i);
+
+- request = (struct ec_host_request *)ec->dout;
+-
+ /* Here we go */
+ outb(EC_COMMAND_PROTOCOL_3, EC_LPC_ADDR_HOST_CMD);
+
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index e3cd3ece4412..c3d1891d2d3f 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -52,6 +52,8 @@ struct srp_internal {
+ struct transport_container rport_attr_cont;
+ };
+
++static int scsi_is_srp_rport(const struct device *dev);
++
+ #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
+
+ #define dev_to_rport(d) container_of(d, struct srp_rport, dev)
+@@ -61,9 +63,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
+ return dev_to_shost(r->dev.parent);
+ }
+
++static int find_child_rport(struct device *dev, void *data)
++{
++ struct device **child = data;
++
++ if (scsi_is_srp_rport(dev)) {
++ WARN_ON_ONCE(*child);
++ *child = dev;
++ }
++ return 0;
++}
++
+ static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
+ {
+- return transport_class_to_srp_rport(&shost->shost_gendev);
++ struct device *child = NULL;
++
++ WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
++ find_child_rport) < 0);
++ return child ? dev_to_rport(child) : NULL;
+ }
+
+ /**
+@@ -637,7 +654,8 @@ static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
+ struct srp_rport *rport = shost_to_rport(shost);
+
+ pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
+- return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
++ return rport && rport->fast_io_fail_tmo < 0 &&
++ rport->dev_loss_tmo < 0 &&
+ i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
+ BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+ }
+diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
+index 845b874e2977..5bb2316f60bf 100644
+--- a/drivers/scsi/ufs/ufs.h
++++ b/drivers/scsi/ufs/ufs.h
+@@ -145,7 +145,7 @@ enum attr_idn {
+ /* Descriptor idn for Query requests */
+ enum desc_idn {
+ QUERY_DESC_IDN_DEVICE = 0x0,
+- QUERY_DESC_IDN_CONFIGURAION = 0x1,
++ QUERY_DESC_IDN_CONFIGURATION = 0x1,
+ QUERY_DESC_IDN_UNIT = 0x2,
+ QUERY_DESC_IDN_RFU_0 = 0x3,
+ QUERY_DESC_IDN_INTERCONNECT = 0x4,
+@@ -161,19 +161,13 @@ enum desc_header_offset {
+ QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
+ };
+
+-enum ufs_desc_max_size {
+- QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
+- QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
+- QUERY_DESC_UNIT_MAX_SIZE = 0x23,
+- QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
+- /*
+- * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
+- * of descriptor header.
+- */
+- QUERY_DESC_STRING_MAX_SIZE = 0xFE,
+- QUERY_DESC_GEOMETRY_MAX_SIZE = 0x44,
+- QUERY_DESC_POWER_MAX_SIZE = 0x62,
+- QUERY_DESC_RFU_MAX_SIZE = 0x00,
++enum ufs_desc_def_size {
++ QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
++ QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
++ QUERY_DESC_UNIT_DEF_SIZE = 0x23,
++ QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
++ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
++ QUERY_DESC_POWER_DEF_SIZE = 0x62,
+ };
+
+ /* Unit descriptor parameters offsets in bytes*/
+@@ -522,4 +516,16 @@ struct ufs_dev_info {
+ bool is_lu_power_on_wp;
+ };
+
++#define MAX_MODEL_LEN 16
++/**
++ * ufs_dev_desc - ufs device details from the device descriptor
++ *
++ * @wmanufacturerid: card details
++ * @model: card model
++ */
++struct ufs_dev_desc {
++ u16 wmanufacturerid;
++ char model[MAX_MODEL_LEN + 1];
++};
++
+ #endif /* End of Header */
+diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
+index 08b799d4efcc..71f73d1d1ad1 100644
+--- a/drivers/scsi/ufs/ufs_quirks.h
++++ b/drivers/scsi/ufs/ufs_quirks.h
+@@ -21,41 +21,28 @@
+ #define UFS_ANY_VENDOR 0xFFFF
+ #define UFS_ANY_MODEL "ANY_MODEL"
+
+-#define MAX_MODEL_LEN 16
+-
+ #define UFS_VENDOR_TOSHIBA 0x198
+ #define UFS_VENDOR_SAMSUNG 0x1CE
+ #define UFS_VENDOR_SKHYNIX 0x1AD
+
+-/**
+- * ufs_device_info - ufs device details
+- * @wmanufacturerid: card details
+- * @model: card model
+- */
+-struct ufs_device_info {
+- u16 wmanufacturerid;
+- char model[MAX_MODEL_LEN + 1];
+-};
+-
+ /**
+ * ufs_dev_fix - ufs device quirk info
+ * @card: ufs card details
+ * @quirk: device quirk
+ */
+ struct ufs_dev_fix {
+- struct ufs_device_info card;
++ struct ufs_dev_desc card;
+ unsigned int quirk;
+ };
+
+ #define END_FIX { { 0 }, 0 }
+
+ /* add specific device quirk */
+-#define UFS_FIX(_vendor, _model, _quirk) \
+- { \
+- .card.wmanufacturerid = (_vendor),\
+- .card.model = (_model), \
+- .quirk = (_quirk), \
+- }
++#define UFS_FIX(_vendor, _model, _quirk) { \
++ .card.wmanufacturerid = (_vendor),\
++ .card.model = (_model), \
++ .quirk = (_quirk), \
++}
+
+ /*
+ * If UFS device is having issue in processing LCC (Line Control
+@@ -144,7 +131,4 @@ struct ufs_dev_fix {
+ */
+ #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
+
+-struct ufs_hba;
+-void ufs_advertise_fixup_device(struct ufs_hba *hba);
+-
+ #endif /* UFS_QUIRKS_H_ */
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 98a7111dd53f..86a3110c6d75 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -98,19 +98,6 @@
+ _ret; \
+ })
+
+-static u32 ufs_query_desc_max_size[] = {
+- QUERY_DESC_DEVICE_MAX_SIZE,
+- QUERY_DESC_CONFIGURAION_MAX_SIZE,
+- QUERY_DESC_UNIT_MAX_SIZE,
+- QUERY_DESC_RFU_MAX_SIZE,
+- QUERY_DESC_INTERCONNECT_MAX_SIZE,
+- QUERY_DESC_STRING_MAX_SIZE,
+- QUERY_DESC_RFU_MAX_SIZE,
+- QUERY_DESC_GEOMETRY_MAX_SIZE,
+- QUERY_DESC_POWER_MAX_SIZE,
+- QUERY_DESC_RFU_MAX_SIZE,
+-};
+-
+ enum {
+ UFSHCD_MAX_CHANNEL = 0,
+ UFSHCD_MAX_ID = 1,
+@@ -1961,7 +1948,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
+ goto out;
+ }
+
+- if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
++ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
+ __func__, *buf_len);
+ err = -EINVAL;
+@@ -2040,6 +2027,92 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+ }
+ EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
+
++/**
++ * ufshcd_read_desc_length - read the specified descriptor length from header
++ * @hba: Pointer to adapter instance
++ * @desc_id: descriptor idn value
++ * @desc_index: descriptor index
++ * @desc_length: pointer to variable to read the length of descriptor
++ *
++ * Return 0 in case of success, non-zero otherwise
++ */
++static int ufshcd_read_desc_length(struct ufs_hba *hba,
++ enum desc_idn desc_id,
++ int desc_index,
++ int *desc_length)
++{
++ int ret;
++ u8 header[QUERY_DESC_HDR_SIZE];
++ int header_len = QUERY_DESC_HDR_SIZE;
++
++ if (desc_id >= QUERY_DESC_IDN_MAX)
++ return -EINVAL;
++
++ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
++ desc_id, desc_index, 0, header,
++ &header_len);
++
++ if (ret) {
++ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
++ __func__, desc_id);
++ return ret;
++ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
++ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
++ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
++ desc_id);
++ ret = -EINVAL;
++ }
++
++ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
++ return ret;
++
++}
++
++/**
++ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
++ * @hba: Pointer to adapter instance
++ * @desc_id: descriptor idn value
++ * @desc_len: mapped desc length (out)
++ *
++ * Return 0 in case of success, non-zero otherwise
++ */
++int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
++ enum desc_idn desc_id, int *desc_len)
++{
++ switch (desc_id) {
++ case QUERY_DESC_IDN_DEVICE:
++ *desc_len = hba->desc_size.dev_desc;
++ break;
++ case QUERY_DESC_IDN_POWER:
++ *desc_len = hba->desc_size.pwr_desc;
++ break;
++ case QUERY_DESC_IDN_GEOMETRY:
++ *desc_len = hba->desc_size.geom_desc;
++ break;
++ case QUERY_DESC_IDN_CONFIGURATION:
++ *desc_len = hba->desc_size.conf_desc;
++ break;
++ case QUERY_DESC_IDN_UNIT:
++ *desc_len = hba->desc_size.unit_desc;
++ break;
++ case QUERY_DESC_IDN_INTERCONNECT:
++ *desc_len = hba->desc_size.interc_desc;
++ break;
++ case QUERY_DESC_IDN_STRING:
++ *desc_len = QUERY_DESC_MAX_SIZE;
++ break;
++ case QUERY_DESC_IDN_RFU_0:
++ case QUERY_DESC_IDN_RFU_1:
++ *desc_len = 0;
++ break;
++ default:
++ *desc_len = 0;
++ return -EINVAL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
++
+ /**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ * @hba: Pointer to adapter instance
+@@ -2054,50 +2127,64 @@ EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
+ static int ufshcd_read_desc_param(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+- u32 param_offset,
++ u8 param_offset,
+ u8 *param_read_buf,
+- u32 param_size)
++ u8 param_size)
+ {
+ int ret;
+ u8 *desc_buf;
+- u32 buff_len;
++ int buff_len;
+ bool is_kmalloc = true;
+
+- /* safety checks */
+- if (desc_id >= QUERY_DESC_IDN_MAX)
++ /* Safety check */
++ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
+ return -EINVAL;
+
+- buff_len = ufs_query_desc_max_size[desc_id];
+- if ((param_offset + param_size) > buff_len)
+- return -EINVAL;
++ /* Get the max length of descriptor from structure filled up at probe
++ * time.
++ */
++ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
+
+- if (!param_offset && (param_size == buff_len)) {
+- /* memory space already available to hold full descriptor */
+- desc_buf = param_read_buf;
+- is_kmalloc = false;
+- } else {
+- /* allocate memory to hold full descriptor */
++ /* Sanity checks */
++ if (ret || !buff_len) {
++ dev_err(hba->dev, "%s: Failed to get full descriptor length",
++ __func__);
++ return ret;
++ }
++
++ /* Check whether we need temp memory */
++ if (param_offset != 0 || param_size < buff_len) {
+ desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ if (!desc_buf)
+ return -ENOMEM;
++ } else {
++ desc_buf = param_read_buf;
++ is_kmalloc = false;
+ }
+
++ /* Request for full descriptor */
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+- desc_id, desc_index, 0, desc_buf,
+- &buff_len);
+-
+- if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
+- (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
+- ufs_query_desc_max_size[desc_id])
+- || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
+- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
+- __func__, desc_id, param_offset, buff_len, ret);
+- if (!ret)
+- ret = -EINVAL;
++ desc_id, desc_index, 0,
++ desc_buf, &buff_len);
++
++ if (ret) {
++ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
++ __func__, desc_id, desc_index, param_offset, ret);
++ goto out;
++ }
+
++ /* Sanity check */
++ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
++ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
++ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
++ ret = -EINVAL;
+ goto out;
+ }
+
++ /* Check whether we will not copy more data than available */
++ if (is_kmalloc && param_size > buff_len)
++ param_size = buff_len;
++
+ if (is_kmalloc)
+ memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+ out:
+@@ -4789,8 +4876,8 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
+ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
+ {
+ int ret;
+- int buff_len = QUERY_DESC_POWER_MAX_SIZE;
+- u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
++ int buff_len = hba->desc_size.pwr_desc;
++ u8 desc_buf[hba->desc_size.pwr_desc];
+
+ ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+ if (ret) {
+@@ -4883,16 +4970,15 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
+ return ret;
+ }
+
+-static int ufs_get_device_info(struct ufs_hba *hba,
+- struct ufs_device_info *card_data)
++static int ufs_get_device_desc(struct ufs_hba *hba,
++ struct ufs_dev_desc *dev_desc)
+ {
+ int err;
+ u8 model_index;
+- u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
+- u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
++ u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
++ u8 desc_buf[hba->desc_size.dev_desc];
+
+- err = ufshcd_read_device_desc(hba, desc_buf,
+- QUERY_DESC_DEVICE_MAX_SIZE);
++ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
+ __func__, err);
+@@ -4903,50 +4989,40 @@ static int ufs_get_device_info(struct ufs_hba *hba,
+ * getting vendor (manufacturerID) and Bank Index in big endian
+ * format
+ */
+- card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
++ dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+ model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+ err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
+- QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
++ QUERY_DESC_MAX_SIZE, ASCII_STD);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+- str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+- strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
++ str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
++ strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ MAX_MODEL_LEN));
+
+ /* Null terminate the model string */
+- card_data->model[MAX_MODEL_LEN] = '\0';
++ dev_desc->model[MAX_MODEL_LEN] = '\0';
+
+ out:
+ return err;
+ }
+
+-void ufs_advertise_fixup_device(struct ufs_hba *hba)
++static void ufs_fixup_device_setup(struct ufs_hba *hba,
++ struct ufs_dev_desc *dev_desc)
+ {
+- int err;
+ struct ufs_dev_fix *f;
+- struct ufs_device_info card_data;
+-
+- card_data.wmanufacturerid = 0;
+-
+- err = ufs_get_device_info(hba, &card_data);
+- if (err) {
+- dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+- __func__, err);
+- return;
+- }
+
+ for (f = ufs_fixups; f->quirk; f++) {
+- if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
+- (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
+- (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
++ if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
++ f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
++ (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
+ !strcmp(f->card.model, UFS_ANY_MODEL)))
+ hba->dev_quirks |= f->quirk;
+ }
+@@ -5116,6 +5192,51 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+ ufshcd_vops_apply_dev_quirks(hba);
+ }
+
++static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
++{
++ int err;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
++ &hba->desc_size.dev_desc);
++ if (err)
++ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
++ &hba->desc_size.pwr_desc);
++ if (err)
++ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
++ &hba->desc_size.interc_desc);
++ if (err)
++ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
++ &hba->desc_size.conf_desc);
++ if (err)
++ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
++ &hba->desc_size.unit_desc);
++ if (err)
++ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
++
++ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
++ &hba->desc_size.geom_desc);
++ if (err)
++ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
++}
++
++static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
++{
++ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
++ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
++ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
++ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
++ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
++ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
++}
++
+ /**
+ * ufshcd_probe_hba - probe hba to detect device and initialize
+ * @hba: per-adapter instance
+@@ -5124,6 +5245,7 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+ */
+ static int ufshcd_probe_hba(struct ufs_hba *hba)
+ {
++ struct ufs_dev_desc card = {0};
+ int ret;
+
+ ret = ufshcd_link_startup(hba);
+@@ -5147,7 +5269,17 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
+ if (ret)
+ goto out;
+
+- ufs_advertise_fixup_device(hba);
++ /* Init check for device descriptor sizes */
++ ufshcd_init_desc_sizes(hba);
++
++ ret = ufs_get_device_desc(hba, &card);
++ if (ret) {
++ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
++ __func__, ret);
++ goto out;
++ }
++
++ ufs_fixup_device_setup(hba, &card);
+ ufshcd_tune_unipro_params(hba);
+
+ ret = ufshcd_set_vccq_rail_unused(hba,
+@@ -5173,6 +5305,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
+
+ /* set the state as operational after switching to desired gear */
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
++
+ /*
+ * If we are in error handling context or in power management callbacks
+ * context, no need to scan the host
+@@ -6549,6 +6682,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+ hba->mmio_base = mmio_base;
+ hba->irq = irq;
+
++ /* Set descriptor lengths to specification defaults */
++ ufshcd_def_desc_sizes(hba);
++
+ err = ufshcd_hba_init(hba);
+ if (err)
+ goto out_error;
+diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
+index f2170d5058a8..6dbd2e176333 100644
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -205,6 +205,15 @@ struct ufs_dev_cmd {
+ struct ufs_query query;
+ };
+
++struct ufs_desc_size {
++ int dev_desc;
++ int pwr_desc;
++ int geom_desc;
++ int interc_desc;
++ int unit_desc;
++ int conf_desc;
++};
++
+ /**
+ * struct ufs_clk_info - UFS clock related info
+ * @list: list headed by hba->clk_list_head
+@@ -388,6 +397,7 @@ struct ufs_init_prefetch {
+ * @clk_list_head: UFS host controller clocks list node head
+ * @pwr_info: holds current power mode
+ * @max_pwr_info: keeps the device max valid pwm
++ * @desc_size: descriptor sizes reported by device
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+ * device is known or not.
+@@ -563,6 +573,8 @@ struct ufs_hba {
+
+ enum bkops_status urgent_bkops_lvl;
+ bool is_urgent_bkops_lvl_checked;
++
++ struct ufs_desc_size desc_size;
+ };
+
+ /* Returns true if clocks can be gated. Otherwise false */
+@@ -736,6 +748,10 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res);
+ int ufshcd_hold(struct ufs_hba *hba, bool async);
+ void ufshcd_release(struct ufs_hba *hba);
++
++int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
++ int *desc_length);
++
+ u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
+
+ /* Wrapper functions for safely calling variant operations */
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index b42d7f1c9089..6b1863293fe1 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -2320,12 +2320,67 @@ static int __init pl011_console_setup(struct console *co, char *options)
+ return uart_set_options(&uap->port, co, baud, parity, bits, flow);
+ }
+
++/**
++ * pl011_console_match - non-standard console matching
++ * @co: registering console
++ * @name: name from console command line
++ * @idx: index from console command line
++ * @options: ptr to option string from console command line
++ *
++ * Only attempts to match console command lines of the form:
++ * console=pl011,mmio|mmio32,<addr>[,<options>]
++ * console=pl011,0x<addr>[,<options>]
++ * This form is used to register an initial earlycon boot console and
++ * replace it with the amba_console at pl011 driver init.
++ *
++ * Performs console setup for a match (as required by interface)
++ * If no <options> are specified, then assume the h/w is already setup.
++ *
++ * Returns 0 if console matches; otherwise non-zero to use default matching
++ */
++static int __init pl011_console_match(struct console *co, char *name, int idx,
++ char *options)
++{
++ unsigned char iotype;
++ resource_size_t addr;
++ int i;
++
++ if (strcmp(name, "pl011") != 0)
++ return -ENODEV;
++
++ if (uart_parse_earlycon(options, &iotype, &addr, &options))
++ return -ENODEV;
++
++ if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
++ return -ENODEV;
++
++ /* try to match the port specified on the command line */
++ for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
++ struct uart_port *port;
++
++ if (!amba_ports[i])
++ continue;
++
++ port = &amba_ports[i]->port;
++
++ if (port->mapbase != addr)
++ continue;
++
++ co->index = i;
++ port->cons = co;
++ return pl011_console_setup(co, options);
++ }
++
++ return -ENODEV;
++}
++
+ static struct uart_driver amba_reg;
+ static struct console amba_console = {
+ .name = "ttyAMA",
+ .write = pl011_console_write,
+ .device = uart_console_device,
+ .setup = pl011_console_setup,
++ .match = pl011_console_match,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &amba_reg,
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index d98531823998..46b4dea7a0ec 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -33,7 +33,7 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
+ static void cp210x_close(struct usb_serial_port *);
+ static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *);
+ static void cp210x_get_termios_port(struct usb_serial_port *port,
+- unsigned int *cflagp, unsigned int *baudp);
++ tcflag_t *cflagp, unsigned int *baudp);
+ static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
+ struct ktermios *);
+ static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
+@@ -728,7 +728,7 @@ static void cp210x_get_termios(struct tty_struct *tty,
+ &tty->termios.c_cflag, &baud);
+ tty_encode_baud_rate(tty, baud, baud);
+ } else {
+- unsigned int cflag;
++ tcflag_t cflag;
+ cflag = 0;
+ cp210x_get_termios_port(port, &cflag, &baud);
+ }
+@@ -739,10 +739,10 @@ static void cp210x_get_termios(struct tty_struct *tty,
+ * This is the heart of cp210x_get_termios which always uses a &usb_serial_port.
+ */
+ static void cp210x_get_termios_port(struct usb_serial_port *port,
+- unsigned int *cflagp, unsigned int *baudp)
++ tcflag_t *cflagp, unsigned int *baudp)
+ {
+ struct device *dev = &port->dev;
+- unsigned int cflag;
++ tcflag_t cflag;
+ struct cp210x_flow_ctl flow_ctl;
+ u32 baud;
+ u16 bits;
+diff --git a/fs/aio.c b/fs/aio.c
+index 42d8c09311d1..b1170a7affe2 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -636,9 +636,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
+ while (!list_empty(&ctx->active_reqs)) {
+ req = list_first_entry(&ctx->active_reqs,
+ struct aio_kiocb, ki_list);
+-
+- list_del_init(&req->ki_list);
+ kiocb_cancel(req);
++ list_del_init(&req->ki_list);
+ }
+
+ spin_unlock_irq(&ctx->ctx_lock);
+diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
+index c3702cda010a..e567551402a6 100644
+--- a/fs/xfs/libxfs/xfs_alloc.c
++++ b/fs/xfs/libxfs/xfs_alloc.c
+@@ -2034,6 +2034,93 @@ xfs_alloc_space_available(
+ return true;
+ }
+
++/*
++ * Check the agfl fields of the agf for inconsistency or corruption. The purpose
++ * is to detect an agfl header padding mismatch between current and early v5
++ * kernels. This problem manifests as a 1-slot size difference between the
++ * on-disk flcount and the active [first, last] range of a wrapped agfl. This
++ * may also catch variants of agfl count corruption unrelated to padding. Either
++ * way, we'll reset the agfl and warn the user.
++ *
++ * Return true if a reset is required before the agfl can be used, false
++ * otherwise.
++ */
++static bool
++xfs_agfl_needs_reset(
++ struct xfs_mount *mp,
++ struct xfs_agf *agf)
++{
++ uint32_t f = be32_to_cpu(agf->agf_flfirst);
++ uint32_t l = be32_to_cpu(agf->agf_fllast);
++ uint32_t c = be32_to_cpu(agf->agf_flcount);
++ int agfl_size = XFS_AGFL_SIZE(mp);
++ int active;
++
++ /* no agfl header on v4 supers */
++ if (!xfs_sb_version_hascrc(&mp->m_sb))
++ return false;
++
++ /*
++ * The agf read verifier catches severe corruption of these fields.
++ * Repeat some sanity checks to cover a packed -> unpacked mismatch if
++ * the verifier allows it.
++ */
++ if (f >= agfl_size || l >= agfl_size)
++ return true;
++ if (c > agfl_size)
++ return true;
++
++ /*
++ * Check consistency between the on-disk count and the active range. An
++ * agfl padding mismatch manifests as an inconsistent flcount.
++ */
++ if (c && l >= f)
++ active = l - f + 1;
++ else if (c)
++ active = agfl_size - f + l + 1;
++ else
++ active = 0;
++
++ return active != c;
++}
++
++/*
++ * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
++ * agfl content cannot be trusted. Warn the user that a repair is required to
++ * recover leaked blocks.
++ *
++ * The purpose of this mechanism is to handle filesystems affected by the agfl
++ * header padding mismatch problem. A reset keeps the filesystem online with a
++ * relatively minor free space accounting inconsistency rather than suffer the
++ * inevitable crash from use of an invalid agfl block.
++ */
++static void
++xfs_agfl_reset(
++ struct xfs_trans *tp,
++ struct xfs_buf *agbp,
++ struct xfs_perag *pag)
++{
++ struct xfs_mount *mp = tp->t_mountp;
++ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
++
++ ASSERT(pag->pagf_agflreset);
++ trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
++
++ xfs_warn(mp,
++ "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
++ "Please unmount and run xfs_repair.",
++ pag->pag_agno, pag->pagf_flcount);
++
++ agf->agf_flfirst = 0;
++ agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
++ agf->agf_flcount = 0;
++ xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
++ XFS_AGF_FLCOUNT);
++
++ pag->pagf_flcount = 0;
++ pag->pagf_agflreset = false;
++}
++
+ /*
+ * Decide whether to use this allocation group for this allocation.
+ * If so, fix up the btree freelist's size.
+@@ -2095,6 +2182,10 @@ xfs_alloc_fix_freelist(
+ }
+ }
+
++ /* reset a padding mismatched agfl before final free space check */
++ if (pag->pagf_agflreset)
++ xfs_agfl_reset(tp, agbp, pag);
++
+ /* If there isn't enough total space or single-extent, reject it. */
+ need = xfs_alloc_min_freelist(mp, pag);
+ if (!xfs_alloc_space_available(args, need, flags))
+@@ -2251,6 +2342,7 @@ xfs_alloc_get_freelist(
+ agf->agf_flfirst = 0;
+
+ pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
++ ASSERT(!pag->pagf_agflreset);
+ be32_add_cpu(&agf->agf_flcount, -1);
+ xfs_trans_agflist_delta(tp, -1);
+ pag->pagf_flcount--;
+@@ -2362,6 +2454,7 @@ xfs_alloc_put_freelist(
+ agf->agf_fllast = 0;
+
+ pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
++ ASSERT(!pag->pagf_agflreset);
+ be32_add_cpu(&agf->agf_flcount, 1);
+ xfs_trans_agflist_delta(tp, 1);
+ pag->pagf_flcount++;
+@@ -2568,6 +2661,7 @@ xfs_alloc_read_agf(
+ pag->pagb_count = 0;
+ pag->pagb_tree = RB_ROOT;
+ pag->pagf_init = 1;
++ pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
+ }
+ #ifdef DEBUG
+ else if (!XFS_FORCED_SHUTDOWN(mp)) {
+diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
+index 5415f9031ef8..7cb099e1c84c 100644
+--- a/fs/xfs/xfs_mount.h
++++ b/fs/xfs/xfs_mount.h
+@@ -368,6 +368,7 @@ typedef struct xfs_perag {
+ char pagi_inodeok; /* The agi is ok for inodes */
+ __uint8_t pagf_levels[XFS_BTNUM_AGF];
+ /* # of levels in bno & cnt btree */
++ bool pagf_agflreset; /* agfl requires reset before use */
+ __uint32_t pagf_flcount; /* count of blocks in freelist */
+ xfs_extlen_t pagf_freeblks; /* total free blocks */
+ xfs_extlen_t pagf_longest; /* longest free space */
+diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
+index bdf69e1c7410..42a7c0da898f 100644
+--- a/fs/xfs/xfs_trace.h
++++ b/fs/xfs/xfs_trace.h
+@@ -1516,7 +1516,7 @@ TRACE_EVENT(xfs_trans_commit_lsn,
+ __entry->lsn)
+ );
+
+-TRACE_EVENT(xfs_agf,
++DECLARE_EVENT_CLASS(xfs_agf_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
+ unsigned long caller_ip),
+ TP_ARGS(mp, agf, flags, caller_ip),
+@@ -1572,6 +1572,13 @@ TRACE_EVENT(xfs_agf,
+ __entry->longest,
+ (void *)__entry->caller_ip)
+ );
++#define DEFINE_AGF_EVENT(name) \
++DEFINE_EVENT(xfs_agf_class, name, \
++ TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \
++ unsigned long caller_ip), \
++ TP_ARGS(mp, agf, flags, caller_ip))
++DEFINE_AGF_EVENT(xfs_agf);
++DEFINE_AGF_EVENT(xfs_agfl_reset);
+
+ TRACE_EVENT(xfs_free_extent,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index f50b717ce644..d0c3615f9050 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -337,7 +337,7 @@ struct tcp_sock {
+
+ /* Receiver queue space */
+ struct {
+- int space;
++ u32 space;
+ u32 seq;
+ u32 time;
+ } rcvq_space;
+diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
+index 7290eacc8cee..b902f106f69a 100644
+--- a/include/uapi/linux/nl80211.h
++++ b/include/uapi/linux/nl80211.h
+@@ -2379,7 +2379,7 @@ enum nl80211_attrs {
+ #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
+ #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
+
+-#define NL80211_WIPHY_NAME_MAXLEN 128
++#define NL80211_WIPHY_NAME_MAXLEN 64
+
+ #define NL80211_MAX_SUPP_RATES 32
+ #define NL80211_MAX_SUPP_HT_RATES 77
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 6721a1e89f39..88f398af57fa 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -481,9 +481,10 @@ clear_event_triggers(struct trace_array *tr)
+ struct trace_event_file *file;
+
+ list_for_each_entry(file, &tr->events, list) {
+- struct event_trigger_data *data;
+- list_for_each_entry_rcu(data, &file->triggers, list) {
++ struct event_trigger_data *data, *n;
++ list_for_each_entry_safe(data, n, &file->triggers, list) {
+ trace_event_trigger_enable_disable(file, 0);
++ list_del_rcu(&data->list);
+ if (data->ops->free)
+ data->ops->free(data->ops, data);
+ }
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 2d4b6478237b..f03ca5ab86b1 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1393,7 +1393,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
+ return ret;
+
+ mapping = page_mapping(page);
+- migrate_dirty = mapping && mapping->a_ops->migratepage;
++ migrate_dirty = !mapping || mapping->a_ops->migratepage;
+ unlock_page(page);
+ if (!migrate_dirty)
+ return ret;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 52b0a84be765..94a55b83e48c 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -581,8 +581,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
+ void tcp_rcv_space_adjust(struct sock *sk)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
++ u32 copied;
+ int time;
+- int copied;
+
+ time = tcp_time_stamp - tp->rcvq_space.time;
+ if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
+@@ -604,12 +604,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
+
+ if (sysctl_tcp_moderate_rcvbuf &&
+ !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+- int rcvwin, rcvmem, rcvbuf;
++ int rcvmem, rcvbuf;
++ u64 rcvwin;
+
+ /* minimal window to cope with packet losses, assuming
+ * steady state. Add some cushion because of small variations.
+ */
+- rcvwin = (copied << 1) + 16 * tp->advmss;
++ rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
+
+ /* If rate increased by 25%,
+ * assume slow start, rcvwin = 3 * copied
+@@ -629,7 +630,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
+ while (tcp_win_from_space(rcvmem) < tp->advmss)
+ rcvmem += 128;
+
+- rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
++ do_div(rcvwin, tp->advmss);
++ rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]);
+ if (rcvbuf > sk->sk_rcvbuf) {
+ sk->sk_rcvbuf = rcvbuf;
+
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 7bf8b005a178..1e6f23f77f15 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -389,14 +389,10 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ result = ima_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+ if (result == 1) {
+- bool digsig;
+-
+ if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
+ return -EINVAL;
+- digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG);
+- if (!digsig && (ima_appraise & IMA_APPRAISE_ENFORCE))
+- return -EPERM;
+- ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
++ ima_reset_appraise_flags(d_backing_inode(dentry),
++ (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
+ result = 0;
+ }
+ return result;
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index d656b7c98394..bfc4ffa1fa1a 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -1435,7 +1435,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
+ scontext_len, &context, def_sid);
+ if (rc == -EINVAL && force) {
+ context.str = str;
+- context.len = scontext_len;
++ context.len = strlen(str) + 1;
+ str = NULL;
+ } else if (rc)
+ goto out_unlock;
+diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
+index a086c35f91bb..79a9fdf94d38 100644
+--- a/sound/soc/intel/common/sst-firmware.c
++++ b/sound/soc/intel/common/sst-firmware.c
+@@ -274,7 +274,6 @@ int sst_dma_new(struct sst_dsp *sst)
+ struct sst_pdata *sst_pdata = sst->pdata;
+ struct sst_dma *dma;
+ struct resource mem;
+- const char *dma_dev_name;
+ int ret = 0;
+
+ if (sst->pdata->resindex_dma_base == -1)
+@@ -285,7 +284,6 @@ int sst_dma_new(struct sst_dsp *sst)
+ * is attached to the ADSP IP. */
+ switch (sst->pdata->dma_engine) {
+ case SST_DMA_TYPE_DW:
+- dma_dev_name = "dw_dmac";
+ break;
+ default:
+ dev_err(sst->dev, "error: invalid DMA engine %d\n",