From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: from lists.gentoo.org (pigeon.gentoo.org [208.92.234.80])
	(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)
	key-exchange X25519 server-signature RSA-PSS (2048 bits))
	(No client certificate requested)
	by finch.gentoo.org (Postfix) with ESMTPS id C715E158090
	for ; Thu, 12 May 2022 11:27:32 +0000 (UTC)
Received: from pigeon.gentoo.org (localhost [127.0.0.1])
	by pigeon.gentoo.org (Postfix) with SMTP id 0F1CFE0895;
	Thu, 12 May 2022 11:27:32 +0000 (UTC)
Received: from smtp.gentoo.org (woodpecker.gentoo.org [140.211.166.183])
	(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)
	key-exchange X25519 server-signature RSA-PSS (4096 bits))
	(No client certificate requested)
	by pigeon.gentoo.org (Postfix) with ESMTPS id A4313E0895
	for ; Thu, 12 May 2022 11:27:31 +0000 (UTC)
Received: from oystercatcher.gentoo.org (unknown [IPv6:2a01:4f8:202:4333:225:90ff:fed9:fc84])
	(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)
	key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest SHA256)
	(No client certificate requested)
	by smtp.gentoo.org (Postfix) with ESMTPS id 31251341489
	for ; Thu, 12 May 2022 11:27:30 +0000 (UTC)
Received: from localhost.localdomain (localhost [IPv6:::1])
	by oystercatcher.gentoo.org (Postfix) with ESMTP id 5E4A43BF
	for ; Thu, 12 May 2022 11:27:27 +0000 (UTC)
From: "Mike Pagano" 
To: gentoo-commits@lists.gentoo.org
Content-Transfer-Encoding: 8bit
Content-type: text/plain; charset=UTF-8
Reply-To: gentoo-dev@lists.gentoo.org, "Mike Pagano" 
Message-ID: <1652354828.33ae7af26e3ce63605bf43c87d215a1d710d852d.mpagano@gentoo>
Subject: [gentoo-commits] proj/linux-patches:5.17 commit in: /
X-VCS-Repository: proj/linux-patches
X-VCS-Files: 0000_README 1006_linux-5.17.7.patch
X-VCS-Directories: /
X-VCS-Committer: mpagano
X-VCS-Committer-Name: Mike Pagano
X-VCS-Revision: 33ae7af26e3ce63605bf43c87d215a1d710d852d
X-VCS-Branch: 5.17
Date: Thu, 12 May 2022 11:27:27 +0000 (UTC)
Precedence: bulk
List-Post: 
List-Help: 
List-Unsubscribe: 
List-Subscribe: 
List-Id: Gentoo Linux mail 
X-BeenThere: gentoo-commits@lists.gentoo.org
X-Auto-Response-Suppress: DR, RN, NRN, OOF, AutoReply
X-Archives-Salt: f55b1457-3447-4168-a8a1-64b7ed7686a3
X-Archives-Hash: 0fced594b756f71563e12b7d133145f8

commit:     33ae7af26e3ce63605bf43c87d215a1d710d852d
Author:     Mike Pagano gentoo org>
AuthorDate: Thu May 12 11:27:08 2022 +0000
Commit:     Mike Pagano gentoo org>
CommitDate: Thu May 12 11:27:08 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=33ae7af2

Linux patch 5.17.7

Signed-off-by: Mike Pagano gentoo.org>

 0000_README             |    4 +
 1006_linux-5.17.7.patch | 4888 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4892 insertions(+)

diff --git a/0000_README b/0000_README
index 91016f55..cf45e5d3 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch: 1005_linux-5.17.6.patch
 From: http://www.kernel.org
 Desc: Linux 5.17.6
 
+Patch: 1006_linux-5.17.7.patch
+From: http://www.kernel.org
+Desc: Linux 5.17.7
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1006_linux-5.17.7.patch b/1006_linux-5.17.7.patch new file mode 100644 index 00000000..ed7d05cf --- /dev/null +++ b/1006_linux-5.17.7.patch @@ -0,0 +1,4888 @@ +diff --git a/Documentation/devicetree/bindings/pci/apple,pcie.yaml b/Documentation/devicetree/bindings/pci/apple,pcie.yaml +index 7f01e15fc81c2..daf602ac0d0fd 100644 +--- a/Documentation/devicetree/bindings/pci/apple,pcie.yaml ++++ b/Documentation/devicetree/bindings/pci/apple,pcie.yaml +@@ -142,7 +142,6 @@ examples: + device_type = "pci"; + reg = <0x0 0x0 0x0 0x0 0x0>; + reset-gpios = <&pinctrl_ap 152 0>; +- max-link-speed = <2>; + + #address-cells = <3>; + #size-cells = <2>; +@@ -153,7 +152,6 @@ examples: + device_type = "pci"; + reg = <0x800 0x0 0x0 0x0 0x0>; + reset-gpios = <&pinctrl_ap 153 0>; +- max-link-speed = <2>; + + #address-cells = <3>; + #size-cells = <2>; +@@ -164,7 +162,6 @@ examples: + device_type = "pci"; + reg = <0x1000 0x0 0x0 0x0 0x0>; + reset-gpios = <&pinctrl_ap 33 0>; +- max-link-speed = <1>; + + #address-cells = <3>; + #size-cells = <2>; +diff --git a/Makefile b/Makefile +index 7ef8dd5ab6f28..ce65b393a2b49 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 17 +-SUBLEVEL = 6 ++SUBLEVEL = 7 + EXTRAVERSION = + NAME = Superb Owl + +diff --git a/arch/mips/include/asm/timex.h b/arch/mips/include/asm/timex.h +index b05bb70a2e46f..8026baf46e729 100644 +--- a/arch/mips/include/asm/timex.h ++++ b/arch/mips/include/asm/timex.h +@@ -40,9 +40,9 @@ + typedef unsigned int cycles_t; + + /* +- * On R4000/R4400 before version 5.0 an erratum exists such that if the +- * cycle counter is read in the exact moment that it is matching the +- * compare register, no interrupt will be generated. ++ * On R4000/R4400 an erratum exists such that if the cycle counter is ++ * read in the exact moment that it is matching the compare register, ++ * no interrupt will be generated. + * + * There is a suggested workaround and also the erratum can't strike if + * the compare interrupt isn't being used as the clock source device. +@@ -63,7 +63,7 @@ static inline int can_use_mips_counter(unsigned int prid) + if (!__builtin_constant_p(cpu_has_counter)) + asm volatile("" : "=m" (cpu_data[0].options)); + if (likely(cpu_has_counter && +- prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0)))) ++ prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15)))) + return 1; + else + return 0; +diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c +index caa01457dce60..ed339d7979f3f 100644 +--- a/arch/mips/kernel/time.c ++++ b/arch/mips/kernel/time.c +@@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void) + case CPU_R4400MC: + /* + * The published errata for the R4400 up to 3.0 say the CPU +- * has the mfc0 from count bug. ++ * has the mfc0 from count bug. This seems the last version ++ * produced. + */ +- if ((current_cpu_data.processor_id & 0xff) <= 0x30) +- return 1; +- +- /* +- * we assume newer revisions are ok +- */ +- return 0; ++ return 1; + } + + return 0; +diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c +index 1b6129e7d776b..b861bbbc87178 100644 +--- a/arch/parisc/kernel/processor.c ++++ b/arch/parisc/kernel/processor.c +@@ -418,8 +418,7 @@ show_cpuinfo (struct seq_file *m, void *v) + } + seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities); + +- seq_printf(m, "model\t\t: %s\n" +- "model name\t: %s\n", ++ seq_printf(m, "model\t\t: %s - %s\n", + boot_cpu_data.pdc.sys_model_name, + cpuinfo->dev ? 
+ cpuinfo->dev->name : "Unknown"); +diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c +index b91cb45ffd4e3..f005ddedb50e4 100644 +--- a/arch/parisc/kernel/setup.c ++++ b/arch/parisc/kernel/setup.c +@@ -161,6 +161,8 @@ void __init setup_arch(char **cmdline_p) + #ifdef CONFIG_PA11 + dma_ops_init(); + #endif ++ ++ clear_sched_clock_stable(); + } + + /* +diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c +index 061119a56fbe8..d8e59a1000ab7 100644 +--- a/arch/parisc/kernel/time.c ++++ b/arch/parisc/kernel/time.c +@@ -249,13 +249,9 @@ void __init time_init(void) + static int __init init_cr16_clocksource(void) + { + /* +- * The cr16 interval timers are not syncronized across CPUs, even if +- * they share the same socket. ++ * The cr16 interval timers are not synchronized across CPUs. + */ + if (num_online_cpus() > 1 && !running_on_qemu) { +- /* mark sched_clock unstable */ +- clear_sched_clock_stable(); +- + clocksource_cr16.name = "cr16_unstable"; + clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE; + clocksource_cr16.rating = 0; +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c +index 0d588032d6e69..697a9aed4f77f 100644 +--- a/arch/riscv/mm/init.c ++++ b/arch/riscv/mm/init.c +@@ -206,8 +206,25 @@ static void __init setup_bootmem(void) + * early_init_fdt_reserve_self() since __pa() does + * not work for DTB pointers that are fixmap addresses + */ +- if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) +- memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); ++ if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) { ++ /* ++ * In case the DTB is not located in a memory region we won't ++ * be able to locate it later on via the linear mapping and ++ * get a segfault when accessing it via __va(dtb_early_pa). ++ * To avoid this situation copy DTB to a memory region. ++ * Note that memblock_phys_alloc will also reserve DTB region. ++ */ ++ if (!memblock_is_memory(dtb_early_pa)) { ++ size_t fdt_size = fdt_totalsize(dtb_early_va); ++ phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE); ++ void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size); ++ ++ memcpy(new_dtb_early_va, dtb_early_va, fdt_size); ++ early_memunmap(new_dtb_early_va, fdt_size); ++ _dtb_early_pa = new_dtb_early_pa; ++ } else ++ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va)); ++ } + + early_init_fdt_scan_reserved_mem(); + dma_contiguous_reserve(dma32_phys_limit); +diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c +index 8dea01ffc5c18..5290d64723086 100644 +--- a/arch/x86/kernel/fpu/core.c ++++ b/arch/x86/kernel/fpu/core.c +@@ -41,17 +41,7 @@ struct fpu_state_config fpu_user_cfg __ro_after_init; + */ + struct fpstate init_fpstate __ro_after_init; + +-/* +- * Track whether the kernel is using the FPU state +- * currently. +- * +- * This flag is used: +- * +- * - by IRQ context code to potentially use the FPU +- * if it's unused. +- * +- * - to debug kernel_fpu_begin()/end() correctness +- */ ++/* Track in-kernel FPU usage */ + static DEFINE_PER_CPU(bool, in_kernel_fpu); + + /* +@@ -59,42 +49,37 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu); + */ + DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); + +-static bool kernel_fpu_disabled(void) +-{ +- return this_cpu_read(in_kernel_fpu); +-} +- +-static bool interrupted_kernel_fpu_idle(void) +-{ +- return !kernel_fpu_disabled(); +-} +- +-/* +- * Were we in user mode (or vm86 mode) when we were +- * interrupted? 
+- * +- * Doing kernel_fpu_begin/end() is ok if we are running +- * in an interrupt context from user mode - we'll just +- * save the FPU state as required. +- */ +-static bool interrupted_user_mode(void) +-{ +- struct pt_regs *regs = get_irq_regs(); +- return regs && user_mode(regs); +-} +- + /* + * Can we use the FPU in kernel mode with the + * whole "kernel_fpu_begin/end()" sequence? +- * +- * It's always ok in process context (ie "not interrupt") +- * but it is sometimes ok even from an irq. + */ + bool irq_fpu_usable(void) + { +- return !in_interrupt() || +- interrupted_user_mode() || +- interrupted_kernel_fpu_idle(); ++ if (WARN_ON_ONCE(in_nmi())) ++ return false; ++ ++ /* In kernel FPU usage already active? */ ++ if (this_cpu_read(in_kernel_fpu)) ++ return false; ++ ++ /* ++ * When not in NMI or hard interrupt context, FPU can be used in: ++ * ++ * - Task context except from within fpregs_lock()'ed critical ++ * regions. ++ * ++ * - Soft interrupt processing context which cannot happen ++ * while in a fpregs_lock()'ed critical region. ++ */ ++ if (!in_hardirq()) ++ return true; ++ ++ /* ++ * In hard interrupt context it's safe when soft interrupts ++ * are enabled, which means the interrupt did not hit in ++ * a fpregs_lock()'ed critical region. ++ */ ++ return !softirq_count(); + } + EXPORT_SYMBOL(irq_fpu_usable); + +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c +index ed8a13ac4ab23..4c2a158bb6c4f 100644 +--- a/arch/x86/kernel/kvm.c ++++ b/arch/x86/kernel/kvm.c +@@ -69,6 +69,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align + DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible; + static int has_steal_clock = 0; + ++static int has_guest_poll = 0; + /* + * No need for any "IO delay" on KVM + */ +@@ -706,14 +707,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu) + + static int kvm_suspend(void) + { ++ u64 val = 0; ++ + kvm_guest_cpu_offline(false); + ++#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL ++ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) ++ rdmsrl(MSR_KVM_POLL_CONTROL, val); ++ has_guest_poll = !(val & 1); ++#endif + return 0; + } + + static void kvm_resume(void) + { + kvm_cpu_online(raw_smp_processor_id()); ++ ++#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL ++ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll) ++ wrmsrl(MSR_KVM_POLL_CONTROL, 0); ++#endif + } + + static struct syscore_ops kvm_syscore_ops = { +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index b8f8d268d0585..ee15db75fd624 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -865,6 +865,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) + union cpuid10_eax eax; + union cpuid10_edx edx; + ++ if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) { ++ entry->eax = entry->ebx = entry->ecx = entry->edx = 0; ++ break; ++ } ++ + perf_get_x86_pmu_capability(&cap); + + /* +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 2a10d0033c964..970d5c740b00b 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -113,7 +113,8 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic) + + static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu) + { +- return pi_inject_timer && kvm_vcpu_apicv_active(vcpu); ++ return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) && ++ (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm)); + } + + bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu) +@@ -2125,10 +2126,9 @@ int kvm_lapic_reg_write(struct kvm_lapic 
*apic, u32 reg, u32 val) + break; + + case APIC_SELF_IPI: +- if (apic_x2apic_mode(apic)) { +- kvm_lapic_reg_write(apic, APIC_ICR, +- APIC_DEST_SELF | (val & APIC_VECTOR_MASK)); +- } else ++ if (apic_x2apic_mode(apic)) ++ kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0); ++ else + ret = 1; + break; + default: +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c +index 7f009ebb319ab..e7cd16e1e0a0b 100644 +--- a/arch/x86/kvm/mmu/mmu.c ++++ b/arch/x86/kvm/mmu/mmu.c +@@ -3239,6 +3239,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, + return; + + sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK); ++ if (WARN_ON(!sp)) ++ return; + + if (is_tdp_mmu_page(sp)) + kvm_tdp_mmu_put_root(kvm, sp, false); +diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c +index b5b0837df0d11..50108634835f4 100644 +--- a/arch/x86/kvm/svm/pmu.c ++++ b/arch/x86/kvm/svm/pmu.c +@@ -45,6 +45,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = { + [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, + }; + ++/* duplicated from amd_f17h_perfmon_event_map. */ ++static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = { ++ [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES }, ++ [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS }, ++ [2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES }, ++ [3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES }, ++ [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, ++ [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES }, ++ [6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, ++ [7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, ++}; ++ ++/* amd_pmc_perf_hw_id depends on these being the same size */ ++static_assert(ARRAY_SIZE(amd_event_mapping) == ++ ARRAY_SIZE(amd_f17h_event_mapping)); ++ + static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type) + { + struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); +@@ -140,6 +156,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, + + static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc) + { ++ struct kvm_event_hw_type_mapping *event_mapping; + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; + int i; +@@ -148,15 +165,20 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc) + if (WARN_ON(pmc_is_fixed(pmc))) + return PERF_COUNT_HW_MAX; + ++ if (guest_cpuid_family(pmc->vcpu) >= 0x17) ++ event_mapping = amd_f17h_event_mapping; ++ else ++ event_mapping = amd_event_mapping; ++ + for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++) +- if (amd_event_mapping[i].eventsel == event_select +- && amd_event_mapping[i].unit_mask == unit_mask) ++ if (event_mapping[i].eventsel == event_select ++ && event_mapping[i].unit_mask == unit_mask) + break; + + if (i == ARRAY_SIZE(amd_event_mapping)) + return PERF_COUNT_HW_MAX; + +- return amd_event_mapping[i].event_type; ++ return event_mapping[i].event_type; + } + + /* check if a PMC is enabled by comparing it against global_ctrl bits. Because +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c +index e5cecd4ad2d44..76e6411d4dde1 100644 +--- a/arch/x86/kvm/svm/sev.c ++++ b/arch/x86/kvm/svm/sev.c +@@ -1590,24 +1590,51 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm) + atomic_set_release(&src_sev->migration_in_progress, 0); + } + ++/* vCPU mutex subclasses. 
*/ ++enum sev_migration_role { ++ SEV_MIGRATION_SOURCE = 0, ++ SEV_MIGRATION_TARGET, ++ SEV_NR_MIGRATION_ROLES, ++}; + +-static int sev_lock_vcpus_for_migration(struct kvm *kvm) ++static int sev_lock_vcpus_for_migration(struct kvm *kvm, ++ enum sev_migration_role role) + { + struct kvm_vcpu *vcpu; + unsigned long i, j; ++ bool first = true; + + kvm_for_each_vcpu(i, vcpu, kvm) { +- if (mutex_lock_killable(&vcpu->mutex)) ++ if (mutex_lock_killable_nested(&vcpu->mutex, role)) + goto out_unlock; ++ ++ if (first) { ++ /* ++ * Reset the role to one that avoids colliding with ++ * the role used for the first vcpu mutex. ++ */ ++ role = SEV_NR_MIGRATION_ROLES; ++ first = false; ++ } else { ++ mutex_release(&vcpu->mutex.dep_map, _THIS_IP_); ++ } + } + + return 0; + + out_unlock: ++ ++ first = true; + kvm_for_each_vcpu(j, vcpu, kvm) { + if (i == j) + break; + ++ if (first) ++ first = false; ++ else ++ mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_); ++ ++ + mutex_unlock(&vcpu->mutex); + } + return -EINTR; +@@ -1617,8 +1644,15 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm) + { + struct kvm_vcpu *vcpu; + unsigned long i; ++ bool first = true; + + kvm_for_each_vcpu(i, vcpu, kvm) { ++ if (first) ++ first = false; ++ else ++ mutex_acquire(&vcpu->mutex.dep_map, ++ SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_); ++ + mutex_unlock(&vcpu->mutex); + } + } +@@ -1726,10 +1760,10 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd) + charged = true; + } + +- ret = sev_lock_vcpus_for_migration(kvm); ++ ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE); + if (ret) + goto out_dst_cgroup; +- ret = sev_lock_vcpus_for_migration(source_kvm); ++ ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET); + if (ret) + goto out_dst_vcpu; + +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index ef63cfd57029a..267d6dc4b8186 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -5473,7 +5473,7 @@ static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu) + struct vcpu_vmx *vmx = to_vmx(vcpu); + + return vmx->emulation_required && !vmx->rmode.vm86_active && +- vcpu->arch.exception.pending; ++ (vcpu->arch.exception.pending || vcpu->arch.exception.injected); + } + + static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index c59265146e9c8..f1827257ef0e0 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -3677,8 +3677,11 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf) + void ipmi_unregister_smi(struct ipmi_smi *intf) + { + struct ipmi_smi_watcher *w; +- int intf_num = intf->intf_num, index; ++ int intf_num, index; + ++ if (!intf) ++ return; ++ intf_num = intf->intf_num; + mutex_lock(&ipmi_interfaces_mutex); + intf->intf_num = -1; + intf->in_shutdown = true; +@@ -4518,6 +4521,8 @@ return_unspecified: + } else + /* The message was sent, start the timer. 
*/ + intf_start_seq_timer(intf, msg->msgid); ++ requeue = 0; ++ goto out; + } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) + || (msg->rsp[1] != msg->data[1])) { + /* +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c +index 64dedb3ef8ec4..5604a810fb3d2 100644 +--- a/drivers/char/ipmi/ipmi_si_intf.c ++++ b/drivers/char/ipmi/ipmi_si_intf.c +@@ -2220,10 +2220,7 @@ static void cleanup_one_si(struct smi_info *smi_info) + return; + + list_del(&smi_info->link); +- +- if (smi_info->intf) +- ipmi_unregister_smi(smi_info->intf); +- ++ ipmi_unregister_smi(smi_info->intf); + kfree(smi_info); + } + +diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c +index 54be88167c60b..f3b3953cac834 100644 +--- a/drivers/firewire/core-card.c ++++ b/drivers/firewire/core-card.c +@@ -668,6 +668,7 @@ EXPORT_SYMBOL_GPL(fw_card_release); + void fw_core_remove_card(struct fw_card *card) + { + struct fw_card_driver dummy_driver = dummy_driver_template; ++ unsigned long flags; + + card->driver->update_phy_reg(card, 4, + PHY_LINK_ACTIVE | PHY_CONTENDER, 0); +@@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card) + dummy_driver.stop_iso = card->driver->stop_iso; + card->driver = &dummy_driver; + ++ spin_lock_irqsave(&card->lock, flags); + fw_destroy_nodes(card); ++ spin_unlock_irqrestore(&card->lock, flags); + + /* Wait for all users, especially device workqueue jobs, to finish. */ + fw_card_put(card); +diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c +index 9f89c17730b12..708e417200f46 100644 +--- a/drivers/firewire/core-cdev.c ++++ b/drivers/firewire/core-cdev.c +@@ -1500,6 +1500,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, + { + struct outbound_phy_packet_event *e = + container_of(packet, struct outbound_phy_packet_event, p); ++ struct client *e_client; + + switch (status) { + /* expected: */ +@@ -1516,9 +1517,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, + } + e->phy_packet.data[0] = packet->timestamp; + ++ e_client = e->client; + queue_event(e->client, &e->event, &e->phy_packet, + sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0); +- client_put(e->client); ++ client_put(e_client); + } + + static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) +diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c +index b63d55f5ebd33..f40c815343812 100644 +--- a/drivers/firewire/core-topology.c ++++ b/drivers/firewire/core-topology.c +@@ -375,16 +375,13 @@ static void report_found_node(struct fw_card *card, + card->bm_retries = 0; + } + ++/* Must be called with card->lock held */ + void fw_destroy_nodes(struct fw_card *card) + { +- unsigned long flags; +- +- spin_lock_irqsave(&card->lock, flags); + card->color++; + if (card->local_node != NULL) + for_each_fw_node(card, card->local_node, report_lost_node); + card->local_node = NULL; +- spin_unlock_irqrestore(&card->lock, flags); + } + + static void move_tree(struct fw_node *node0, struct fw_node *node1, int port) +@@ -510,6 +507,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, + struct fw_node *local_node; + unsigned long flags; + ++ spin_lock_irqsave(&card->lock, flags); ++ + /* + * If the selfID buffer is not the immediate successor of the + * previously processed one, we cannot reliably compare the +@@ -521,8 +520,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, + card->bm_retries = 0; + } + +- 
spin_lock_irqsave(&card->lock, flags); +- + card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated; + card->node_id = node_id; + /* +diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c +index ac487c96bb717..6c20815cc8d16 100644 +--- a/drivers/firewire/core-transaction.c ++++ b/drivers/firewire/core-transaction.c +@@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t) + static int close_transaction(struct fw_transaction *transaction, + struct fw_card *card, int rcode) + { +- struct fw_transaction *t; ++ struct fw_transaction *t = NULL, *iter; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); +- list_for_each_entry(t, &card->transaction_list, link) { +- if (t == transaction) { +- if (!try_cancel_split_timeout(t)) { ++ list_for_each_entry(iter, &card->transaction_list, link) { ++ if (iter == transaction) { ++ if (!try_cancel_split_timeout(iter)) { + spin_unlock_irqrestore(&card->lock, flags); + goto timed_out; + } +- list_del_init(&t->link); +- card->tlabel_mask &= ~(1ULL << t->tlabel); ++ list_del_init(&iter->link); ++ card->tlabel_mask &= ~(1ULL << iter->tlabel); ++ t = iter; + break; + } + } + spin_unlock_irqrestore(&card->lock, flags); + +- if (&t->link != &card->transaction_list) { ++ if (t) { + t->callback(card, rcode, NULL, 0, t->callback_data); + return 0; + } +@@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request); + + void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) + { +- struct fw_transaction *t; ++ struct fw_transaction *t = NULL, *iter; + unsigned long flags; + u32 *data; + size_t data_length; +@@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) + rcode = HEADER_GET_RCODE(p->header[1]); + + spin_lock_irqsave(&card->lock, flags); +- list_for_each_entry(t, &card->transaction_list, link) { +- if (t->node_id == source && t->tlabel == tlabel) { +- if (!try_cancel_split_timeout(t)) { ++ list_for_each_entry(iter, &card->transaction_list, link) { ++ if (iter->node_id == source && iter->tlabel == tlabel) { ++ if (!try_cancel_split_timeout(iter)) { + spin_unlock_irqrestore(&card->lock, flags); + goto timed_out; + } +- list_del_init(&t->link); +- card->tlabel_mask &= ~(1ULL << t->tlabel); ++ list_del_init(&iter->link); ++ card->tlabel_mask &= ~(1ULL << iter->tlabel); ++ t = iter; + break; + } + } + spin_unlock_irqrestore(&card->lock, flags); + +- if (&t->link == &card->transaction_list) { ++ if (!t) { + timed_out: + fw_notice(card, "unsolicited response (source %x, tlabel %x)\n", + source, tlabel); +diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c +index 85cd379fd3838..60051c0cabeaa 100644 +--- a/drivers/firewire/sbp2.c ++++ b/drivers/firewire/sbp2.c +@@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, + void *payload, size_t length, void *callback_data) + { + struct sbp2_logical_unit *lu = callback_data; +- struct sbp2_orb *orb; ++ struct sbp2_orb *orb = NULL, *iter; + struct sbp2_status status; + unsigned long flags; + +@@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request, + + /* Lookup the orb corresponding to this status write. 
*/ + spin_lock_irqsave(&lu->tgt->lock, flags); +- list_for_each_entry(orb, &lu->orb_list, link) { ++ list_for_each_entry(iter, &lu->orb_list, link) { + if (STATUS_GET_ORB_HIGH(status) == 0 && +- STATUS_GET_ORB_LOW(status) == orb->request_bus) { +- orb->rcode = RCODE_COMPLETE; +- list_del(&orb->link); ++ STATUS_GET_ORB_LOW(status) == iter->request_bus) { ++ iter->rcode = RCODE_COMPLETE; ++ list_del(&iter->link); ++ orb = iter; + break; + } + } + spin_unlock_irqrestore(&lu->tgt->lock, flags); + +- if (&orb->link != &lu->orb_list) { ++ if (orb) { + orb->callback(orb, &status); + kref_put(&orb->kref, free_orb); /* orb callback reference */ + } else { +diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c +index 4c1f9e1091b7f..a2c8dd329b31b 100644 +--- a/drivers/gpio/gpio-mvebu.c ++++ b/drivers/gpio/gpio-mvebu.c +@@ -871,13 +871,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev, + mvpwm->chip.dev = dev; + mvpwm->chip.ops = &mvebu_pwm_ops; + mvpwm->chip.npwm = mvchip->chip.ngpio; +- /* +- * There may already be some PWM allocated, so we can't force +- * mvpwm->chip.base to a fixed point like mvchip->chip.base. +- * So, we let pwmchip_add() do the numbering and take the next free +- * region. +- */ +- mvpwm->chip.base = -1; + + spin_lock_init(&mvpwm->lock); + +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c +index d2fe76f3f34fd..8726921a11294 100644 +--- a/drivers/gpio/gpio-pca953x.c ++++ b/drivers/gpio/gpio-pca953x.c +@@ -762,11 +762,11 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin + bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio); + bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio); + ++ bitmap_copy(chip->irq_stat, new_stat, gc->ngpio); ++ + if (bitmap_empty(trigger, gc->ngpio)) + return false; + +- bitmap_copy(chip->irq_stat, new_stat, gc->ngpio); +- + bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio); + bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio); + bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio); +diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c +index 47455810bdb91..e6534ea1eaa7a 100644 +--- a/drivers/gpio/gpio-visconti.c ++++ b/drivers/gpio/gpio-visconti.c +@@ -130,7 +130,6 @@ static int visconti_gpio_probe(struct platform_device *pdev) + struct gpio_irq_chip *girq; + struct irq_domain *parent; + struct device_node *irq_parent; +- struct fwnode_handle *fwnode; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); +@@ -150,14 +149,12 @@ static int visconti_gpio_probe(struct platform_device *pdev) + } + + parent = irq_find_host(irq_parent); ++ of_node_put(irq_parent); + if (!parent) { + dev_err(dev, "No IRQ parent domain\n"); + return -ENODEV; + } + +- fwnode = of_node_to_fwnode(irq_parent); +- of_node_put(irq_parent); +- + ret = bgpio_init(&priv->gpio_chip, dev, 4, + priv->base + GPIO_IDATA, + priv->base + GPIO_OSET, +@@ -180,7 +177,7 @@ static int visconti_gpio_probe(struct platform_device *pdev) + + girq = &priv->gpio_chip.irq; + girq->chip = irq_chip; +- girq->fwnode = fwnode; ++ girq->fwnode = of_node_to_fwnode(dev->of_node); + girq->parent_domain = parent; + girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq; + girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec; +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c +index 91dcf2c6cdd84..775a7dadf9a39 100644 +--- a/drivers/gpio/gpiolib-of.c ++++ b/drivers/gpio/gpiolib-of.c +@@ -912,7 +912,7 @@ static void 
of_gpiochip_init_valid_mask(struct gpio_chip *chip) + i, &start); + of_property_read_u32_index(np, "gpio-reserved-ranges", + i + 1, &count); +- if (start >= chip->ngpio || start + count >= chip->ngpio) ++ if (start >= chip->ngpio || start + count > chip->ngpio) + continue; + + bitmap_clear(chip->valid_mask, start, count); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +index 07bc0f5047130..5d065bf7c5146 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +@@ -24,6 +24,7 @@ + #include + + #include ++#include + + #include "amdgpu.h" + #include "amdgpu_ras.h" +@@ -708,7 +709,8 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) + adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; + + if (!reg) { +- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */ ++ /* passthrough mode exclus sriov mod */ ++ if (is_virtual_machine() && !xen_initial_domain()) + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; + } + +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +index 49d5271dcfdc8..bbe94e8729831 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +@@ -4634,7 +4634,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video + &dpcd_pattern_type.value, + sizeof(dpcd_pattern_type)); + +- channel_count = dpcd_test_mode.bits.channel_count + 1; ++ channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT); + + // read pattern periods for requested channels when sawTooth pattern is requested + if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || +diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c +index af9c09c308601..1d7f82e6eafea 100644 +--- a/drivers/gpu/drm/msm/dp/dp_display.c ++++ b/drivers/gpu/drm/msm/dp/dp_display.c +@@ -551,12 +551,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data) + + mutex_unlock(&dp->event_mutex); + +- /* +- * add fail safe mode outside event_mutex scope +- * to avoid potiential circular lock with drm thread +- */ +- dp_panel_add_fail_safe_mode(dp->dp_display.connector); +- + /* uevent will complete connection part */ + return 0; + }; +diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c +index 26c3653c99ec9..26f4b6959c31d 100644 +--- a/drivers/gpu/drm/msm/dp/dp_panel.c ++++ b/drivers/gpu/drm/msm/dp/dp_panel.c +@@ -151,15 +151,6 @@ static int dp_panel_update_modes(struct drm_connector *connector, + return rc; + } + +-void dp_panel_add_fail_safe_mode(struct drm_connector *connector) +-{ +- /* fail safe edid */ +- mutex_lock(&connector->dev->mode_config.mutex); +- if (drm_add_modes_noedid(connector, 640, 480)) +- drm_set_preferred_mode(connector, 640, 480); +- mutex_unlock(&connector->dev->mode_config.mutex); +-} +- + int dp_panel_read_sink_caps(struct dp_panel *dp_panel, + struct drm_connector *connector) + { +@@ -215,8 +206,6 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel, + rc = -ETIMEDOUT; + goto end; + } +- +- dp_panel_add_fail_safe_mode(connector); + } + + if (panel->aux_cfg_update_done) { +diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h +index 99739ea679a77..9023e5bb4b8b2 100644 +--- a/drivers/gpu/drm/msm/dp/dp_panel.h ++++ b/drivers/gpu/drm/msm/dp/dp_panel.h +@@ -59,7 +59,6 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel); + int 
dp_panel_deinit(struct dp_panel *dp_panel); + int dp_panel_timing_cfg(struct dp_panel *dp_panel); + void dp_panel_dump_regs(struct dp_panel *dp_panel); +-void dp_panel_add_fail_safe_mode(struct drm_connector *connector); + int dp_panel_read_sink_caps(struct dp_panel *dp_panel, + struct drm_connector *connector); + u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp, +diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c +index fb6d14d213a18..c67cd037a93fd 100644 +--- a/drivers/hwmon/adt7470.c ++++ b/drivers/hwmon/adt7470.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -294,11 +295,10 @@ static int adt7470_update_thread(void *p) + adt7470_read_temperatures(data); + mutex_unlock(&data->lock); + +- set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) + break; + +- schedule_timeout(msecs_to_jiffies(data->auto_update_interval)); ++ schedule_timeout_interruptible(msecs_to_jiffies(data->auto_update_interval)); + } + + return 0; +diff --git a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c +index 40dffd9c4cbfc..f546f0c12497b 100644 +--- a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c ++++ b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c +@@ -14,6 +14,21 @@ + + #define AHE50DC_PMBUS_READ_TEMP4 0xd0 + ++static int ahe50dc_fan_write_byte(struct i2c_client *client, int page, u8 value) ++{ ++ /* ++ * The CLEAR_FAULTS operation seems to sometimes (unpredictably, perhaps ++ * 5% of the time or so) trigger a problematic phenomenon in which the ++ * fan speeds surge momentarily and at least some (perhaps all?) of the ++ * system's power outputs experience a glitch. ++ * ++ * However, according to Delta it should be OK to simply not send any ++ * CLEAR_FAULTS commands (the device doesn't seem to be capable of ++ * reporting any faults anyway), so just blackhole them unconditionally. ++ */ ++ return value == PMBUS_CLEAR_FAULTS ? 
-EOPNOTSUPP : -ENODATA; ++} ++ + static int ahe50dc_fan_read_word_data(struct i2c_client *client, int page, int phase, int reg) + { + /* temp1 in (virtual) page 1 is remapped to mfr-specific temp4 */ +@@ -68,6 +83,7 @@ static struct pmbus_driver_info ahe50dc_fan_info = { + PMBUS_HAVE_VIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_FAN34 | + PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_STATUS_FAN34 | PMBUS_PAGE_VIRTUAL, + .func[1] = PMBUS_HAVE_TEMP | PMBUS_PAGE_VIRTUAL, ++ .write_byte = ahe50dc_fan_write_byte, + .read_word_data = ahe50dc_fan_read_word_data, + }; + +diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c +index ca0bfaf2f6911..5f8f824d997f8 100644 +--- a/drivers/hwmon/pmbus/pmbus_core.c ++++ b/drivers/hwmon/pmbus/pmbus_core.c +@@ -2326,6 +2326,9 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, + data->has_status_word = true; + } + ++ /* Make sure PEC is disabled, will be enabled later if needed */ ++ client->flags &= ~I2C_CLIENT_PEC; ++ + /* Enable PEC if the controller and bus supports it */ + if (!(data->flags & PMBUS_NO_CAPABILITY)) { + ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY); +diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c +index 6dea0a49d1718..082a3ddb0fa3b 100644 +--- a/drivers/infiniband/hw/irdma/cm.c ++++ b/drivers/infiniband/hw/irdma/cm.c +@@ -2305,10 +2305,8 @@ err: + return NULL; + } + +-static void irdma_cm_node_free_cb(struct rcu_head *rcu_head) ++static void irdma_destroy_connection(struct irdma_cm_node *cm_node) + { +- struct irdma_cm_node *cm_node = +- container_of(rcu_head, struct irdma_cm_node, rcu_head); + struct irdma_cm_core *cm_core = cm_node->cm_core; + struct irdma_qp *iwqp; + struct irdma_cm_info nfo; +@@ -2356,7 +2354,6 @@ static void irdma_cm_node_free_cb(struct rcu_head *rcu_head) + } + + cm_core->cm_free_ah(cm_node); +- kfree(cm_node); + } + + /** +@@ -2384,8 +2381,9 @@ void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node) + + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + +- /* wait for all list walkers to exit their grace period */ +- call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb); ++ irdma_destroy_connection(cm_node); ++ ++ kfree_rcu(cm_node, rcu_head); + } + + /** +@@ -3465,12 +3463,6 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp) + } + + cm_id = iwqp->cm_id; +- /* make sure we havent already closed this connection */ +- if (!cm_id) { +- spin_unlock_irqrestore(&iwqp->lock, flags); +- return; +- } +- + original_hw_tcp_state = iwqp->hw_tcp_state; + original_ibqp_state = iwqp->ibqp_state; + last_ae = iwqp->last_aeq; +@@ -3492,11 +3484,11 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp) + disconn_status = -ECONNRESET; + } + +- if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED || +- original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT || +- last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE || +- last_ae == IRDMA_AE_BAD_CLOSE || +- last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) { ++ if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED || ++ original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT || ++ last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE || ++ last_ae == IRDMA_AE_BAD_CLOSE || ++ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) { + issue_close = 1; + iwqp->cm_id = NULL; + qp->term_flags = 0; +diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c +index e81b74a518dd0..7f72a006367fe 100644 +--- a/drivers/infiniband/hw/irdma/utils.c ++++ 
b/drivers/infiniband/hw/irdma/utils.c +@@ -258,18 +258,16 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event, + u32 local_ipaddr[4] = {}; + bool ipv4 = true; + +- real_dev = rdma_vlan_dev_real_dev(netdev); +- if (!real_dev) +- real_dev = netdev; +- +- ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); +- if (!ibdev) +- return NOTIFY_DONE; +- +- iwdev = to_iwdev(ibdev); +- + switch (event) { + case NETEVENT_NEIGH_UPDATE: ++ real_dev = rdma_vlan_dev_real_dev(netdev); ++ if (!real_dev) ++ real_dev = netdev; ++ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); ++ if (!ibdev) ++ return NOTIFY_DONE; ++ ++ iwdev = to_iwdev(ibdev); + p = (__be32 *)neigh->primary_key; + if (neigh->tbl->family == AF_INET6) { + ipv4 = false; +@@ -290,13 +288,12 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event, + irdma_manage_arp_cache(iwdev->rf, neigh->ha, + local_ipaddr, ipv4, + IRDMA_ARP_DELETE); ++ ib_device_put(ibdev); + break; + default: + break; + } + +- ib_device_put(ibdev); +- + return NOTIFY_DONE; + } + +diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c +index 1bf6404ec8340..c2aa713e0f4d7 100644 +--- a/drivers/infiniband/hw/irdma/verbs.c ++++ b/drivers/infiniband/hw/irdma/verbs.c +@@ -1620,13 +1620,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, + + if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) { + if (dont_wait) { +- if (iwqp->cm_id && iwqp->hw_tcp_state) { ++ if (iwqp->hw_tcp_state) { + spin_lock_irqsave(&iwqp->lock, flags); + iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED; + iwqp->last_aeq = IRDMA_AE_RESET_SENT; + spin_unlock_irqrestore(&iwqp->lock, flags); +- irdma_cm_disconn(iwqp); + } ++ irdma_cm_disconn(iwqp); + } else { + int close_timer_started; + +diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c +index 7acdd3c3a599d..17f34d584cd9e 100644 +--- a/drivers/infiniband/sw/siw/siw_cm.c ++++ b/drivers/infiniband/sw/siw/siw_cm.c +@@ -968,14 +968,15 @@ static void siw_accept_newconn(struct siw_cep *cep) + + siw_cep_set_inuse(new_cep); + rv = siw_proc_mpareq(new_cep); +- siw_cep_set_free(new_cep); +- + if (rv != -EAGAIN) { + siw_cep_put(cep); + new_cep->listen_cep = NULL; +- if (rv) ++ if (rv) { ++ siw_cep_set_free(new_cep); + goto error; ++ } + } ++ siw_cep_set_free(new_cep); + } + return; + +diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c +index 565ef55988112..68821f86b063c 100644 +--- a/drivers/iommu/apple-dart.c ++++ b/drivers/iommu/apple-dart.c +@@ -782,6 +782,7 @@ static const struct iommu_ops apple_dart_iommu_ops = { + .get_resv_regions = apple_dart_get_resv_regions, + .put_resv_regions = generic_iommu_put_resv_regions, + .pgsize_bitmap = -1UL, /* Restricted during dart probe */ ++ .owner = THIS_MODULE, + }; + + static irqreturn_t apple_dart_irq(int irq, void *dev) +@@ -857,16 +858,15 @@ static int apple_dart_probe(struct platform_device *pdev) + dart->dev = dev; + spin_lock_init(&dart->lock); + +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); ++ if (IS_ERR(dart->regs)) ++ return PTR_ERR(dart->regs); ++ + if (resource_size(res) < 0x4000) { + dev_err(dev, "MMIO region too small (%pr)\n", res); + return -EINVAL; + } + +- dart->regs = devm_ioremap_resource(dev, res); +- if (IS_ERR(dart->regs)) +- return PTR_ERR(dart->regs); +- + dart->irq = platform_get_irq(pdev, 0); + if (dart->irq < 0) + return -ENODEV; +diff --git 
a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +index a737ba5f727e6..f9e9b4fb78bd5 100644 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +@@ -183,7 +183,14 @@ static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn, + { + struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn); + struct arm_smmu_domain *smmu_domain = smmu_mn->domain; +- size_t size = end - start + 1; ++ size_t size; ++ ++ /* ++ * The mm_types defines vm_end as the first byte after the end address, ++ * different from IOMMU subsystem using the last address of an address ++ * range. So do a simple translation here by calculating size correctly. ++ */ ++ size = end - start; + + if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) + arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid, +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c +index 5b196cfe9ed23..ab22733003464 100644 +--- a/drivers/iommu/intel/iommu.c ++++ b/drivers/iommu/intel/iommu.c +@@ -1717,7 +1717,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, + unsigned long pfn, unsigned int pages, + int ih, int map) + { +- unsigned int mask = ilog2(__roundup_pow_of_two(pages)); ++ unsigned int aligned_pages = __roundup_pow_of_two(pages); ++ unsigned int mask = ilog2(aligned_pages); + uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; + u16 did = domain->iommu_did[iommu->seq_id]; + +@@ -1729,10 +1730,30 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, + if (domain_use_first_level(domain)) { + domain_flush_piotlb(iommu, domain, addr, pages, ih); + } else { ++ unsigned long bitmask = aligned_pages - 1; ++ ++ /* ++ * PSI masks the low order bits of the base address. If the ++ * address isn't aligned to the mask, then compute a mask value ++ * needed to ensure the target range is flushed. ++ */ ++ if (unlikely(bitmask & pfn)) { ++ unsigned long end_pfn = pfn + pages - 1, shared_bits; ++ ++ /* ++ * Since end_pfn <= pfn + bitmask, the only way bits ++ * higher than bitmask can differ in pfn and end_pfn is ++ * by carrying. This means after masking out bitmask, ++ * high bits starting with the first set bit in ++ * shared_bits are all equal in both pfn and end_pfn. ++ */ ++ shared_bits = ~(pfn ^ end_pfn) & ~bitmask; ++ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG; ++ } ++ + /* + * Fallback to domain selective flush if no PSI support or +- * the size is too big. PSI requires page size to be 2 ^ x, +- * and the base address is naturally aligned to the size. ++ * the size is too big. + */ + if (!cap_pgsel_inv(iommu->cap) || + mask > cap_max_amask_val(iommu->cap)) +diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c +index 5b5d69b04fcc8..06e51f7241877 100644 +--- a/drivers/iommu/intel/svm.c ++++ b/drivers/iommu/intel/svm.c +@@ -956,6 +956,10 @@ bad_req: + goto bad_req; + } + ++ /* Drop Stop Marker message. No need for a response. */ ++ if (unlikely(req->lpig && !req->rd_req && !req->wr_req)) ++ goto prq_advance; ++ + if (!svm || svm->pasid != req->pasid) { + /* + * It can't go away, because the driver is not permitted +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c +index 43d1b9b2fa499..64f47ec9266a9 100644 +--- a/drivers/mmc/core/mmc.c ++++ b/drivers/mmc/core/mmc.c +@@ -1389,13 +1389,17 @@ static int mmc_select_hs400es(struct mmc_card *card) + goto out_err; + } + ++ /* ++ * Bump to HS timing and frequency. 
Some cards don't handle ++ * SEND_STATUS reliably at the initial frequency. ++ */ + mmc_set_timing(host, MMC_TIMING_MMC_HS); ++ mmc_set_bus_speed(card); ++ + err = mmc_switch_status(card, true); + if (err) + goto out_err; + +- mmc_set_clock(host, card->ext_csd.hs_max_dtr); +- + /* Switch card to DDR with strobe bit */ + val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE; + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +@@ -1453,7 +1457,7 @@ out_err: + static int mmc_select_hs200(struct mmc_card *card) + { + struct mmc_host *host = card->host; +- unsigned int old_timing, old_signal_voltage; ++ unsigned int old_timing, old_signal_voltage, old_clock; + int err = -EINVAL; + u8 val; + +@@ -1484,8 +1488,17 @@ static int mmc_select_hs200(struct mmc_card *card) + false, true, MMC_CMD_RETRIES); + if (err) + goto err; ++ ++ /* ++ * Bump to HS timing and frequency. Some cards don't handle ++ * SEND_STATUS reliably at the initial frequency. ++ * NB: We can't move to full (HS200) speeds until after we've ++ * successfully switched over. ++ */ + old_timing = host->ios.timing; ++ old_clock = host->ios.clock; + mmc_set_timing(host, MMC_TIMING_MMC_HS200); ++ mmc_set_clock(card->host, card->ext_csd.hs_max_dtr); + + /* + * For HS200, CRC errors are not a reliable way to know the +@@ -1498,8 +1511,10 @@ static int mmc_select_hs200(struct mmc_card *card) + * mmc_select_timing() assumes timing has not changed if + * it is a switch error. + */ +- if (err == -EBADMSG) ++ if (err == -EBADMSG) { ++ mmc_set_clock(host, old_clock); + mmc_set_timing(host, old_timing); ++ } + } + err: + if (err) { +diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c +index f7c384db89bf3..e1580f78c6b2d 100644 +--- a/drivers/mmc/host/rtsx_pci_sdmmc.c ++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c +@@ -38,10 +38,7 @@ struct realtek_pci_sdmmc { + bool double_clk; + bool eject; + bool initial_mode; +- int power_state; +-#define SDMMC_POWER_ON 1 +-#define SDMMC_POWER_OFF 0 +- ++ int prev_power_state; + int sg_count; + s32 cookie; + int cookie_sg_count; +@@ -905,7 +902,7 @@ static int sd_set_bus_width(struct realtek_pci_sdmmc *host, + return err; + } + +-static int sd_power_on(struct realtek_pci_sdmmc *host) ++static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode) + { + struct rtsx_pcr *pcr = host->pcr; + struct mmc_host *mmc = host->mmc; +@@ -913,9 +910,14 @@ static int sd_power_on(struct realtek_pci_sdmmc *host) + u32 val; + u8 test_mode; + +- if (host->power_state == SDMMC_POWER_ON) ++ if (host->prev_power_state == MMC_POWER_ON) + return 0; + ++ if (host->prev_power_state == MMC_POWER_UP) { ++ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, 0); ++ goto finish; ++ } ++ + msleep(100); + + rtsx_pci_init_cmd(pcr); +@@ -936,10 +938,15 @@ static int sd_power_on(struct realtek_pci_sdmmc *host) + if (err < 0) + return err; + ++ mdelay(1); ++ + err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); + if (err < 0) + return err; + ++ /* send at least 74 clocks */ ++ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN); ++ + if (PCI_PID(pcr) == PID_5261) { + /* + * If test mode is set switch to SD Express mandatorily, +@@ -964,7 +971,8 @@ static int sd_power_on(struct realtek_pci_sdmmc *host) + } + } + +- host->power_state = SDMMC_POWER_ON; ++finish: ++ host->prev_power_state = power_mode; + return 0; + } + +@@ -973,7 +981,7 @@ static int sd_power_off(struct realtek_pci_sdmmc *host) + struct rtsx_pcr *pcr = host->pcr; + int err; + +- 
host->power_state = SDMMC_POWER_OFF; ++ host->prev_power_state = MMC_POWER_OFF; + + rtsx_pci_init_cmd(pcr); + +@@ -999,7 +1007,7 @@ static int sd_set_power_mode(struct realtek_pci_sdmmc *host, + if (power_mode == MMC_POWER_OFF) + err = sd_power_off(host); + else +- err = sd_power_on(host); ++ err = sd_power_on(host, power_mode); + + return err; + } +@@ -1482,10 +1490,11 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) + + host = mmc_priv(mmc); + host->pcr = pcr; ++ mmc->ios.power_delay_ms = 5; + host->mmc = mmc; + host->pdev = pdev; + host->cookie = -1; +- host->power_state = SDMMC_POWER_OFF; ++ host->prev_power_state = MMC_POWER_OFF; + INIT_WORK(&host->work, sd_request); + platform_set_drvdata(pdev, host); + pcr->slots[RTSX_SD_CARD].p_dev = pdev; +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c +index 50c71e0ba5e4e..ff9f5b63c337e 100644 +--- a/drivers/mmc/host/sdhci-msm.c ++++ b/drivers/mmc/host/sdhci-msm.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + + #include "sdhci-pltfm.h" + #include "cqhci.h" +@@ -2482,6 +2483,43 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev, + of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config); + } + ++static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host) ++{ ++ struct reset_control *reset; ++ int ret = 0; ++ ++ reset = reset_control_get_optional_exclusive(dev, NULL); ++ if (IS_ERR(reset)) ++ return dev_err_probe(dev, PTR_ERR(reset), ++ "unable to acquire core_reset\n"); ++ ++ if (!reset) ++ return ret; ++ ++ ret = reset_control_assert(reset); ++ if (ret) { ++ reset_control_put(reset); ++ return dev_err_probe(dev, ret, "core_reset assert failed\n"); ++ } ++ ++ /* ++ * The hardware requirement for delay between assert/deassert ++ * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to ++ * ~125us (4/32768). To be on the safe side add 200us delay. ++ */ ++ usleep_range(200, 210); ++ ++ ret = reset_control_deassert(reset); ++ if (ret) { ++ reset_control_put(reset); ++ return dev_err_probe(dev, ret, "core_reset deassert failed\n"); ++ } ++ ++ usleep_range(200, 210); ++ reset_control_put(reset); ++ ++ return ret; ++} + + static int sdhci_msm_probe(struct platform_device *pdev) + { +@@ -2529,6 +2567,10 @@ static int sdhci_msm_probe(struct platform_device *pdev) + + msm_host->saved_tuning_phase = INVALID_TUNING_PHASE; + ++ ret = sdhci_msm_gcc_reset(&pdev->dev, host); ++ if (ret) ++ goto pltfm_free; ++ + /* Setup SDCC bus voter clock. 
*/ + msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(msm_host->bus_clk)) { +diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c +index 2702736a1c57d..ce6cb8be654ef 100644 +--- a/drivers/mmc/host/sunxi-mmc.c ++++ b/drivers/mmc/host/sunxi-mmc.c +@@ -377,8 +377,9 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host, + pdes[i].buf_addr_ptr1 = + cpu_to_le32(sg_dma_address(&data->sg[i]) >> + host->cfg->idma_des_shift); +- pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc >> +- host->cfg->idma_des_shift); ++ pdes[i].buf_addr_ptr2 = ++ cpu_to_le32(next_desc >> ++ host->cfg->idma_des_shift); + } + + pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD); +diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c +index d0c5a7a60dafb..5215bd9b2c80d 100644 +--- a/drivers/net/can/grcan.c ++++ b/drivers/net/can/grcan.c +@@ -241,13 +241,14 @@ struct grcan_device_config { + .rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \ + } + +-#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100 ++#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100 + #define GRLIB_VERSION_MASK 0xffff + + /* GRCAN private data structure */ + struct grcan_priv { + struct can_priv can; /* must be the first member */ + struct net_device *dev; ++ struct device *ofdev_dev; + struct napi_struct napi; + + struct grcan_registers __iomem *regs; /* ioremap'ed registers */ +@@ -921,7 +922,7 @@ static void grcan_free_dma_buffers(struct net_device *dev) + struct grcan_priv *priv = netdev_priv(dev); + struct grcan_dma *dma = &priv->dma; + +- dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf, ++ dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf, + dma->base_handle); + memset(dma, 0, sizeof(*dma)); + } +@@ -946,7 +947,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev, + + /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */ + dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT; +- dma->base_buf = dma_alloc_coherent(&dev->dev, ++ dma->base_buf = dma_alloc_coherent(priv->ofdev_dev, + dma->base_size, + &dma->base_handle, + GFP_KERNEL); +@@ -1102,8 +1103,10 @@ static int grcan_close(struct net_device *dev) + + priv->closing = true; + if (priv->need_txbug_workaround) { ++ spin_unlock_irqrestore(&priv->lock, flags); + del_timer_sync(&priv->hang_timer); + del_timer_sync(&priv->rr_timer); ++ spin_lock_irqsave(&priv->lock, flags); + } + netif_stop_queue(dev); + grcan_stop_hardware(dev); +@@ -1122,7 +1125,7 @@ static int grcan_close(struct net_device *dev) + return 0; + } + +-static int grcan_transmit_catch_up(struct net_device *dev, int budget) ++static void grcan_transmit_catch_up(struct net_device *dev) + { + struct grcan_priv *priv = netdev_priv(dev); + unsigned long flags; +@@ -1130,7 +1133,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget) + + spin_lock_irqsave(&priv->lock, flags); + +- work_done = catch_up_echo_skb(dev, budget, true); ++ work_done = catch_up_echo_skb(dev, -1, true); + if (work_done) { + if (!priv->resetting && !priv->closing && + !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) +@@ -1144,8 +1147,6 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget) + } + + spin_unlock_irqrestore(&priv->lock, flags); +- +- return work_done; + } + + static int grcan_receive(struct net_device *dev, int budget) +@@ -1227,19 +1228,13 @@ static int grcan_poll(struct napi_struct *napi, int budget) + struct net_device *dev = priv->dev; + struct grcan_registers __iomem *regs = priv->regs; + unsigned long flags; +- int tx_work_done, 
rx_work_done; +- int rx_budget = budget / 2; +- int tx_budget = budget - rx_budget; ++ int work_done; + +- /* Half of the budget for receiving messages */ +- rx_work_done = grcan_receive(dev, rx_budget); ++ work_done = grcan_receive(dev, budget); + +- /* Half of the budget for transmitting messages as that can trigger echo +- * frames being received +- */ +- tx_work_done = grcan_transmit_catch_up(dev, tx_budget); ++ grcan_transmit_catch_up(dev); + +- if (rx_work_done < rx_budget && tx_work_done < tx_budget) { ++ if (work_done < budget) { + napi_complete(napi); + + /* Guarantee no interference with a running reset that otherwise +@@ -1256,7 +1251,7 @@ static int grcan_poll(struct napi_struct *napi, int budget) + spin_unlock_irqrestore(&priv->lock, flags); + } + +- return rx_work_done + tx_work_done; ++ return work_done; + } + + /* Work tx bug by waiting while for the risky situation to clear. If that fails, +@@ -1587,6 +1582,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev, + memcpy(&priv->config, &grcan_module_config, + sizeof(struct grcan_device_config)); + priv->dev = dev; ++ priv->ofdev_dev = &ofdev->dev; + priv->regs = base; + priv->can.bittiming_const = &grcan_bittiming_const; + priv->can.do_set_bittiming = grcan_set_bittiming; +@@ -1639,6 +1635,7 @@ exit_free_candev: + static int grcan_probe(struct platform_device *ofdev) + { + struct device_node *np = ofdev->dev.of_node; ++ struct device_node *sysid_parent; + u32 sysid, ambafreq; + int irq, err; + void __iomem *base; +@@ -1647,10 +1644,15 @@ static int grcan_probe(struct platform_device *ofdev) + /* Compare GRLIB version number with the first that does not + * have the tx bug (see start_xmit) + */ +- err = of_property_read_u32(np, "systemid", &sysid); +- if (!err && ((sysid & GRLIB_VERSION_MASK) +- >= GRCAN_TXBUG_SAFE_GRLIB_VERSION)) +- txbug = false; ++ sysid_parent = of_find_node_by_path("/ambapp0"); ++ if (sysid_parent) { ++ of_node_get(sysid_parent); ++ err = of_property_read_u32(sysid_parent, "systemid", &sysid); ++ if (!err && ((sysid & GRLIB_VERSION_MASK) >= ++ GRCAN_TXBUG_SAFE_GRLIB_VERSION)) ++ txbug = false; ++ of_node_put(sysid_parent); ++ } + + err = of_property_read_u32(np, "freq", &ambafreq); + if (err) { +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c +index a251bc55727ff..fcdd022b24986 100644 +--- a/drivers/net/dsa/mt7530.c ++++ b/drivers/net/dsa/mt7530.c +@@ -2224,6 +2224,7 @@ mt7530_setup(struct dsa_switch *ds) + ret = of_get_phy_mode(mac_np, &interface); + if (ret && ret != -ENODEV) { + of_node_put(mac_np); ++ of_node_put(phy_node); + return ret; + } + id = of_mdio_parse_addr(ds->dev, phy_node); +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index 6af0ae1d0c462..9167517de3d97 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -2678,6 +2678,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) + u32 idx = le32_to_cpu(nqcmp->cq_handle_low); + struct bnxt_cp_ring_info *cpr2; + ++ /* No more budget for RX work */ ++ if (budget && work_done >= budget && idx == BNXT_RX_HDL) ++ break; ++ + cpr2 = cpr->cp_ring_arr[idx]; + work_done += __bnxt_poll_work(bp, cpr2, + budget - work_done); +@@ -10938,7 +10942,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp) + + if (bp->flags & BNXT_FLAG_CHIP_P5) + return bnxt_rfs_supported(bp); +- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) ++ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || 
!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) + return false; + + vnics = 1 + bp->rx_nr_rings; +@@ -13194,10 +13198,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp) + goto init_dflt_ring_err; + + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; +- if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { +- bp->flags |= BNXT_FLAG_RFS; +- bp->dev->features |= NETIF_F_NTUPLE; +- } ++ ++ bnxt_set_dflt_rfs(bp); ++ + init_dflt_ring_err: + bnxt_ulp_irq_restart(bp, rc); + return rc; +diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c +index f2f1ce81fd9cc..0ec65ec634df5 100644 +--- a/drivers/net/ethernet/cavium/thunder/nic_main.c ++++ b/drivers/net/ethernet/cavium/thunder/nic_main.c +@@ -59,7 +59,7 @@ struct nicpf { + + /* MSI-X */ + u8 num_vec; +- bool irq_allocated[NIC_PF_MSIX_VECTORS]; ++ unsigned int irq_allocated[NIC_PF_MSIX_VECTORS]; + char irq_name[NIC_PF_MSIX_VECTORS][20]; + }; + +@@ -1150,7 +1150,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) + u64 intr; + u8 vf; + +- if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0)) ++ if (irq == nic->irq_allocated[NIC_PF_INTR_ID_MBOX0]) + mbx = 0; + else + mbx = 1; +@@ -1176,14 +1176,14 @@ static void nic_free_all_interrupts(struct nicpf *nic) + + for (irq = 0; irq < nic->num_vec; irq++) { + if (nic->irq_allocated[irq]) +- free_irq(pci_irq_vector(nic->pdev, irq), nic); +- nic->irq_allocated[irq] = false; ++ free_irq(nic->irq_allocated[irq], nic); ++ nic->irq_allocated[irq] = 0; + } + } + + static int nic_register_interrupts(struct nicpf *nic) + { +- int i, ret; ++ int i, ret, irq; + nic->num_vec = pci_msix_vec_count(nic->pdev); + + /* Enable MSI-X */ +@@ -1201,13 +1201,13 @@ static int nic_register_interrupts(struct nicpf *nic) + sprintf(nic->irq_name[i], + "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0)); + +- ret = request_irq(pci_irq_vector(nic->pdev, i), +- nic_mbx_intr_handler, 0, ++ irq = pci_irq_vector(nic->pdev, i); ++ ret = request_irq(irq, nic_mbx_intr_handler, 0, + nic->irq_name[i], nic); + if (ret) + goto fail; + +- nic->irq_allocated[i] = true; ++ nic->irq_allocated[i] = irq; + } + + /* Enable mailbox interrupt */ +diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +index 2d9b06d7caadb..f7dc7d825f637 100644 +--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c ++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +@@ -771,7 +771,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, + /* If we only have one page, still need to get shadown wqe when + * wqe rolling-over page + */ +- if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) { ++ if (curr_pg != end_pg || end_prod_idx < *prod_idx) { + void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; + + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx); +@@ -841,7 +841,10 @@ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, + + *cons_idx = curr_cons_idx; + +- if (curr_pg != end_pg) { ++ /* If we only have one page, still need to get shadown wqe when ++ * wqe rolling-over page ++ */ ++ if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) { + void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; + + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); +diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c +index 32d83421226a2..5897940a418b6 100644 +--- 
a/drivers/net/ethernet/mediatek/mtk_sgmii.c ++++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c +@@ -26,6 +26,7 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) + break; + + ss->regmap[i] = syscon_node_to_regmap(np); ++ of_node_put(np); + if (IS_ERR(ss->regmap[i])) + return PTR_ERR(ss->regmap[i]); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +index 538adab6878b5..c5b560a8b026e 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +@@ -31,6 +31,7 @@ static const char *const mlx5_rsc_sgmt_name[] = { + struct mlx5_rsc_dump { + u32 pdn; + u32 mkey; ++ u32 number_of_menu_items; + u16 fw_segment_type[MLX5_SGMT_TYPE_NUM]; + }; + +@@ -50,21 +51,37 @@ static int mlx5_rsc_dump_sgmt_get_by_name(char *name) + return -EINVAL; + } + +-static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page) ++#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \ ++ MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \ ++ MLX5_ST_SZ_BYTES(resource_dump_menu_segment)) ++ ++static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page, ++ int read_size, int start_idx) + { + void *data = page_address(page); + enum mlx5_sgmt_type sgmt_idx; + int num_of_items; + char *sgmt_name; + void *member; ++ int size = 0; + void *menu; + int i; + +- menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu); +- num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records); ++ if (!start_idx) { ++ menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu); ++ rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu, ++ num_of_records); ++ size = MLX5_RSC_DUMP_MENU_HEADER_SIZE; ++ data += size; ++ } ++ num_of_items = rsc_dump->number_of_menu_items; ++ ++ for (i = 0; start_idx + i < num_of_items; i++) { ++ size += MLX5_ST_SZ_BYTES(resource_dump_menu_record); ++ if (size >= read_size) ++ return start_idx + i; + +- for (i = 0; i < num_of_items; i++) { +- member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]); ++ member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i; + sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name); + sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name); + if (sgmt_idx == -EINVAL) +@@ -72,6 +89,7 @@ static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct + rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record, + member, segment_type); + } ++ return 0; + } + + static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, +@@ -168,6 +186,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev) + struct mlx5_rsc_dump_cmd *cmd = NULL; + struct mlx5_rsc_key key = {}; + struct page *page; ++ int start_idx = 0; + int size; + int err; + +@@ -189,7 +208,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev) + if (err < 0) + goto destroy_cmd; + +- mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page); ++ start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx); + + } while (err > 0); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +index 673f1c82d3815..c9d5d8d93994d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c ++++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +@@ -309,8 +309,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, + if (err) + return err; + +- err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz, +- xoff, &port_buffer, &update_buffer); ++ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff, ++ port_buff_cell_sz, &port_buffer, &update_buffer); + if (err) + return err; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +index 4a0d38d219edc..9028e9958c72d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +@@ -1739,6 +1739,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg) + static void + mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) + { ++ struct mlx5e_priv *priv; ++ + if (!refcount_dec_and_test(&ft->refcount)) + return; + +@@ -1748,6 +1750,8 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) + rhashtable_free_and_destroy(&ft->ct_entries_ht, + mlx5_tc_ct_flush_ft_entry, + ct_priv); ++ priv = netdev_priv(ct_priv->netdev); ++ flush_workqueue(priv->wq); + mlx5_tc_ct_free_pre_ct_tables(ft); + mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id); + kfree(ft); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +index 378fc8e3bd975..d87bbb0be7c86 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +@@ -713,6 +713,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, + struct net_device *filter_dev) + { + struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr; ++ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_int_port *int_port; + TC_TUN_ROUTE_ATTR_INIT(attr); + u16 vport_num; +@@ -747,7 +748,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, + esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value, + misc_parameters.vxlan_vni); + esw_attr->rx_tun_attr->decap_vport = vport_num; +- } else if (netif_is_ovs_master(attr.route_dev)) { ++ } else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) { + int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv), + attr.route_dev->ifindex, + MLX5E_TC_INT_PORT_INGRESS); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +index a4c8d8d00d5a4..72e08559e0d05 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +@@ -1198,6 +1198,16 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv) + if (err) + return err; + ++ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) { ++ /* ++ * Align the driver state with the register state. ++ * Temporary state change is required to enable the app list reset. 
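The trick described in the comment above works because the app-list removal path is only reachable while the device reports DSCP trust, so the init code briefly flips the field, flushes the stale entries, and restores PCP. A minimal sketch of this save/flip/restore pattern, with hypothetical names standing in for the mlx5 fields:

        /* Illustrative pattern only; names are hypothetical, not the mlx5 API. */
        enum trust { TRUST_PCP, TRUST_DSCP };

        struct dcb_state {
                enum trust trust_state;
                int app_cnt;
        };

        static void delete_apps(struct dcb_state *s)
        {
                /* Stands in for mlx5e_dcbnl_delete_app(): only acts in DSCP mode. */
                if (s->trust_state == TRUST_DSCP)
                        s->app_cnt = 0;
        }

        static void reset_stale_apps(struct dcb_state *s)
        {
                enum trust saved = s->trust_state;

                s->trust_state = TRUST_DSCP;    /* make the delete path reachable */
                delete_apps(s);
                s->trust_state = saved;         /* restore the visible state */
        }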
++ */ ++ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP; ++ mlx5e_dcbnl_delete_app(priv); ++ priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP; ++ } ++ + mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params, + priv->dcbx_dp.trust_state); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 7e5c00349ccf9..e0f45cef97c34 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -2355,6 +2355,17 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, + match.key->vlan_priority); + + *match_level = MLX5_MATCH_L2; ++ ++ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) && ++ match.mask->vlan_eth_type && ++ MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ++ ft_field_support.outer_second_vid, ++ fs_type)) { ++ MLX5_SET(fte_match_set_misc, misc_c, ++ outer_second_cvlan_tag, 1); ++ spec->match_criteria_enable |= ++ MLX5_MATCH_MISC_PARAMETERS; ++ } + } + } else if (*match_level != MLX5_MATCH_NONE) { + /* cvlan_tag enabled in match criteria and +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index e7e7b4b0dcdb5..cebfa8565c9d9 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -139,7 +139,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, + if (mlx5_esw_indir_table_decap_vport(attr)) + vport = mlx5_esw_indir_table_decap_vport(attr); + +- if (esw_attr->int_port) ++ if (attr && !attr->chain && esw_attr->int_port) + metadata = + mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port); + else +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +index 84dbe46d5ede6..862f5b7cb2106 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +@@ -112,6 +112,28 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev) + } + } + ++static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) ++{ ++ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; ++ ++ del_timer_sync(&fw_reset->timer); ++} ++ ++static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) ++{ ++ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; ++ ++ if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) { ++ mlx5_core_warn(dev, "Reset request was already cleared\n"); ++ return -EALREADY; ++ } ++ ++ mlx5_stop_sync_reset_poll(dev); ++ if (poll_health) ++ mlx5_start_health_poll(dev); ++ return 0; ++} ++ + static void mlx5_sync_reset_reload_work(struct work_struct *work) + { + struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, +@@ -119,6 +141,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work) + struct mlx5_core_dev *dev = fw_reset->dev; + int err; + ++ mlx5_sync_reset_clear_reset_requested(dev, false); + mlx5_enter_error_state(dev, true); + mlx5_unload_one(dev); + err = mlx5_health_wait_pci_up(dev); +@@ -128,23 +151,6 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work) + mlx5_fw_reset_complete_reload(dev); + } + +-static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) +-{ +- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; +- +- del_timer_sync(&fw_reset->timer); +-} +- +-static void 
mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) +-{ +- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; +- +- mlx5_stop_sync_reset_poll(dev); +- clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags); +- if (poll_health) +- mlx5_start_health_poll(dev); +-} +- + #define MLX5_RESET_POLL_INTERVAL (HZ / 10) + static void poll_sync_reset(struct timer_list *t) + { +@@ -159,7 +165,6 @@ static void poll_sync_reset(struct timer_list *t) + + if (fatal_error) { + mlx5_core_warn(dev, "Got Device Reset\n"); +- mlx5_sync_reset_clear_reset_requested(dev, false); + queue_work(fw_reset->wq, &fw_reset->reset_reload_work); + return; + } +@@ -186,13 +191,17 @@ static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev) + return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false); + } + +-static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev) ++static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev) + { + struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; + ++ if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) { ++ mlx5_core_warn(dev, "Reset request was already set\n"); ++ return -EALREADY; ++ } + mlx5_stop_health_poll(dev, true); +- set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags); + mlx5_start_sync_reset_poll(dev); ++ return 0; + } + + static void mlx5_fw_live_patch_event(struct work_struct *work) +@@ -221,7 +230,9 @@ static void mlx5_sync_reset_request_event(struct work_struct *work) + err ? "Failed" : "Sent"); + return; + } +- mlx5_sync_reset_set_reset_requested(dev); ++ if (mlx5_sync_reset_set_reset_requested(dev)) ++ return; ++ + err = mlx5_fw_reset_set_reset_sync_ack(dev); + if (err) + mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err); +@@ -319,7 +330,8 @@ static void mlx5_sync_reset_now_event(struct work_struct *work) + struct mlx5_core_dev *dev = fw_reset->dev; + int err; + +- mlx5_sync_reset_clear_reset_requested(dev, false); ++ if (mlx5_sync_reset_clear_reset_requested(dev, false)) ++ return; + + mlx5_core_warn(dev, "Sync Reset now. 
Device is going to reset.\n"); + +@@ -348,10 +360,8 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work) + reset_abort_work); + struct mlx5_core_dev *dev = fw_reset->dev; + +- if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) ++ if (mlx5_sync_reset_clear_reset_requested(dev, true)) + return; +- +- mlx5_sync_reset_clear_reset_requested(dev, true); + mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n"); + } + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c +index 626aa60b6099b..7da710951572d 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c +@@ -100,6 +100,14 @@ static void mlx5_lag_fib_event_flush(struct notifier_block *nb) + flush_workqueue(mp->wq); + } + ++static void mlx5_lag_fib_set(struct lag_mp *mp, struct fib_info *fi, u32 dst, int dst_len) ++{ ++ mp->fib.mfi = fi; ++ mp->fib.priority = fi->fib_priority; ++ mp->fib.dst = dst; ++ mp->fib.dst_len = dst_len; ++} ++ + struct mlx5_fib_event_work { + struct work_struct work; + struct mlx5_lag *ldev; +@@ -110,10 +118,10 @@ struct mlx5_fib_event_work { + }; + }; + +-static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, +- unsigned long event, +- struct fib_info *fi) ++static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event, ++ struct fib_entry_notifier_info *fen_info) + { ++ struct fib_info *fi = fen_info->fi; + struct lag_mp *mp = &ldev->lag_mp; + struct fib_nh *fib_nh0, *fib_nh1; + unsigned int nhs; +@@ -121,13 +129,15 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, + /* Handle delete event */ + if (event == FIB_EVENT_ENTRY_DEL) { + /* stop track */ +- if (mp->mfi == fi) +- mp->mfi = NULL; ++ if (mp->fib.mfi == fi) ++ mp->fib.mfi = NULL; + return; + } + + /* Handle multipath entry with lower priority value */ +- if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority) ++ if (mp->fib.mfi && mp->fib.mfi != fi && ++ (mp->fib.dst != fen_info->dst || mp->fib.dst_len != fen_info->dst_len) && ++ fi->fib_priority >= mp->fib.priority) + return; + + /* Handle add/replace event */ +@@ -143,9 +153,9 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, + + i++; + mlx5_lag_set_port_affinity(ldev, i); ++ mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len); + } + +- mp->mfi = fi; + return; + } + +@@ -165,7 +175,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, + } + + /* First time we see multipath route */ +- if (!mp->mfi && !__mlx5_lag_is_active(ldev)) { ++ if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) { + struct lag_tracker tracker; + + tracker = ldev->tracker; +@@ -173,7 +183,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, + } + + mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY); +- mp->mfi = fi; ++ mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len); + } + + static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, +@@ -184,7 +194,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, + struct lag_mp *mp = &ldev->lag_mp; + + /* Check the nh event is related to the route */ +- if (!mp->mfi || mp->mfi != fi) ++ if (!mp->fib.mfi || mp->fib.mfi != fi) + return; + + /* nh added/removed */ +@@ -214,7 +224,7 @@ static void mlx5_lag_fib_update(struct work_struct *work) + case FIB_EVENT_ENTRY_REPLACE: + case FIB_EVENT_ENTRY_DEL: + mlx5_lag_fib_route_event(ldev, fib_work->event, +- fib_work->fen_info.fi); ++ &fib_work->fen_info); + 
fib_info_put(fib_work->fen_info.fi); + break; + case FIB_EVENT_NH_ADD: +@@ -313,7 +323,7 @@ void mlx5_lag_mp_reset(struct mlx5_lag *ldev) + /* Clear mfi, as it might become stale when a route delete event + * has been missed, see mlx5_lag_fib_route_event(). + */ +- ldev->lag_mp.mfi = NULL; ++ ldev->lag_mp.fib.mfi = NULL; + } + + int mlx5_lag_mp_init(struct mlx5_lag *ldev) +@@ -324,7 +334,7 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev) + /* always clear mfi, as it might become stale when a route delete event + * has been missed + */ +- mp->mfi = NULL; ++ mp->fib.mfi = NULL; + + if (mp->fib_nb.notifier_call) + return 0; +@@ -354,5 +364,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) + unregister_fib_notifier(&init_net, &mp->fib_nb); + destroy_workqueue(mp->wq); + mp->fib_nb.notifier_call = NULL; +- mp->mfi = NULL; ++ mp->fib.mfi = NULL; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h +index 57af962cad298..056a066da604b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h +@@ -15,7 +15,12 @@ enum mlx5_lag_port_affinity { + + struct lag_mp { + struct notifier_block fib_nb; +- struct fib_info *mfi; /* used in tracking fib events */ ++ struct { ++ const void *mfi; /* used in tracking fib events */ ++ u32 priority; ++ u32 dst; ++ int dst_len; ++ } fib; + struct workqueue_struct *wq; + }; + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +index a6592f9c3c05f..5be322528279a 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +@@ -505,7 +505,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) + struct ttc_params ttc_params = {}; + + mlx5_lag_set_inner_ttc_params(ldev, &ttc_params); +- port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params); ++ port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params); + if (IS_ERR(port_sel->inner.ttc)) + return PTR_ERR(port_sel->inner.ttc); + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c +index b63dec24747ab..b78f2ba25c19b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c +@@ -408,6 +408,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev, + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + struct mlx5_ttc_rule *rule = &rules[tt]; + ++ if (test_bit(tt, params->ignore_dests)) ++ continue; + rule->rule = mlx5_generate_inner_ttc_rule(dev, ft, + ¶ms->dests[tt], + ttc_rules[tt].etype, +diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c +index 7a50ba00f8ae3..c854efdf1f25f 100644 +--- a/drivers/net/ethernet/smsc/smsc911x.c ++++ b/drivers/net/ethernet/smsc/smsc911x.c +@@ -2431,7 +2431,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) + if (irq == -EPROBE_DEFER) { + retval = -EPROBE_DEFER; + goto out_0; +- } else if (irq <= 0) { ++ } else if (irq < 0) { + pr_warn("Could not allocate irq resource\n"); + retval = -ENODEV; + goto out_0; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +index 8e8778cfbbadd..6f87e296a410f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +@@ -454,6 +454,7 @@ static 
int intel_mgbe_common_data(struct pci_dev *pdev, + plat->has_gmac4 = 1; + plat->force_sf_dma_mode = 0; + plat->tso_en = 1; ++ plat->sph_disable = 1; + + /* Multiplying factor to the clk_eee_i clock time + * period to make it closer to 100 ns. This value +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +index 09644ab0d87a7..fda53b4b9406f 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +@@ -916,6 +916,7 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv) + + ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn, + &gmac->mux_handle, priv, priv->mii); ++ of_node_put(mdio_mux); + return ret; + } + +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 422e3225f476a..fb115273f5533 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -7077,7 +7077,7 @@ int stmmac_dvr_probe(struct device *device, + dev_info(priv->device, "TSO feature enabled\n"); + } + +- if (priv->dma_cap.sphen) { ++ if (priv->dma_cap.sphen && !priv->plat->sph_disable) { + ndev->hw_features |= NETIF_F_GRO; + priv->sph_cap = true; + priv->sph = priv->sph_cap; +diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c +index bd4b1528cf992..79e850fe4621c 100644 +--- a/drivers/net/ethernet/ti/cpsw_new.c ++++ b/drivers/net/ethernet/ti/cpsw_new.c +@@ -1246,8 +1246,10 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw) + data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM, + sizeof(struct cpsw_slave_data), + GFP_KERNEL); +- if (!data->slave_data) ++ if (!data->slave_data) { ++ of_node_put(tmp_node); + return -ENOMEM; ++ } + + /* Populate all the child nodes here... + */ +@@ -1341,6 +1343,7 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw) + + err_node_put: + of_node_put(port_np); ++ of_node_put(tmp_node); + return ret; + } + +diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c +index 77fa2cb03acaa..08a670bf2cd19 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c ++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c +@@ -823,10 +823,10 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, + static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) + { + struct mii_bus *bus; +- int rc; + struct resource res; + struct device_node *np = of_get_parent(lp->phy_node); + struct device_node *npp; ++ int rc, ret; + + /* Don't register the MDIO bus if the phy_node or its parent node + * can't be found. 
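Several hunks in this stretch (dwmac-sun8i, cpsw_new, and the xilinx_emaclite changes continuing below) are the same class of fix: device-tree lookup helpers such as of_get_parent() and of_get_child_by_name() return a node with its reference count raised, and every exit path, error paths included, must drop that reference with of_node_put(). A hedged sketch of the idiom; do_setup() is a hypothetical consumer:

        #include <linux/errno.h>
        #include <linux/of.h>

        static int do_setup(struct device_node *np);    /* hypothetical */

        static int probe_child(struct device_node *parent)
        {
                struct device_node *np;
                int ret;

                np = of_get_child_by_name(parent, "mdio"); /* takes a reference */
                if (!np)
                        return -ENODEV;

                ret = do_setup(np);
                of_node_put(np);        /* drop it on success and error alike */
                return ret;
        }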
+@@ -836,8 +836,14 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) + return -ENODEV; + } + npp = of_get_parent(np); +- +- of_address_to_resource(npp, 0, &res); ++ ret = of_address_to_resource(npp, 0, &res); ++ of_node_put(npp); ++ if (ret) { ++ dev_err(dev, "%s resource error!\n", ++ dev->of_node->full_name); ++ of_node_put(np); ++ return ret; ++ } + if (lp->ndev->mem_start != res.start) { + struct phy_device *phydev; + phydev = of_phy_find_device(lp->phy_node); +@@ -846,6 +852,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) + "MDIO of the phy is not registered yet\n"); + else + put_device(&phydev->mdio.dev); ++ of_node_put(np); + return 0; + } + +@@ -858,6 +865,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) + bus = mdiobus_alloc(); + if (!bus) { + dev_err(dev, "Failed to allocate mdiobus\n"); ++ of_node_put(np); + return -ENOMEM; + } + +@@ -870,6 +878,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) + bus->parent = dev; + + rc = of_mdiobus_register(bus, np); ++ of_node_put(np); + if (rc) { + dev_err(dev, "Failed to register mdio bus.\n"); + goto err_register; +diff --git a/drivers/net/mdio/mdio-mux-bcm6368.c b/drivers/net/mdio/mdio-mux-bcm6368.c +index 6dcbf987d61b5..8b444a8eb6b55 100644 +--- a/drivers/net/mdio/mdio-mux-bcm6368.c ++++ b/drivers/net/mdio/mdio-mux-bcm6368.c +@@ -115,7 +115,7 @@ static int bcm6368_mdiomux_probe(struct platform_device *pdev) + md->mii_bus = devm_mdiobus_alloc(&pdev->dev); + if (!md->mii_bus) { + dev_err(&pdev->dev, "mdiomux bus alloc failed\n"); +- return ENOMEM; ++ return -ENOMEM; + } + + bus = md->mii_bus; +diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c +index 2fcf545012b16..1a5284de4341b 100644 +--- a/drivers/nfc/nfcmrvl/main.c ++++ b/drivers/nfc/nfcmrvl/main.c +@@ -183,6 +183,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) + { + struct nci_dev *ndev = priv->ndev; + ++ nci_unregister_device(ndev); + if (priv->ndev->nfc_dev->fw_download_in_progress) + nfcmrvl_fw_dnld_abort(priv); + +@@ -191,7 +192,6 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) + if (gpio_is_valid(priv->config.reset_n_io)) + gpio_free(priv->config.reset_n_io); + +- nci_unregister_device(ndev); + nci_free_device(ndev); + kfree(priv); + } +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c +index 15348be1a8aa5..5be382b19d9a7 100644 +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -38,10 +38,6 @@ + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6) + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7) + #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8) +-#define PCIE_CORE_INT_A_ASSERT_ENABLE 1 +-#define PCIE_CORE_INT_B_ASSERT_ENABLE 2 +-#define PCIE_CORE_INT_C_ASSERT_ENABLE 3 +-#define PCIE_CORE_INT_D_ASSERT_ENABLE 4 + /* PIO registers base address and register offsets */ + #define PIO_BASE_ADDR 0x4000 + #define PIO_CTRL (PIO_BASE_ADDR + 0x0) +@@ -102,6 +98,10 @@ + #define PCIE_MSG_PM_PME_MASK BIT(7) + #define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44) + #define PCIE_ISR0_MSI_INT_PENDING BIT(24) ++#define PCIE_ISR0_CORR_ERR BIT(11) ++#define PCIE_ISR0_NFAT_ERR BIT(12) ++#define PCIE_ISR0_FAT_ERR BIT(13) ++#define PCIE_ISR0_ERR_MASK GENMASK(13, 11) + #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) + #define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val)) + #define PCIE_ISR0_ALL_MASK GENMASK(31, 0) +@@ -272,17 +272,16 @@ struct 
advk_pcie { + u32 actions; + } wins[OB_WIN_COUNT]; + u8 wins_count; ++ int irq; ++ struct irq_domain *rp_irq_domain; + struct irq_domain *irq_domain; + struct irq_chip irq_chip; + raw_spinlock_t irq_lock; + struct irq_domain *msi_domain; + struct irq_domain *msi_inner_domain; +- struct irq_chip msi_bottom_irq_chip; +- struct irq_chip msi_irq_chip; +- struct msi_domain_info msi_domain_info; ++ raw_spinlock_t msi_irq_lock; + DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); + struct mutex msi_used_lock; +- u16 msi_msg; + int link_gen; + struct pci_bridge_emul bridge; + struct gpio_desc *reset_gpio; +@@ -477,6 +476,7 @@ static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num) + + static void advk_pcie_setup_hw(struct advk_pcie *pcie) + { ++ phys_addr_t msi_addr; + u32 reg; + int i; + +@@ -565,6 +565,11 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) + reg |= LANE_COUNT_1; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + ++ /* Set MSI address */ ++ msi_addr = virt_to_phys(pcie); ++ advk_writel(pcie, lower_32_bits(msi_addr), PCIE_MSI_ADDR_LOW_REG); ++ advk_writel(pcie, upper_32_bits(msi_addr), PCIE_MSI_ADDR_HIGH_REG); ++ + /* Enable MSI */ + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); + reg |= PCIE_CORE_CTRL2_MSI_ENABLE; +@@ -576,15 +581,20 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie) + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); + +- /* Disable All ISR0/1 Sources */ +- reg = PCIE_ISR0_ALL_MASK; ++ /* Disable All ISR0/1 and MSI Sources */ ++ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG); ++ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); ++ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); ++ ++ /* Unmask summary MSI interrupt */ ++ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG); + reg &= ~PCIE_ISR0_MSI_INT_PENDING; + advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); + +- advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); +- +- /* Unmask all MSIs */ +- advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG); ++ /* Unmask PME interrupt for processing of PME requester */ ++ reg = advk_readl(pcie, PCIE_ISR0_MASK_REG); ++ reg &= ~PCIE_MSG_PM_PME_MASK; ++ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); + + /* Enable summary interrupt for GIC SPI source */ + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); +@@ -778,11 +788,15 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge, + case PCI_INTERRUPT_LINE: { + /* + * From the whole 32bit register we support reading from HW only +- * one bit: PCI_BRIDGE_CTL_BUS_RESET. ++ * two bits: PCI_BRIDGE_CTL_BUS_RESET and PCI_BRIDGE_CTL_SERR. + * Other bits are retrieved only from emulated config buffer. 
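For context on the Bridge Control handling that follows: Bridge Control shares the 32-bit config dword at offset 0x3C with Interrupt Line and Interrupt Pin, which is why the emulated read below folds live hardware state for just two bits (Secondary Bus Reset and, new in this patch, SERR# Enable) into the upper half-word. A hedged sketch of that fold-in, assuming only the masks shown in this patch:

        #include <linux/pci.h>

        static u32 emul_read_intline(u32 emulated, u32 isr_mask, u32 err_mask)
        {
                u32 val = emulated;

                /* SERR# Enable reads as set while the error sources are unmasked. */
                if (isr_mask & err_mask)
                        val &= ~((u32)PCI_BRIDGE_CTL_SERR << 16);
                else
                        val |= (u32)PCI_BRIDGE_CTL_SERR << 16;

                return val;
        }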
+ */ + __le32 *cfgspace = (__le32 *)&bridge->conf; + u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]); ++ if (advk_readl(pcie, PCIE_ISR0_MASK_REG) & PCIE_ISR0_ERR_MASK) ++ val &= ~(PCI_BRIDGE_CTL_SERR << 16); ++ else ++ val |= PCI_BRIDGE_CTL_SERR << 16; + if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN) + val |= PCI_BRIDGE_CTL_BUS_RESET << 16; + else +@@ -808,6 +822,19 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, + break; + + case PCI_INTERRUPT_LINE: ++ /* ++ * According to Figure 6-3: Pseudo Logic Diagram for Error ++ * Message Controls in PCIe base specification, SERR# Enable bit ++ * in Bridge Control register enable receiving of ERR_* messages ++ */ ++ if (mask & (PCI_BRIDGE_CTL_SERR << 16)) { ++ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); ++ if (new & (PCI_BRIDGE_CTL_SERR << 16)) ++ val &= ~PCIE_ISR0_ERR_MASK; ++ else ++ val |= PCIE_ISR0_ERR_MASK; ++ advk_writel(pcie, val, PCIE_ISR0_MASK_REG); ++ } + if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) { + u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG); + if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16)) +@@ -835,22 +862,11 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, + *value = PCI_EXP_SLTSTA_PDS << 16; + return PCI_BRIDGE_EMUL_HANDLED; + +- case PCI_EXP_RTCTL: { +- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); +- *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE; +- *value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE; +- *value |= PCI_EXP_RTCAP_CRSVIS << 16; +- return PCI_BRIDGE_EMUL_HANDLED; +- } +- +- case PCI_EXP_RTSTA: { +- u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG); +- u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG); +- *value = msglog >> 16; +- if (isr0 & PCIE_MSG_PM_PME_MASK) +- *value |= PCI_EXP_RTSTA_PME; +- return PCI_BRIDGE_EMUL_HANDLED; +- } ++ /* ++ * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need ++ * to be handled here, because their values are stored in emulated ++ * config space buffer, and we read them from there when needed. ++ */ + + case PCI_EXP_LNKCAP: { + u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); +@@ -905,19 +921,18 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, + break; + + case PCI_EXP_RTCTL: { +- /* Only mask/unmask PME interrupt */ +- u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) & +- ~PCIE_MSG_PM_PME_MASK; +- if ((new & PCI_EXP_RTCTL_PMEIE) == 0) +- val |= PCIE_MSG_PM_PME_MASK; +- advk_writel(pcie, val, PCIE_ISR0_MASK_REG); ++ u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl); ++ /* Only emulation of PMEIE and CRSSVE bits is provided */ ++ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE; ++ bridge->pcie_conf.rootctl = cpu_to_le16(rootctl); + break; + } + +- case PCI_EXP_RTSTA: +- new = (new & PCI_EXP_RTSTA_PME) >> 9; +- advk_writel(pcie, new, PCIE_ISR0_REG); +- break; ++ /* ++ * PCI_EXP_RTSTA is also supported, but does not need to be handled ++ * here, because its value is stored in emulated config space buffer, ++ * and we write it there when needed. 
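With that change, Root Control and Root Status live entirely in the pci_bridge_emul buffer: the interrupt path (advk_pcie_handle_pme() later in this patch) latches the PME requester ID into rootsta and only raises the root-port interrupt while PMEIE is set. A condensed sketch under those assumptions; struct bridge_state and raise_rp_irq() are hypothetical stand-ins:

        #include <linux/pci.h>

        struct bridge_state {   /* stand-in for the emulated config buffer */
                u32 rootsta;
                u16 rootctl;
        };

        static void raise_rp_irq(struct bridge_state *b);  /* hypothetical */

        static void handle_pme(struct bridge_state *b, u16 requester)
        {
                if (b->rootsta & PCI_EXP_RTSTA_PME)
                        return; /* still asserted: keep the first requester ID */

                b->rootsta = requester | PCI_EXP_RTSTA_PME;

                if (b->rootctl & PCI_EXP_RTCTL_PMEIE)
                        raise_rp_irq(b);
        }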
++ */ + + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVCTL2: +@@ -961,7 +976,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) + bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64); + + /* Support interrupt A for MSI feature */ +- bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE; ++ bridge->conf.intpin = PCI_INTERRUPT_INTA; + + /* Aardvark HW provides PCIe Capability structure in version 2 */ + bridge->pcie_conf.cap = cpu_to_le16(2); +@@ -983,8 +998,12 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, + return false; + + /* +- * If the link goes down after we check for link-up, nothing bad +- * happens but the config access times out. ++ * If the link goes down after we check for link-up, we have a problem: ++ * if a PIO request is executed while link-down, the whole controller ++ * gets stuck in a non-functional state, and even after link comes up ++ * again, PIO requests won't work anymore, and a reset of the whole PCIe ++ * controller is needed. Therefore we need to prevent sending PIO ++ * requests while the link is down. + */ + if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie)) + return false; +@@ -1182,10 +1201,10 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data, + struct msi_msg *msg) + { + struct advk_pcie *pcie = irq_data_get_irq_chip_data(data); +- phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg); ++ phys_addr_t msi_addr = virt_to_phys(pcie); + +- msg->address_lo = lower_32_bits(msi_msg); +- msg->address_hi = upper_32_bits(msi_msg); ++ msg->address_lo = lower_32_bits(msi_addr); ++ msg->address_hi = upper_32_bits(msi_addr); + msg->data = data->hwirq; + } + +@@ -1195,6 +1214,54 @@ static int advk_msi_set_affinity(struct irq_data *irq_data, + return -EINVAL; + } + ++static void advk_msi_irq_mask(struct irq_data *d) ++{ ++ struct advk_pcie *pcie = d->domain->host_data; ++ irq_hw_number_t hwirq = irqd_to_hwirq(d); ++ unsigned long flags; ++ u32 mask; ++ ++ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags); ++ mask = advk_readl(pcie, PCIE_MSI_MASK_REG); ++ mask |= BIT(hwirq); ++ advk_writel(pcie, mask, PCIE_MSI_MASK_REG); ++ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags); ++} ++ ++static void advk_msi_irq_unmask(struct irq_data *d) ++{ ++ struct advk_pcie *pcie = d->domain->host_data; ++ irq_hw_number_t hwirq = irqd_to_hwirq(d); ++ unsigned long flags; ++ u32 mask; ++ ++ raw_spin_lock_irqsave(&pcie->msi_irq_lock, flags); ++ mask = advk_readl(pcie, PCIE_MSI_MASK_REG); ++ mask &= ~BIT(hwirq); ++ advk_writel(pcie, mask, PCIE_MSI_MASK_REG); ++ raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags); ++} ++ ++static void advk_msi_top_irq_mask(struct irq_data *d) ++{ ++ pci_msi_mask_irq(d); ++ irq_chip_mask_parent(d); ++} ++ ++static void advk_msi_top_irq_unmask(struct irq_data *d) ++{ ++ pci_msi_unmask_irq(d); ++ irq_chip_unmask_parent(d); ++} ++ ++static struct irq_chip advk_msi_bottom_irq_chip = { ++ .name = "MSI", ++ .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg, ++ .irq_set_affinity = advk_msi_set_affinity, ++ .irq_mask = advk_msi_irq_mask, ++ .irq_unmask = advk_msi_irq_unmask, ++}; ++ + static int advk_msi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, void *args) +@@ -1211,7 +1278,7 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain, + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, +- &pcie->msi_bottom_irq_chip, ++ &advk_msi_bottom_irq_chip, + domain->host_data, handle_simple_irq, + NULL, NULL); + +@@ 
-1267,7 +1334,6 @@ static int advk_pcie_irq_map(struct irq_domain *h, + { + struct advk_pcie *pcie = h->host_data; + +- advk_pcie_irq_mask(irq_get_irq_data(virq)); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &pcie->irq_chip, + handle_level_irq); +@@ -1281,37 +1347,25 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = { + .xlate = irq_domain_xlate_onecell, + }; + ++static struct irq_chip advk_msi_irq_chip = { ++ .name = "advk-MSI", ++ .irq_mask = advk_msi_top_irq_mask, ++ .irq_unmask = advk_msi_top_irq_unmask, ++}; ++ ++static struct msi_domain_info advk_msi_domain_info = { ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, ++ .chip = &advk_msi_irq_chip, ++}; ++ + static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) + { + struct device *dev = &pcie->pdev->dev; +- struct device_node *node = dev->of_node; +- struct irq_chip *bottom_ic, *msi_ic; +- struct msi_domain_info *msi_di; +- phys_addr_t msi_msg_phys; + ++ raw_spin_lock_init(&pcie->msi_irq_lock); + mutex_init(&pcie->msi_used_lock); + +- bottom_ic = &pcie->msi_bottom_irq_chip; +- +- bottom_ic->name = "MSI"; +- bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg; +- bottom_ic->irq_set_affinity = advk_msi_set_affinity; +- +- msi_ic = &pcie->msi_irq_chip; +- msi_ic->name = "advk-MSI"; +- +- msi_di = &pcie->msi_domain_info; +- msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_MULTI_PCI_MSI; +- msi_di->chip = msi_ic; +- +- msi_msg_phys = virt_to_phys(&pcie->msi_msg); +- +- advk_writel(pcie, lower_32_bits(msi_msg_phys), +- PCIE_MSI_ADDR_LOW_REG); +- advk_writel(pcie, upper_32_bits(msi_msg_phys), +- PCIE_MSI_ADDR_HIGH_REG); +- + pcie->msi_inner_domain = + irq_domain_add_linear(NULL, MSI_IRQ_NUM, + &advk_msi_domain_ops, pcie); +@@ -1319,8 +1373,9 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) + return -ENOMEM; + + pcie->msi_domain = +- pci_msi_create_irq_domain(of_node_to_fwnode(node), +- msi_di, pcie->msi_inner_domain); ++ pci_msi_create_irq_domain(dev_fwnode(dev), ++ &advk_msi_domain_info, ++ pcie->msi_inner_domain); + if (!pcie->msi_domain) { + irq_domain_remove(pcie->msi_inner_domain); + return -ENOMEM; +@@ -1361,7 +1416,6 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) + } + + irq_chip->irq_mask = advk_pcie_irq_mask; +- irq_chip->irq_mask_ack = advk_pcie_irq_mask; + irq_chip->irq_unmask = advk_pcie_irq_unmask; + + pcie->irq_domain = +@@ -1383,6 +1437,70 @@ static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) + irq_domain_remove(pcie->irq_domain); + } + ++static struct irq_chip advk_rp_irq_chip = { ++ .name = "advk-RP", ++}; ++ ++static int advk_pcie_rp_irq_map(struct irq_domain *h, ++ unsigned int virq, irq_hw_number_t hwirq) ++{ ++ struct advk_pcie *pcie = h->host_data; ++ ++ irq_set_chip_and_handler(virq, &advk_rp_irq_chip, handle_simple_irq); ++ irq_set_chip_data(virq, pcie); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = { ++ .map = advk_pcie_rp_irq_map, ++ .xlate = irq_domain_xlate_onecell, ++}; ++ ++static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie) ++{ ++ pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1, ++ &advk_pcie_rp_irq_domain_ops, ++ pcie); ++ if (!pcie->rp_irq_domain) { ++ dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n"); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static void advk_pcie_remove_rp_irq_domain(struct advk_pcie *pcie) ++{ ++ 
irq_domain_remove(pcie->rp_irq_domain); ++} ++ ++static void advk_pcie_handle_pme(struct advk_pcie *pcie) ++{ ++ u32 requester = advk_readl(pcie, PCIE_MSG_LOG_REG) >> 16; ++ ++ advk_writel(pcie, PCIE_MSG_PM_PME_MASK, PCIE_ISR0_REG); ++ ++ /* ++ * PCIE_MSG_LOG_REG contains the last inbound message, so store ++ * the requester ID only when PME was not asserted yet. ++ * Also do not trigger PME interrupt when PME is still asserted. ++ */ ++ if (!(le32_to_cpu(pcie->bridge.pcie_conf.rootsta) & PCI_EXP_RTSTA_PME)) { ++ pcie->bridge.pcie_conf.rootsta = cpu_to_le32(requester | PCI_EXP_RTSTA_PME); ++ ++ /* ++ * Trigger PME interrupt only if PMEIE bit in Root Control is set. ++ * Aardvark HW returns zero for PCI_EXP_FLAGS_IRQ, so use PCIe interrupt 0. ++ */ ++ if (!(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & PCI_EXP_RTCTL_PMEIE)) ++ return; ++ ++ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL) ++ dev_err_ratelimited(&pcie->pdev->dev, "unhandled PME IRQ\n"); ++ } ++} ++ + static void advk_pcie_handle_msi(struct advk_pcie *pcie) + { + u32 msi_val, msi_mask, msi_status, msi_idx; +@@ -1418,6 +1536,22 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie) + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); + ++ /* Process PME interrupt as the first one to do not miss PME requester id */ ++ if (isr0_status & PCIE_MSG_PM_PME_MASK) ++ advk_pcie_handle_pme(pcie); ++ ++ /* Process ERR interrupt */ ++ if (isr0_status & PCIE_ISR0_ERR_MASK) { ++ advk_writel(pcie, PCIE_ISR0_ERR_MASK, PCIE_ISR0_REG); ++ ++ /* ++ * Aardvark HW returns zero for PCI_ERR_ROOT_AER_IRQ, so use ++ * PCIe interrupt 0 ++ */ ++ if (generic_handle_domain_irq(pcie->rp_irq_domain, 0) == -EINVAL) ++ dev_err_ratelimited(&pcie->pdev->dev, "unhandled ERR IRQ\n"); ++ } ++ + /* Process MSI interrupts */ + if (isr0_status & PCIE_ISR0_MSI_INT_PENDING) + advk_pcie_handle_msi(pcie); +@@ -1430,28 +1564,50 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie) + advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i), + PCIE_ISR1_REG); + +- generic_handle_domain_irq(pcie->irq_domain, i); ++ if (generic_handle_domain_irq(pcie->irq_domain, i) == -EINVAL) ++ dev_err_ratelimited(&pcie->pdev->dev, "unexpected INT%c IRQ\n", ++ (char)i + 'A'); + } + } + +-static irqreturn_t advk_pcie_irq_handler(int irq, void *arg) ++static void advk_pcie_irq_handler(struct irq_desc *desc) + { +- struct advk_pcie *pcie = arg; +- u32 status; ++ struct advk_pcie *pcie = irq_desc_get_handler_data(desc); ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ u32 val, mask, status; + +- status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG); +- if (!(status & PCIE_IRQ_CORE_INT)) +- return IRQ_NONE; ++ chained_irq_enter(chip, desc); + +- advk_pcie_handle_int(pcie); ++ val = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG); ++ mask = advk_readl(pcie, HOST_CTRL_INT_MASK_REG); ++ status = val & ((~mask) & PCIE_IRQ_ALL_MASK); + +- /* Clear interrupt */ +- advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG); ++ if (status & PCIE_IRQ_CORE_INT) { ++ advk_pcie_handle_int(pcie); + +- return IRQ_HANDLED; ++ /* Clear interrupt */ ++ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG); ++ } ++ ++ chained_irq_exit(chip, desc); ++} ++ ++static int advk_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ struct advk_pcie *pcie = dev->bus->sysdata; ++ ++ /* ++ * Emulated root bridge has its own emulated irq chip and irq domain. 
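The interrupt entry point above also changes shape: instead of a shared handler requested with devm_request_irq(), the summary interrupt becomes a chained handler installed with irq_set_chained_handler_and_data(), the usual form for an interrupt-controller driver. A generic outline of that pattern; read_status() and dispatch() are hypothetical:

        #include <linux/irq.h>
        #include <linux/irqchip/chained_irq.h>

        static u32 read_status(void *priv);            /* hypothetical */
        static void dispatch(void *priv, u32 status);  /* hypothetical */

        static void demo_chained_handler(struct irq_desc *desc)
        {
                struct irq_chip *chip = irq_desc_get_chip(desc);
                void *priv = irq_desc_get_handler_data(desc);

                chained_irq_enter(chip, desc);  /* mask/ack on the parent chip */
                dispatch(priv, read_status(priv));
                chained_irq_exit(chip, desc);   /* eoi/unmask on the parent chip */
        }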
++ * Argument pin is the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) and ++ * hwirq for irq_create_mapping() is indexed from zero. ++ */ ++ if (pci_is_root_bus(dev->bus)) ++ return irq_create_mapping(pcie->rp_irq_domain, pin - 1); ++ else ++ return of_irq_parse_and_map_pci(dev, slot, pin); + } + +-static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie) ++static void advk_pcie_disable_phy(struct advk_pcie *pcie) + { + phy_power_off(pcie->phy); + phy_exit(pcie->phy); +@@ -1515,7 +1671,7 @@ static int advk_pcie_probe(struct platform_device *pdev) + struct advk_pcie *pcie; + struct pci_host_bridge *bridge; + struct resource_entry *entry; +- int ret, irq; ++ int ret; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie)); + if (!bridge) +@@ -1601,17 +1757,9 @@ static int advk_pcie_probe(struct platform_device *pdev) + if (IS_ERR(pcie->base)) + return PTR_ERR(pcie->base); + +- irq = platform_get_irq(pdev, 0); +- if (irq < 0) +- return irq; +- +- ret = devm_request_irq(dev, irq, advk_pcie_irq_handler, +- IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie", +- pcie); +- if (ret) { +- dev_err(dev, "Failed to register interrupt\n"); +- return ret; +- } ++ pcie->irq = platform_get_irq(pdev, 0); ++ if (pcie->irq < 0) ++ return pcie->irq; + + pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node, + "reset-gpios", 0, +@@ -1660,11 +1808,24 @@ static int advk_pcie_probe(struct platform_device *pdev) + return ret; + } + ++ ret = advk_pcie_init_rp_irq_domain(pcie); ++ if (ret) { ++ dev_err(dev, "Failed to initialize irq\n"); ++ advk_pcie_remove_msi_irq_domain(pcie); ++ advk_pcie_remove_irq_domain(pcie); ++ return ret; ++ } ++ ++ irq_set_chained_handler_and_data(pcie->irq, advk_pcie_irq_handler, pcie); ++ + bridge->sysdata = pcie; + bridge->ops = &advk_pcie_ops; ++ bridge->map_irq = advk_pcie_map_irq; + + ret = pci_host_probe(bridge); + if (ret < 0) { ++ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); ++ advk_pcie_remove_rp_irq_domain(pcie); + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + return ret; +@@ -1712,7 +1873,11 @@ static int advk_pcie_remove(struct platform_device *pdev) + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); + ++ /* Remove IRQ handler */ ++ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); ++ + /* Remove IRQ domains */ ++ advk_pcie_remove_rp_irq_domain(pcie); + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + +diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c +index 8e87a31e329d0..ba6d787896606 100644 +--- a/drivers/s390/block/dasd.c ++++ b/drivers/s390/block/dasd.c +@@ -1422,6 +1422,13 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) + if (!cqr->lpm) + cqr->lpm = dasd_path_get_opm(device); + } ++ /* ++ * remember the amount of formatted tracks to prevent double format on ++ * ESE devices ++ */ ++ if (cqr->block) ++ cqr->trkcount = atomic_read(&cqr->block->trkcount); ++ + if (cqr->cpmode == 1) { + rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, + (long) cqr, cqr->lpm); +@@ -1639,6 +1646,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + unsigned long now; + int nrf_suppressed = 0; + int fp_suppressed = 0; ++ struct request *req; + u8 *sense = NULL; + int expires; + +@@ -1739,7 +1747,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, + } + + if (dasd_ese_needs_format(cqr->block, irb)) { +- if (rq_data_dir((struct request 
*)cqr->callback_data) == READ) { ++ req = dasd_get_callback_data(cqr); ++ if (!req) { ++ cqr->status = DASD_CQR_ERROR; ++ return; ++ } ++ if (rq_data_dir(req) == READ) { + device->discipline->ese_read(cqr, irb); + cqr->status = DASD_CQR_SUCCESS; + cqr->stopclk = now; +@@ -2765,8 +2778,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) + * complete a request partially. + */ + if (proc_bytes) { +- blk_update_request(req, BLK_STS_OK, +- blk_rq_bytes(req) - proc_bytes); ++ blk_update_request(req, BLK_STS_OK, proc_bytes); + blk_mq_requeue_request(req, true); + } else if (likely(!blk_should_fake_timeout(req->q))) { + blk_mq_complete_request(req); +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c +index 8410a25a65c13..e46461b4d8a75 100644 +--- a/drivers/s390/block/dasd_eckd.c ++++ b/drivers/s390/block/dasd_eckd.c +@@ -3083,13 +3083,24 @@ static int dasd_eckd_format_device(struct dasd_device *base, + } + + static bool test_and_set_format_track(struct dasd_format_entry *to_format, +- struct dasd_block *block) ++ struct dasd_ccw_req *cqr) + { ++ struct dasd_block *block = cqr->block; + struct dasd_format_entry *format; + unsigned long flags; + bool rc = false; + + spin_lock_irqsave(&block->format_lock, flags); ++ if (cqr->trkcount != atomic_read(&block->trkcount)) { ++ /* ++ * The number of formatted tracks has changed after request ++ * start and we can not tell if the current track was involved. ++ * To avoid data corruption treat it as if the current track is ++ * involved ++ */ ++ rc = true; ++ goto out; ++ } + list_for_each_entry(format, &block->format_list, list) { + if (format->track == to_format->track) { + rc = true; +@@ -3109,6 +3120,7 @@ static void clear_format_track(struct dasd_format_entry *format, + unsigned long flags; + + spin_lock_irqsave(&block->format_lock, flags); ++ atomic_inc(&block->trkcount); + list_del_init(&format->list); + spin_unlock_irqrestore(&block->format_lock, flags); + } +@@ -3145,7 +3157,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr, + sector_t curr_trk; + int rc; + +- req = cqr->callback_data; ++ req = dasd_get_callback_data(cqr); + block = cqr->block; + base = block->base; + private = base->private; +@@ -3170,8 +3182,11 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr, + } + format->track = curr_trk; + /* test if track is already in formatting by another thread */ +- if (test_and_set_format_track(format, block)) ++ if (test_and_set_format_track(format, cqr)) { ++ /* this is no real error so do not count down retries */ ++ cqr->retries++; + return ERR_PTR(-EEXIST); ++ } + + fdata.start_unit = curr_trk; + fdata.stop_unit = curr_trk; +@@ -3270,12 +3285,11 @@ static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb) + cqr->proc_bytes = blk_count * blksize; + return 0; + } +- if (dst && !skip_block) { +- dst += off; ++ if (dst && !skip_block) + memset(dst, 0, blksize); +- } else { ++ else + skip_block--; +- } ++ dst += blksize; + blk_count++; + } + } +diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h +index 8b458010f88a1..6e7f1a4a28a03 100644 +--- a/drivers/s390/block/dasd_int.h ++++ b/drivers/s390/block/dasd_int.h +@@ -188,6 +188,7 @@ struct dasd_ccw_req { + void (*callback)(struct dasd_ccw_req *, void *data); + void *callback_data; + unsigned int proc_bytes; /* bytes for partial completion */ ++ unsigned int trkcount; /* count formatted tracks */ + }; + + /* +@@ -611,6 +612,7 @@ struct dasd_block { + + struct list_head 
format_list; + spinlock_t format_lock; ++ atomic_t trkcount; + }; + + struct dasd_attention_data { +@@ -757,6 +759,18 @@ dasd_check_blocksize(int bsize) + return 0; + } + ++/* ++ * return the callback data of the original request in case there are ++ * ERP requests build on top of it ++ */ ++static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr) ++{ ++ while (cqr->refers) ++ cqr = cqr->refers; ++ ++ return cqr->callback_data; ++} ++ + /* externals in dasd.c */ + #define DASD_PROFILE_OFF 0 + #define DASD_PROFILE_ON 1 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c +index 00f0f282e7a13..10a9369c9dea4 100644 +--- a/drivers/video/fbdev/core/fbmem.c ++++ b/drivers/video/fbdev/core/fbmem.c +@@ -1438,7 +1438,10 @@ fb_release(struct inode *inode, struct file *file) + __acquires(&info->lock) + __releases(&info->lock) + { +- struct fb_info * const info = file->private_data; ++ struct fb_info * const info = file_fb_info(file); ++ ++ if (!info) ++ return -ENODEV; + + lock_fb_info(info); + if (info->fbops->fb_release) +diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h +index b3e46aabc3d86..5c0035316dd01 100644 +--- a/fs/btrfs/btrfs_inode.h ++++ b/fs/btrfs/btrfs_inode.h +@@ -346,6 +346,17 @@ static inline bool btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation) + return ret; + } + ++/* ++ * Check if the inode has flags compatible with compression ++ */ ++static inline bool btrfs_inode_can_compress(const struct btrfs_inode *inode) ++{ ++ if (inode->flags & BTRFS_INODE_NODATACOW || ++ inode->flags & BTRFS_INODE_NODATASUM) ++ return false; ++ return true; ++} ++ + struct btrfs_dio_private { + struct inode *inode; + +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index ed986c70cbc5e..e5f13922a18fe 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -3569,6 +3569,17 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device + if (sectorsize < PAGE_SIZE) { + struct btrfs_subpage_info *subpage_info; + ++ /* ++ * V1 space cache has some hardcoded PAGE_SIZE usage, and is ++ * going to be deprecated. ++ * ++ * Force to use v2 cache for subpage case. ++ */ ++ btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE); ++ btrfs_set_and_info(fs_info, FREE_SPACE_TREE, ++ "forcing free space tree for sector size %u with page size %lu", ++ sectorsize, PAGE_SIZE); ++ + btrfs_warn(fs_info, + "read-write for sector size %u with page size %lu is experimental", + sectorsize, PAGE_SIZE); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index ecd305649e129..10e205fbad6cf 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -485,17 +485,6 @@ static noinline int add_async_extent(struct async_chunk *cow, + return 0; + } + +-/* +- * Check if the inode has flags compatible with compression +- */ +-static inline bool inode_can_compress(struct btrfs_inode *inode) +-{ +- if (inode->flags & BTRFS_INODE_NODATACOW || +- inode->flags & BTRFS_INODE_NODATASUM) +- return false; +- return true; +-} +- + /* + * Check if the inode needs to be submitted to compression, based on mount + * options, defragmentation, properties or heuristics. 
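Moving the helper into btrfs_inode.h lets the property code reuse it: prop_compression_validate(), in the props.c hunks further down, now rejects setting the compression xattr on inodes flagged NODATACOW or NODATASUM, since compressed extents depend on both COW and checksums. A condensed sketch of that validation order; the value parsing is elided:

        /* Condensed from the props.c hunks in this patch; not the full validator. */
        static int compression_validate(const struct btrfs_inode *inode,
                                        const char *value, size_t len)
        {
                if (!btrfs_inode_can_compress(inode))
                        return -EINVAL; /* NODATACOW/NODATASUM inode */

                if (!value)
                        return 0;       /* clearing the property is allowed */

                /* ... check the supplied algorithm name (elided) ... */
                return 0;
        }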
+@@ -505,7 +494,7 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start, + { + struct btrfs_fs_info *fs_info = inode->root->fs_info; + +- if (!inode_can_compress(inode)) { ++ if (!btrfs_inode_can_compress(inode)) { + WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), + KERN_ERR "BTRFS: unexpected compression for ino %llu\n", + btrfs_ino(inode)); +@@ -2015,7 +2004,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page + (zoned && btrfs_is_data_reloc_root(inode->root))); + ret = run_delalloc_nocow(inode, locked_page, start, end, + page_started, nr_written); +- } else if (!inode_can_compress(inode) || ++ } else if (!btrfs_inode_can_compress(inode) || + !inode_need_compress(inode, start, end)) { + if (zoned) + ret = run_delalloc_zoned(inode, locked_page, start, end, +diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c +index 1a6d2d5b4b333..1b31481f9e72c 100644 +--- a/fs/btrfs/props.c ++++ b/fs/btrfs/props.c +@@ -17,9 +17,11 @@ static DEFINE_HASHTABLE(prop_handlers_ht, BTRFS_PROP_HANDLERS_HT_BITS); + struct prop_handler { + struct hlist_node node; + const char *xattr_name; +- int (*validate)(const char *value, size_t len); ++ int (*validate)(const struct btrfs_inode *inode, const char *value, ++ size_t len); + int (*apply)(struct inode *inode, const char *value, size_t len); + const char *(*extract)(struct inode *inode); ++ bool (*ignore)(const struct btrfs_inode *inode); + int inheritable; + }; + +@@ -55,7 +57,8 @@ find_prop_handler(const char *name, + return NULL; + } + +-int btrfs_validate_prop(const char *name, const char *value, size_t value_len) ++int btrfs_validate_prop(const struct btrfs_inode *inode, const char *name, ++ const char *value, size_t value_len) + { + const struct prop_handler *handler; + +@@ -69,7 +72,29 @@ int btrfs_validate_prop(const char *name, const char *value, size_t value_len) + if (value_len == 0) + return 0; + +- return handler->validate(value, value_len); ++ return handler->validate(inode, value, value_len); ++} ++ ++/* ++ * Check if a property should be ignored (not set) for an inode. ++ * ++ * @inode: The target inode. ++ * @name: The property's name. ++ * ++ * The caller must be sure the given property name is valid, for example by ++ * having previously called btrfs_validate_prop(). ++ * ++ * Returns: true if the property should be ignored for the given inode ++ * false if the property must not be ignored for the given inode ++ */ ++bool btrfs_ignore_prop(const struct btrfs_inode *inode, const char *name) ++{ ++ const struct prop_handler *handler; ++ ++ handler = find_prop_handler(name, NULL); ++ ASSERT(handler != NULL); ++ ++ return handler->ignore(inode); + } + + int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode, +@@ -252,8 +277,12 @@ int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path) + return ret; + } + +-static int prop_compression_validate(const char *value, size_t len) ++static int prop_compression_validate(const struct btrfs_inode *inode, ++ const char *value, size_t len) + { ++ if (!btrfs_inode_can_compress(inode)) ++ return -EINVAL; ++ + if (!value) + return 0; + +@@ -310,6 +339,22 @@ static int prop_compression_apply(struct inode *inode, const char *value, + return 0; + } + ++static bool prop_compression_ignore(const struct btrfs_inode *inode) ++{ ++ /* ++ * Compression only has effect for regular files, and for directories ++ * we set it just to propagate it to new files created inside them. 
++ * Everything else (symlinks, devices, sockets, fifos) is pointless as ++ * it will do nothing, so don't waste metadata space on a compression ++ * xattr for anything that is neither a file nor a directory. ++ */ ++ if (!S_ISREG(inode->vfs_inode.i_mode) && ++ !S_ISDIR(inode->vfs_inode.i_mode)) ++ return true; ++ ++ return false; ++} ++ + static const char *prop_compression_extract(struct inode *inode) + { + switch (BTRFS_I(inode)->prop_compress) { +@@ -330,6 +375,7 @@ static struct prop_handler prop_handlers[] = { + .validate = prop_compression_validate, + .apply = prop_compression_apply, + .extract = prop_compression_extract, ++ .ignore = prop_compression_ignore, + .inheritable = 1 + }, + }; +@@ -356,6 +402,9 @@ static int inherit_props(struct btrfs_trans_handle *trans, + if (!h->inheritable) + continue; + ++ if (h->ignore(BTRFS_I(inode))) ++ continue; ++ + value = h->extract(parent); + if (!value) + continue; +@@ -364,7 +413,7 @@ static int inherit_props(struct btrfs_trans_handle *trans, + * This is not strictly necessary as the property should be + * valid, but in case it isn't, don't propagate it further. + */ +- ret = h->validate(value, strlen(value)); ++ ret = h->validate(BTRFS_I(inode), value, strlen(value)); + if (ret) + continue; + +diff --git a/fs/btrfs/props.h b/fs/btrfs/props.h +index 40b2c65b518c6..59bea741cfcf4 100644 +--- a/fs/btrfs/props.h ++++ b/fs/btrfs/props.h +@@ -13,7 +13,9 @@ void __init btrfs_props_init(void); + int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode, + const char *name, const char *value, size_t value_len, + int flags); +-int btrfs_validate_prop(const char *name, const char *value, size_t value_len); ++int btrfs_validate_prop(const struct btrfs_inode *inode, const char *name, ++ const char *value, size_t value_len); ++bool btrfs_ignore_prop(const struct btrfs_inode *inode, const char *name); + + int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path); + +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c +index beb7f72d50b86..11927d440f11a 100644 +--- a/fs/btrfs/sysfs.c ++++ b/fs/btrfs/sysfs.c +@@ -919,6 +919,9 @@ static ssize_t btrfs_exclusive_operation_show(struct kobject *kobj, + case BTRFS_EXCLOP_BALANCE: + str = "balance\n"; + break; ++ case BTRFS_EXCLOP_BALANCE_PAUSED: ++ str = "balance paused\n"; ++ break; + case BTRFS_EXCLOP_DEV_ADD: + str = "device add\n"; + break; +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index 7a0bfa5bedb95..049ee19041c7b 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -5655,6 +5655,18 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, + mutex_lock(&inode->log_mutex); + } + ++ /* ++ * For symlinks, we must always log their content, which is stored in an ++ * inline extent, otherwise we could end up with an empty symlink after ++ * log replay, which is invalid on Linux (symlink(2) returns -ENOENT if ++ * one attempts to create an empty symlink). ++ * We don't need to worry about flushing delalloc, because we create ++ * the inline extent when the symlink is created (we never have delalloc ++ * for symlinks). ++ */ ++ if (S_ISLNK(inode->vfs_inode.i_mode)) ++ inode_only = LOG_INODE_ALL; ++ + /* + * This is for cases where logging a directory could result in losing + * a file after replaying the log.
For example, if we move a file from a +@@ -6015,7 +6027,7 @@ process_leaf: + } + + ctx->log_new_dentries = false; +- if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) ++ if (type == BTRFS_FT_DIR) + log_mode = LOG_INODE_ALL; + ret = btrfs_log_inode(trans, BTRFS_I(di_inode), + log_mode, ctx); +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c +index 99abf41b89b92..85691dc2232fa 100644 +--- a/fs/btrfs/xattr.c ++++ b/fs/btrfs/xattr.c +@@ -262,7 +262,8 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name, + inode_inc_iversion(inode); + inode->i_ctime = current_time(inode); + ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); +- BUG_ON(ret); ++ if (ret) ++ btrfs_abort_transaction(trans, ret); + out: + if (start_trans) + btrfs_end_transaction(trans); +@@ -403,10 +404,13 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler, + struct btrfs_root *root = BTRFS_I(inode)->root; + + name = xattr_full_name(handler, name); +- ret = btrfs_validate_prop(name, value, size); ++ ret = btrfs_validate_prop(BTRFS_I(inode), name, value, size); + if (ret) + return ret; + ++ if (btrfs_ignore_prop(BTRFS_I(inode), name)) ++ return 0; ++ + trans = btrfs_start_transaction(root, 2); + if (IS_ERR(trans)) + return PTR_ERR(trans); +@@ -416,7 +420,8 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler, + inode_inc_iversion(inode); + inode->i_ctime = current_time(inode); + ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); +- BUG_ON(ret); ++ if (ret) ++ btrfs_abort_transaction(trans, ret); + } + + btrfs_end_transaction(trans); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index c36fa0d0d438b..3d307854c6504 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -363,6 +363,14 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent + kunmap_atomic(start); + } + ++static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version) ++{ ++ if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) { ++ fattr->pre_change_attr = version; ++ fattr->valid |= NFS_ATTR_FATTR_PRECHANGE; ++ } ++} ++ + static void nfs4_test_and_free_stateid(struct nfs_server *server, + nfs4_stateid *stateid, + const struct cred *cred) +@@ -6556,7 +6564,9 @@ static void nfs4_delegreturn_release(void *calldata) + pnfs_roc_release(&data->lr.arg, &data->lr.res, + data->res.lr_ret); + if (inode) { +- nfs_post_op_update_inode_force_wcc(inode, &data->fattr); ++ nfs4_fattr_set_prechange(&data->fattr, ++ inode_peek_iversion_raw(inode)); ++ nfs_refresh_inode(inode, &data->fattr); + nfs_iput_and_deactive(inode); + } + kfree(calldata); +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h +index 24eea1b05ca27..29917850f0794 100644 +--- a/include/linux/stmmac.h ++++ b/include/linux/stmmac.h +@@ -270,5 +270,6 @@ struct plat_stmmacenet_data { + int msi_rx_base_vec; + int msi_tx_base_vec; + bool use_phy_wol; ++ bool sph_disable; + }; + #endif +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h +index 99cbdf55a8bda..f09c60393e559 100644 +--- a/kernel/irq/internals.h ++++ b/kernel/irq/internals.h +@@ -29,12 +29,14 @@ extern struct irqaction chained_action; + * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed + * IRQTF_AFFINITY - irq thread is requested to adjust affinity + * IRQTF_FORCED_THREAD - irq action is force threaded ++ * IRQTF_READY - signals that irq thread is ready + */ + enum { + IRQTF_RUNTHREAD, + IRQTF_WARNED, + IRQTF_AFFINITY, + IRQTF_FORCED_THREAD, ++ IRQTF_READY, + }; + + /* +diff --git 
a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c +index 2267e6527db3c..a4426a00b9edf 100644 +--- a/kernel/irq/irqdesc.c ++++ b/kernel/irq/irqdesc.c +@@ -407,6 +407,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags, + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + mutex_init(&desc->request_mutex); + init_rcu_head(&desc->rcu); ++ init_waitqueue_head(&desc->wait_for_threads); + + desc_set_defaults(irq, desc, node, affinity, owner); + irqd_set(&desc->irq_data, flags); +@@ -575,6 +576,7 @@ int __init early_irq_init(void) + raw_spin_lock_init(&desc[i].lock); + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); + mutex_init(&desc[i].request_mutex); ++ init_waitqueue_head(&desc[i].wait_for_threads); + desc_set_defaults(i, &desc[i], node, NULL, NULL); + } + return arch_early_irq_init(); +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index f23ffd30385b1..8915fba0697a0 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -1248,6 +1248,31 @@ static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) + raw_spin_unlock_irq(&desc->lock); + } + ++/* ++ * Internal function to notify that an interrupt thread is ready. ++ */ ++static void irq_thread_set_ready(struct irq_desc *desc, ++ struct irqaction *action) ++{ ++ set_bit(IRQTF_READY, &action->thread_flags); ++ wake_up(&desc->wait_for_threads); ++} ++ ++/* ++ * Internal function to wake up an interrupt thread and wait until it is ++ * ready. ++ */ ++static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc, ++ struct irqaction *action) ++{ ++ if (!action || !action->thread) ++ return; ++ ++ wake_up_process(action->thread); ++ wait_event(desc->wait_for_threads, ++ test_bit(IRQTF_READY, &action->thread_flags)); ++} ++ + /* + * Interrupt handler thread + */ +@@ -1259,6 +1284,8 @@ static int irq_thread(void *data) + irqreturn_t (*handler_fn)(struct irq_desc *desc, + struct irqaction *action); + ++ irq_thread_set_ready(desc, action); ++ + sched_set_fifo(current); + + if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD, +@@ -1683,8 +1710,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) + } + + if (!shared) { +- init_waitqueue_head(&desc->wait_for_threads); +- + /* Setup the type (level, edge polarity) if configured: */ + if (new->flags & IRQF_TRIGGER_MASK) { + ret = __irq_set_trigger(desc, +@@ -1780,14 +1805,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) + + irq_setup_timings(desc, new); + +- /* +- * Strictly no need to wake it up, but hung_task complains +- * when no hard interrupt wakes the thread up. +- */ +- if (new->thread) +- wake_up_process(new->thread); +- if (new->secondary) +- wake_up_process(new->secondary->thread); ++ wake_up_and_wait_for_irq_thread_ready(desc, new); ++ wake_up_and_wait_for_irq_thread_ready(desc, new->secondary); + + register_irq_proc(irq, desc); + new->dir = NULL; +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c +index dcdcb85121e40..3b1398fbddaf8 100644 +--- a/kernel/time/timekeeping.c ++++ b/kernel/time/timekeeping.c +@@ -482,7 +482,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) + * of the following timestamps. Callers need to be aware of that and + * deal with it.
+ */ +-u64 ktime_get_mono_fast_ns(void) ++u64 notrace ktime_get_mono_fast_ns(void) + { + return __ktime_get_fast_ns(&tk_fast_mono); + } +@@ -494,7 +494,7 @@ EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns); + * Contrary to ktime_get_mono_fast_ns() this is always correct because the + * conversion factor is not affected by NTP/PTP correction. + */ +-u64 ktime_get_raw_fast_ns(void) ++u64 notrace ktime_get_raw_fast_ns(void) + { + return __ktime_get_fast_ns(&tk_fast_raw); + } +diff --git a/net/can/isotp.c b/net/can/isotp.c +index 8c753dcefe7fc..26821487a0573 100644 +--- a/net/can/isotp.c ++++ b/net/can/isotp.c +@@ -1146,6 +1146,11 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) + + lock_sock(sk); + ++ if (so->bound) { ++ err = -EINVAL; ++ goto out; ++ } ++ + /* do not register frame reception for functional addressing */ + if (so->opt.flags & CAN_ISOTP_SF_BROADCAST) + do_rx_reg = 0; +@@ -1156,10 +1161,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) + goto out; + } + +- if (so->bound && addr->can_ifindex == so->ifindex && +- rx_id == so->rxid && tx_id == so->txid) +- goto out; +- + dev = dev_get_by_index(net, addr->can_ifindex); + if (!dev) { + err = -ENODEV; +@@ -1186,19 +1187,6 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) + + dev_put(dev); + +- if (so->bound && do_rx_reg) { +- /* unregister old filter */ +- if (so->ifindex) { +- dev = dev_get_by_index(net, so->ifindex); +- if (dev) { +- can_rx_unregister(net, dev, so->rxid, +- SINGLE_MASK(so->rxid), +- isotp_rcv, sk); +- dev_put(dev); +- } +- } +- } +- + /* switch to new settings */ + so->ifindex = ifindex; + so->rxid = rx_id; +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c +index 2ad3c7b42d6d2..1d9e6d5e9a76c 100644 +--- a/net/ipv4/igmp.c ++++ b/net/ipv4/igmp.c +@@ -2403,9 +2403,10 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct + /* decrease mem now to avoid the memleak warning */ + atomic_sub(struct_size(psl, sl_addr, psl->sl_max), + &sk->sk_omem_alloc); +- kfree_rcu(psl, rcu); + } + rcu_assign_pointer(pmc->sflist, newpsl); ++ if (psl) ++ kfree_rcu(psl, rcu); + psl = newpsl; + } + rv = 1; /* > 0 for insert logic below if sl_count is 0 */ +@@ -2507,11 +2508,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) + /* decrease mem now to avoid the memleak warning */ + atomic_sub(struct_size(psl, sl_addr, psl->sl_max), + &sk->sk_omem_alloc); +- kfree_rcu(psl, rcu); +- } else ++ } else { + (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, + 0, NULL, 0); ++ } + rcu_assign_pointer(pmc->sflist, newpsl); ++ if (psl) ++ kfree_rcu(psl, rcu); + pmc->sfmode = msf->imsf_fmode; + err = 0; + done: +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c +index 909f937befd71..7f695c39d9a8c 100644 +--- a/net/ipv6/mcast.c ++++ b/net/ipv6/mcast.c +@@ -460,10 +460,10 @@ int ip6_mc_source(int add, int omode, struct sock *sk, + newpsl->sl_addr[i] = psl->sl_addr[i]; + atomic_sub(struct_size(psl, sl_addr, psl->sl_max), + &sk->sk_omem_alloc); +- kfree_rcu(psl, rcu); + } ++ rcu_assign_pointer(pmc->sflist, newpsl); ++ kfree_rcu(psl, rcu); + psl = newpsl; +- rcu_assign_pointer(pmc->sflist, psl); + } + rv = 1; /* > 0 for insert logic below if sl_count is 0 */ + for (i = 0; i < psl->sl_count; i++) { +@@ -565,12 +565,12 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf, + psl->sl_count, psl->sl_addr, 0); + atomic_sub(struct_size(psl, sl_addr, psl->sl_max), + &sk->sk_omem_alloc); +- kfree_rcu(psl, rcu); + } else { + 
ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); + } +- mutex_unlock(&idev->mc_lock); + rcu_assign_pointer(pmc->sflist, newpsl); ++ mutex_unlock(&idev->mc_lock); ++ kfree_rcu(psl, rcu); + pmc->sfmode = gsf->gf_fmode; + err = 0; + done: +diff --git a/net/nfc/core.c b/net/nfc/core.c +index dc7a2404efdf9..5b286e1e0a6ff 100644 +--- a/net/nfc/core.c ++++ b/net/nfc/core.c +@@ -38,7 +38,7 @@ int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -94,7 +94,7 @@ int nfc_dev_up(struct nfc_dev *dev) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -142,7 +142,7 @@ int nfc_dev_down(struct nfc_dev *dev) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -207,7 +207,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -246,7 +246,7 @@ int nfc_stop_poll(struct nfc_dev *dev) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -291,7 +291,7 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -335,7 +335,7 @@ int nfc_dep_link_down(struct nfc_dev *dev) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -401,7 +401,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -448,7 +448,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -495,7 +495,7 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + kfree_skb(skb); + goto error; +@@ -552,7 +552,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -601,7 +601,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx) + + device_lock(&dev->dev); + +- if (!device_is_registered(&dev->dev)) { ++ if (dev->shutting_down) { + rc = -ENODEV; + goto error; + } +@@ -1134,6 +1134,7 @@ int nfc_register_device(struct nfc_dev *dev) + dev->rfkill = NULL; + } + } ++ dev->shutting_down = false; + device_unlock(&dev->dev); + + rc = nfc_genl_device_added(dev); +@@ -1166,12 +1167,10 @@ void nfc_unregister_device(struct nfc_dev *dev) + rfkill_unregister(dev->rfkill); + rfkill_destroy(dev->rfkill); + } ++ dev->shutting_down = true; + device_unlock(&dev->dev); + + if (dev->ops->check_presence) { +- device_lock(&dev->dev); +- dev->shutting_down = true; +- device_unlock(&dev->dev); + del_timer_sync(&dev->check_pres_timer); + 
cancel_work_sync(&dev->check_pres_work); + } +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c +index f184b0db79d40..7c62417ccfd78 100644 +--- a/net/nfc/netlink.c ++++ b/net/nfc/netlink.c +@@ -1244,7 +1244,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, + struct sk_buff *msg; + void *hdr; + +- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); ++ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!msg) + return -ENOMEM; + +@@ -1260,7 +1260,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, + + genlmsg_end(msg, hdr); + +- genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); ++ genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); + + return 0; + +diff --git a/net/rds/tcp.c b/net/rds/tcp.c +index 5327d130c4b56..2f638f8b7b1e7 100644 +--- a/net/rds/tcp.c ++++ b/net/rds/tcp.c +@@ -495,6 +495,14 @@ void rds_tcp_tune(struct socket *sock) + + tcp_sock_set_nodelay(sock->sk); + lock_sock(sk); ++ /* TCP timer functions might access net namespace even after ++ * a process which created this net namespace has terminated. ++ */ ++ if (!sk->sk_net_refcnt) { ++ sk->sk_net_refcnt = 1; ++ get_net_track(net, &sk->ns_tracker, GFP_KERNEL); ++ sock_inuse_add(net, 1); ++ } + if (rtn->sndbuf_size > 0) { + sk->sk_sndbuf = rtn->sndbuf_size; + sk->sk_userlocks |= SOCK_SNDBUF_LOCK; +diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c +index a4111408ffd0c..6a1611b0e3037 100644 +--- a/net/rxrpc/local_object.c ++++ b/net/rxrpc/local_object.c +@@ -117,6 +117,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) + local, srx->transport_type, srx->transport.family); + + udp_conf.family = srx->transport.family; ++ udp_conf.use_udp_checksums = true; + if (udp_conf.family == AF_INET) { + udp_conf.local_ip = srx->transport.sin.sin_addr; + udp_conf.local_udp_port = srx->transport.sin.sin_port; +} else { + udp_conf.local_ip6 = srx->transport.sin6.sin6_addr; + udp_conf.local_udp_port = srx->transport.sin6.sin6_port; ++ udp_conf.use_udp6_tx_checksums = true; ++ udp_conf.use_udp6_rx_checksums = true; + #endif + } + ret = udp_sock_create(net, &udp_conf, &local->socket); +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c +index 0222ad4523a9d..258ebc194ee2b 100644 +--- a/net/sunrpc/clnt.c ++++ b/net/sunrpc/clnt.c +@@ -1065,10 +1065,13 @@ rpc_task_get_next_xprt(struct rpc_clnt *clnt) + static + void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt) + { +- if (task->tk_xprt && +- !(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) && +- (task->tk_flags & RPC_TASK_MOVEABLE))) +- return; ++ if (task->tk_xprt) { ++ if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) && ++ (task->tk_flags & RPC_TASK_MOVEABLE))) ++ return; ++ xprt_release(task); ++ xprt_put(task->tk_xprt); ++ } + if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) + task->tk_xprt = rpc_task_get_first_xprt(clnt); + else +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c +index 7aef2876beb38..eec9569af4c51 100644 +--- a/net/sunrpc/xprtsock.c ++++ b/net/sunrpc/xprtsock.c +@@ -1967,6 +1967,9 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task) + struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); + int ret; + ++ if (transport->file) ++ goto force_disconnect; ++ + if (RPC_IS_ASYNC(task)) { + /* + * We want the AF_LOCAL connect to be resolved in the +@@ -1979,11 +1982,17 @@ static void
xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task) + */ + task->tk_rpc_status = -ENOTCONN; + rpc_exit(task, -ENOTCONN); +- return; ++ goto out_wake; + } + ret = xs_local_setup_socket(transport); + if (ret && !RPC_IS_SOFTCONN(task)) + msleep_interruptible(15000); ++ return; ++force_disconnect: ++ xprt_force_disconnect(xprt); ++out_wake: ++ xprt_clear_connecting(xprt); ++ xprt_wake_pending_tasks(xprt, -ENOTCONN); + } + + #if IS_ENABLED(CONFIG_SUNRPC_SWAP) +@@ -2867,9 +2876,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args) + } + xprt_set_bound(xprt); + xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); +- ret = ERR_PTR(xs_local_setup_socket(transport)); +- if (ret) +- goto out_err; + break; + default: + ret = ERR_PTR(-EAFNOSUPPORT); +diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c +index 626c0c34b0b66..3a53914277d35 100644 +--- a/sound/firewire/fireworks/fireworks_hwdep.c ++++ b/sound/firewire/fireworks/fireworks_hwdep.c +@@ -34,6 +34,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained, + type = SNDRV_FIREWIRE_EVENT_EFW_RESPONSE; + if (copy_to_user(buf, &type, sizeof(type))) + return -EFAULT; ++ count += sizeof(type); + remained -= sizeof(type); + buf += sizeof(type); + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index c66d31d8a498c..51c54cf0f3127 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -8759,6 +8759,8 @@ static const struct hda_fixup alc269_fixups[] = { + [ALC287_FIXUP_CS35L41_I2C_2] = { + .type = HDA_FIXUP_FUNC, + .v.func = cs35l41_fixup_i2c_two, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_THINKPAD_ACPI, + }, + [ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED] = { + .type = HDA_FIXUP_VERBS, +@@ -9191,6 +9193,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), + SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), ++ SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), + SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), +diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c +index 13009d08b09ac..c7493549a9a50 100644 +--- a/sound/soc/codecs/da7219.c ++++ b/sound/soc/codecs/da7219.c +@@ -446,7 +446,7 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol, + struct soc_mixer_control *mixer_ctrl = + (struct soc_mixer_control *) kcontrol->private_value; + unsigned int reg = mixer_ctrl->reg; +- __le16 val; ++ __le16 val_new, val_old; + int ret; + + /* +@@ -454,13 +454,19 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol, + * Therefore we need to convert to little endian here to align with + * HW registers. 
+ */ +- val = cpu_to_le16(ucontrol->value.integer.value[0]); ++ val_new = cpu_to_le16(ucontrol->value.integer.value[0]); + + mutex_lock(&da7219->ctrl_lock); +- ret = regmap_raw_write(da7219->regmap, reg, &val, sizeof(val)); ++ ret = regmap_raw_read(da7219->regmap, reg, &val_old, sizeof(val_old)); ++ if (ret == 0 && (val_old != val_new)) ++ ret = regmap_raw_write(da7219->regmap, reg, ++ &val_new, sizeof(val_new)); + mutex_unlock(&da7219->ctrl_lock); + +- return ret; ++ if (ret < 0) ++ return ret; ++ ++ return val_old != val_new; + } + + +diff --git a/sound/soc/codecs/rt9120.c b/sound/soc/codecs/rt9120.c +index 7aa1772a915f3..6e0d7cf0c8c92 100644 +--- a/sound/soc/codecs/rt9120.c ++++ b/sound/soc/codecs/rt9120.c +@@ -341,7 +341,6 @@ static int rt9120_get_reg_size(unsigned int reg) + { + switch (reg) { + case 0x00: +- case 0x09: + case 0x20 ... 0x27: + return 2; + case 0x30 ... 0x3D: +diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c +index e4018ba3b19a2..7878c7a58ff10 100644 +--- a/sound/soc/codecs/wm8958-dsp2.c ++++ b/sound/soc/codecs/wm8958-dsp2.c +@@ -530,7 +530,7 @@ static int wm8958_mbc_put(struct snd_kcontrol *kcontrol, + + wm8958_dsp_apply(component, mbc, wm8994->mbc_ena[mbc]); + +- return 0; ++ return 1; + } + + #define WM8958_MBC_SWITCH(xname, xval) {\ +@@ -656,7 +656,7 @@ static int wm8958_vss_put(struct snd_kcontrol *kcontrol, + + wm8958_dsp_apply(component, vss, wm8994->vss_ena[vss]); + +- return 0; ++ return 1; + } + + +@@ -730,7 +730,7 @@ static int wm8958_hpf_put(struct snd_kcontrol *kcontrol, + + wm8958_dsp_apply(component, hpf % 3, ucontrol->value.integer.value[0]); + +- return 0; ++ return 1; + } + + #define WM8958_HPF_SWITCH(xname, xval) {\ +@@ -824,7 +824,7 @@ static int wm8958_enh_eq_put(struct snd_kcontrol *kcontrol, + + wm8958_dsp_apply(component, eq, ucontrol->value.integer.value[0]); + +- return 0; ++ return 1; + } + + #define WM8958_ENH_EQ_SWITCH(xname, xval) {\ +diff --git a/sound/soc/meson/aiu-acodec-ctrl.c b/sound/soc/meson/aiu-acodec-ctrl.c +index 27a6d3259c50a..442c215936d97 100644 +--- a/sound/soc/meson/aiu-acodec-ctrl.c ++++ b/sound/soc/meson/aiu-acodec-ctrl.c +@@ -58,7 +58,7 @@ static int aiu_acodec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol, + + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); + +- return 0; ++ return 1; + } + + static SOC_ENUM_SINGLE_DECL(aiu_acodec_ctrl_mux_enum, AIU_ACODEC_CTRL, +diff --git a/sound/soc/meson/aiu-codec-ctrl.c b/sound/soc/meson/aiu-codec-ctrl.c +index c3ea733fce91f..c966fc60dc733 100644 +--- a/sound/soc/meson/aiu-codec-ctrl.c ++++ b/sound/soc/meson/aiu-codec-ctrl.c +@@ -57,7 +57,7 @@ static int aiu_codec_ctrl_mux_put_enum(struct snd_kcontrol *kcontrol, + + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); + +- return 0; ++ return 1; + } + + static SOC_ENUM_SINGLE_DECL(aiu_hdmi_ctrl_mux_enum, AIU_HDMI_CLK_DATA_CTRL, +diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c +index cbbaa55d92a66..2b77010c2c5ce 100644 +--- a/sound/soc/meson/axg-card.c ++++ b/sound/soc/meson/axg-card.c +@@ -320,7 +320,6 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np, + + dai_link->cpus = cpu; + dai_link->num_cpus = 1; +- dai_link->nonatomic = true; + + ret = meson_card_parse_dai(card, np, &dai_link->cpus->of_node, + &dai_link->cpus->dai_name); +diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c +index 0c31934a96301..e076ced300257 100644 +--- a/sound/soc/meson/axg-tdm-interface.c ++++ 
b/sound/soc/meson/axg-tdm-interface.c +@@ -351,29 +351,13 @@ static int axg_tdm_iface_hw_free(struct snd_pcm_substream *substream, + return 0; + } + +-static int axg_tdm_iface_trigger(struct snd_pcm_substream *substream, +- int cmd, ++static int axg_tdm_iface_prepare(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) + { +- struct axg_tdm_stream *ts = +- snd_soc_dai_get_dma_data(dai, substream); +- +- switch (cmd) { +- case SNDRV_PCM_TRIGGER_START: +- case SNDRV_PCM_TRIGGER_RESUME: +- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: +- axg_tdm_stream_start(ts); +- break; +- case SNDRV_PCM_TRIGGER_SUSPEND: +- case SNDRV_PCM_TRIGGER_PAUSE_PUSH: +- case SNDRV_PCM_TRIGGER_STOP: +- axg_tdm_stream_stop(ts); +- break; +- default: +- return -EINVAL; +- } ++ struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream); + +- return 0; ++ /* Force all attached formatters to update */ ++ return axg_tdm_stream_reset(ts); + } + + static int axg_tdm_iface_remove_dai(struct snd_soc_dai *dai) +@@ -413,8 +397,8 @@ static const struct snd_soc_dai_ops axg_tdm_iface_ops = { + .set_fmt = axg_tdm_iface_set_fmt, + .startup = axg_tdm_iface_startup, + .hw_params = axg_tdm_iface_hw_params, ++ .prepare = axg_tdm_iface_prepare, + .hw_free = axg_tdm_iface_hw_free, +- .trigger = axg_tdm_iface_trigger, + }; + + /* TDM Backend DAIs */ +diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c +index 9b2b59536ced0..6c99052feafd8 100644 +--- a/sound/soc/meson/g12a-tohdmitx.c ++++ b/sound/soc/meson/g12a-tohdmitx.c +@@ -67,7 +67,7 @@ static int g12a_tohdmitx_i2s_mux_put_enum(struct snd_kcontrol *kcontrol, + + snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); + +- return 0; ++ return 1; + } + + static SOC_ENUM_SINGLE_DECL(g12a_tohdmitx_i2s_mux_enum, TOHDMITX_CTRL0, +diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c +index 359987bf76d1b..c54c8ca8d7156 100644 +--- a/sound/soc/soc-generic-dmaengine-pcm.c ++++ b/sound/soc/soc-generic-dmaengine-pcm.c +@@ -86,10 +86,10 @@ static int dmaengine_pcm_hw_params(struct snd_soc_component *component, + + memset(&slave_config, 0, sizeof(slave_config)); + +- if (pcm->config && pcm->config->prepare_slave_config) +- prepare_slave_config = pcm->config->prepare_slave_config; +- else ++ if (!pcm->config) + prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config; ++ else ++ prepare_slave_config = pcm->config->prepare_slave_config; + + if (prepare_slave_config) { + int ret = prepare_slave_config(substream, params, &slave_config); +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c +index a0ca58ba16273..58347eadd219b 100644 +--- a/sound/soc/soc-ops.c ++++ b/sound/soc/soc-ops.c +@@ -461,7 +461,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, + ret = err; + } + } +- return err; ++ return ret; + } + EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx); + +diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh +index eaf8a04a7ca5f..10e54bcca7a93 100755 +--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh ++++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh +@@ -190,7 +190,7 @@ setup_prepare() + + tc filter add dev $eth0 ingress chain $(IS2 0 0) pref 1 \ + protocol ipv4 flower skip_sw ip_proto udp dst_port 5201 \ +- action police rate 50mbit burst 64k \ ++ action police rate 50mbit burst 64k conform-exceed drop/pipe \ + action goto chain $(IS2 1 0) + } + +diff --git 
a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h +index 8a470da7b71af..15a2875698b53 100644 +--- a/tools/testing/selftests/kvm/include/x86_64/processor.h ++++ b/tools/testing/selftests/kvm/include/x86_64/processor.h +@@ -60,6 +60,21 @@ + /* CPUID.0x8000_0001.EDX */ + #define CPUID_GBPAGES (1ul << 26) + ++/* Page table bitfield declarations */ ++#define PTE_PRESENT_MASK BIT_ULL(0) ++#define PTE_WRITABLE_MASK BIT_ULL(1) ++#define PTE_USER_MASK BIT_ULL(2) ++#define PTE_ACCESSED_MASK BIT_ULL(5) ++#define PTE_DIRTY_MASK BIT_ULL(6) ++#define PTE_LARGE_MASK BIT_ULL(7) ++#define PTE_GLOBAL_MASK BIT_ULL(8) ++#define PTE_NX_MASK BIT_ULL(63) ++ ++#define PAGE_SHIFT 12 ++ ++#define PHYSICAL_PAGE_MASK GENMASK_ULL(51, 12) ++#define PTE_GET_PFN(pte) (((pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT) ++ + /* General Registers in 64-Bit Mode */ + struct gpr64_regs { + u64 rax; +diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c +index ba1fdc3dcf4a9..2c4a7563a4f8a 100644 +--- a/tools/testing/selftests/kvm/kvm_page_table_test.c ++++ b/tools/testing/selftests/kvm/kvm_page_table_test.c +@@ -278,7 +278,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) + else + guest_test_phys_mem = p->phys_offset; + #ifdef __s390x__ +- alignment = max(0x100000, alignment); ++ alignment = max(0x100000UL, alignment); + #endif + guest_test_phys_mem = align_down(guest_test_phys_mem, alignment); + +diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c +index 9f000dfb55949..0dd442c260159 100644 +--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c ++++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c +@@ -19,38 +19,6 @@ + + vm_vaddr_t exception_handlers; + +-/* Virtual translation table structure declarations */ +-struct pageUpperEntry { +- uint64_t present:1; +- uint64_t writable:1; +- uint64_t user:1; +- uint64_t write_through:1; +- uint64_t cache_disable:1; +- uint64_t accessed:1; +- uint64_t ignored_06:1; +- uint64_t page_size:1; +- uint64_t ignored_11_08:4; +- uint64_t pfn:40; +- uint64_t ignored_62_52:11; +- uint64_t execute_disable:1; +-}; +- +-struct pageTableEntry { +- uint64_t present:1; +- uint64_t writable:1; +- uint64_t user:1; +- uint64_t write_through:1; +- uint64_t cache_disable:1; +- uint64_t accessed:1; +- uint64_t dirty:1; +- uint64_t reserved_07:1; +- uint64_t global:1; +- uint64_t ignored_11_09:3; +- uint64_t pfn:40; +- uint64_t ignored_62_52:11; +- uint64_t execute_disable:1; +-}; +- + void regs_dump(FILE *stream, struct kvm_regs *regs, + uint8_t indent) + { +@@ -195,23 +163,21 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr, + return &page_table[index]; + } + +-static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm, +- uint64_t pt_pfn, +- uint64_t vaddr, +- uint64_t paddr, +- int level, +- enum x86_page_size page_size) ++static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, ++ uint64_t pt_pfn, ++ uint64_t vaddr, ++ uint64_t paddr, ++ int level, ++ enum x86_page_size page_size) + { +- struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level); +- +- if (!pte->present) { +- pte->writable = true; +- pte->present = true; +- pte->page_size = (level == page_size); +- if (pte->page_size) +- pte->pfn = paddr >> vm->page_shift; ++ uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, level); ++ ++ if (!(*pte & PTE_PRESENT_MASK)) { ++ *pte = 
PTE_PRESENT_MASK | PTE_WRITABLE_MASK; ++ if (level == page_size) ++ *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK); + else +- pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift; ++ *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK; + } else { + /* + * Entry already present. Assert that the caller doesn't want +@@ -221,7 +187,7 @@ static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm, + TEST_ASSERT(level != page_size, + "Cannot create hugepage at level: %u, vaddr: 0x%lx\n", + page_size, vaddr); +- TEST_ASSERT(!pte->page_size, ++ TEST_ASSERT(!(*pte & PTE_LARGE_MASK), + "Cannot create page table at level: %u, vaddr: 0x%lx\n", + level, vaddr); + } +@@ -232,8 +198,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, + enum x86_page_size page_size) + { + const uint64_t pg_size = 1ull << ((page_size * 9) + 12); +- struct pageUpperEntry *pml4e, *pdpe, *pde; +- struct pageTableEntry *pte; ++ uint64_t *pml4e, *pdpe, *pde; ++ uint64_t *pte; + + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, + "Unknown or unsupported guest mode, mode: 0x%x", vm->mode); +@@ -257,24 +223,22 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, + */ + pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift, + vaddr, paddr, 3, page_size); +- if (pml4e->page_size) ++ if (*pml4e & PTE_LARGE_MASK) + return; + +- pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size); +- if (pdpe->page_size) ++ pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, 2, page_size); ++ if (*pdpe & PTE_LARGE_MASK) + return; + +- pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size); +- if (pde->page_size) ++ pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, 1, page_size); ++ if (*pde & PTE_LARGE_MASK) + return; + + /* Fill in page table entry. 
*/ +- pte = virt_get_pte(vm, pde->pfn, vaddr, 0); +- TEST_ASSERT(!pte->present, ++ pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, 0); ++ TEST_ASSERT(!(*pte & PTE_PRESENT_MASK), + "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr); +- pte->pfn = paddr >> vm->page_shift; +- pte->writable = true; +- pte->present = 1; ++ *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK); + } + + void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +@@ -282,12 +246,12 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) + __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K); + } + +-static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, ++static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, + uint64_t vaddr) + { + uint16_t index[4]; +- struct pageUpperEntry *pml4e, *pdpe, *pde; +- struct pageTableEntry *pte; ++ uint64_t *pml4e, *pdpe, *pde; ++ uint64_t *pte; + struct kvm_cpuid_entry2 *entry; + struct kvm_sregs sregs; + int max_phy_addr; +@@ -329,30 +293,29 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc + index[3] = (vaddr >> 39) & 0x1ffu; + + pml4e = addr_gpa2hva(vm, vm->pgd); +- TEST_ASSERT(pml4e[index[3]].present, ++ TEST_ASSERT(pml4e[index[3]] & PTE_PRESENT_MASK, + "Expected pml4e to be present for gva: 0x%08lx", vaddr); +- TEST_ASSERT((*(uint64_t*)(&pml4e[index[3]]) & +- (rsvd_mask | (1ull << 7))) == 0, ++ TEST_ASSERT((pml4e[index[3]] & (rsvd_mask | PTE_LARGE_MASK)) == 0, + "Unexpected reserved bits set."); + +- pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size); +- TEST_ASSERT(pdpe[index[2]].present, ++ pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size); ++ TEST_ASSERT(pdpe[index[2]] & PTE_PRESENT_MASK, + "Expected pdpe to be present for gva: 0x%08lx", vaddr); +- TEST_ASSERT(pdpe[index[2]].page_size == 0, ++ TEST_ASSERT(!(pdpe[index[2]] & PTE_LARGE_MASK), + "Expected pdpe to map a pde not a 1-GByte page."); +- TEST_ASSERT((*(uint64_t*)(&pdpe[index[2]]) & rsvd_mask) == 0, ++ TEST_ASSERT((pdpe[index[2]] & rsvd_mask) == 0, + "Unexpected reserved bits set."); + +- pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size); +- TEST_ASSERT(pde[index[1]].present, ++ pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size); ++ TEST_ASSERT(pde[index[1]] & PTE_PRESENT_MASK, + "Expected pde to be present for gva: 0x%08lx", vaddr); +- TEST_ASSERT(pde[index[1]].page_size == 0, ++ TEST_ASSERT(!(pde[index[1]] & PTE_LARGE_MASK), + "Expected pde to map a pte not a 2-MByte page."); +- TEST_ASSERT((*(uint64_t*)(&pde[index[1]]) & rsvd_mask) == 0, ++ TEST_ASSERT((pde[index[1]] & rsvd_mask) == 0, + "Unexpected reserved bits set."); + +- pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size); +- TEST_ASSERT(pte[index[0]].present, ++ pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size); ++ TEST_ASSERT(pte[index[0]] & PTE_PRESENT_MASK, + "Expected pte to be present for gva: 0x%08lx", vaddr); + + return &pte[index[0]]; +@@ -360,7 +323,7 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc + + uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr) + { +- struct pageTableEntry *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr); ++ uint64_t *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr); + + return *(uint64_t *)pte; + } +@@ -368,18 +331,17 @@ uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr) + void vm_set_page_table_entry(struct kvm_vm 
*vm, int vcpuid, uint64_t vaddr, + uint64_t pte) + { +- struct pageTableEntry *new_pte = _vm_get_page_table_entry(vm, vcpuid, +- vaddr); ++ uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpuid, vaddr); + + *(uint64_t *)new_pte = pte; + } + + void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) + { +- struct pageUpperEntry *pml4e, *pml4e_start; +- struct pageUpperEntry *pdpe, *pdpe_start; +- struct pageUpperEntry *pde, *pde_start; +- struct pageTableEntry *pte, *pte_start; ++ uint64_t *pml4e, *pml4e_start; ++ uint64_t *pdpe, *pdpe_start; ++ uint64_t *pde, *pde_start; ++ uint64_t *pte, *pte_start; + + if (!vm->pgd_created) + return; +@@ -389,58 +351,58 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) + fprintf(stream, "%*s index hvaddr gpaddr " + "addr w exec dirty\n", + indent, ""); +- pml4e_start = (struct pageUpperEntry *) addr_gpa2hva(vm, vm->pgd); ++ pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd); + for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) { + pml4e = &pml4e_start[n1]; +- if (!pml4e->present) ++ if (!(*pml4e & PTE_PRESENT_MASK)) + continue; +- fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u " ++ fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u " + " %u\n", + indent, "", + pml4e - pml4e_start, pml4e, +- addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->pfn, +- pml4e->writable, pml4e->execute_disable); ++ addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e), ++ !!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK)); + +- pdpe_start = addr_gpa2hva(vm, pml4e->pfn * vm->page_size); ++ pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK); + for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) { + pdpe = &pdpe_start[n2]; +- if (!pdpe->present) ++ if (!(*pdpe & PTE_PRESENT_MASK)) + continue; +- fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10lx " ++ fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx " + "%u %u\n", + indent, "", + pdpe - pdpe_start, pdpe, + addr_hva2gpa(vm, pdpe), +- (uint64_t) pdpe->pfn, pdpe->writable, +- pdpe->execute_disable); ++ PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK), ++ !!(*pdpe & PTE_NX_MASK)); + +- pde_start = addr_gpa2hva(vm, pdpe->pfn * vm->page_size); ++ pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK); + for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) { + pde = &pde_start[n3]; +- if (!pde->present) ++ if (!(*pde & PTE_PRESENT_MASK)) + continue; + fprintf(stream, "%*spde 0x%-3zx %p " +- "0x%-12lx 0x%-10lx %u %u\n", ++ "0x%-12lx 0x%-10llx %u %u\n", + indent, "", pde - pde_start, pde, + addr_hva2gpa(vm, pde), +- (uint64_t) pde->pfn, pde->writable, +- pde->execute_disable); ++ PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK), ++ !!(*pde & PTE_NX_MASK)); + +- pte_start = addr_gpa2hva(vm, pde->pfn * vm->page_size); ++ pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK); + for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) { + pte = &pte_start[n4]; +- if (!pte->present) ++ if (!(*pte & PTE_PRESENT_MASK)) + continue; + fprintf(stream, "%*spte 0x%-3zx %p " +- "0x%-12lx 0x%-10lx %u %u " ++ "0x%-12lx 0x%-10llx %u %u " + " %u 0x%-10lx\n", + indent, "", + pte - pte_start, pte, + addr_hva2gpa(vm, pte), +- (uint64_t) pte->pfn, +- pte->writable, +- pte->execute_disable, +- pte->dirty, ++ PTE_GET_PFN(*pte), ++ !!(*pte & PTE_WRITABLE_MASK), ++ !!(*pte & PTE_NX_MASK), ++ !!(*pte & PTE_DIRTY_MASK), + ((uint64_t) n1 << 27) + | ((uint64_t) n2 << 18) + | ((uint64_t) n3 << 9) +@@ -558,8 +520,8 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector, + vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) 
+ { + uint16_t index[4]; +- struct pageUpperEntry *pml4e, *pdpe, *pde; +- struct pageTableEntry *pte; ++ uint64_t *pml4e, *pdpe, *pde; ++ uint64_t *pte; + + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " + "unknown or unsupported guest mode, mode: 0x%x", vm->mode); +@@ -572,22 +534,22 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) + if (!vm->pgd_created) + goto unmapped_gva; + pml4e = addr_gpa2hva(vm, vm->pgd); +- if (!pml4e[index[3]].present) ++ if (!(pml4e[index[3]] & PTE_PRESENT_MASK)) + goto unmapped_gva; + +- pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size); +- if (!pdpe[index[2]].present) ++ pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size); ++ if (!(pdpe[index[2]] & PTE_PRESENT_MASK)) + goto unmapped_gva; + +- pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size); +- if (!pde[index[1]].present) ++ pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size); ++ if (!(pde[index[1]] & PTE_PRESENT_MASK)) + goto unmapped_gva; + +- pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size); +- if (!pte[index[0]].present) ++ pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size); ++ if (!(pte[index[0]] & PTE_PRESENT_MASK)) + goto unmapped_gva; + +- return (pte[index[0]].pfn * vm->page_size) + (gva & 0xfffu); ++ return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & 0xfffu); + + unmapped_gva: + TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva); +diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh +index a3402cd8d5b68..9ff22f28032dd 100755 +--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh ++++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh +@@ -61,9 +61,12 @@ setup_prepare() + + vrf_prepare + mirror_gre_topo_create ++ # Avoid changing br1's PVID while it is operational as a L3 interface. ++ ip link set dev br1 down + + ip link set dev $swp3 master br1 + bridge vlan add dev br1 vid 555 pvid untagged self ++ ip link set dev br1 up + ip address add dev br1 192.0.2.129/28 + ip address add dev br1 2001:db8:2::1/64 + +diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c +index 59067f64b7753..2672ac0b6d1f3 100644 +--- a/tools/testing/selftests/net/so_txtime.c ++++ b/tools/testing/selftests/net/so_txtime.c +@@ -421,7 +421,7 @@ static void usage(const char *progname) + "Options:\n" + " -4 only IPv4\n" + " -6 only IPv6\n" +- " -c monotonic (default) or tai\n" ++ " -c monotonic or tai (default)\n" + " -D destination IP address (server)\n" + " -S source IP address (client)\n" + " -r run rx mode\n" +@@ -475,7 +475,7 @@ static void parse_opts(int argc, char **argv) + cfg_rx = true; + break; + case 't': +- cfg_start_time_ns = strtol(optarg, NULL, 0); ++ cfg_start_time_ns = strtoll(optarg, NULL, 0); + break; + case 'm': + cfg_mark = strtol(optarg, NULL, 0); +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c +index 9d126d7fabdb7..313bb0cbfb1eb 100644 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c +@@ -955,7 +955,7 @@ TEST(ERRNO_valid) + ASSERT_EQ(0, ret); + + EXPECT_EQ(parent, syscall(__NR_getppid)); +- EXPECT_EQ(-1, read(0, NULL, 0)); ++ EXPECT_EQ(-1, read(-1, NULL, 0)); + EXPECT_EQ(E2BIG, errno); + } + +@@ -974,7 +974,7 @@ TEST(ERRNO_zero) + + EXPECT_EQ(parent, syscall(__NR_getppid)); + /* "errno" of 0 is ok. 
*/ +- EXPECT_EQ(0, read(0, NULL, 0)); ++ EXPECT_EQ(0, read(-1, NULL, 0)); + } + + /* +@@ -995,7 +995,7 @@ TEST(ERRNO_capped) + ASSERT_EQ(0, ret); + + EXPECT_EQ(parent, syscall(__NR_getppid)); +- EXPECT_EQ(-1, read(0, NULL, 0)); ++ EXPECT_EQ(-1, read(-1, NULL, 0)); + EXPECT_EQ(4095, errno); + } + +@@ -1026,7 +1026,7 @@ TEST(ERRNO_order) + ASSERT_EQ(0, ret); + + EXPECT_EQ(parent, syscall(__NR_getppid)); +- EXPECT_EQ(-1, read(0, NULL, 0)); ++ EXPECT_EQ(-1, read(-1, NULL, 0)); + EXPECT_EQ(12, errno); + } + +@@ -2623,7 +2623,7 @@ void *tsync_sibling(void *data) + ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); + if (!ret) + return (void *)SIBLING_EXIT_NEWPRIVS; +- read(0, NULL, 0); ++ read(-1, NULL, 0); + return (void *)SIBLING_EXIT_UNKILLED; + } + +diff --git a/tools/testing/selftests/vm/mremap_test.c b/tools/testing/selftests/vm/mremap_test.c +index 58775dab3cc6c..5ef41640d657a 100644 +--- a/tools/testing/selftests/vm/mremap_test.c ++++ b/tools/testing/selftests/vm/mremap_test.c +@@ -118,6 +118,30 @@ static unsigned long long get_mmap_min_addr(void) + return addr; + } + ++/* ++ * Returns false if the requested remap region overlaps with an ++ * existing mapping (e.g text, stack) else returns true. ++ */ ++static bool is_remap_region_valid(void *addr, unsigned long long size) ++{ ++ void *remap_addr = NULL; ++ bool ret = true; ++ ++ /* Use MAP_FIXED_NOREPLACE flag to ensure region is not mapped */ ++ remap_addr = mmap(addr, size, PROT_READ | PROT_WRITE, ++ MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED, ++ -1, 0); ++ ++ if (remap_addr == MAP_FAILED) { ++ if (errno == EEXIST) ++ ret = false; ++ } else { ++ munmap(remap_addr, size); ++ } ++ ++ return ret; ++} ++ + /* + * Returns the start address of the mapping on success, else returns + * NULL on failure.