From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Thu, 3 Sep 2020 11:38:02 +0000 (UTC)
Message-ID: <1599133070.d2252824fd2ddee6c3e9a743f933d7cdcc73a6c6.mpagano@gentoo>
commit: d2252824fd2ddee6c3e9a743f933d7cdcc73a6c6
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 3 11:37:50 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep 3 11:37:50 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d2252824fd2ddee6c3e9a743f933d7cdcc73a6c6
Linux patch 5.4.62
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1061_linux-5.4.62.patch | 8044 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 8048 insertions(+)
diff --git a/0000_README b/0000_README
index fca4b03..edfdae1 100644
--- a/0000_README
+++ b/0000_README
@@ -287,6 +287,10 @@ Patch: 1060_linux-5.4.61.patch
From: http://www.kernel.org
Desc: Linux 5.4.61
+Patch: 1061_linux-5.4.62.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.62
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1061_linux-5.4.62.patch b/1061_linux-5.4.62.patch
new file mode 100644
index 0000000..b9353d0
--- /dev/null
+++ b/1061_linux-5.4.62.patch
@@ -0,0 +1,8044 @@
+diff --git a/Makefile b/Makefile
+index 2c21b922644d7..aece56450bd9d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 61
++SUBLEVEL = 62
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+@@ -442,6 +442,12 @@ PYTHON = python
+ PYTHON3 = python3
+ CHECK = sparse
+ BASH = bash
++KGZIP = gzip
++KBZIP2 = bzip2
++KLZOP = lzop
++LZMA = lzma
++LZ4 = lz4c
++XZ = xz
+
+ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
+ -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
+@@ -490,6 +496,7 @@ CLANG_FLAGS :=
+ export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
+ export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL
+ export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
++export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ
+ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
+
+ export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
+@@ -997,10 +1004,10 @@ export mod_strip_cmd
+ mod_compress_cmd = true
+ ifdef CONFIG_MODULE_COMPRESS
+ ifdef CONFIG_MODULE_COMPRESS_GZIP
+- mod_compress_cmd = gzip -n -f
++ mod_compress_cmd = $(KGZIP) -n -f
+ endif # CONFIG_MODULE_COMPRESS_GZIP
+ ifdef CONFIG_MODULE_COMPRESS_XZ
+- mod_compress_cmd = xz -f
++ mod_compress_cmd = $(XZ) -f
+ endif # CONFIG_MODULE_COMPRESS_XZ
+ endif # CONFIG_MODULE_COMPRESS
+ export mod_compress_cmd
+diff --git a/arch/arm/boot/deflate_xip_data.sh b/arch/arm/boot/deflate_xip_data.sh
+index 40937248cebe3..304495c3c2c5d 100755
+--- a/arch/arm/boot/deflate_xip_data.sh
++++ b/arch/arm/boot/deflate_xip_data.sh
+@@ -56,7 +56,7 @@ trap 'rm -f "$XIPIMAGE.tmp"; exit 1' 1 2 3
+ # substitute the data section by a compressed version
+ $DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp"
+ $DD if="$XIPIMAGE" skip=$data_start iflag=skip_bytes |
+-gzip -9 >> "$XIPIMAGE.tmp"
++$KGZIP -9 >> "$XIPIMAGE.tmp"
+
+ # replace kernel binary
+ mv -f "$XIPIMAGE.tmp" "$XIPIMAGE"
+diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
+index 63d9f4a066e38..5a8e58b663420 100644
+--- a/arch/arm/boot/dts/ls1021a.dtsi
++++ b/arch/arm/boot/dts/ls1021a.dtsi
+@@ -753,7 +753,7 @@
+ fsl,tmr-prsc = <2>;
+ fsl,tmr-add = <0xaaaaaaab>;
+ fsl,tmr-fiper1 = <999999995>;
+- fsl,tmr-fiper2 = <99990>;
++ fsl,tmr-fiper2 = <999999995>;
+ fsl,max-adj = <499999999>;
+ fsl,extts-fifo;
+ };
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 11a7d6208087f..96abe558aea8b 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -146,7 +146,8 @@ zinstall install:
+ PHONY += vdso_install
+ vdso_install:
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+- $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
++ $(if $(CONFIG_COMPAT_VDSO), \
++ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
+
+ # We use MRPROPER_FILES and CLEAN_FILES now
+ archclean:
+diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+index 1235830ffd0b7..38c0d74767e3f 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+@@ -521,7 +521,7 @@
+ pins = "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ drive-strength = <2>;
+- bias-disable;
++ bias-pull-down;
+ };
+ };
+ };
+diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
+index a0c8a0b652593..0eadbf933e359 100644
+--- a/arch/arm64/include/asm/smp.h
++++ b/arch/arm64/include/asm/smp.h
+@@ -46,7 +46,12 @@ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+ * Logical CPU mapping.
+ */
+ extern u64 __cpu_logical_map[NR_CPUS];
+-#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
++extern u64 cpu_logical_map(int cpu);
++
++static inline void set_cpu_logical_map(int cpu, u64 hwid)
++{
++ __cpu_logical_map[cpu] = hwid;
++}
+
+ struct seq_file;
+
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 0b2830379fe03..51462c59ab5da 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -917,6 +917,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .desc = "ARM erratum 1418040",
+ .capability = ARM64_WORKAROUND_1418040,
+ ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
++ .type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
++ ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
+ },
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_1165522
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index fab013c5ee8c9..10190c4b16dc4 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -498,6 +498,39 @@ static void entry_task_switch(struct task_struct *next)
+ __this_cpu_write(__entry_task, next);
+ }
+
++/*
++ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
++ * Assuming the virtual counter is enabled at the beginning of time:
++ *
++ * - disable access when switching from a 64bit task to a 32bit task
++ * - enable access when switching from a 32bit task to a 64bit task
++ */
++static void erratum_1418040_thread_switch(struct task_struct *prev,
++ struct task_struct *next)
++{
++ bool prev32, next32;
++ u64 val;
++
++ if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
++ cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
++ return;
++
++ prev32 = is_compat_thread(task_thread_info(prev));
++ next32 = is_compat_thread(task_thread_info(next));
++
++ if (prev32 == next32)
++ return;
++
++ val = read_sysreg(cntkctl_el1);
++
++ if (!next32)
++ val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
++ else
++ val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
++
++ write_sysreg(val, cntkctl_el1);
++}
++
+ /*
+ * Thread switching.
+ */
+@@ -514,6 +547,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+ uao_thread_switch(next);
+ ptrauth_thread_switch(next);
+ ssbs_thread_switch(next);
++ erratum_1418040_thread_switch(prev, next);
+
+ /*
+ * Complete any pending TLB or cache maintenance on this CPU in case
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index 56f6645617548..d98987b82874f 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -85,7 +85,7 @@ u64 __cacheline_aligned boot_args[4];
+ void __init smp_setup_processor_id(void)
+ {
+ u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+- cpu_logical_map(0) = mpidr;
++ set_cpu_logical_map(0, mpidr);
+
+ /*
+ * clear __my_cpu_offset on boot CPU to avoid hang caused by
+@@ -276,6 +276,12 @@ arch_initcall(reserve_memblock_reserved_regions);
+
+ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+
++u64 cpu_logical_map(int cpu)
++{
++ return __cpu_logical_map[cpu];
++}
++EXPORT_SYMBOL_GPL(cpu_logical_map);
++
+ void __init setup_arch(char **cmdline_p)
+ {
+ init_mm.start_code = (unsigned long) _text;
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 993a4aedfd377..102dc3e7f2e1d 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -549,7 +549,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+ return;
+
+ /* map the logical cpu id to cpu MPIDR */
+- cpu_logical_map(cpu_count) = hwid;
++ set_cpu_logical_map(cpu_count, hwid);
+
+ cpu_madt_gicc[cpu_count] = *processor;
+
+@@ -663,7 +663,7 @@ static void __init of_parse_and_init_cpus(void)
+ goto next;
+
+ pr_debug("cpu logical map 0x%llx\n", hwid);
+- cpu_logical_map(cpu_count) = hwid;
++ set_cpu_logical_map(cpu_count, hwid);
+
+ early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
+ next:
+@@ -704,7 +704,7 @@ void __init smp_init_cpus(void)
+ for (i = 1; i < nr_cpu_ids; i++) {
+ if (cpu_logical_map(i) != INVALID_HWID) {
+ if (smp_cpu_setup(i))
+- cpu_logical_map(i) = INVALID_HWID;
++ set_cpu_logical_map(i, INVALID_HWID);
+ }
+ }
+ }
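
The cpu_logical_map() hunks above (smp.h, setup.c, smp.c) are one refactor: the lvalue macro becomes a real, exportable function, with all writes funneled through set_cpu_logical_map(). A minimal standalone C sketch of the same accessor/setter split (compiles with gcc; the range initializer is a GNU extension):

    #include <stdio.h>

    #define NR_CPUS 4
    #define INVALID_HWID (~0ULL)

    /* Backing array stays private to one translation unit. */
    static unsigned long long __cpu_logical_map[NR_CPUS] = {
            [0 ... NR_CPUS - 1] = INVALID_HWID  /* GCC range initializer */
    };

    /* Before the change, a macro exposed the array as a writable lvalue:
     *   #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
     * After it, reads go through a function and writes through a setter. */
    static unsigned long long cpu_logical_map(int cpu)
    {
            return __cpu_logical_map[cpu];
    }

    static void set_cpu_logical_map(int cpu, unsigned long long hwid)
    {
            __cpu_logical_map[cpu] = hwid;
    }

    int main(void)
    {
            set_cpu_logical_map(0, 0x100);  /* was: cpu_logical_map(0) = 0x100; */
            printf("cpu0 hwid: 0x%llx\n", cpu_logical_map(0));
            return 0;
    }
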
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index d76a3d39b2699..6f4838b475d0d 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -754,7 +754,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
+ * making sure it is a kernel address and not a PC-relative
+ * reference.
+ */
+- asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));
++ asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));
+
+ __hyp_do_panic(str_va,
+ spsr, elr,
+diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
+index 32240000dc0c8..2876a7df1b0a1 100644
+--- a/arch/ia64/Makefile
++++ b/arch/ia64/Makefile
+@@ -40,7 +40,7 @@ $(error Sorry, you need a newer version of the assembler, one that is built from
+ endif
+
+ quiet_cmd_gzip = GZIP $@
+-cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@
++cmd_gzip = cat $(real-prereqs) | $(KGZIP) -n -f -9 > $@
+
+ quiet_cmd_objcopy = OBJCOPY $@
+ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
+diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
+index 5d92883840969..0415d28dbe4fc 100644
+--- a/arch/m68k/Makefile
++++ b/arch/m68k/Makefile
+@@ -135,10 +135,10 @@ vmlinux.gz: vmlinux
+ ifndef CONFIG_KGDB
+ cp vmlinux vmlinux.tmp
+ $(STRIP) vmlinux.tmp
+- gzip -9c vmlinux.tmp >vmlinux.gz
++ $(KGZIP) -9c vmlinux.tmp >vmlinux.gz
+ rm vmlinux.tmp
+ else
+- gzip -9c vmlinux >vmlinux.gz
++ $(KGZIP) -9c vmlinux >vmlinux.gz
+ endif
+
+ bzImage: vmlinux.bz2
+@@ -148,10 +148,10 @@ vmlinux.bz2: vmlinux
+ ifndef CONFIG_KGDB
+ cp vmlinux vmlinux.tmp
+ $(STRIP) vmlinux.tmp
+- bzip2 -1c vmlinux.tmp >vmlinux.bz2
++ $(KBZIP2) -1c vmlinux.tmp >vmlinux.bz2
+ rm vmlinux.tmp
+ else
+- bzip2 -1c vmlinux >vmlinux.bz2
++ $(KBZIP2) -1c vmlinux >vmlinux.bz2
+ endif
+
+ archclean:
+diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c
+index b66b6b1c4aeb9..8f581a2c8578b 100644
+--- a/arch/mips/vdso/genvdso.c
++++ b/arch/mips/vdso/genvdso.c
+@@ -122,6 +122,7 @@ static void *map_vdso(const char *path, size_t *_size)
+ if (fstat(fd, &stat) != 0) {
+ fprintf(stderr, "%s: Failed to stat '%s': %s\n", program_name,
+ path, strerror(errno));
++ close(fd);
+ return NULL;
+ }
+
+@@ -130,6 +131,7 @@ static void *map_vdso(const char *path, size_t *_size)
+ if (addr == MAP_FAILED) {
+ fprintf(stderr, "%s: Failed to map '%s': %s\n", program_name,
+ path, strerror(errno));
++ close(fd);
+ return NULL;
+ }
+
+@@ -139,6 +141,7 @@ static void *map_vdso(const char *path, size_t *_size)
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
+ fprintf(stderr, "%s: '%s' is not an ELF file\n", program_name,
+ path);
++ close(fd);
+ return NULL;
+ }
+
+@@ -150,6 +153,7 @@ static void *map_vdso(const char *path, size_t *_size)
+ default:
+ fprintf(stderr, "%s: '%s' has invalid ELF class\n",
+ program_name, path);
++ close(fd);
+ return NULL;
+ }
+
+@@ -161,6 +165,7 @@ static void *map_vdso(const char *path, size_t *_size)
+ default:
+ fprintf(stderr, "%s: '%s' has invalid ELF data order\n",
+ program_name, path);
++ close(fd);
+ return NULL;
+ }
+
+@@ -168,15 +173,18 @@ static void *map_vdso(const char *path, size_t *_size)
+ fprintf(stderr,
+ "%s: '%s' has invalid ELF machine (expected EM_MIPS)\n",
+ program_name, path);
++ close(fd);
+ return NULL;
+ } else if (swap_uint16(ehdr->e_type) != ET_DYN) {
+ fprintf(stderr,
+ "%s: '%s' has invalid ELF type (expected ET_DYN)\n",
+ program_name, path);
++ close(fd);
+ return NULL;
+ }
+
+ *_size = stat.st_size;
++ close(fd);
+ return addr;
+ }
+
+@@ -280,10 +288,12 @@ int main(int argc, char **argv)
+ /* Calculate and write symbol offsets to <output file> */
+ if (!get_symbols(dbg_vdso_path, dbg_vdso)) {
+ unlink(out_path);
++ fclose(out_file);
+ return EXIT_FAILURE;
+ }
+
+ fprintf(out_file, "};\n");
++ fclose(out_file);
+
+ return EXIT_SUCCESS;
+ }
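
The genvdso changes plug a descriptor leak by adding close(fd) to every early return. A common alternative in C is a single cleanup label so close() appears exactly once; a sketch under a hypothetical map_file() helper:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /* Map a whole file read-only; returns NULL on failure.
     * One exit path closes the descriptor on every outcome. */
    static void *map_file(const char *path, size_t *size)
    {
            void *addr = NULL;
            struct stat st;
            int fd = open(path, O_RDONLY);

            if (fd < 0)
                    return NULL;
            if (fstat(fd, &st) != 0)
                    goto out;               /* every error path funnels here */
            addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
            if (addr == MAP_FAILED) {
                    addr = NULL;
                    goto out;
            }
            *size = st.st_size;
    out:
            close(fd);                      /* the mapping outlives the fd */
            return addr;
    }

    int main(void)
    {
            size_t size;
            void *p = map_file("/etc/hostname", &size);
            if (p) {
                    printf("mapped %zu bytes\n", size);
                    munmap(p, size);
            }
            return 0;
    }
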
+diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
+index 36b834f1c9330..53f974817affb 100644
+--- a/arch/parisc/Makefile
++++ b/arch/parisc/Makefile
+@@ -156,7 +156,7 @@ vmlinuz: bzImage
+ $(OBJCOPY) $(boot)/bzImage $@
+ else
+ vmlinuz: vmlinux
+- @gzip -cf -9 $< > $@
++ @$(KGZIP) -cf -9 $< > $@
+ endif
+
+ install:
+diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
+index a460298c7ddb4..f91ecb10d0ae7 100644
+--- a/arch/powerpc/kernel/cpu_setup_power.S
++++ b/arch/powerpc/kernel/cpu_setup_power.S
+@@ -184,7 +184,7 @@ __init_LPCR_ISA300:
+
+ __init_FSCR:
+ mfspr r3,SPRN_FSCR
+- ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
++ ori r3,r3,FSCR_TAR|FSCR_EBB
+ mtspr SPRN_FSCR,r3
+ blr
+
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index ca92e01d0bd1b..f582aa2d98078 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -1522,9 +1522,16 @@ nocheck:
+ ret = 0;
+ out:
+ if (has_branch_stack(event)) {
+- power_pmu_bhrb_enable(event);
+- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
+- event->attr.branch_sample_type);
++ u64 bhrb_filter = -1;
++
++ if (ppmu->bhrb_filter_map)
++ bhrb_filter = ppmu->bhrb_filter_map(
++ event->attr.branch_sample_type);
++
++ if (bhrb_filter != -1) {
++ cpuhw->bhrb_filter = bhrb_filter;
++ power_pmu_bhrb_enable(event);
++ }
+ }
+
+ perf_pmu_enable(event->pmu);
+@@ -1846,7 +1853,6 @@ static int power_pmu_event_init(struct perf_event *event)
+ int n;
+ int err;
+ struct cpu_hw_events *cpuhw;
+- u64 bhrb_filter;
+
+ if (!ppmu)
+ return -ENOENT;
+@@ -1952,7 +1958,10 @@ static int power_pmu_event_init(struct perf_event *event)
+ err = power_check_constraints(cpuhw, events, cflags, n + 1);
+
+ if (has_branch_stack(event)) {
+- bhrb_filter = ppmu->bhrb_filter_map(
++ u64 bhrb_filter = -1;
++
++ if (ppmu->bhrb_filter_map)
++ bhrb_filter = ppmu->bhrb_filter_map(
+ event->attr.branch_sample_type);
+
+ if (bhrb_filter == -1) {
+@@ -2106,6 +2115,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
+
+ if (perf_event_overflow(event, &data, regs))
+ power_pmu_stop(event, 0);
++ } else if (period) {
++ /* Account for interrupt in case of invalid SIAR */
++ if (perf_event_account_interrupt(event))
++ power_pmu_stop(event, 0);
+ }
+ }
+
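
Both core-book3s hunks guard ppmu->bhrb_filter_map before calling it, since PMUs without branch-stack support leave the hook NULL; -1 then acts as the "no usable filter" sentinel. A reduced sketch of the guarded-optional-callback idiom (struct names hypothetical):

    #include <stdio.h>

    /* A PMU may or may not provide a filter-mapping hook. */
    struct pmu_ops {
            long (*bhrb_filter_map)(unsigned branch_sample_type);
    };

    static long map_filter(const struct pmu_ops *ops, unsigned type)
    {
            long filter = -1;               /* sentinel: no usable filter */

            if (ops->bhrb_filter_map)       /* guard the optional callback */
                    filter = ops->bhrb_filter_map(type);
            return filter;
    }

    static long identity_map(unsigned type) { return (long)type; }

    int main(void)
    {
            struct pmu_ops with = { .bhrb_filter_map = identity_map };
            struct pmu_ops without = { 0 };

            printf("with hook: %ld, without: %ld\n",
                   map_filter(&with, 3), map_filter(&without, 3));
            return 0;
    }
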
+diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
+index 0f7c8241912b9..f2ff359041eec 100644
+--- a/arch/powerpc/platforms/cell/Kconfig
++++ b/arch/powerpc/platforms/cell/Kconfig
+@@ -44,6 +44,7 @@ config SPU_FS
+ tristate "SPU file system"
+ default m
+ depends on PPC_CELL
++ depends on COREDUMP
+ select SPU_BASE
+ help
+ The SPU file system is used to access Synergistic Processing
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index 50e1a8e02497d..3fd086533dcfc 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -18,6 +18,7 @@
+ #include <linux/delay.h>
+ #include <linux/cpumask.h>
+ #include <linux/mm.h>
++#include <linux/kmemleak.h>
+
+ #include <asm/prom.h>
+ #include <asm/io.h>
+@@ -646,6 +647,7 @@ static bool xive_native_provision_pages(void)
+ pr_err("Failed to allocate provisioning page\n");
+ return false;
+ }
++ kmemleak_ignore(p);
+ opal_xive_donate_page(chip, __pa(p));
+ }
+ return true;
+diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h
+index 35f8cbe7e5bb0..c759dcffa9eaf 100644
+--- a/arch/s390/include/asm/numa.h
++++ b/arch/s390/include/asm/numa.h
+@@ -17,7 +17,6 @@
+
+ void numa_setup(void);
+ int numa_pfn_to_nid(unsigned long pfn);
+-int __node_distance(int a, int b);
+ void numa_update_cpu_topology(void);
+
+ extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
+diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
+index cca406fdbe51f..ef9dd253dfad0 100644
+--- a/arch/s390/include/asm/topology.h
++++ b/arch/s390/include/asm/topology.h
+@@ -83,8 +83,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
+
+ #define pcibus_to_node(bus) __pcibus_to_node(bus)
+
+-#define node_distance(a, b) __node_distance(a, b)
+-
+ #else /* !CONFIG_NUMA */
+
+ #define numa_node_id numa_node_id
+diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
+index d2910fa834c8a..8386c58fdb3a0 100644
+--- a/arch/s390/numa/numa.c
++++ b/arch/s390/numa/numa.c
+@@ -49,12 +49,6 @@ void numa_update_cpu_topology(void)
+ mode->update_cpu_topology();
+ }
+
+-int __node_distance(int a, int b)
+-{
+- return mode->distance ? mode->distance(a, b) : 0;
+-}
+-EXPORT_SYMBOL(__node_distance);
+-
+ int numa_debug_enabled;
+
+ /*
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 9674321ce3a3b..8367bd7a9a810 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1599,14 +1599,28 @@ int native_cpu_disable(void)
+ if (ret)
+ return ret;
+
+- /*
+- * Disable the local APIC. Otherwise IPI broadcasts will reach
+- * it. It still responds normally to INIT, NMI, SMI, and SIPI
+- * messages.
+- */
+- apic_soft_disable();
+ cpu_disable_common();
+
++ /*
++ * Disable the local APIC. Otherwise IPI broadcasts will reach
++ * it. It still responds normally to INIT, NMI, SMI, and SIPI
++ * messages.
++ *
++ * Disabling the APIC must happen after cpu_disable_common()
++ * which invokes fixup_irqs().
++ *
++ * Disabling the APIC preserves already set bits in IRR, but
++ * an interrupt arriving after disabling the local APIC does not
++ * set the corresponding IRR bit.
++ *
++ * fixup_irqs() scans IRR for set bits so it can raise a not
++ * yet handled interrupt on the new destination CPU via an IPI
++ * but obviously it can't do so for IRR bits which are not set.
++ * IOW, interrupts arriving after disabling the local APIC will
++ * be lost.
++ */
++ apic_soft_disable();
++
+ return 0;
+ }
+
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+index 12b707a4e52fd..342a1cfa48c57 100644
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
+ kfree(bfqg);
+ }
+
+-void bfqg_and_blkg_get(struct bfq_group *bfqg)
++static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+ {
+ /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+ bfqg_get(bfqg);
+diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
+index c0232975075d0..de98fdfe9ea17 100644
+--- a/block/bfq-iosched.h
++++ b/block/bfq-iosched.h
+@@ -980,7 +980,6 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
+ struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
+-void bfqg_and_blkg_get(struct bfq_group *bfqg);
+ void bfqg_and_blkg_put(struct bfq_group *bfqg);
+
+ #ifdef CONFIG_BFQ_GROUP_IOSCHED
+diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
+index 44079147e396e..05f0bf4a1144d 100644
+--- a/block/bfq-wf2q.c
++++ b/block/bfq-wf2q.c
+@@ -536,9 +536,7 @@ static void bfq_get_entity(struct bfq_entity *entity)
+ bfqq->ref++;
+ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
+ bfqq, bfqq->ref);
+- } else
+- bfqg_and_blkg_get(container_of(entity, struct bfq_group,
+- entity));
++ }
+ }
+
+ /**
+@@ -652,14 +650,8 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
+
+ entity->on_st = false;
+ st->wsum -= entity->weight;
+- if (is_in_service)
+- return;
+-
+- if (bfqq)
++ if (bfqq && !is_in_service)
+ bfq_put_queue(bfqq);
+- else
+- bfqg_and_blkg_put(container_of(entity, struct bfq_group,
+- entity));
+ }
+
+ /**
+diff --git a/block/bio.c b/block/bio.c
+index 94d697217887a..87505a93bcff6 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -683,8 +683,8 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
+ struct page *page, unsigned int len, unsigned int off,
+ bool *same_page)
+ {
+- phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
+- bv->bv_offset + bv->bv_len - 1;
++ size_t bv_end = bv->bv_offset + bv->bv_len;
++ phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
+ phys_addr_t page_addr = page_to_phys(page);
+
+ if (vec_end_addr + 1 != page_addr + off)
+@@ -693,9 +693,9 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
+ return false;
+
+ *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
+- if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
+- return false;
+- return true;
++ if (*same_page)
++ return true;
++ return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
+ }
+
+ static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 1eb8895be4c6b..0c7addcd19859 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1219,13 +1219,15 @@ int blkcg_init_queue(struct request_queue *q)
+ if (preloaded)
+ radix_tree_preload_end();
+
+- ret = blk_iolatency_init(q);
++ ret = blk_throtl_init(q);
+ if (ret)
+ goto err_destroy_all;
+
+- ret = blk_throtl_init(q);
+- if (ret)
++ ret = blk_iolatency_init(q);
++ if (ret) {
++ blk_throtl_exit(q);
+ goto err_destroy_all;
++ }
+ return 0;
+
+ err_destroy_all:
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 93cff719b0661..86c4c1ef87429 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -154,7 +154,7 @@ static inline unsigned get_max_io_size(struct request_queue *q,
+ if (max_sectors > start_offset)
+ return max_sectors - start_offset;
+
+- return sectors & (lbs - 1);
++ return sectors & ~(lbs - 1);
+ }
+
+ static inline unsigned get_max_segment_size(const struct request_queue *q,
+@@ -553,10 +553,17 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ }
+ EXPORT_SYMBOL(blk_rq_map_sg);
+
++static inline unsigned int blk_rq_get_max_segments(struct request *rq)
++{
++ if (req_op(rq) == REQ_OP_DISCARD)
++ return queue_max_discard_segments(rq->q);
++ return queue_max_segments(rq->q);
++}
++
+ static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
+ unsigned int nr_phys_segs)
+ {
+- if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
++ if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
+ goto no_merge;
+
+ if (blk_integrity_merge_bio(req->q, req, bio) == false)
+@@ -640,7 +647,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
+ return 0;
+
+ total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+- if (total_phys_segments > queue_max_segments(q))
++ if (total_phys_segments > blk_rq_get_max_segments(req))
+ return 0;
+
+ if (blk_integrity_merge_rq(q, req, next) == false)
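
The one-character get_max_io_size() fix flips `sectors & (lbs - 1)` to `sectors & ~(lbs - 1)`: for a power-of-two logical block size, the first expression is the misaligned remainder, the second rounds the count down to an aligned multiple. A standalone check:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned sectors = 255, lbs = 8;  /* lbs must be a power of two */

            unsigned remainder = sectors & (lbs - 1);   /* 7: past the boundary */
            unsigned rounded   = sectors & ~(lbs - 1);  /* 248: aligned count */

            assert(remainder + rounded == sectors);
            printf("remainder=%u rounded=%u\n", remainder, rounded);
            return 0;
    }
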
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 74cedea560348..7620734d55429 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -77,6 +77,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
+ return;
+ clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
++ /*
++ * Order clearing SCHED_RESTART against the list_empty_careful(&hctx->dispatch)
++ * check in blk_mq_run_hw_queue(). Its pair is the barrier in
++ * blk_mq_dispatch_rq_list(); together they prevent the case where the
++ * dispatch code does not observe SCHED_RESTART being cleared while the
++ * request it just added to hctx->dispatch is missed by the check in
++ * blk_mq_run_hw_queue().
++ */
++ smp_mb();
++
+ blk_mq_run_hw_queue(hctx, true);
+ }
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index ae7d31cb5a4e1..b748d1e63f9c8 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1318,6 +1318,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
+ list_splice_tail_init(list, &hctx->dispatch);
+ spin_unlock(&hctx->lock);
+
++ /*
++ * Order adding requests to hctx->dispatch against checking the
++ * SCHED_RESTART flag. The pair of this smp_mb() is the one in
++ * blk_mq_sched_restart(); together they keep the restart code path
++ * from missing the requests just added to hctx->dispatch while
++ * SCHED_RESTART is still observed as set here.
++ */
++ smp_mb();
++
+ /*
+ * If SCHED_RESTART was set by the caller of this function and
+ * it is no longer set that means that it was cleared by another
+@@ -1869,7 +1878,8 @@ insert:
+ if (bypass_insert)
+ return BLK_STS_RESOURCE;
+
+- blk_mq_request_bypass_insert(rq, false, run_queue);
++ blk_mq_sched_insert_request(rq, false, run_queue, false);
++
+ return BLK_STS_OK;
+ }
+
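
The two smp_mb() calls added here and in blk-mq-sched.c pair up as a classic store-buffering pattern: each side stores, issues a full barrier, then loads what the other side stores, so at least one side must observe the other's write. A userspace analogue with C11 fences standing in for smp_mb() (compile with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int dispatch_nonempty = 0;
    static atomic_int sched_restart = 1;

    /* blk_mq_dispatch_rq_list() analogue: queue work, mb, check the flag. */
    static void *dispatch_side(void *arg)
    {
            (void)arg;
            atomic_store_explicit(&dispatch_nonempty, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
            if (!atomic_load_explicit(&sched_restart, memory_order_relaxed))
                    puts("A: restart was cleared; rerun the queue ourselves");
            return NULL;
    }

    /* blk_mq_sched_restart() analogue: clear the flag, mb, check for work. */
    static void *restart_side(void *arg)
    {
            (void)arg;
            atomic_store_explicit(&sched_restart, 0, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
            if (atomic_load_explicit(&dispatch_nonempty, memory_order_relaxed))
                    puts("B: saw the queued work; rerun the queue");
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;
            pthread_create(&a, NULL, dispatch_side, NULL);
            pthread_create(&b, NULL, restart_side, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            /* The paired fences forbid the outcome where neither thread
             * prints, i.e. the queued work being missed by both sides. */
            return 0;
    }
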
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 35e026ba2c7ed..1d4b0157ee5dc 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -16,6 +16,7 @@
+ #include <linux/module.h>
+ #include <linux/net.h>
+ #include <linux/rwsem.h>
++#include <linux/sched.h>
+ #include <linux/sched/signal.h>
+ #include <linux/security.h>
+
+@@ -847,9 +848,15 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ }
+
+ lock_sock(sk);
+- if (ctx->init && (init || !ctx->more)) {
+- err = -EINVAL;
+- goto unlock;
++ if (ctx->init && !ctx->more) {
++ if (ctx->used) {
++ err = -EINVAL;
++ goto unlock;
++ }
++
++ pr_info_once(
++ "%s sent an empty control message without MSG_MORE.\n",
++ current->comm);
+ }
+ ctx->init = true;
+
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 7bd9cd366d411..94df2ba1bbed7 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -3400,9 +3400,9 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
+ */
+ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
+ {
+- if (fwnode) {
+- struct fwnode_handle *fn = dev->fwnode;
++ struct fwnode_handle *fn = dev->fwnode;
+
++ if (fwnode) {
+ if (fwnode_is_primary(fn))
+ fn = fn->secondary;
+
+@@ -3412,8 +3412,12 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
+ }
+ dev->fwnode = fwnode;
+ } else {
+- dev->fwnode = fwnode_is_primary(dev->fwnode) ?
+- dev->fwnode->secondary : NULL;
++ if (fwnode_is_primary(fn)) {
++ dev->fwnode = fn->secondary;
++ fn->secondary = NULL;
++ } else {
++ dev->fwnode = NULL;
++ }
+ }
+ }
+ EXPORT_SYMBOL_GPL(set_primary_fwnode);
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 8646147dc1946..23af545120534 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1728,13 +1728,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ }
+
+ /*
+- * If a device configured to wake up the system from sleep states
+- * has been suspended at run time and there's a resume request pending
+- * for it, this is equivalent to the device signaling wakeup, so the
+- * system suspend operation should be aborted.
++ * Wait for possible runtime PM transitions of the device in progress
++ * to complete and if there's a runtime resume request pending for it,
++ * resume it before proceeding with invoking the system-wide suspend
++ * callbacks for it.
++ *
++ * If the system-wide suspend callbacks below change the configuration
++ * of the device, they must disable runtime PM for it or otherwise
++ * ensure that its runtime-resume callbacks will not be confused by that
++ * change in case they are invoked going forward.
+ */
+- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+- pm_wakeup_event(dev, 0);
++ pm_runtime_barrier(dev);
+
+ if (pm_wakeup_pending()) {
+ dev->power.direct_complete = false;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index bddbbf5b3dda2..ffbe792410d1c 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -863,6 +863,7 @@ static void loop_config_discard(struct loop_device *lo)
+ struct file *file = lo->lo_backing_file;
+ struct inode *inode = file->f_mapping->host;
+ struct request_queue *q = lo->lo_queue;
++ u32 granularity, max_discard_sectors;
+
+ /*
+ * If the backing device is a block device, mirror its zeroing
+@@ -875,11 +876,10 @@ static void loop_config_discard(struct loop_device *lo)
+ struct request_queue *backingq;
+
+ backingq = bdev_get_queue(inode->i_bdev);
+- blk_queue_max_discard_sectors(q,
+- backingq->limits.max_write_zeroes_sectors);
+
+- blk_queue_max_write_zeroes_sectors(q,
+- backingq->limits.max_write_zeroes_sectors);
++ max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
++ granularity = backingq->limits.discard_granularity ?:
++ queue_physical_block_size(backingq);
+
+ /*
+ * We use punch hole to reclaim the free space used by the
+@@ -888,23 +888,26 @@ static void loop_config_discard(struct loop_device *lo)
+ * useful information.
+ */
+ } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
+- q->limits.discard_granularity = 0;
+- q->limits.discard_alignment = 0;
+- blk_queue_max_discard_sectors(q, 0);
+- blk_queue_max_write_zeroes_sectors(q, 0);
++ max_discard_sectors = 0;
++ granularity = 0;
+
+ } else {
+- q->limits.discard_granularity = inode->i_sb->s_blocksize;
+- q->limits.discard_alignment = 0;
+-
+- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
++ max_discard_sectors = UINT_MAX >> 9;
++ granularity = inode->i_sb->s_blocksize;
+ }
+
+- if (q->limits.max_write_zeroes_sectors)
++ if (max_discard_sectors) {
++ q->limits.discard_granularity = granularity;
++ blk_queue_max_discard_sectors(q, max_discard_sectors);
++ blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+- else
++ } else {
++ q->limits.discard_granularity = 0;
++ blk_queue_max_discard_sectors(q, 0);
++ blk_queue_max_write_zeroes_sectors(q, 0);
+ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
++ }
++ q->limits.discard_alignment = 0;
+ }
+
+ static void loop_unprepare_queue(struct loop_device *lo)
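
The granularity line above uses `x ?: y`, GCC's binary conditional: it yields x when x is nonzero and y otherwise, evaluating x only once. A small demonstration (GNU C extension, so gcc/clang only; names hypothetical):

    #include <stdio.h>

    static unsigned phys_block_size(void)
    {
            puts("fallback consulted");
            return 512;
    }

    int main(void)
    {
            unsigned configured = 0;

            /* Equivalent to: configured ? configured : phys_block_size(),
             * but 'configured' is evaluated only once. */
            unsigned granularity = configured ?: phys_block_size();
            printf("granularity=%u\n", granularity);

            configured = 4096;
            granularity = configured ?: phys_block_size(); /* fallback skipped */
            printf("granularity=%u\n", granularity);
            return 0;
    }
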
+diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
+index c4454cfc6d530..13eae973eaea4 100644
+--- a/drivers/block/null_blk_main.c
++++ b/drivers/block/null_blk_main.c
+@@ -1072,7 +1072,7 @@ static int null_handle_rq(struct nullb_cmd *cmd)
+ len = bvec.bv_len;
+ err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(req_op(rq)), sector,
+- req_op(rq) & REQ_FUA);
++ rq->cmd_flags & REQ_FUA);
+ if (err) {
+ spin_unlock_irq(&nullb->lock);
+ return err;
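
The null_blk fix matters because req_op() keeps only the low operation bits of cmd_flags while REQ_FUA lives in the upper flag bits, so `req_op(rq) & REQ_FUA` was always zero and FUA was never honored. A reduced sketch (bit positions hypothetical, layout mirroring the kernel's op/flags split):

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical layout mirroring the kernel's: low 8 bits hold the op,
     * higher bits hold modifier flags such as FUA. */
    #define REQ_OP_BITS     8
    #define REQ_OP_MASK     ((1u << REQ_OP_BITS) - 1)
    #define REQ_OP_WRITE    1u
    #define REQ_FUA         (1u << 11)

    static unsigned req_op(unsigned cmd_flags)
    {
            return cmd_flags & REQ_OP_MASK; /* strips all flag bits */
    }

    int main(void)
    {
            unsigned cmd_flags = REQ_OP_WRITE | REQ_FUA;

            /* Buggy test: req_op() already discarded REQ_FUA. */
            assert((req_op(cmd_flags) & REQ_FUA) == 0);

            /* Fixed test: check the flag in the full cmd_flags word. */
            assert(cmd_flags & REQ_FUA);
            printf("FUA set: %d\n", !!(cmd_flags & REQ_FUA));
            return 0;
    }
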
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index c1de270046bfe..2eeb2bcb488d4 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -205,16 +205,31 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
+ if (!range)
+ return -ENOMEM;
+
+- __rq_for_each_bio(bio, req) {
+- u64 sector = bio->bi_iter.bi_sector;
+- u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+-
+- range[n].flags = cpu_to_le32(flags);
+- range[n].num_sectors = cpu_to_le32(num_sectors);
+- range[n].sector = cpu_to_le64(sector);
+- n++;
++ /*
++ * A single max discard segment means multi-range discard isn't
++ * supported, and the block layer only runs contiguity merging as
++ * for a normal RW request, so we can't rely on the bios for
++ * retrieving each range's info.
++ */
++ if (queue_max_discard_segments(req->q) == 1) {
++ range[0].flags = cpu_to_le32(flags);
++ range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
++ range[0].sector = cpu_to_le64(blk_rq_pos(req));
++ n = 1;
++ } else {
++ __rq_for_each_bio(bio, req) {
++ u64 sector = bio->bi_iter.bi_sector;
++ u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
++
++ range[n].flags = cpu_to_le32(flags);
++ range[n].num_sectors = cpu_to_le32(num_sectors);
++ range[n].sector = cpu_to_le64(sector);
++ n++;
++ }
+ }
+
++ WARN_ON_ONCE(n != segments);
++
+ req->special_vec.bv_page = virt_to_page(range);
+ req->special_vec.bv_offset = offset_in_page(range);
+ req->special_vec.bv_len = sizeof(*range) * segments;
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 53dc0fd6f6d3c..927eb3fd23660 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -649,11 +649,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
+ mutex_lock(&intel_pstate_limits_lock);
+
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+- u64 value;
+-
+- ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
+- if (ret)
+- goto return_pref;
++ /*
++ * Use the cached HWP Request MSR value, because the register
++ * itself may be updated by intel_pstate_hwp_boost_up() or
++ * intel_pstate_hwp_boost_down() at any time.
++ */
++ u64 value = READ_ONCE(cpu_data->hwp_req_cached);
+
+ value &= ~GENMASK_ULL(31, 24);
+
+@@ -661,13 +662,18 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
+ epp = epp_values[pref_index - 1];
+
+ value |= (u64)epp << 24;
++ /*
++ * The only other updater of hwp_req_cached in the active mode,
++ * intel_pstate_hwp_set(), is called under the same lock as this
++ * function, so it cannot run in parallel with the update below.
++ */
++ WRITE_ONCE(cpu_data->hwp_req_cached, value);
+ ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
+ } else {
+ if (epp == -EINVAL)
+ epp = (pref_index - 1) << 2;
+ ret = intel_pstate_set_epb(cpu_data->cpu, epp);
+ }
+-return_pref:
+ mutex_unlock(&intel_pstate_limits_lock);
+
+ return ret;
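
intel_pstate now updates the EPP field against its cached copy of the HWP request MSR, with READ_ONCE()/WRITE_ONCE() keeping the cache coherent against concurrent boost updates. A portable stand-in uses relaxed C11 atomics for the same single-copy idiom (names hypothetical; GENMASK_ULL re-derived locally):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Cached copy of a hardware register another context may refresh. */
    static _Atomic uint64_t hwp_req_cached;

    #define GENMASK_ULL(h, l) \
            (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    /* Update the EPP byte (bits 31:24) against the cached value, then
     * publish the new cache before (in the kernel) writing the MSR. */
    static uint64_t set_epp(uint64_t epp)
    {
            uint64_t value = atomic_load_explicit(&hwp_req_cached,
                                    memory_order_relaxed);  /* READ_ONCE() */
            value &= ~GENMASK_ULL(31, 24);
            value |= epp << 24;
            atomic_store_explicit(&hwp_req_cached, value,
                                    memory_order_relaxed);  /* WRITE_ONCE() */
            return value;   /* caller would wrmsrl() this */
    }

    int main(void)
    {
            atomic_store(&hwp_req_cached, 0x80002704ULL); /* arbitrary value */
            printf("new MSR value: %#llx\n",
                   (unsigned long long)set_epp(0x20));
            return 0;
    }
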
+diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
+index 2e65d7279d79e..027769e39f9b8 100644
+--- a/drivers/devfreq/rk3399_dmc.c
++++ b/drivers/devfreq/rk3399_dmc.c
+@@ -95,18 +95,20 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
+
+ mutex_lock(&dmcfreq->lock);
+
+- if (target_rate >= dmcfreq->odt_dis_freq)
+- odt_enable = true;
+-
+- /*
+- * This makes a SMC call to the TF-A to set the DDR PD (power-down)
+- * timings and to enable or disable the ODT (on-die termination)
+- * resistors.
+- */
+- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
+- dmcfreq->odt_pd_arg1,
+- ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
+- odt_enable, 0, 0, 0, &res);
++ if (dmcfreq->regmap_pmu) {
++ if (target_rate >= dmcfreq->odt_dis_freq)
++ odt_enable = true;
++
++ /*
++ * This makes a SMC call to the TF-A to set the DDR PD
++ * (power-down) timings and to enable or disable the
++ * ODT (on-die termination) resistors.
++ */
++ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
++ dmcfreq->odt_pd_arg1,
++ ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
++ odt_enable, 0, 0, 0, &res);
++ }
+
+ /*
+ * If frequency scaling from low to high, adjust voltage first.
+@@ -364,16 +366,21 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+ if (res.a0) {
+ dev_err(dev, "Failed to set dram param: %ld\n",
+ res.a0);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_edev;
+ }
+ }
+ }
+
+ node = of_parse_phandle(np, "rockchip,pmu", 0);
+- if (node) {
+- data->regmap_pmu = syscon_node_to_regmap(node);
+- if (IS_ERR(data->regmap_pmu))
+- return PTR_ERR(data->regmap_pmu);
++ if (!node)
++ goto no_pmu;
++
++ data->regmap_pmu = syscon_node_to_regmap(node);
++ of_node_put(node);
++ if (IS_ERR(data->regmap_pmu)) {
++ ret = PTR_ERR(data->regmap_pmu);
++ goto err_edev;
+ }
+
+ regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+@@ -391,9 +398,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+ data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
+ break;
+ default:
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_edev;
+ };
+
++no_pmu:
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
+ ROCKCHIP_SIP_CONFIG_DRAM_INIT,
+ 0, 0, 0, 0, &res);
+@@ -425,7 +434,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+ */
+ if (dev_pm_opp_of_add_table(dev)) {
+ dev_err(dev, "Invalid operating-points in device tree.\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err_edev;
+ }
+
+ of_property_read_u32(np, "upthreshold",
+@@ -465,6 +475,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
+
+ err_free_opp:
+ dev_pm_opp_of_remove_table(&pdev->dev);
++err_edev:
++ devfreq_event_disable_edev(data->edev);
++
+ return ret;
+ }
+
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index a71cca6eeb333..6be7e65f7389d 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1711,9 +1711,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
+ if (uncorrected_error) {
+ core_err_cnt = 1;
+ if (ripv)
+- tp_event = HW_EVENT_ERR_FATAL;
+- else
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
++ else
++ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
+diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
+index d26300f9cb07d..9be43b4f9c506 100644
+--- a/drivers/edac/ie31200_edac.c
++++ b/drivers/edac/ie31200_edac.c
+@@ -170,6 +170,8 @@
+ (n << (28 + (2 * skl) - PAGE_SHIFT))
+
+ static int nr_channels;
++static struct pci_dev *mci_pdev;
++static int ie31200_registered = 1;
+
+ struct ie31200_priv {
+ void __iomem *window;
+@@ -541,12 +543,16 @@ fail_free:
+ static int ie31200_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+ {
+- edac_dbg(0, "MC:\n");
++ int rc;
+
++ edac_dbg(0, "MC:\n");
+ if (pci_enable_device(pdev) < 0)
+ return -EIO;
++ rc = ie31200_probe1(pdev, ent->driver_data);
++ if (rc == 0 && !mci_pdev)
++ mci_pdev = pci_dev_get(pdev);
+
+- return ie31200_probe1(pdev, ent->driver_data);
++ return rc;
+ }
+
+ static void ie31200_remove_one(struct pci_dev *pdev)
+@@ -555,6 +561,8 @@ static void ie31200_remove_one(struct pci_dev *pdev)
+ struct ie31200_priv *priv;
+
+ edac_dbg(0, "\n");
++ pci_dev_put(mci_pdev);
++ mci_pdev = NULL;
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
+ return;
+@@ -596,17 +604,53 @@ static struct pci_driver ie31200_driver = {
+
+ static int __init ie31200_init(void)
+ {
++ int pci_rc, i;
++
+ edac_dbg(3, "MC:\n");
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+- return pci_register_driver(&ie31200_driver);
++ pci_rc = pci_register_driver(&ie31200_driver);
++ if (pci_rc < 0)
++ goto fail0;
++
++ if (!mci_pdev) {
++ ie31200_registered = 0;
++ for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) {
++ mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor,
++ ie31200_pci_tbl[i].device,
++ NULL);
++ if (mci_pdev)
++ break;
++ }
++ if (!mci_pdev) {
++ edac_dbg(0, "ie31200 pci_get_device fail\n");
++ pci_rc = -ENODEV;
++ goto fail1;
++ }
++ pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
++ if (pci_rc < 0) {
++ edac_dbg(0, "ie31200 init fail\n");
++ pci_rc = -ENODEV;
++ goto fail1;
++ }
++ }
++ return 0;
++
++fail1:
++ pci_unregister_driver(&ie31200_driver);
++fail0:
++ pci_dev_put(mci_pdev);
++
++ return pci_rc;
+ }
+
+ static void __exit ie31200_exit(void)
+ {
+ edac_dbg(3, "MC:\n");
+ pci_unregister_driver(&ie31200_driver);
++ if (!ie31200_registered)
++ ie31200_remove_one(mci_pdev);
+ }
+
+ module_init(ie31200_init);
+diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
+index b1193be1ef1d8..dac45e2071b3f 100644
+--- a/drivers/edac/pnd2_edac.c
++++ b/drivers/edac/pnd2_edac.c
+@@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
+ u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+ int rc;
+
+- tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
++ tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
+ HW_EVENT_ERR_CORRECTED;
+
+ /*
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index f743502ca9b72..b557a53c75c46 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -254,18 +254,20 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
+ * FIXME: Implement the error count reads directly
+ */
+
+-static const u32 correrrcnt[] = {
+- 0x104, 0x108, 0x10c, 0x110,
+-};
+-
+ #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
+ #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
+ #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
+ #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
+
+#if 0 /* Currently unused */
++static const u32 correrrcnt[] = {
++ 0x104, 0x108, 0x10c, 0x110,
++};
++
+ static const u32 correrrthrsld[] = {
+ 0x11c, 0x120, 0x124, 0x128,
+ };
++#endif
+
+ #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
+ #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
+@@ -1340,7 +1342,7 @@ static void knl_show_mc_route(u32 reg, char *s)
+ */
+ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
+ {
+- u64 sad_base, sad_size, sad_limit = 0;
++ u64 sad_base, sad_limit = 0;
+ u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
+ int sad_rule = 0;
+ int tad_rule = 0;
+@@ -1427,7 +1429,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
+ edram_only = KNL_EDRAM_ONLY(dram_rule);
+
+ sad_limit = pvt->info.sad_limit(dram_rule)+1;
+- sad_size = sad_limit - sad_base;
+
+ pci_read_config_dword(pvt->pci_sad0,
+ pvt->info.interleave_list[sad_rule], &interleave_reg);
+@@ -2952,7 +2953,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
+ struct mem_ctl_info *new_mci;
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ enum hw_event_mc_err_type tp_event;
+- char *type, *optype, msg[256];
++ char *optype, msg[256];
+ bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+ bool overflow = GET_BITFIELD(m->status, 62, 62);
+ bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+@@ -2981,14 +2982,11 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
+ if (uncorrected_error) {
+ core_err_cnt = 1;
+ if (ripv) {
+- type = "FATAL";
+- tp_event = HW_EVENT_ERR_FATAL;
+- } else {
+- type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
++ } else {
++ tp_event = HW_EVENT_ERR_FATAL;
+ }
+ } else {
+- type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
+
+@@ -3200,7 +3198,6 @@ static struct notifier_block sbridge_mce_dec = {
+ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
+ {
+ struct mem_ctl_info *mci = sbridge_dev->mci;
+- struct sbridge_pvt *pvt;
+
+ if (unlikely(!mci || !mci->pvt_info)) {
+ edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
+@@ -3209,8 +3206,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
+ return;
+ }
+
+- pvt = mci->pvt_info;
+-
+ edac_dbg(0, "MC: mci = %p, dev = %p\n",
+ mci, &sbridge_dev->pdev[0]->dev);
+
+diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
+index 2177ad765bd16..99dea4f66b5e9 100644
+--- a/drivers/edac/skx_common.c
++++ b/drivers/edac/skx_common.c
+@@ -475,7 +475,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
+ struct decoded_addr *res)
+ {
+ enum hw_event_mc_err_type tp_event;
+- char *type, *optype;
++ char *optype;
+ bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+ bool overflow = GET_BITFIELD(m->status, 62, 62);
+ bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+@@ -490,14 +490,11 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
+ if (uncorrected_error) {
+ core_err_cnt = 1;
+ if (ripv) {
+- type = "FATAL";
+- tp_event = HW_EVENT_ERR_FATAL;
+- } else {
+- type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
++ } else {
++ tp_event = HW_EVENT_ERR_FATAL;
+ }
+ } else {
+- type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+index ece55c8fa6733..cda0a76a733d3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+@@ -719,8 +719,10 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
++ if (r < 0) {
++ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
++ }
+ }
+
+ if (encoder) {
+@@ -857,8 +859,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
++ if (r < 0) {
++ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
++ }
+ }
+
+ encoder = amdgpu_connector_best_single_encoder(connector);
+@@ -980,8 +984,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
++ if (r < 0) {
++ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
++ }
+ }
+
+ if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
+@@ -1330,8 +1336,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
++ if (r < 0) {
++ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
++ }
+ }
+
+ if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 82efc1e22e611..e0aed42d9cbda 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -282,7 +282,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+- return ret;
++ goto out;
+
+ ret = drm_crtc_helper_set_config(set, ctx);
+
+@@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+ take the current one */
+ if (active && !adev->have_disp_power_ref) {
+ adev->have_disp_power_ref = true;
+- return ret;
++ goto out;
+ }
+ /* if we have no active crtcs, then drop the power ref
+ we got before */
+@@ -306,6 +306,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+ adev->have_disp_power_ref = false;
+ }
+
++out:
+ /* drop the power reference we got coming in here */
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 05d114a72ca1e..fa2c0f29ad4de 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -1286,11 +1286,12 @@ long amdgpu_drm_ioctl(struct file *filp,
+ dev = file_priv->minor->dev;
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+- return ret;
++ goto out;
+
+ ret = drm_ioctl(filp, cmd, arg);
+
+ pm_runtime_mark_last_busy(dev->dev);
++out:
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 2a7da26008a27..59fd9ebf3a58b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -638,8 +638,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ * in the bitfields */
+ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
+ se_num = 0xffffffff;
++ else if (se_num >= AMDGPU_GFX_MAX_SE)
++ return -EINVAL;
+ if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
+ sh_num = 0xffffffff;
++ else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
++ return -EINVAL;
+
+ if (info->read_mmr_reg.count > 128)
+ return -EINVAL;
+@@ -976,7 +980,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0)
+- return r;
++ goto pm_put;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (unlikely(!fpriv)) {
+@@ -1027,6 +1031,7 @@ error_pasid:
+
+ out_suspend:
+ pm_runtime_mark_last_busy(dev->dev);
++pm_put:
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 64d96eb0a2337..19876c90be0e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -4094,10 +4094,8 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
+ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
+ data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
+- RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+-
+- /* only for Vega10 & Raven1 */
+- data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
++ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
++ RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
+
+ if (def != data)
+ WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 7551761f2aa97..a49e2ab071d68 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -612,8 +612,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+
+ ret = kobject_init_and_add(dev->kobj_node, &node_type,
+ sys_props.kobj_nodes, "%d", id);
+- if (ret < 0)
++ if (ret < 0) {
++ kobject_put(dev->kobj_node);
+ return ret;
++ }
+
+ dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
+ if (!dev->kobj_mem)
+@@ -660,8 +662,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+ return -ENOMEM;
+ ret = kobject_init_and_add(mem->kobj, &mem_type,
+ dev->kobj_mem, "%d", i);
+- if (ret < 0)
++ if (ret < 0) {
++ kobject_put(mem->kobj);
+ return ret;
++ }
+
+ mem->attr.name = "properties";
+ mem->attr.mode = KFD_SYSFS_FILE_MODE;
+@@ -679,8 +683,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+ return -ENOMEM;
+ ret = kobject_init_and_add(cache->kobj, &cache_type,
+ dev->kobj_cache, "%d", i);
+- if (ret < 0)
++ if (ret < 0) {
++ kobject_put(cache->kobj);
+ return ret;
++ }
+
+ cache->attr.name = "properties";
+ cache->attr.mode = KFD_SYSFS_FILE_MODE;
+@@ -698,8 +704,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
+ return -ENOMEM;
+ ret = kobject_init_and_add(iolink->kobj, &iolink_type,
+ dev->kobj_iolink, "%d", i);
+- if (ret < 0)
++ if (ret < 0) {
++ kobject_put(iolink->kobj);
+ return ret;
++ }
+
+ iolink->attr.name = "properties";
+ iolink->attr.mode = KFD_SYSFS_FILE_MODE;
+@@ -779,8 +787,10 @@ static int kfd_topology_update_sysfs(void)
+ ret = kobject_init_and_add(sys_props.kobj_topology,
+ &sysprops_type, &kfd_device->kobj,
+ "topology");
+- if (ret < 0)
++ if (ret < 0) {
++ kobject_put(sys_props.kobj_topology);
+ return ret;
++ }
+
+ sys_props.kobj_nodes = kobject_create_and_add("nodes",
+ sys_props.kobj_topology);
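
Every kfd_topology hunk applies the same kobject rule: once kobject_init_and_add() has run, the object owns a reference even when the call fails, so the error path must drop it with kobject_put() (letting the release callback free it) rather than return and leak. A generic userspace refcount sketch of that contract:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int refcount;
            char payload[32];
    };

    static struct obj *obj_init(void)
    {
            struct obj *o = calloc(1, sizeof(*o));
            if (o)
                    o->refcount = 1;        /* init hands out a reference */
            return o;
    }

    static void obj_put(struct obj *o)
    {
            if (o && --o->refcount == 0) {
                    puts("release callback: freeing object");
                    free(o);
            }
    }

    static int obj_init_and_register(struct obj **out)
    {
            *out = obj_init();
            if (!*out)
                    return -1;
            /* Pretend registration (e.g. a sysfs add) fails: the object is
             * already initialized and refcounted, so the caller must put it. */
            return -1;
    }

    int main(void)
    {
            struct obj *o;
            if (obj_init_and_register(&o) < 0) {
                    obj_put(o);     /* the fix: drop the reference, don't leak */
                    return 1;
            }
            obj_put(o);
            return 0;
    }
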
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 2c0eb7140ca0e..247f53d41993d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7229,6 +7229,30 @@ cleanup:
+ *out_type = update_type;
+ return ret;
+ }
++#if defined(CONFIG_DRM_AMD_DC_DCN)
++static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
++{
++ struct drm_connector *connector;
++ struct drm_connector_state *conn_state;
++ struct amdgpu_dm_connector *aconnector = NULL;
++ int i;
++ for_each_new_connector_in_state(state, connector, conn_state, i) {
++ if (conn_state->crtc != crtc)
++ continue;
++
++ aconnector = to_amdgpu_dm_connector(connector);
++ if (!aconnector->port || !aconnector->mst_port)
++ aconnector = NULL;
++ else
++ break;
++ }
++
++ if (!aconnector)
++ return 0;
++
++ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
++}
++#endif
+
+ /**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+@@ -7282,6 +7306,40 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ if (ret)
+ goto fail;
+
++ /* Check connector changes */
++ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
++ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
++ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
++
++ /* Skip connectors that are disabled or part of modeset already. */
++ if (!old_con_state->crtc && !new_con_state->crtc)
++ continue;
++
++ if (!new_con_state->crtc)
++ continue;
++
++ new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
++ if (IS_ERR(new_crtc_state)) {
++ ret = PTR_ERR(new_crtc_state);
++ goto fail;
++ }
++
++ if (dm_old_con_state->abm_level !=
++ dm_new_con_state->abm_level)
++ new_crtc_state->connectors_changed = true;
++ }
++
++#if defined(CONFIG_DRM_AMD_DC_DCN)
++ if (adev->asic_type >= CHIP_NAVI10) {
++ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
++ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
++ ret = add_affected_mst_dsc_crtcs(state, crtc);
++ if (ret)
++ goto fail;
++ }
++ }
++ }
++#endif
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed &&
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+index 9aa258f3550b6..ddf66046616d6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+@@ -121,35 +121,35 @@ void enc1_update_generic_info_packet(
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+- AFMT_GENERIC0_FRAME_UPDATE, 1);
++ AFMT_GENERIC0_IMMEDIATE_UPDATE, 1);
+ break;
+ case 1:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+- AFMT_GENERIC1_FRAME_UPDATE, 1);
++ AFMT_GENERIC1_IMMEDIATE_UPDATE, 1);
+ break;
+ case 2:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+- AFMT_GENERIC2_FRAME_UPDATE, 1);
++ AFMT_GENERIC2_IMMEDIATE_UPDATE, 1);
+ break;
+ case 3:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+- AFMT_GENERIC3_FRAME_UPDATE, 1);
++ AFMT_GENERIC3_IMMEDIATE_UPDATE, 1);
+ break;
+ case 4:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+- AFMT_GENERIC4_FRAME_UPDATE, 1);
++ AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
+ break;
+ case 5:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+- AFMT_GENERIC5_FRAME_UPDATE, 1);
++ AFMT_GENERIC5_IMMEDIATE_UPDATE, 1);
+ break;
+ case 6:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+- AFMT_GENERIC6_FRAME_UPDATE, 1);
++ AFMT_GENERIC6_IMMEDIATE_UPDATE, 1);
+ break;
+ case 7:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+- AFMT_GENERIC7_FRAME_UPDATE, 1);
++ AFMT_GENERIC7_IMMEDIATE_UPDATE, 1);
+ break;
+ default:
+ break;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+index a512cbea00d17..b9656614950e3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+@@ -275,7 +275,14 @@ struct dcn10_stream_enc_registers {
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
++ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
++ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
++ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
++ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
++ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
++ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
++ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
+@@ -339,7 +346,14 @@ struct dcn10_stream_enc_registers {
+ type AFMT_GENERIC2_FRAME_UPDATE;\
+ type AFMT_GENERIC3_FRAME_UPDATE;\
+ type AFMT_GENERIC4_FRAME_UPDATE;\
++ type AFMT_GENERIC0_IMMEDIATE_UPDATE;\
++ type AFMT_GENERIC1_IMMEDIATE_UPDATE;\
++ type AFMT_GENERIC2_IMMEDIATE_UPDATE;\
++ type AFMT_GENERIC3_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC4_IMMEDIATE_UPDATE;\
++ type AFMT_GENERIC5_IMMEDIATE_UPDATE;\
++ type AFMT_GENERIC6_IMMEDIATE_UPDATE;\
++ type AFMT_GENERIC7_IMMEDIATE_UPDATE;\
+ type AFMT_GENERIC5_FRAME_UPDATE;\
+ type AFMT_GENERIC6_FRAME_UPDATE;\
+ type AFMT_GENERIC7_FRAME_UPDATE;\
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+index fed3fc4bb57a9..6322e57893db2 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+@@ -209,8 +209,7 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo
+ {
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+- if (smu10_data->need_min_deep_sleep_dcefclk &&
+- smu10_data->deep_sleep_dcefclk != clock) {
++ if (clock && smu10_data->deep_sleep_dcefclk != clock) {
+ smu10_data->deep_sleep_dcefclk = clock;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinDeepSleepDcefclk,
+@@ -223,8 +222,7 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
+ {
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+- if (smu10_data->dcf_actual_hard_min_freq &&
+- smu10_data->dcf_actual_hard_min_freq != clock) {
++ if (clock && smu10_data->dcf_actual_hard_min_freq != clock) {
+ smu10_data->dcf_actual_hard_min_freq = clock;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinDcefclkByFreq,
+@@ -237,8 +235,7 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
+ {
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+- if (smu10_data->f_actual_hard_min_freq &&
+- smu10_data->f_actual_hard_min_freq != clock) {
++ if (clock && smu10_data->f_actual_hard_min_freq != clock) {
+ smu10_data->f_actual_hard_min_freq = clock;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+index ba8763daa3808..36a17caa3761d 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+@@ -364,17 +364,19 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
+ {
++ struct phm_ppt_v2_information *pp_table_info =
++ (struct phm_ppt_v2_information *)(hwmgr->pptable);
++ struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
+ struct amdgpu_device *adev = hwmgr->adev;
+- int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
+- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+- int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
+- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++ int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP;
++ int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
+ uint32_t val;
+
+- if (low < range->min)
+- low = range->min;
+- if (high > range->max)
+- high = range->max;
++ /* compare them in degrees Celsius */
++ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
++ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++ if (high > tdp_table->usSoftwareShutdownTemp)
++ high = tdp_table->usSoftwareShutdownTemp;
+
+ if (low > high)
+ return -EINVAL;
+@@ -383,8 +385,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
++ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
++ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
+ val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) &
+ (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) &
+ (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+index 904eb2c9155b4..40e7c72eeae00 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+@@ -170,17 +170,18 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
+ {
++ struct phm_ppt_v3_information *pptable_information =
++ (struct phm_ppt_v3_information *)hwmgr->pptable;
+ struct amdgpu_device *adev = hwmgr->adev;
+- int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
+- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+- int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP *
+- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++ int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP;
++ int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP;
+ uint32_t val;
+
+- if (low < range->min)
+- low = range->min;
+- if (high > range->max)
+- high = range->max;
++ /* compare them in degrees Celsius */
++ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
++ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++ if (high > pptable_information->us_software_shutdown_temp)
++ high = pptable_information->us_software_shutdown_temp;
+
+ if (low > high)
+ return -EINVAL;
+@@ -189,8 +190,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
++ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
++ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
+ val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index f5915308e643a..947e4fa3c5e68 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -981,27 +981,15 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
+ {
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+- uint64_t features_enabled;
+- int i;
+- bool enabled;
+- int ret = 0;
++ int i, ret = 0;
+
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures)) == 0,
+ "[DisableAllSMUFeatures] Failed to disable all smu features!",
+ return ret);
+
+- ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
+- PP_ASSERT_WITH_CODE(!ret,
+- "[DisableAllSMUFeatures] Failed to get enabled smc features!",
+- return ret);
+-
+- for (i = 0; i < GNLD_FEATURES_MAX; i++) {
+- enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
+- true : false;
+- data->smu_features[i].enabled = enabled;
+- data->smu_features[i].supported = enabled;
+- }
++ for (i = 0; i < GNLD_FEATURES_MAX; i++)
++ data->smu_features[i].enabled = 0;
+
+ return 0;
+ }
+@@ -1652,12 +1640,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
+
+ data->uvd_power_gated = true;
+ data->vce_power_gated = true;
+-
+- if (data->smu_features[GNLD_DPM_UVD].enabled)
+- data->uvd_power_gated = false;
+-
+- if (data->smu_features[GNLD_DPM_VCE].enabled)
+- data->vce_power_gated = false;
+ }
+
+ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+@@ -3211,10 +3193,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
+
+ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
+ {
+- uint64_t features_enabled;
+- uint64_t features_to_enable;
+- uint64_t features_to_disable;
+- int ret = 0;
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint64_t features_enabled, features_to_enable, features_to_disable;
++ int i, ret = 0;
++ bool enabled;
+
+ if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
+ return -EINVAL;
+@@ -3243,6 +3226,17 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
+ return ret;
+ }
+
++ /* Update the cached feature enablement state */
++ ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < GNLD_FEATURES_MAX; i++) {
++ enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
++ true : false;
++ data->smu_features[i].enabled = enabled;
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+index ede54e87e287b..ce56b93871e8f 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+@@ -240,17 +240,18 @@ int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
+ {
++ struct phm_ppt_v3_information *pptable_information =
++ (struct phm_ppt_v3_information *)hwmgr->pptable;
+ struct amdgpu_device *adev = hwmgr->adev;
+- int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP *
+- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+- int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP *
+- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++ int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP;
++ int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP;
+ uint32_t val;
+
+- if (low < range->min)
+- low = range->min;
+- if (high > range->max)
+- high = range->max;
++ /* compare them in degrees Celsius */
++ if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
++ low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
++ if (high > pptable_information->us_software_shutdown_temp)
++ high = pptable_information->us_software_shutdown_temp;
+
+ if (low > high)
+ return -EINVAL;
+@@ -259,8 +260,8 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+- val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
++ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
++ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
+ val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
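The three vega*_thermal hunks above share one fix: instead of scaling the alert limits into the driver's internal temperature units and dividing back down when programming the register, the new code keeps low/high in whole degrees Celsius, compares in that unit, and clamps the high alert to the board's software-shutdown temperature from the powerplay table rather than range->max. A standalone sketch of the corrected arithmetic, assuming PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (millidegrees per degree) and using made-up limits:

/* Sketch of the unit handling in the vega*_thermal hunks, not driver code. */
#include <stdio.h>

#define UNITS_PER_DEGREE  1000  /* assumed PP_TEMPERATURE_UNITS_PER_CENTIGRADES */
#define MIN_ALERT_TEMP    0     /* stand-in for VEGA*_THERMAL_MINIMUM_ALERT_TEMP */
#define MAX_ALERT_TEMP    255   /* stand-in for VEGA*_THERMAL_MAXIMUM_ALERT_TEMP */

static int clamp_alert_range(int range_min_mdeg, int shutdown_temp_deg,
                             int *low_deg, int *high_deg)
{
        *low_deg = MIN_ALERT_TEMP;
        *high_deg = MAX_ALERT_TEMP;

        /* compare everything in degrees, as the patched code does */
        if (*low_deg < range_min_mdeg / UNITS_PER_DEGREE)
                *low_deg = range_min_mdeg / UNITS_PER_DEGREE;
        if (*high_deg > shutdown_temp_deg)
                *high_deg = shutdown_temp_deg;

        return (*low_deg > *high_deg) ? -1 : 0; /* -EINVAL in the driver */
}

int main(void)
{
        int low, high;

        /* e.g. SMU reports min 15000 mdeg, table says shut down at 105 C */
        if (!clamp_alert_range(15000, 105, &low, &high))
                printf("DIG_THERM_INTL=%d DIG_THERM_INTH=%d\n", low, high);
        return 0;
}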
+diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
+index 21417ac8e878e..a9a69760c18d0 100644
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -1207,6 +1207,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
+ return dst;
+ }
+
++static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc,
++ const u32 cmd)
++{
++ return desc->cmd.value == (cmd & desc->cmd.mask);
++}
++
+ static bool check_cmd(const struct intel_engine_cs *engine,
+ const struct drm_i915_cmd_descriptor *desc,
+ const u32 *cmd, u32 length)
+@@ -1245,19 +1251,19 @@ static bool check_cmd(const struct intel_engine_cs *engine,
+ * allowed mask/value pair given in the whitelist entry.
+ */
+ if (reg->mask) {
+- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
++ if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
+ reg_addr);
+ return false;
+ }
+
+- if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
++ if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
+ reg_addr);
+ return false;
+ }
+
+- if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
++ if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) &&
+ (offset + 2 > length ||
+ (cmd[offset + 1] & reg->mask) != reg->value)) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
+@@ -1488,7 +1494,7 @@ int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+ goto err;
+ }
+
+- if (desc->cmd.value == MI_BATCH_BUFFER_START) {
++ if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
+ ret = check_bbstart(ctx, cmd, offset, length,
+ batch_len, batch_start,
+ shadow_batch_start);
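The i915 hunk replaces raw desc->cmd.value comparisons with the new cmd_desc_is() helper, which masks the incoming dword with the descriptor's mask before comparing; the low bits of a command dword (length fields, flags) vary per use, so a raw equality test can miss a command it should match. A standalone model with made-up opcode values:

/* Standalone model of cmd_desc_is(); the opcode layout is hypothetical. */
#include <stdio.h>
#include <stdint.h>

struct cmd_desc {
        uint32_t value; /* canonical opcode bits */
        uint32_t mask;  /* which bits identify the command */
};

static int cmd_desc_is(const struct cmd_desc *desc, uint32_t cmd)
{
        return desc->value == (cmd & desc->mask);
}

int main(void)
{
        /* hypothetical: opcode in the high bits, length in the low bits */
        struct cmd_desc lrm = { .value = 0x14800000, .mask = 0x7f800000 };
        uint32_t dword = 0x14800002; /* same opcode, nonzero length field */

        printf("raw compare:    %d\n", lrm.value == dword);       /* 0: misses */
        printf("masked compare: %d\n", cmd_desc_is(&lrm, dword)); /* 1: matches */
        return 0;
}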
+diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
+index 376fca6ca9f47..7e6179fe63f86 100644
+--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
++++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
+@@ -375,9 +375,9 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
+
+ if (state && state->fb) {
+ addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
+- width = state->crtc->state->adjusted_mode.hdisplay;
+- height = state->crtc->state->adjusted_mode.vdisplay;
+- cpp = state->fb->format->cpp[plane->index];
++ width = state->src_w >> 16;
++ height = state->src_h >> 16;
++ cpp = state->fb->format->cpp[0];
+
+ priv->dma_hwdesc->addr = addr;
+ priv->dma_hwdesc->cmd = width * height * cpp / 4;
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 048c8be426f32..053da39da1cc0 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -350,7 +350,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
+ ring->next = ring->start;
+
+ /* reset completed fence seqno: */
+- ring->memptrs->fence = ring->seqno;
++ ring->memptrs->fence = ring->fctx->completed_fence;
+ ring->memptrs->rptr = 0;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index d735ea7e2d886..419a02260bfa7 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -2032,8 +2032,10 @@ nv50_disp_atomic_commit(struct drm_device *dev,
+ int ret, i;
+
+ ret = pm_runtime_get_sync(dev->dev);
+- if (ret < 0 && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES) {
++ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
++ }
+
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (ret)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index eb31c5b6c8e93..0994aee7671ad 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -568,8 +568,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
+ pm_runtime_get_noresume(dev->dev);
+ } else {
+ ret = pm_runtime_get_sync(dev->dev);
+- if (ret < 0 && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES) {
++ pm_runtime_put_autosuspend(dev->dev);
+ return conn_status;
++ }
+ }
+
+ nv_encoder = nouveau_connector_ddc_detect(connector);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index 5cf2381f667e2..c09ea357e88f0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -189,8 +189,10 @@ nouveau_fbcon_open(struct fb_info *info, int user)
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
+ int ret = pm_runtime_get_sync(drm->dev->dev);
+- if (ret < 0 && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES) {
++ pm_runtime_put(drm->dev->dev);
+ return ret;
++ }
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index b684cd719612b..bc63f4cecf5d5 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -883,8 +883,10 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
++ if (r < 0) {
++ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
++ }
+ }
+
+ if (encoder) {
+@@ -1029,8 +1031,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
++ if (r < 0) {
++ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
++ }
+ }
+
+ encoder = radeon_best_single_encoder(connector);
+@@ -1167,8 +1171,10 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
++ if (r < 0) {
++ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
++ }
+ }
+
+ encoder = radeon_best_single_encoder(connector);
+@@ -1251,8 +1257,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
++ if (r < 0) {
++ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
++ }
+ }
+
+ if (radeon_connector->detected_hpd_without_ddc) {
+@@ -1666,8 +1674,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+- if (r < 0)
++ if (r < 0) {
++ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
++ }
+ }
+
+ if (!force && radeon_check_hpd_status_unchanged(connector)) {
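The nouveau and radeon hunks above are all the same class of fix: pm_runtime_get_sync() raises the device usage count even when it returns an error, so every early return on failure must drop that reference (pm_runtime_put_autosuspend() or pm_runtime_put()), or the device can never runtime-suspend again. A minimal sketch of the balancing rule, with the runtime-PM calls stubbed so it stands alone:

/* Sketch of the get/put balancing rule; *_stub functions model the real API. */
#include <stdio.h>

static int usage_count;

static int pm_runtime_get_sync_stub(void)
{
        usage_count++;  /* the real get_sync bumps the count even on failure */
        return -13;     /* simulate an -EACCES-style failure */
}

static void pm_runtime_put_autosuspend_stub(void)
{
        usage_count--;
}

static int detect(void)
{
        int ret = pm_runtime_get_sync_stub();
        if (ret < 0) {
                /* the fix: drop the reference taken by the failed get */
                pm_runtime_put_autosuspend_stub();
                return ret;
        }
        /* ... do work ... */
        pm_runtime_put_autosuspend_stub();
        return 0;
}

int main(void)
{
        detect();
        printf("usage_count after failed detect: %d\n", usage_count); /* 0 */
        return 0;
}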
+diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
+index 4be49c1aef518..09894a1d343f3 100644
+--- a/drivers/gpu/drm/xen/xen_drm_front.c
++++ b/drivers/gpu/drm/xen/xen_drm_front.c
+@@ -400,7 +400,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
+ args->size = args->pitch * args->height;
+
+ obj = xen_drm_front_gem_create(dev, args->size);
+- if (IS_ERR_OR_NULL(obj)) {
++ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail;
+ }
+diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
+index f0b85e0941114..4ec8a49241e17 100644
+--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
++++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
+@@ -83,7 +83,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
+
+ size = round_up(size, PAGE_SIZE);
+ xen_obj = gem_create_obj(dev, size);
+- if (IS_ERR_OR_NULL(xen_obj))
++ if (IS_ERR(xen_obj))
+ return xen_obj;
+
+ if (drm_info->front_info->cfg.be_alloc) {
+@@ -117,7 +117,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
+ */
+ xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
+- if (IS_ERR_OR_NULL(xen_obj->pages)) {
++ if (IS_ERR(xen_obj->pages)) {
+ ret = PTR_ERR(xen_obj->pages);
+ xen_obj->pages = NULL;
+ goto fail;
+@@ -136,7 +136,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
+ struct xen_gem_object *xen_obj;
+
+ xen_obj = gem_create(dev, size);
+- if (IS_ERR_OR_NULL(xen_obj))
++ if (IS_ERR(xen_obj))
+ return ERR_CAST(xen_obj);
+
+ return &xen_obj->base;
+@@ -194,7 +194,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
+
+ size = attach->dmabuf->size;
+ xen_obj = gem_create_obj(dev, size);
+- if (IS_ERR_OR_NULL(xen_obj))
++ if (IS_ERR(xen_obj))
+ return ERR_CAST(xen_obj);
+
+ ret = gem_alloc_pages_array(xen_obj, size);
+diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
+index 21ad1c359b613..e4dedbb184ab7 100644
+--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
++++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
+@@ -60,7 +60,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
+ int ret;
+
+ fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
+- if (IS_ERR_OR_NULL(fb))
++ if (IS_ERR(fb))
+ return fb;
+
+ gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
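The xen_drm_front hunks narrow IS_ERR_OR_NULL() to IS_ERR() for callees that never return NULL. The distinction matters because PTR_ERR(NULL) is 0: treating NULL as an error pointer turns a failure into an apparent success once the "error" is propagated. A simplified userspace model of the linux/err.h machinery:

/* Simplified model of ERR_PTR/IS_ERR from linux/err.h. */
#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}
static int IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || IS_ERR(ptr);
}

int main(void)
{
        void *err = ERR_PTR(-12);  /* an -ENOMEM-style error pointer */
        void *null_obj = NULL;     /* a return the patched callees never produce */

        printf("IS_ERR(err)=%d PTR_ERR(err)=%ld\n", IS_ERR(err), PTR_ERR(err));
        printf("IS_ERR_OR_NULL(null_obj)=%d but PTR_ERR(null_obj)=%ld\n",
               IS_ERR_OR_NULL(null_obj), PTR_ERR(null_obj));
        /* PTR_ERR(NULL) == 0 propagates as "success" -- the bug IS_ERR() avoids */
        return 0;
}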
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 73e4590ea9c94..09df5ecc2c79b 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -771,6 +771,7 @@
+ #define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b
+ #define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
+ #define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
++#define USB_DEVICE_ID_LOGITECH_GROUP_AUDIO 0x0882
+ #define USB_DEVICE_ID_S510_RECEIVER 0xc50c
+ #define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
+ #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index a49fa2b047cba..b3dd60897ffda 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -179,6 +179,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },
++ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_GROUP_AUDIO), HID_QUIRK_NOGET },
+
+ { 0 }
+ };
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index b525b2715e074..592176aff0270 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -422,6 +422,19 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
+ dev_err(&client->dev, "failed to change power setting.\n");
+
+ set_pwr_exit:
++
++ /*
++ * The HID over I2C specification states that if a DEVICE needs time
++ * after the PWR_ON request, it should utilise CLOCK stretching.
++ * However, it has been observered that the Windows driver provides a
++ * 1ms sleep between the PWR_ON and RESET requests.
++ * According to Goodix Windows even waits 60 ms after (other?)
++ * PWR_ON requests. Testing has confirmed that several devices
++ * will not work properly without a delay after a PWR_ON request.
++ */
++ if (!ret && power_state == I2C_HID_PWR_ON)
++ msleep(60);
++
+ return ret;
+ }
+
+@@ -443,15 +456,6 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ if (ret)
+ goto out_unlock;
+
+- /*
+- * The HID over I2C specification states that if a DEVICE needs time
+- * after the PWR_ON request, it should utilise CLOCK stretching.
+- * However, it has been observered that the Windows driver provides a
+- * 1ms sleep between the PWR_ON and RESET requests and that some devices
+- * rely on this.
+- */
+- usleep_range(1000, 5000);
+-
+ i2c_hid_dbg(ihid, "resetting...\n");
+
+ ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index 35b1fa6d962ec..4711fb191a072 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -519,12 +519,16 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
+
+ switch (cmd) {
+ case HIDIOCGUSAGE:
++ if (uref->usage_index >= field->report_count)
++ goto inval;
+ uref->value = field->value[uref->usage_index];
+ if (copy_to_user(user_arg, uref, sizeof(*uref)))
+ goto fault;
+ goto goodreturn;
+
+ case HIDIOCSUSAGE:
++ if (uref->usage_index >= field->report_count)
++ goto inval;
+ field->value[uref->usage_index] = uref->value;
+ goto goodreturn;
+
+diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
+index dfb122b5e1b76..b812b199e5e5b 100644
+--- a/drivers/hwmon/nct7904.c
++++ b/drivers/hwmon/nct7904.c
+@@ -197,7 +197,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
+ if (ret < 0)
+ return ret;
+ cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
+- if (cnt == 0x1fff)
++ if (cnt == 0 || cnt == 0x1fff)
+ rpm = 0;
+ else
+ rpm = 1350000 / cnt;
+@@ -209,7 +209,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
+ if (ret < 0)
+ return ret;
+ cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
+- if (cnt == 0x1fff)
++ if (cnt == 0 || cnt == 0x1fff)
+ rpm = 0;
+ else
+ rpm = 1350000 / cnt;
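In the nct7904 hunk the 13-bit tachometer count is assembled from a 16-bit register read and converted with rpm = 1350000 / cnt; 0x1fff already meant "no reading", and the fix also maps a raw count of 0 to 0 rpm instead of dividing by zero. Standalone version with made-up register values:

/* Standalone version of the fan-count conversion, including the new guard. */
#include <stdio.h>

static unsigned fan_rpm(unsigned reg16)
{
        /* 13-bit count: high byte shifted down over the 5 low bits */
        unsigned cnt = ((reg16 & 0xff00) >> 3) | (reg16 & 0x1f);

        if (cnt == 0 || cnt == 0x1fff)  /* stopped, or no tach pulses seen */
                return 0;
        return 1350000 / cnt;
}

int main(void)
{
        printf("%u rpm\n", fan_rpm(0x2a15)); /* ~989 rpm */
        printf("%u rpm\n", fan_rpm(0x0000)); /* old code: divide by zero */
        printf("%u rpm\n", fan_rpm(0xffff)); /* cnt == 0x1fff: no fan */
        return 0;
}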
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index 0b90aa0318df3..9c162a01a5849 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -587,6 +587,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
+ /* master sent stop */
+ if (ssr_filtered & SSR) {
+ i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
++ rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
+ rcar_i2c_write(priv, ICSIER, SAR);
+ rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
+ }
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index cc193f2ba5d37..def62d5b42ca7 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -354,7 +354,7 @@ static int i2c_device_probe(struct device *dev)
+ * or ACPI ID table is supplied for the probing device.
+ */
+ if (!driver->id_table &&
+- !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
++ !acpi_driver_match_device(dev, dev->driver) &&
+ !i2c_of_match_device(dev->driver->of_match_table, client)) {
+ status = -ENODEV;
+ goto put_sync_adapter;
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index 0e6a9536eca62..612cbf668adf8 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -811,7 +811,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
+ for (i = 0 ; i < mag->size; ++i) {
+ struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
+
+- BUG_ON(!iova);
++ if (WARN_ON(!iova))
++ continue;
++
+ private_free_iova(iovad, iova);
+ }
+
+diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
+index e00f2fa27f00e..a8322a4e18d36 100644
+--- a/drivers/irqchip/irq-stm32-exti.c
++++ b/drivers/irqchip/irq-stm32-exti.c
+@@ -431,6 +431,16 @@ static void stm32_irq_ack(struct irq_data *d)
+ irq_gc_unlock(gc);
+ }
+
++/* directly set the target bit without reading first. */
++static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg)
++{
++ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
++ void __iomem *base = chip_data->host_data->base;
++ u32 val = BIT(d->hwirq % IRQS_PER_BANK);
++
++ writel_relaxed(val, base + reg);
++}
++
+ static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
+ {
+ struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+@@ -464,9 +474,9 @@ static void stm32_exti_h_eoi(struct irq_data *d)
+
+ raw_spin_lock(&chip_data->rlock);
+
+- stm32_exti_set_bit(d, stm32_bank->rpr_ofst);
++ stm32_exti_write_bit(d, stm32_bank->rpr_ofst);
+ if (stm32_bank->fpr_ofst != UNDEF_REG)
+- stm32_exti_set_bit(d, stm32_bank->fpr_ofst);
++ stm32_exti_write_bit(d, stm32_bank->fpr_ofst);
+
+ raw_spin_unlock(&chip_data->rlock);
+
+diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
+index 12d6764844724..ed75636a6fb34 100644
+--- a/drivers/media/cec/cec-api.c
++++ b/drivers/media/cec/cec-api.c
+@@ -147,7 +147,13 @@ static long cec_adap_g_log_addrs(struct cec_adapter *adap,
+ struct cec_log_addrs log_addrs;
+
+ mutex_lock(&adap->lock);
+- log_addrs = adap->log_addrs;
++ /*
++ * We use memcpy here instead of assignment since there is a
++ * hole at the end of struct cec_log_addrs that an assignment
++ * might ignore. So when we do copy_to_user() we could leak
++ * one byte of memory.
++ */
++ memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
+ if (!adap->is_configured)
+ memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
+ sizeof(log_addrs.log_addr));
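The cec hunk's comment explains the swap from struct assignment to memcpy(): assignment need not copy padding, so the hole at the end of struct cec_log_addrs could carry uninitialized stack bytes into the later copy_to_user(). memcpy() from a fully initialized source copies the hole deterministically. Demonstration with an artificial struct hole:

/* Why memcpy() is used before copying a struct to userspace. */
#include <stdio.h>
#include <string.h>

struct with_hole {
        long a;
        char b;         /* typically 7 bytes of tail padding follow on LP64 */
};

int main(void)
{
        struct with_hole src, dst_assign, dst_memcpy;

        memset(&src, 0, sizeof(src));   /* source fully initialized, hole included */
        src.a = 1;
        src.b = 2;

        dst_assign = src;                       /* padding contents unspecified */
        memcpy(&dst_memcpy, &src, sizeof(src)); /* padding copied verbatim */

        printf("sizeof=%zu, fields use %zu bytes; the rest is the hole\n",
               sizeof(struct with_hole), sizeof(long) + 1);
        printf("assign: a=%ld b=%d  memcpy: a=%ld b=%d\n",
               dst_assign.a, dst_assign.b, dst_memcpy.a, dst_memcpy.b);
        /* only dst_memcpy is safe to copy to userspace in full */
        return 0;
}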
+diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
+index d0cdee1c6eb0b..bf36b1e22b635 100644
+--- a/drivers/media/pci/ttpci/av7110.c
++++ b/drivers/media/pci/ttpci/av7110.c
+@@ -406,14 +406,15 @@ static void debiirq(unsigned long cookie)
+ case DATA_CI_GET:
+ {
+ u8 *data = av7110->debi_virt;
++ u8 data_0 = data[0];
+
+- if ((data[0] < 2) && data[2] == 0xff) {
++ if (data_0 < 2 && data[2] == 0xff) {
+ int flags = 0;
+ if (data[5] > 0)
+ flags |= CA_CI_MODULE_PRESENT;
+ if (data[5] > 5)
+ flags |= CA_CI_MODULE_READY;
+- av7110->ci_slot[data[0]].flags = flags;
++ av7110->ci_slot[data_0].flags = flags;
+ } else
+ ci_get_data(&av7110->ci_rbuffer,
+ av7110->debi_virt,
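The av7110 hunk fixes a double fetch from a device-writable DEBI buffer: data[0] was read once for the bounds check and again as the array index, so the device could change it in between. Caching the byte in data_0 makes the check and the use operate on one snapshot:

/* Shape of the double-fetch fix; 'volatile' stands in for a DMA buffer
 * the device may rewrite at any time. */
#include <stdio.h>

struct slot { int flags; };

static void handle(volatile unsigned char *data, struct slot *slots)
{
        unsigned char data_0 = data[0]; /* single fetch */

        if (data_0 < 2 && data[2] == 0xff) {
                /* data_0 cannot change between the check and this index */
                slots[data_0].flags = 1;
        }
}

int main(void)
{
        unsigned char buf[8] = { 1, 0, 0xff };
        struct slot slots[2] = { { 0 }, { 0 } };

        handle(buf, slots);
        printf("slot1 flags: %d\n", slots[1].flags);
        return 0;
}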
+diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
+index 71f4fe882d138..74f68ac3c9a75 100644
+--- a/drivers/media/platform/davinci/vpif_capture.c
++++ b/drivers/media/platform/davinci/vpif_capture.c
+@@ -1482,8 +1482,6 @@ probe_out:
+ /* Unregister video device */
+ video_unregister_device(&ch->video_dev);
+ }
+- kfree(vpif_obj.sd);
+- v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ return err;
+ }
+diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c
+index 18ca12d78314c..66703989ae185 100644
+--- a/drivers/media/rc/gpio-ir-tx.c
++++ b/drivers/media/rc/gpio-ir-tx.c
+@@ -79,13 +79,8 @@ static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+ // space
+ edge = ktime_add_us(edge, txbuf[i]);
+ delta = ktime_us_delta(edge, ktime_get());
+- if (delta > 10) {
+- spin_unlock_irqrestore(&gpio_ir->lock, flags);
+- usleep_range(delta, delta + 10);
+- spin_lock_irqsave(&gpio_ir->lock, flags);
+- } else if (delta > 0) {
++ if (delta > 0)
+ udelay(delta);
+- }
+ } else {
+ // pulse
+ ktime_t last = ktime_add_us(edge, txbuf[i]);
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index b33030e3385c7..da91965b8f7b2 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -196,6 +196,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
++ /* EBG */
++ { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info },
+ /* GLK */
+ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
+@@ -225,6 +228,22 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info },
++ /* TGL-H */
++ { PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&bxt_info },
++ { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&bxt_info },
++ { PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x43da), (kernel_ulong_t)&bxt_uart_info },
++ { PCI_VDEVICE(INTEL, 0x43e8), (kernel_ulong_t)&bxt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&bxt_info },
++ { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&bxt_info },
+ /* EHL */
+ { PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info },
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 4d0d13d5d0998..635345bced313 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -2924,7 +2924,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
+ }
+
+ u64_stats_update_begin(&tx_ring->syncp);
+- tx_ring->tx_stats.missed_tx = missed_tx;
++ tx_ring->tx_stats.missed_tx += missed_tx;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ return rc;
+@@ -3848,6 +3848,9 @@ static void ena_keep_alive_wd(void *adapter_data,
+ rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
+
+ u64_stats_update_begin(&adapter->syncp);
++ /* These stats are accumulated by the device, so the counters indicate
++ * all drops since last reset.
++ */
+ adapter->dev_stats.rx_drops = rx_drops;
+ u64_stats_update_end(&adapter->syncp);
+ }
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 2580bcd850253..3978d82c95989 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -751,8 +751,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
+ continue;
+
+ err = gfar_parse_group(child, priv, model);
+- if (err)
++ if (err) {
++ of_node_put(child);
+ goto err_grp_init;
++ }
+ }
+ } else { /* SQ_SG_MODE */
+ err = gfar_parse_group(np, priv, model);
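The gianfar hunk plugs an OF-node refcount leak: for_each_available_child_of_node() holds a reference on the current child and releases it when the loop advances, so any early goto out of the loop body must call of_node_put(child) itself. A sketch with the refcounting modeled by stubs:

/* Sketch of the of_node refcount rule; get/put stubs model the real API. */
#include <stdio.h>

struct node_stub { int refcount; };

static struct node_stub *get(struct node_stub *n) { n->refcount++; return n; }
static void put(struct node_stub *n) { n->refcount--; }

int main(void)
{
        struct node_stub children[3] = { {0}, {0}, {0} };
        struct node_stub *child;
        int i, err;

        for (i = 0; i < 3; i++) {       /* models the child iterator */
                child = get(&children[i]);
                err = (i == 1) ? -1 : 0; /* parse fails on the second child */
                if (err) {
                        put(child);     /* the added of_node_put(child) */
                        goto err_out;
                }
                put(child);             /* normally dropped as the loop advances */
        }
err_out:
        for (i = 0; i < 3; i++)
                printf("child %d refcount: %d\n", i, children[i].refcount);
        return 0;       /* all counts back to 0: no leak */
}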
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+index ccd852ad62a4b..d50c5b55da180 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+ }
+
+ /* alloc the udl from per cpu ddp pool */
+- ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
++ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
+ if (!ddp->udl) {
+ e_err(drv, "failed allocated ddp context\n");
+ goto out_noddp_unmap;
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index b805abc9ec3b4..5fbabae2909ee 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev)
+ kfree(port);
+ }
+
++#define IPVLAN_ALWAYS_ON_OFLOADS \
++ (NETIF_F_SG | NETIF_F_HW_CSUM | \
++ NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
++
++#define IPVLAN_ALWAYS_ON \
++ (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
++
+ #define IPVLAN_FEATURES \
+- (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
++ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
+
++ /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */
++
+ #define IPVLAN_STATE_MASK \
+ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+
+@@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev)
+ dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
+ (phy_dev->state & IPVLAN_STATE_MASK);
+ dev->features = phy_dev->features & IPVLAN_FEATURES;
+- dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
++ dev->features |= IPVLAN_ALWAYS_ON;
++ dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
++ dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
+ dev->hw_enc_features |= dev->features;
+ dev->gso_max_size = phy_dev->gso_max_size;
+ dev->gso_max_segs = phy_dev->gso_max_segs;
+@@ -225,7 +236,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev,
+ {
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+- return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
++ features |= NETIF_F_ALL_FOR_ALL;
++ features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
++ features = netdev_increment_features(ipvlan->phy_dev->features,
++ features, features);
++ features |= IPVLAN_ALWAYS_ON;
++ features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
++
++ return features;
+ }
+
+ static void ipvlan_change_rx_flags(struct net_device *dev, int change)
+@@ -732,10 +750,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
+
+ case NETDEV_FEAT_CHANGE:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+- ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
+ ipvlan->dev->gso_max_size = dev->gso_max_size;
+ ipvlan->dev->gso_max_segs = dev->gso_max_segs;
+- netdev_features_change(ipvlan->dev);
++ netdev_update_features(ipvlan->dev);
+ }
+ break;
+
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 9d3209ae41cfb..07622cf8765ae 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -1259,6 +1259,9 @@ static void macvlan_port_destroy(struct net_device *dev)
+ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+ {
++ struct nlattr *nla, *head;
++ int rem, len;
++
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+@@ -1306,6 +1309,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
+ return -EADDRNOTAVAIL;
+ }
+
++ if (data[IFLA_MACVLAN_MACADDR_DATA]) {
++ head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
++ len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
++
++ nla_for_each_attr(nla, head, len, rem) {
++ if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
++ nla_len(nla) != ETH_ALEN)
++ return -EINVAL;
++
++ if (!is_valid_ether_addr(nla_data(nla)))
++ return -EADDRNOTAVAIL;
++ }
++ }
++
+ if (data[IFLA_MACVLAN_MACADDR_COUNT])
+ return -EINVAL;
+
+@@ -1362,10 +1379,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
+ len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
+
+ nla_for_each_attr(nla, head, len, rem) {
+- if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
+- nla_len(nla) != ETH_ALEN)
+- continue;
+-
+ addr = nla_data(nla);
+ ret = macvlan_hash_add_source(vlan, addr);
+ if (ret)
+diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
+index 2ae57c1de7b55..ae4c9edc445c3 100644
+--- a/drivers/net/wireless/ath/ath10k/hw.h
++++ b/drivers/net/wireless/ath/ath10k/hw.h
+@@ -810,7 +810,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
+
+ #define TARGET_10_4_TX_DBG_LOG_SIZE 1024
+ #define TARGET_10_4_NUM_WDS_ENTRIES 32
+-#define TARGET_10_4_DMA_BURST_SIZE 0
++#define TARGET_10_4_DMA_BURST_SIZE 1
+ #define TARGET_10_4_MAC_AGGR_DELIM 0
+ #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+ #define TARGET_10_4_VOW_CONFIG 0
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index e3ebb7abbdaed..4ca50353538ef 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -82,6 +82,8 @@
+
+ #define BRCMF_ND_INFO_TIMEOUT msecs_to_jiffies(2000)
+
++#define BRCMF_PS_MAX_TIMEOUT_MS 2000
++
+ #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
+ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
+
+@@ -2789,6 +2791,12 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
+ else
+ bphy_err(drvr, "error (%d)\n", err);
+ }
++
++ err = brcmf_fil_iovar_int_set(ifp, "pm2_sleep_ret",
++ min_t(u32, timeout, BRCMF_PS_MAX_TIMEOUT_MS));
++ if (err)
++ bphy_err(drvr, "Unable to set pm timeout, (%d)\n", err);
++
+ done:
+ brcmf_dbg(TRACE, "Exit\n");
+ return err;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
+index c66c6dc003783..bad06939a247c 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
++++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
+@@ -718,8 +718,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
+
+ usb_anchor_urb(urb, &rtlusb->rx_submitted);
+ err = usb_submit_urb(urb, GFP_KERNEL);
+- if (err)
++ if (err) {
++ usb_unanchor_urb(urb);
++ usb_free_urb(urb);
+ goto err_out;
++ }
+ usb_free_urb(urb);
+ }
+ return 0;
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 83ac88924f253..dce4d6782ceb1 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -1740,7 +1740,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
+ if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
+ dev_err(ctrl->dev,
+ "FCP Op failed - cmdiu dma mapping failed.\n");
+- ret = EFAULT;
++ ret = -EFAULT;
+ goto out_on_error;
+ }
+
+@@ -1750,7 +1750,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
+ if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
+ dev_err(ctrl->dev,
+ "FCP Op failed - rspiu dma mapping failed.\n");
+- ret = EFAULT;
++ ret = -EFAULT;
+ }
+
+ atomic_set(&op->state, FCPOP_STATE_IDLE);
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 484aad0d0c9c6..0a458f7880887 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -249,12 +249,17 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
+ fallback = ns;
+ }
+
+- /* No optimized path found, re-check the current path */
++ /*
++ * The loop above skips the current path for round-robin semantics.
++ * Fall back to the current path if either:
++ * - no other optimized path found and current is optimized,
++ * - no other usable path found and current is usable.
++ */
+ if (!nvme_path_is_disabled(old) &&
+- old->ana_state == NVME_ANA_OPTIMIZED) {
+- found = old;
+- goto out;
+- }
++ (old->ana_state == NVME_ANA_OPTIMIZED ||
++ (!fallback && old->ana_state == NVME_ANA_NONOPTIMIZED)))
++ return old;
++
+ if (!fallback)
+ return NULL;
+ found = fallback;
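The nvme multipath hunk tightens the round-robin fallback. The scan loop deliberately skips the current path, so afterwards the current path should be chosen only when it is optimized, or when nothing else was found and it is still usable and non-optimized; otherwise the discovered fallback (or NULL) wins. The decision table, with states simplified to an enum:

/* Decision table for the fixed round-robin fallback. */
#include <stdio.h>

enum ana { OPTIMIZED, NONOPTIMIZED, INACCESSIBLE };

static const char *pick(enum ana old, int old_usable, int have_fallback)
{
        if (old_usable &&
            (old == OPTIMIZED || (!have_fallback && old == NONOPTIMIZED)))
                return "old (current path)";
        return have_fallback ? "fallback" : "none";
}

int main(void)
{
        printf("%s\n", pick(OPTIMIZED, 1, 1));    /* old: optimized wins */
        printf("%s\n", pick(NONOPTIMIZED, 1, 1)); /* fallback: keep rotating */
        printf("%s\n", pick(NONOPTIMIZED, 1, 0)); /* old: only usable path */
        printf("%s\n", pick(INACCESSIBLE, 1, 0)); /* none */
        return 0;
}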
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 270d502b8cd50..374db5d59cf87 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -103,11 +103,14 @@ struct qcom_pcie_resources_2_1_0 {
+ struct clk *iface_clk;
+ struct clk *core_clk;
+ struct clk *phy_clk;
++ struct clk *aux_clk;
++ struct clk *ref_clk;
+ struct reset_control *pci_reset;
+ struct reset_control *axi_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *por_reset;
+ struct reset_control *phy_reset;
++ struct reset_control *ext_reset;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+ };
+
+@@ -253,6 +256,14 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+ if (IS_ERR(res->phy_clk))
+ return PTR_ERR(res->phy_clk);
+
++ res->aux_clk = devm_clk_get_optional(dev, "aux");
++ if (IS_ERR(res->aux_clk))
++ return PTR_ERR(res->aux_clk);
++
++ res->ref_clk = devm_clk_get_optional(dev, "ref");
++ if (IS_ERR(res->ref_clk))
++ return PTR_ERR(res->ref_clk);
++
+ res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
+ if (IS_ERR(res->pci_reset))
+ return PTR_ERR(res->pci_reset);
+@@ -269,6 +280,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
+ if (IS_ERR(res->por_reset))
+ return PTR_ERR(res->por_reset);
+
++ res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
++ if (IS_ERR(res->ext_reset))
++ return PTR_ERR(res->ext_reset);
++
+ res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
+ return PTR_ERR_OR_ZERO(res->phy_reset);
+ }
+@@ -277,14 +292,17 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
+ {
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+
++ clk_disable_unprepare(res->phy_clk);
+ reset_control_assert(res->pci_reset);
+ reset_control_assert(res->axi_reset);
+ reset_control_assert(res->ahb_reset);
+ reset_control_assert(res->por_reset);
+- reset_control_assert(res->pci_reset);
++ reset_control_assert(res->ext_reset);
++ reset_control_assert(res->phy_reset);
+ clk_disable_unprepare(res->iface_clk);
+ clk_disable_unprepare(res->core_clk);
+- clk_disable_unprepare(res->phy_clk);
++ clk_disable_unprepare(res->aux_clk);
++ clk_disable_unprepare(res->ref_clk);
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ }
+
+@@ -315,24 +333,36 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ goto err_assert_ahb;
+ }
+
+- ret = clk_prepare_enable(res->phy_clk);
+- if (ret) {
+- dev_err(dev, "cannot prepare/enable phy clock\n");
+- goto err_clk_phy;
+- }
+-
+ ret = clk_prepare_enable(res->core_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable core clock\n");
+ goto err_clk_core;
+ }
+
++ ret = clk_prepare_enable(res->aux_clk);
++ if (ret) {
++ dev_err(dev, "cannot prepare/enable aux clock\n");
++ goto err_clk_aux;
++ }
++
++ ret = clk_prepare_enable(res->ref_clk);
++ if (ret) {
++ dev_err(dev, "cannot prepare/enable ref clock\n");
++ goto err_clk_ref;
++ }
++
+ ret = reset_control_deassert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ahb reset\n");
+ goto err_deassert_ahb;
+ }
+
++ ret = reset_control_deassert(res->ext_reset);
++ if (ret) {
++ dev_err(dev, "cannot deassert ext reset\n");
++ goto err_deassert_ahb;
++ }
++
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= ~BIT(0);
+@@ -387,6 +417,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ return ret;
+ }
+
++ ret = clk_prepare_enable(res->phy_clk);
++ if (ret) {
++ dev_err(dev, "cannot prepare/enable phy clock\n");
++ goto err_deassert_ahb;
++ }
++
+ /* wait for clock acquisition */
+ usleep_range(1000, 1500);
+
+@@ -400,10 +436,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+ return 0;
+
+ err_deassert_ahb:
++ clk_disable_unprepare(res->ref_clk);
++err_clk_ref:
++ clk_disable_unprepare(res->aux_clk);
++err_clk_aux:
+ clk_disable_unprepare(res->core_clk);
+ err_clk_core:
+- clk_disable_unprepare(res->phy_clk);
+-err_clk_phy:
+ clk_disable_unprepare(res->iface_clk);
+ err_assert_ahb:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
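The qcom PCIe hunk adds the optional aux and ref clocks, moves the phy clock enable until after the resets are released, and extends the error path accordingly. The underlying pattern is the standard goto-unwind ladder: acquire resources in order, release them in exact reverse order, one label per stage. Generic shape with stubbed clock calls:

/* Generic shape of the goto-unwind ladder; enable/disable are stubs,
 * not the real clk API. */
#include <stdio.h>

static int enable(const char *name, int fail)
{
        printf("enable  %s\n", name);
        return fail ? -1 : 0;
}
static void disable(const char *name) { printf("disable %s\n", name); }

static int init(void)
{
        if (enable("iface", 0)) goto err_iface;
        if (enable("core", 0))  goto err_clk_core;
        if (enable("aux", 0))   goto err_clk_aux;
        if (enable("ref", 1))   goto err_clk_ref; /* simulate failure here */
        return 0;

err_clk_ref:
        disable("aux");
err_clk_aux:
        disable("core");
err_clk_core:
        disable("iface");
err_iface:
        return -1;
}

int main(void) { return init() ? 1 : 0; }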
+diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
+index ae4aa0e1f2f42..1f087746b7bb0 100644
+--- a/drivers/pci/slot.c
++++ b/drivers/pci/slot.c
+@@ -304,13 +304,16 @@ placeholder:
+ slot_name = make_slot_name(name);
+ if (!slot_name) {
+ err = -ENOMEM;
++ kfree(slot);
+ goto err;
+ }
+
+ err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
+ "%s", slot_name);
+- if (err)
++ if (err) {
++ kobject_put(&slot->kobj);
+ goto err;
++ }
+
+ INIT_LIST_HEAD(&slot->list);
+ list_add(&slot->list, &parent->slots);
+@@ -329,7 +332,6 @@ out:
+ mutex_unlock(&pci_slot_mutex);
+ return slot;
+ err:
+- kfree(slot);
+ slot = ERR_PTR(err);
+ goto out;
+ }
+diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
+index 831850435c23b..5734a78dbb8e6 100644
+--- a/drivers/s390/cio/css.c
++++ b/drivers/s390/cio/css.c
+@@ -677,6 +677,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data)
+ rc = css_evaluate_known_subchannel(sch, 1);
+ if (rc == -EAGAIN)
+ css_schedule_eval(sch->schid);
++ /*
++ * The loop might take a long time for platforms with lots of
++ * known devices. Allow scheduling here.
++ */
++ cond_resched();
+ }
+ return 0;
+ }
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index 1791a393795da..07a0dadc75bf5 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -255,9 +255,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
+ WARN_ON(!fcf_dev);
+ new->fcf_dev = NULL;
+ fcoe_fcf_device_delete(fcf_dev);
+- kfree(new);
+ mutex_unlock(&cdev->lock);
+ }
++ kfree(new);
+ }
+
+ /**
+diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
+index b766463579800..d0296f7cf45fc 100644
+--- a/drivers/scsi/lpfc/lpfc_vport.c
++++ b/drivers/scsi/lpfc/lpfc_vport.c
+@@ -642,27 +642,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
+ vport->port_state < LPFC_VPORT_READY)
+ return -EAGAIN;
+ }
++
+ /*
+- * This is a bit of a mess. We want to ensure the shost doesn't get
+- * torn down until we're done with the embedded lpfc_vport structure.
+- *
+- * Beyond holding a reference for this function, we also need a
+- * reference for outstanding I/O requests we schedule during delete
+- * processing. But once we scsi_remove_host() we can no longer obtain
+- * a reference through scsi_host_get().
+- *
+- * So we take two references here. We release one reference at the
+- * bottom of the function -- after delinking the vport. And we
+- * release the other at the completion of the unreg_vpi that get's
+- * initiated after we've disposed of all other resources associated
+- * with the port.
++ * Take an early refcount for outstanding I/O requests we schedule during
++ * delete processing for unreg_vpi. Always keep this before
++ * scsi_remove_host() as we can no longer obtain a reference through
++ * scsi_host_get() after scsi_remove_host() as shost is set to SHOST_DEL.
+ */
+ if (!scsi_host_get(shost))
+ return VPORT_INVAL;
+- if (!scsi_host_get(shost)) {
+- scsi_host_put(shost);
+- return VPORT_INVAL;
+- }
++
+ lpfc_free_sysfs_attr(vport);
+
+ lpfc_debugfs_terminate(vport);
+@@ -809,8 +798,9 @@ skip_logo:
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
+ lpfc_mbx_unreg_vpi(vport))
+ scsi_host_put(shost);
+- } else
++ } else {
+ scsi_host_put(shost);
++ }
+
+ lpfc_free_vpi(phba, vport->vpi);
+ vport->work_port_events = 0;
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index a44de4c5dcf6c..fc6e12fb7d77b 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -3673,10 +3673,22 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ }
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND) {
++ bool do_delete = false;
++
++ if (fcport->scan_needed &&
++ fcport->disc_state == DSC_LOGIN_PEND) {
++ /* Cable got disconnected after we sent
++ * a login. Do delete to prevent timeout.
++ */
++ fcport->logout_on_delete = 1;
++ do_delete = true;
++ }
++
+ fcport->scan_needed = 0;
+- if ((qla_dual_mode_enabled(vha) ||
+- qla_ini_mode_enabled(vha)) &&
+- atomic_read(&fcport->state) == FCS_ONLINE) {
++ if (((qla_dual_mode_enabled(vha) ||
++ qla_ini_mode_enabled(vha)) &&
++ atomic_read(&fcport->state) == FCS_ONLINE) ||
++ do_delete) {
+ if (fcport->loop_id != FC_NO_LOOP_ID) {
+ if (fcport->flags & FCF_FCP2_DEVICE)
+ fcport->logout_on_delete = 0;
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index 62a16463f0254..c1631e42d35d1 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -335,14 +335,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
+ if (time_after(jiffies, wait_time))
+ break;
+
+- /*
+- * Check if it's UNLOADING, cause we cannot poll in
+- * this case, or else a NULL pointer dereference
+- * is triggered.
+- */
+- if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
+- return QLA_FUNCTION_TIMEOUT;
+-
+ /* Check for pending interrupts. */
+ qla2x00_poll(ha->rsp_q_map[0]);
+
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index 941aa53363f56..f4815a4084d8c 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -535,6 +535,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+ struct nvme_private *priv = fd->private;
+ struct qla_nvme_rport *qla_rport = rport->private;
+
++ if (!priv) {
++ /* nvme association has been torn down */
++ return rval;
++ }
++
+ fcport = qla_rport->fcport;
+
+ if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index d91c95d9981ac..67b1e74fcd1e6 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1993,6 +1993,11 @@ skip_pio:
+ /* Determine queue resources */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->msix_count = QLA_BASE_VECTORS;
++
++ /* Check if FW supports MQ or not */
++ if (!(ha->fw_attributes & BIT_6))
++ goto mqiobase_exit;
++
+ if (!ql2xmqsupport || !ql2xnvmeenable ||
+ (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ goto mqiobase_exit;
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index cb8a892e2d393..b75e6e4d58c06 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1262,7 +1262,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
+
+ qla24xx_chk_fcp_state(sess);
+
+- ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
++ ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
+ "Scheduling sess %p for deletion %8phC\n",
+ sess, sess->port_name);
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index a5c78b38d3022..dbad926e8f87f 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -3174,7 +3174,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
+ pr_err("%s could not find host no %u\n",
+ __func__, ev->u.set_flashnode.host_no);
+ err = -ENODEV;
+- goto put_host;
++ goto exit_set_fnode;
+ }
+
+ idx = ev->u.set_flashnode.flashnode_idx;
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index b41b88bcab3d9..5e502e1605549 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -1516,6 +1516,7 @@ unblock_reqs:
+ int ufshcd_hold(struct ufs_hba *hba, bool async)
+ {
+ int rc = 0;
++ bool flush_result;
+ unsigned long flags;
+
+ if (!ufshcd_is_clkgating_allowed(hba))
+@@ -1547,7 +1548,9 @@ start:
+ break;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+- flush_work(&hba->clk_gating.ungate_work);
++ flush_result = flush_work(&hba->clk_gating.ungate_work);
++ if (hba->clk_gating.is_suspended && !flush_result)
++ goto out;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
+@@ -5609,7 +5612,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+ */
+ static irqreturn_t ufshcd_intr(int irq, void *__hba)
+ {
+- u32 intr_status, enabled_intr_status;
++ u32 intr_status, enabled_intr_status = 0;
+ irqreturn_t retval = IRQ_NONE;
+ struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
+@@ -5623,7 +5626,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
+ * read, make sure we handle them by checking the interrupt status
+ * again in a loop until we process all of the reqs before returning.
+ */
+- do {
++ while (intr_status && retries--) {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+@@ -5634,7 +5637,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
+ }
+
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+- } while (intr_status && --retries);
++ }
+
+ spin_unlock(hba->host->host_lock);
+ return retval;
+@@ -6137,7 +6140,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
+ /* command completed already */
+ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
+ __func__, tag);
+- goto out;
++ goto cleanup;
+ } else {
+ dev_err(hba->dev,
+ "%s: no response from device. tag = %d, err %d\n",
+@@ -6171,6 +6174,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
+ goto out;
+ }
+
++cleanup:
+ scsi_dma_unmap(cmd);
+
+ spin_lock_irqsave(host->host_lock, flags);
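The ufshcd interrupt-handler change is two-fold: enabled_intr_status now starts at 0, and the do-while becomes a while, so a spurious or shared interrupt that reads no pending status skips the body entirely (returning IRQ_NONE) instead of processing stale state once. The control-flow difference:

/* Control-flow difference between the old and new retry loops. */
#include <stdio.h>

int main(void)
{
        unsigned status = 0;    /* spurious IRQ: no bits pending */
        int retries = 4, body_runs = 0;

        /* old shape: do { ... } while (status && --retries); */
        do {
                body_runs++;    /* runs once even with status == 0 */
        } while (status && --retries);
        printf("do-while body runs: %d\n", body_runs); /* 1 */

        /* new shape: while (status && retries--) { ... } */
        body_runs = 0;
        retries = 4;
        while (status && retries--)
                body_runs++;
        printf("while body runs:    %d\n", body_runs); /* 0 */
        return 0;
}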
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 7e92ab0cc9920..8146c2d91d307 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -443,7 +443,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
+ {
+ u32 div, mbrdiv;
+
+- div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
++ /* Ensure spi->clk_rate is even */
++ div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
+
+ /*
+ * SPI framework set xfer->speed_hz to master->max_speed_hz if
+@@ -469,20 +470,27 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
+ /**
+ * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
+ * @spi: pointer to the spi controller data structure
++ * @xfer_len: length of the message to be transferred
+ */
+-static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
++static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
+ {
+- u32 fthlv, half_fifo;
++ u32 fthlv, half_fifo, packet;
+
+ /* data packet should not exceed 1/2 of fifo space */
+ half_fifo = (spi->fifo_size / 2);
+
++ /* data_packet should not exceed transfer length */
++ if (half_fifo > xfer_len)
++ packet = xfer_len;
++ else
++ packet = half_fifo;
++
+ if (spi->cur_bpw <= 8)
+- fthlv = half_fifo;
++ fthlv = packet;
+ else if (spi->cur_bpw <= 16)
+- fthlv = half_fifo / 2;
++ fthlv = packet / 2;
+ else
+- fthlv = half_fifo / 4;
++ fthlv = packet / 4;
+
+ /* align packet size with data registers access */
+ if (spi->cur_bpw > 8)
+@@ -490,6 +498,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
+ else
+ fthlv -= (fthlv % 4); /* multiple of 4 */
+
++ if (!fthlv)
++ fthlv = 1;
++
+ return fthlv;
+ }
+
+@@ -962,13 +973,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
+ if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
+ stm32h7_spi_read_rxfifo(spi, false);
+
+- writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);
++ writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
+
+ spin_unlock_irqrestore(&spi->lock, flags);
+
+ if (end) {
+- spi_finalize_current_transfer(master);
+ stm32h7_spi_disable(spi);
++ spi_finalize_current_transfer(master);
+ }
+
+ return IRQ_HANDLED;
+@@ -1396,7 +1407,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
+ cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
+ STM32H7_SPI_CFG1_DSIZE;
+
+- spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi);
++ spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
+ fthlv = spi->cur_fthlv - 1;
+
+ cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
+@@ -1579,39 +1590,33 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
+ unsigned long flags;
+ unsigned int comm_type;
+ int nb_words, ret = 0;
++ int mbr;
+
+ spin_lock_irqsave(&spi->lock, flags);
+
+- if (spi->cur_bpw != transfer->bits_per_word) {
+- spi->cur_bpw = transfer->bits_per_word;
+- spi->cfg->set_bpw(spi);
+- }
++ spi->cur_xferlen = transfer->len;
+
+- if (spi->cur_speed != transfer->speed_hz) {
+- int mbr;
++ spi->cur_bpw = transfer->bits_per_word;
++ spi->cfg->set_bpw(spi);
+
+- /* Update spi->cur_speed with real clock speed */
+- mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
+- spi->cfg->baud_rate_div_min,
+- spi->cfg->baud_rate_div_max);
+- if (mbr < 0) {
+- ret = mbr;
+- goto out;
+- }
+-
+- transfer->speed_hz = spi->cur_speed;
+- stm32_spi_set_mbr(spi, mbr);
++ /* Update spi->cur_speed with real clock speed */
++ mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
++ spi->cfg->baud_rate_div_min,
++ spi->cfg->baud_rate_div_max);
++ if (mbr < 0) {
++ ret = mbr;
++ goto out;
+ }
+
+- comm_type = stm32_spi_communication_type(spi_dev, transfer);
+- if (spi->cur_comm != comm_type) {
+- ret = spi->cfg->set_mode(spi, comm_type);
++ transfer->speed_hz = spi->cur_speed;
++ stm32_spi_set_mbr(spi, mbr);
+
+- if (ret < 0)
+- goto out;
++ comm_type = stm32_spi_communication_type(spi_dev, transfer);
++ ret = spi->cfg->set_mode(spi, comm_type);
++ if (ret < 0)
++ goto out;
+
+- spi->cur_comm = comm_type;
+- }
++ spi->cur_comm = comm_type;
+
+ if (spi->cfg->set_data_idleness)
+ spi->cfg->set_data_idleness(spi, transfer->len);
+@@ -1629,8 +1634,6 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
+ goto out;
+ }
+
+- spi->cur_xferlen = transfer->len;
+-
+ dev_dbg(spi->dev, "transfer communication mode set to %d\n",
+ spi->cur_comm);
+ dev_dbg(spi->dev,
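
Two behavioural changes in the stm32h7 threshold helper are worth noting: the packet used to size the FIFO event is now capped by the transfer length, and the result is clamped to at least 1 so a very short transfer still raises an interrupt instead of never firing. A compilable sketch of the computation (constants and names are illustrative, not the driver's):

    #include <stdio.h>

    static unsigned int prepare_fthlv(unsigned int fifo_size,
                                      unsigned int cur_bpw,
                                      unsigned int xfer_len)
    {
        unsigned int half_fifo = fifo_size / 2;
        /* data packet may not exceed half the FIFO, nor the transfer */
        unsigned int packet = (half_fifo > xfer_len) ? xfer_len : half_fifo;
        unsigned int fthlv;

        if (cur_bpw <= 8)
            fthlv = packet;
        else if (cur_bpw <= 16)
            fthlv = packet / 2;
        else
            fthlv = packet / 4;

        /* align with data register access width */
        if (cur_bpw > 8)
            fthlv -= fthlv % 2;
        else
            fthlv -= fthlv % 4;

        return fthlv ? fthlv : 1;   /* the new clamp */
    }

    int main(void)
    {
        printf("%u\n", prepare_fthlv(16, 8, 100)); /* normal case: 8 */
        printf("%u\n", prepare_fthlv(16, 8, 1));   /* 1-byte xfer: 1, not 0 */
        return 0;
    }
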
+diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
+index 8533444159635..e7b3c6e5d5744 100644
+--- a/drivers/target/target_core_internal.h
++++ b/drivers/target/target_core_internal.h
+@@ -138,6 +138,7 @@ int init_se_kmem_caches(void);
+ void release_se_kmem_caches(void);
+ u32 scsi_get_new_index(scsi_index_t);
+ void transport_subsystem_check_init(void);
++void transport_uninit_session(struct se_session *);
+ unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+ void transport_dump_dev_state(struct se_device *, char *, int *);
+ void transport_dump_dev_info(struct se_device *, struct se_lun *,
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 7c78a5d02c083..b1f4be055f838 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -236,6 +236,11 @@ int transport_init_session(struct se_session *se_sess)
+ }
+ EXPORT_SYMBOL(transport_init_session);
+
++void transport_uninit_session(struct se_session *se_sess)
++{
++ percpu_ref_exit(&se_sess->cmd_count);
++}
++
+ /**
+ * transport_alloc_session - allocate a session object and initialize it
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+@@ -579,7 +584,7 @@ void transport_free_session(struct se_session *se_sess)
+ sbitmap_queue_free(&se_sess->sess_tag_pool);
+ kvfree(se_sess->sess_cmd_map);
+ }
+- percpu_ref_exit(&se_sess->cmd_count);
++ transport_uninit_session(se_sess);
+ kmem_cache_free(se_sess_cache, se_sess);
+ }
+ EXPORT_SYMBOL(transport_free_session);
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index d766fb14942b3..8888cdf3eead9 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -1220,7 +1220,14 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
+
+ struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
+
+- tcmu_flush_dcache_range(entry, sizeof(*entry));
++ /*
++ * Flush max. up to end of cmd ring since current entry might
++ * be a padding that is shorter than sizeof(*entry)
++ */
++ size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
++ udev->cmdr_size);
++ tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
++ ring_left : sizeof(*entry));
+
+ if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
+ UPDATE_HEAD(udev->cmdr_last_cleaned,
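
The tcmu change bounds the dcache flush by the bytes left before the command ring wraps, since the entry at the tail may be a PAD record shorter than sizeof(*entry). The min() logic in isolation, as a runnable sketch:

    #include <stdio.h>

    static size_t head_to_end(size_t head, size_t size)
    {
        return size - head;    /* bytes before the ring wraps */
    }

    int main(void)
    {
        size_t entry_size = 64, ring_size = 4096, last_cleaned = 4064;
        size_t ring_left = head_to_end(last_cleaned, ring_size);
        size_t flush_len = ring_left < entry_size ? ring_left : entry_size;

        printf("flush %zu bytes (not %zu)\n", flush_len, entry_size);
        return 0;
    }
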
+diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
+index b9b1e92c6f8db..9d24e85b08631 100644
+--- a/drivers/target/target_core_xcopy.c
++++ b/drivers/target/target_core_xcopy.c
+@@ -479,7 +479,7 @@ int target_xcopy_setup_pt(void)
+ memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
+ ret = transport_init_session(&xcopy_pt_sess);
+ if (ret < 0)
+- return ret;
++ goto destroy_wq;
+
+ xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
+ xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
+@@ -488,12 +488,19 @@ int target_xcopy_setup_pt(void)
+ xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
+
+ return 0;
++
++destroy_wq:
++ destroy_workqueue(xcopy_wq);
++ xcopy_wq = NULL;
++ return ret;
+ }
+
+ void target_xcopy_release_pt(void)
+ {
+- if (xcopy_wq)
++ if (xcopy_wq) {
+ destroy_workqueue(xcopy_wq);
++ transport_uninit_session(&xcopy_pt_sess);
++ }
+ }
+
+ /*
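
The target_xcopy_setup_pt()/release_pt() fix is a classic unwind-ordering repair: each error label undoes exactly the steps that succeeded, in reverse, and teardown mirrors setup. A self-contained sketch of the shape (malloc/free stand in for alloc_workqueue() and transport_init_session()):

    #include <stdio.h>
    #include <stdlib.h>

    static void *wq;    /* xcopy_wq stand-in */
    static void *sess;  /* xcopy_pt_sess stand-in */

    static int setup(int fail_second_step)
    {
        wq = malloc(16);
        if (!wq)
            goto out;

        sess = fail_second_step ? NULL : malloc(16);
        if (!sess)
            goto destroy_wq;

        return 0;

    destroy_wq:
        free(wq);
        wq = NULL;      /* mirrors xcopy_wq = NULL so release is a no-op */
    out:
        return -1;
    }

    static void release(void)
    {
        if (wq) {       /* only tear down what setup completed */
            free(sess);
            free(wq);
        }
    }

    int main(void)
    {
        if (setup(1))
            printf("setup failed, nothing leaked\n");
        release();      /* safe: wq was reset to NULL on the error path */
        return 0;
    }
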
+diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
+index 9e2dbe43667ae..93367dea4d8a5 100644
+--- a/drivers/tty/serial/8250/8250_exar.c
++++ b/drivers/tty/serial/8250/8250_exar.c
+@@ -725,6 +725,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = {
+ .exit = pci_xr17v35x_exit,
+ };
+
++static const struct exar8250_board pbn_fastcom35x_2 = {
++ .num_ports = 2,
++ .setup = pci_xr17v35x_setup,
++ .exit = pci_xr17v35x_exit,
++};
++
++static const struct exar8250_board pbn_fastcom35x_4 = {
++ .num_ports = 4,
++ .setup = pci_xr17v35x_setup,
++ .exit = pci_xr17v35x_exit,
++};
++
++static const struct exar8250_board pbn_fastcom35x_8 = {
++ .num_ports = 8,
++ .setup = pci_xr17v35x_setup,
++ .exit = pci_xr17v35x_exit,
++};
++
+ static const struct exar8250_board pbn_exar_XR17V4358 = {
+ .num_ports = 12,
+ .setup = pci_xr17v35x_setup,
+@@ -795,9 +813,9 @@ static const struct pci_device_id exar_pci_tbl[] = {
+ EXAR_DEVICE(EXAR, EXAR_XR17V358, pbn_exar_XR17V35x),
+ EXAR_DEVICE(EXAR, EXAR_XR17V4358, pbn_exar_XR17V4358),
+ EXAR_DEVICE(EXAR, EXAR_XR17V8358, pbn_exar_XR17V8358),
+- EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_exar_XR17V35x),
+- EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_exar_XR17V35x),
+- EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_exar_XR17V35x),
++ EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_fastcom35x_2),
++ EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_fastcom35x_4),
++ EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_fastcom35x_8),
+
+ EXAR_DEVICE(COMMTECH, COMMTECH_4222PCI335, pbn_fastcom335_2),
+ EXAR_DEVICE(COMMTECH, COMMTECH_4224PCI335, pbn_fastcom335_4),
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index dbb27303a6b49..90f09ed6e5ad3 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -2198,6 +2198,10 @@ int serial8250_do_startup(struct uart_port *port)
+
+ if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
+ unsigned char iir1;
++
++ if (port->irqflags & IRQF_SHARED)
++ disable_irq_nosync(port->irq);
++
+ /*
+ * Test for UARTs that do not reassert THRE when the
+ * transmitter is idle and the interrupt has already
+@@ -2207,8 +2211,6 @@ int serial8250_do_startup(struct uart_port *port)
+ * allow register changes to become visible.
+ */
+ spin_lock_irqsave(&port->lock, flags);
+- if (up->port.irqflags & IRQF_SHARED)
+- disable_irq_nosync(port->irq);
+
+ wait_for_xmitr(up, UART_LSR_THRE);
+ serial_port_out_sync(port, UART_IER, UART_IER_THRI);
+@@ -2220,9 +2222,10 @@ int serial8250_do_startup(struct uart_port *port)
+ iir = serial_port_in(port, UART_IIR);
+ serial_port_out(port, UART_IER, 0);
+
++ spin_unlock_irqrestore(&port->lock, flags);
++
+ if (port->irqflags & IRQF_SHARED)
+ enable_irq(port->irq);
+- spin_unlock_irqrestore(&port->lock, flags);
+
+ /*
+ * If the interrupt is not reasserted, or we otherwise
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index de3e8c24c03e7..a8a538b34b535 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -2252,9 +2252,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+ clk_disable(uap->clk);
+ }
+
+-static void __init
+-pl011_console_get_options(struct uart_amba_port *uap, int *baud,
+- int *parity, int *bits)
++static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
++ int *parity, int *bits)
+ {
+ if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
+ unsigned int lcr_h, ibrd, fbrd;
+@@ -2287,7 +2286,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud,
+ }
+ }
+
+-static int __init pl011_console_setup(struct console *co, char *options)
++static int pl011_console_setup(struct console *co, char *options)
+ {
+ struct uart_amba_port *uap;
+ int baud = 38400;
+@@ -2355,8 +2354,8 @@ static int __init pl011_console_setup(struct console *co, char *options)
+ *
+ * Returns 0 if console matches; otherwise non-zero to use default matching
+ */
+-static int __init pl011_console_match(struct console *co, char *name, int idx,
+- char *options)
++static int pl011_console_match(struct console *co, char *name, int idx,
++ char *options)
+ {
+ unsigned char iotype;
+ resource_size_t addr;
+@@ -2594,7 +2593,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
+
+ static int pl011_register_port(struct uart_amba_port *uap)
+ {
+- int ret;
++ int ret, i;
+
+ /* Ensure interrupts from this UART are masked and cleared */
+ pl011_write(0, uap, REG_IMSC);
+@@ -2605,6 +2604,9 @@ static int pl011_register_port(struct uart_amba_port *uap)
+ if (ret < 0) {
+ dev_err(uap->port.dev,
+ "Failed to register AMBA-PL011 driver\n");
++ for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
++ if (amba_ports[i] == uap)
++ amba_ports[i] = NULL;
+ return ret;
+ }
+ }
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 83fd51607741b..71f99e9217592 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1791,9 +1791,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+ ourport->tx_irq = ret + 1;
+ }
+
+- ret = platform_get_irq(platdev, 1);
+- if (ret > 0)
+- ourport->tx_irq = ret;
++ if (!s3c24xx_serial_has_interrupt_mask(port)) {
++ ret = platform_get_irq(platdev, 1);
++ if (ret > 0)
++ ourport->tx_irq = ret;
++ }
+ /*
+ * DMA is currently supported only on DT platforms, if DMA properties
+ * are specified.
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 2f72514d63edd..bf83e6c212f59 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -937,7 +937,7 @@ static int stm32_init_port(struct stm32_port *stm32port,
+ stm32_init_rs485(port, pdev);
+
+ if (stm32port->info->cfg.has_wakeup) {
+- stm32port->wakeirq = platform_get_irq(pdev, 1);
++ stm32port->wakeirq = platform_get_irq_optional(pdev, 1);
+ if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO)
+ return stm32port->wakeirq ? : -ENODEV;
+ }
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 8051c70326277..d07a9c9c76081 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -1196,7 +1196,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+ unsigned int old_rows, old_row_size, first_copied_row;
+ unsigned int new_cols, new_rows, new_row_size, new_screen_size;
+ unsigned int user;
+- unsigned short *newscreen;
++ unsigned short *oldscreen, *newscreen;
+ struct uni_screen *new_uniscr = NULL;
+
+ WARN_CONSOLE_UNLOCKED();
+@@ -1294,10 +1294,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+ if (new_scr_end > new_origin)
+ scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
+ new_scr_end - new_origin);
+- kfree(vc->vc_screenbuf);
++ oldscreen = vc->vc_screenbuf;
+ vc->vc_screenbuf = newscreen;
+ vc->vc_screenbuf_size = new_screen_size;
+ set_origin(vc);
++ kfree(oldscreen);
+
+ /* do part of a reset_terminal() */
+ vc->vc_top = 0;
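
The vc_do_resize() change defers the kfree() of the old screen buffer until after set_origin() has repointed everything at the new one, closing a window in which console code could still dereference freed memory. The swap-then-free ordering in miniature:

    #include <stdlib.h>

    struct console {
        unsigned short *screenbuf;
        size_t screenbuf_size;
    };

    /* set_origin() stand-in: everything that still caches pointers
     * derived from the old buffer gets repointed here. */
    static void set_origin(struct console *c) { (void)c; }

    static int resize(struct console *c, size_t new_size)
    {
        unsigned short *oldscreen, *newscreen;

        newscreen = calloc(1, new_size);
        if (!newscreen)
            return -1;

        oldscreen = c->screenbuf;  /* stash instead of freeing now */
        c->screenbuf = newscreen;
        c->screenbuf_size = new_size;
        set_origin(c);             /* may still touch old state */
        free(oldscreen);           /* only now is the old buffer dead */
        return 0;
    }

    int main(void)
    {
        struct console c = { calloc(1, 64), 64 };

        return resize(&c, 128);
    }
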
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index daf61c28ba766..cbc85c995d92d 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -893,12 +893,22 @@ int vt_ioctl(struct tty_struct *tty,
+ console_lock();
+ vcp = vc_cons[i].d;
+ if (vcp) {
++ int ret;
++ int save_scan_lines = vcp->vc_scan_lines;
++ int save_font_height = vcp->vc_font.height;
++
+ if (v.v_vlin)
+ vcp->vc_scan_lines = v.v_vlin;
+ if (v.v_clin)
+ vcp->vc_font.height = v.v_clin;
+ vcp->vc_resize_user = 1;
+- vc_resize(vcp, v.v_cols, v.v_rows);
++ ret = vc_resize(vcp, v.v_cols, v.v_rows);
++ if (ret) {
++ vcp->vc_scan_lines = save_scan_lines;
++ vcp->vc_font.height = save_font_height;
++ console_unlock();
++ return ret;
++ }
+ }
+ console_unlock();
+ }
+diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
+index 856c34010021b..9900888afbcd8 100644
+--- a/drivers/usb/cdns3/gadget.c
++++ b/drivers/usb/cdns3/gadget.c
+@@ -189,10 +189,10 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
+ GFP_DMA32 | GFP_ATOMIC);
+ if (!priv_ep->trb_pool)
+ return -ENOMEM;
+- } else {
+- memset(priv_ep->trb_pool, 0, ring_size);
+ }
+
++ memset(priv_ep->trb_pool, 0, ring_size);
++
+ if (!priv_ep->num)
+ return 0;
+
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index d5187b50fc828..7499ba118665a 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -378,21 +378,19 @@ static void acm_ctrl_irq(struct urb *urb)
+ if (current_size < expected_size) {
+ /* notification is transmitted fragmented, reassemble */
+ if (acm->nb_size < expected_size) {
+- if (acm->nb_size) {
+- kfree(acm->notification_buffer);
+- acm->nb_size = 0;
+- }
++ u8 *new_buffer;
+ alloc_size = roundup_pow_of_two(expected_size);
+- /*
+- * kmalloc ensures a valid notification_buffer after a
+- * use of kfree in case the previous allocation was too
+- * small. Final freeing is done on disconnect.
+- */
+- acm->notification_buffer =
+- kmalloc(alloc_size, GFP_ATOMIC);
+- if (!acm->notification_buffer)
++ /* Final freeing is done on disconnect. */
++ new_buffer = krealloc(acm->notification_buffer,
++ alloc_size, GFP_ATOMIC);
++ if (!new_buffer) {
++ acm->nb_index = 0;
+ goto exit;
++ }
++
++ acm->notification_buffer = new_buffer;
+ acm->nb_size = alloc_size;
++ dr = (struct usb_cdc_notification *)acm->notification_buffer;
+ }
+
+ copy_size = min(current_size,
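
The cdc-acm rework swaps the kfree()+kmalloc() pair for krealloc(), which matters because the notification is reassembled across several URBs: growing in place preserves the fragment bytes already copied, and on allocation failure the old buffer and its contents survive. A userspace analogue with realloc():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t nb_size = 8, expected = 24;
        unsigned char *nb = malloc(nb_size);

        memset(nb, 0xAB, nb_size);      /* first fragment already stored */

        if (nb_size < expected) {
            unsigned char *bigger = realloc(nb, expected); /* krealloc() */
            if (!bigger) {
                free(nb);               /* old data was still valid here */
                return 1;
            }
            nb = bigger;
            nb_size = expected;
        }

        printf("first byte still 0x%x\n", nb[0]); /* 0xab: contents kept */
        free(nb);
        return 0;
    }
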
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index c96c50faccf72..2f068e525a374 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -370,6 +370,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x0926, 0x0202), .driver_info =
+ USB_QUIRK_ENDPOINT_BLACKLIST },
+
++ /* Sound Devices MixPre-D */
++ { USB_DEVICE(0x0926, 0x0208), .driver_info =
++ USB_QUIRK_ENDPOINT_BLACKLIST },
++
+ /* Keytouch QWERTY Panel keyboard */
+ { USB_DEVICE(0x0926, 0x3333), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+@@ -465,6 +469,8 @@ static const struct usb_device_id usb_quirk_list[] = {
+
+ { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
+
++ { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
++
+ /* DJI CineSSD */
+ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
+@@ -509,6 +515,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+ */
+ static const struct usb_device_id usb_endpoint_blacklist[] = {
+ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
++ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
+ { }
+ };
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 17340864a5408..4225544342519 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1017,26 +1017,24 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
+ * dwc3_prepare_one_trb - setup one TRB from one request
+ * @dep: endpoint for which this request is prepared
+ * @req: dwc3_request pointer
++ * @trb_length: buffer size of the TRB
+ * @chain: should this TRB be chained to the next?
+ * @node: only for isochronous endpoints. First TRB needs different type.
+ */
+ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+- struct dwc3_request *req, unsigned chain, unsigned node)
++ struct dwc3_request *req, unsigned int trb_length,
++ unsigned chain, unsigned node)
+ {
+ struct dwc3_trb *trb;
+- unsigned int length;
+ dma_addr_t dma;
+ unsigned stream_id = req->request.stream_id;
+ unsigned short_not_ok = req->request.short_not_ok;
+ unsigned no_interrupt = req->request.no_interrupt;
+
+- if (req->request.num_sgs > 0) {
+- length = sg_dma_len(req->start_sg);
++ if (req->request.num_sgs > 0)
+ dma = sg_dma_address(req->start_sg);
+- } else {
+- length = req->request.length;
++ else
+ dma = req->request.dma;
+- }
+
+ trb = &dep->trb_pool[dep->trb_enqueue];
+
+@@ -1048,7 +1046,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+
+ req->num_trbs++;
+
+- __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
++ __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
+ stream_id, short_not_ok, no_interrupt);
+ }
+
+@@ -1058,16 +1056,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ struct scatterlist *sg = req->start_sg;
+ struct scatterlist *s;
+ int i;
+-
++ unsigned int length = req->request.length;
+ unsigned int remaining = req->request.num_mapped_sgs
+ - req->num_queued_sgs;
+
++ /*
++ * If we resume preparing the request, then get the remaining length of
++ * the request and resume where we left off.
++ */
++ for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
++ length -= sg_dma_len(s);
++
+ for_each_sg(sg, s, remaining, i) {
+- unsigned int length = req->request.length;
+ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
+ unsigned int rem = length % maxp;
++ unsigned int trb_length;
+ unsigned chain = true;
+
++ trb_length = min_t(unsigned int, length, sg_dma_len(s));
++
++ length -= trb_length;
++
+ /*
+ * IOMMU driver is coalescing the list of sgs which shares a
+ * page boundary into one and giving it to USB driver. With
+@@ -1075,7 +1084,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ * sgs passed. So mark the chain bit to false if it is the last
+ * mapped sg.
+ */
+- if (i == remaining - 1)
++ if ((i == remaining - 1) || !length)
+ chain = false;
+
+ if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
+@@ -1085,7 +1094,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ req->needs_extra_trb = true;
+
+ /* prepare normal TRB */
+- dwc3_prepare_one_trb(dep, req, true, i);
++ dwc3_prepare_one_trb(dep, req, trb_length, true, i);
+
+ /* Now prepare one extra TRB to align transfer size */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+@@ -1095,8 +1104,37 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
++ } else if (req->request.zero && req->request.length &&
++ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
++ !rem && !chain) {
++ struct dwc3 *dwc = dep->dwc;
++ struct dwc3_trb *trb;
++
++ req->needs_extra_trb = true;
++
++ /* Prepare normal TRB */
++ dwc3_prepare_one_trb(dep, req, trb_length, true, i);
++
++ /* Prepare one extra TRB to handle ZLP */
++ trb = &dep->trb_pool[dep->trb_enqueue];
++ req->num_trbs++;
++ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
++ !req->direction, 1,
++ req->request.stream_id,
++ req->request.short_not_ok,
++ req->request.no_interrupt);
++
++ /* Prepare one more TRB to handle MPS alignment */
++ if (!req->direction) {
++ trb = &dep->trb_pool[dep->trb_enqueue];
++ req->num_trbs++;
++ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
++ false, 1, req->request.stream_id,
++ req->request.short_not_ok,
++ req->request.no_interrupt);
++ }
+ } else {
+- dwc3_prepare_one_trb(dep, req, chain, i);
++ dwc3_prepare_one_trb(dep, req, trb_length, chain, i);
+ }
+
+ /*
+@@ -1111,6 +1149,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+
+ req->num_queued_sgs++;
+
++ /*
++ * The number of pending SG entries may not correspond to the
++ * number of mapped SG entries. If all the data are queued, then
++ * don't include unused SG entries.
++ */
++ if (length == 0) {
++ req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
++ break;
++ }
++
+ if (!dwc3_calc_trbs_left(dep))
+ break;
+ }
+@@ -1130,7 +1178,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
+ req->needs_extra_trb = true;
+
+ /* prepare normal TRB */
+- dwc3_prepare_one_trb(dep, req, true, 0);
++ dwc3_prepare_one_trb(dep, req, length, true, 0);
+
+ /* Now prepare one extra TRB to align transfer size */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+@@ -1140,6 +1188,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
+ } else if (req->request.zero && req->request.length &&
++ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ (IS_ALIGNED(req->request.length, maxp))) {
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb *trb;
+@@ -1147,17 +1196,27 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
+ req->needs_extra_trb = true;
+
+ /* prepare normal TRB */
+- dwc3_prepare_one_trb(dep, req, true, 0);
++ dwc3_prepare_one_trb(dep, req, length, true, 0);
+
+- /* Now prepare one extra TRB to handle ZLP */
++ /* Prepare one extra TRB to handle ZLP */
+ trb = &dep->trb_pool[dep->trb_enqueue];
+ req->num_trbs++;
+ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
+- false, 1, req->request.stream_id,
++ !req->direction, 1, req->request.stream_id,
+ req->request.short_not_ok,
+ req->request.no_interrupt);
++
++ /* Prepare one more TRB to handle MPS alignment for OUT */
++ if (!req->direction) {
++ trb = &dep->trb_pool[dep->trb_enqueue];
++ req->num_trbs++;
++ __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
++ false, 1, req->request.stream_id,
++ req->request.short_not_ok,
++ req->request.no_interrupt);
++ }
+ } else {
+- dwc3_prepare_one_trb(dep, req, false, 0);
++ dwc3_prepare_one_trb(dep, req, length, false, 0);
+ }
+ }
+
+@@ -2559,8 +2618,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
+ status);
+
+ if (req->needs_extra_trb) {
++ unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
++
+ ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
+ status);
++
++ /* Reclaim MPS padding TRB for ZLP */
++ if (!req->direction && req->request.zero && req->request.length &&
++ !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
++ (IS_ALIGNED(req->request.length, maxp)))
++ ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
++
+ req->needs_extra_trb = false;
+ }
+
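
Several hunks above feed a computed trb_length into dwc3_prepare_one_trb() instead of the whole request length: on resume the lengths already queued are subtracted, each TRB is capped at min(remaining, sg entry length), and chaining stops once the request is exhausted so trailing mapped-but-unused sg entries are dropped. The bookkeeping on its own (the sg lengths are made-up numbers):

    #include <stdio.h>

    int main(void)
    {
        unsigned int sg_len[] = { 512, 512, 512 }; /* mapped sg entries */
        unsigned int queued = 1;                   /* entries done earlier */
        unsigned int length = 700;                 /* request length */
        unsigned int i;

        for (i = 0; i < queued; i++)               /* resume bookkeeping */
            length -= sg_len[i];

        for (i = queued; i < 3 && length; i++) {
            unsigned int trb_len = length < sg_len[i] ? length : sg_len[i];

            length -= trb_len;
            printf("TRB %u: %u bytes, chain=%d\n",
                   i, trb_len, length != 0 && i != 2);
        }
        return 0;   /* sg entry 2 is never queued: the request ran out */
    }
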
+diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
+index 1d900081b1f0c..b4206b0dede54 100644
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -1181,12 +1181,15 @@ static int ncm_unwrap_ntb(struct gether *port,
+ int ndp_index;
+ unsigned dg_len, dg_len2;
+ unsigned ndp_len;
++ unsigned block_len;
+ struct sk_buff *skb2;
+ int ret = -EINVAL;
+- unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
++ unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
++ unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize);
+ const struct ndp_parser_opts *opts = ncm->parser_opts;
+ unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
+ int dgram_counter;
++ bool ndp_after_header;
+
+ /* dwSignature */
+ if (get_unaligned_le32(tmp) != opts->nth_sign) {
+@@ -1205,25 +1208,37 @@ static int ncm_unwrap_ntb(struct gether *port,
+ }
+ tmp++; /* skip wSequence */
+
++ block_len = get_ncm(&tmp, opts->block_length);
+ /* (d)wBlockLength */
+- if (get_ncm(&tmp, opts->block_length) > max_size) {
++ if (block_len > ntb_max) {
+ INFO(port->func.config->cdev, "OUT size exceeded\n");
+ goto err;
+ }
+
+ ndp_index = get_ncm(&tmp, opts->ndp_index);
++ ndp_after_header = false;
+
+ /* Run through all the NDP's in the NTB */
+ do {
+- /* NCM 3.2 */
+- if (((ndp_index % 4) != 0) &&
+- (ndp_index < opts->nth_size)) {
++ /*
++ * NCM 3.2
++ * dwNdpIndex
++ */
++ if (((ndp_index % 4) != 0) ||
++ (ndp_index < opts->nth_size) ||
++ (ndp_index > (block_len -
++ opts->ndp_size))) {
+ INFO(port->func.config->cdev, "Bad index: %#X\n",
+ ndp_index);
+ goto err;
+ }
++ if (ndp_index == opts->nth_size)
++ ndp_after_header = true;
+
+- /* walk through NDP */
++ /*
++ * walk through NDP
++ * dwSignature
++ */
+ tmp = (void *)(skb->data + ndp_index);
+ if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
+ INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
+@@ -1234,14 +1249,15 @@ static int ncm_unwrap_ntb(struct gether *port,
+ ndp_len = get_unaligned_le16(tmp++);
+ /*
+ * NCM 3.3.1
++ * wLength
+ * entry is 2 items
+ * item size is 16/32 bits, opts->dgram_item_len * 2 bytes
+ * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
+ * Each entry is a dgram index and a dgram length.
+ */
+ if ((ndp_len < opts->ndp_size
+- + 2 * 2 * (opts->dgram_item_len * 2))
+- || (ndp_len % opts->ndplen_align != 0)) {
++ + 2 * 2 * (opts->dgram_item_len * 2)) ||
++ (ndp_len % opts->ndplen_align != 0)) {
+ INFO(port->func.config->cdev, "Bad NDP length: %#X\n",
+ ndp_len);
+ goto err;
+@@ -1258,8 +1274,21 @@ static int ncm_unwrap_ntb(struct gether *port,
+
+ do {
+ index = index2;
++ /* wDatagramIndex[0] */
++ if ((index < opts->nth_size) ||
++ (index > block_len - opts->dpe_size)) {
++ INFO(port->func.config->cdev,
++ "Bad index: %#X\n", index);
++ goto err;
++ }
++
+ dg_len = dg_len2;
+- if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */
++ /*
++ * wDatagramLength[0]
++ * ethernet hdr + crc or larger than max frame size
++ */
++ if ((dg_len < 14 + crc_len) ||
++ (dg_len > frame_max)) {
+ INFO(port->func.config->cdev,
+ "Bad dgram length: %#X\n", dg_len);
+ goto err;
+@@ -1283,6 +1312,37 @@ static int ncm_unwrap_ntb(struct gether *port,
+ index2 = get_ncm(&tmp, opts->dgram_item_len);
+ dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
+
++ if (index2 == 0 || dg_len2 == 0)
++ break;
++
++ /* wDatagramIndex[1] */
++ if (ndp_after_header) {
++ if (index2 < opts->nth_size + opts->ndp_size) {
++ INFO(port->func.config->cdev,
++ "Bad index: %#X\n", index2);
++ goto err;
++ }
++ } else {
++ if (index2 < opts->nth_size + opts->dpe_size) {
++ INFO(port->func.config->cdev,
++ "Bad index: %#X\n", index2);
++ goto err;
++ }
++ }
++ if (index2 > block_len - opts->dpe_size) {
++ INFO(port->func.config->cdev,
++ "Bad index: %#X\n", index2);
++ goto err;
++ }
++
++ /* wDatagramLength[1] */
++ if ((dg_len2 < 14 + crc_len) ||
++ (dg_len2 > frame_max)) {
++ INFO(port->func.config->cdev,
++ "Bad dgram length: %#X\n", dg_len);
++ goto err;
++ }
++
+ /*
+ * Copy the data into a new skb.
+ * This ensures the truesize is correct
+@@ -1299,9 +1359,6 @@ static int ncm_unwrap_ntb(struct gether *port,
+ ndp_len -= 2 * (opts->dgram_item_len * 2);
+
+ dgram_counter++;
+-
+- if (index2 == 0 || dg_len2 == 0)
+- break;
+ } while (ndp_len > 2 * (opts->dgram_item_len * 2));
+ } while (ndp_index);
+
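
The ncm_unwrap_ntb() hardening is all bounds arithmetic: every index read out of the NTB must be 4-byte aligned, must lie past the NTH header, and must be small enough that the structure it points at fits inside wBlockLength; datagram lengths are additionally capped by wMaxSegmentSize. One of those checks as a standalone predicate (the field widths are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool ndp_index_ok(unsigned ndp_index, unsigned nth_size,
                             unsigned ndp_size, unsigned block_len)
    {
        if (ndp_index % 4)
            return false;           /* NCM 3.2: must be 4-byte aligned */
        if (ndp_index < nth_size)
            return false;           /* would sit inside the NTH header */
        if (ndp_index > block_len - ndp_size)
            return false;           /* NDP would run past the block */
        return true;
    }

    int main(void)
    {
        printf("%d\n", ndp_index_ok(12, 12, 16, 64)); /* after NTH: ok */
        printf("%d\n", ndp_index_ok(60, 12, 16, 64)); /* overruns block */
        return 0;
    }
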
+diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
+index 7f01f78b1d238..f6d203fec4955 100644
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -751,12 +751,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
+ goto err_sts;
+
+ return 0;
++
+ err_sts:
+- usb_ep_free_request(fu->ep_status, stream->req_status);
+- stream->req_status = NULL;
+-err_out:
+ usb_ep_free_request(fu->ep_out, stream->req_out);
+ stream->req_out = NULL;
++err_out:
++ usb_ep_free_request(fu->ep_in, stream->req_in);
++ stream->req_in = NULL;
+ out:
+ return -ENOMEM;
+ }
+diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
+index eaa13fd3dc7f3..e313c3b8dcb19 100644
+--- a/drivers/usb/gadget/u_f.h
++++ b/drivers/usb/gadget/u_f.h
+@@ -14,6 +14,7 @@
+ #define __U_F_H__
+
+ #include <linux/usb/gadget.h>
++#include <linux/overflow.h>
+
+ /* Variable Length Array Macros **********************************************/
+ #define vla_group(groupname) size_t groupname##__next = 0
+@@ -21,21 +22,36 @@
+
+ #define vla_item(groupname, type, name, n) \
+ size_t groupname##_##name##__offset = ({ \
+- size_t align_mask = __alignof__(type) - 1; \
+- size_t offset = (groupname##__next + align_mask) & ~align_mask;\
+- size_t size = (n) * sizeof(type); \
+- groupname##__next = offset + size; \
++ size_t offset = 0; \
++ if (groupname##__next != SIZE_MAX) { \
++ size_t align_mask = __alignof__(type) - 1; \
++ size_t size = array_size(n, sizeof(type)); \
++ offset = (groupname##__next + align_mask) & \
++ ~align_mask; \
++ if (check_add_overflow(offset, size, \
++ &groupname##__next)) { \
++ groupname##__next = SIZE_MAX; \
++ offset = 0; \
++ } \
++ } \
+ offset; \
+ })
+
+ #define vla_item_with_sz(groupname, type, name, n) \
+- size_t groupname##_##name##__sz = (n) * sizeof(type); \
+- size_t groupname##_##name##__offset = ({ \
+- size_t align_mask = __alignof__(type) - 1; \
+- size_t offset = (groupname##__next + align_mask) & ~align_mask;\
+- size_t size = groupname##_##name##__sz; \
+- groupname##__next = offset + size; \
+- offset; \
++ size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \
++ size_t groupname##_##name##__offset = ({ \
++ size_t offset = 0; \
++ if (groupname##__next != SIZE_MAX) { \
++ size_t align_mask = __alignof__(type) - 1; \
++ offset = (groupname##__next + align_mask) & \
++ ~align_mask; \
++ if (check_add_overflow(offset, groupname##_##name##__sz,\
++ &groupname##__next)) { \
++ groupname##__next = SIZE_MAX; \
++ offset = 0; \
++ } \
++ } \
++ offset; \
+ })
+
+ #define vla_ptr(ptr, groupname, name) \
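
The u_f.h rewrite converts the VLA-group offset bookkeeping to saturating arithmetic: array_size() and check_add_overflow() poison the running total to SIZE_MAX on any overflow, every later item sees the poison and yields offset 0, and a single size check at allocation time then rejects the whole group. A userspace model using the compiler builtins those kernel helpers wrap:

    #include <stdint.h>
    #include <stdio.h>

    /* One vla_item() step: align the offset, accumulate the size,
     * saturate the running total on overflow. */
    static size_t vla_add(size_t *next, size_t align, size_t n, size_t elem)
    {
        size_t size, offset;

        if (*next == SIZE_MAX)
            return 0;                   /* group already poisoned */

        if (__builtin_mul_overflow(n, elem, &size)) {  /* array_size() */
            *next = SIZE_MAX;
            return 0;
        }

        offset = (*next + align - 1) & ~(align - 1);
        if (__builtin_add_overflow(offset, size, next)) { /* check_add_overflow() */
            *next = SIZE_MAX;
            return 0;
        }
        return offset;
    }

    int main(void)
    {
        size_t next = 0;
        size_t off = vla_add(&next, 8, SIZE_MAX / 2, 4); /* overflows */

        printf("offset=%zu poisoned=%d\n", off, next == SIZE_MAX);
        return 0;
    }
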
+diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
+index d5ce98e205c73..d8b6c9f5695c9 100644
+--- a/drivers/usb/host/ohci-exynos.c
++++ b/drivers/usb/host/ohci-exynos.c
+@@ -171,9 +171,8 @@ static int exynos_ohci_probe(struct platform_device *pdev)
+ hcd->rsrc_len = resource_size(res);
+
+ irq = platform_get_irq(pdev, 0);
+- if (!irq) {
+- dev_err(&pdev->dev, "Failed to get IRQ\n");
+- err = -ENODEV;
++ if (irq < 0) {
++ err = irq;
+ goto fail_io;
+ }
+
+diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
+index 76c3f29562d2b..448d7b11dec4c 100644
+--- a/drivers/usb/host/xhci-debugfs.c
++++ b/drivers/usb/host/xhci-debugfs.c
+@@ -273,7 +273,7 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
+
+ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
+ {
+- int dci;
++ int ep_index;
+ dma_addr_t dma;
+ struct xhci_hcd *xhci;
+ struct xhci_ep_ctx *ep_ctx;
+@@ -282,9 +282,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
+
+ xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
+
+- for (dci = 1; dci < 32; dci++) {
+- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
+- dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
++ for (ep_index = 0; ep_index < 31; ep_index++) {
++ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
++ dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params);
+ seq_printf(s, "%pad: %s\n", &dma,
+ xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
+ le32_to_cpu(ep_ctx->ep_info2),
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 3196de2931b12..933936abb6fb7 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -738,15 +738,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
+ {
+ u32 pls = status_reg & PORT_PLS_MASK;
+
+- /* resume state is a xHCI internal state.
+- * Do not report it to usb core, instead, pretend to be U3,
+- * thus usb core knows it's not ready for transfer
+- */
+- if (pls == XDEV_RESUME) {
+- *status |= USB_SS_PORT_LS_U3;
+- return;
+- }
+-
+ /* When the CAS bit is set then warm reset
+ * should be performed on port
+ */
+@@ -768,6 +759,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
+ */
+ pls |= USB_PORT_STAT_CONNECTION;
+ } else {
++ /*
++ * Resume state is an xHCI internal state. Do not report it to
++ * usb core, instead, pretend to be U3, thus usb core knows
++ * it's not ready for transfer.
++ */
++ if (pls == XDEV_RESUME) {
++ *status |= USB_SS_PORT_LS_U3;
++ return;
++ }
++
+ /*
+ * If CAS bit isn't set but the Port is already at
+ * Compliance Mode, fake a connection so the USB core
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 11a65854d3f09..bad154f446f8d 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3236,10 +3236,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
+
+ wait_for_completion(cfg_cmd->completion);
+
+- ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
+ xhci_free_command(xhci, cfg_cmd);
+ cleanup:
+ xhci_free_command(xhci, stop_cmd);
++ if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
++ ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
+ }
+
+ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
+diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
+index 407fe7570f3bc..f8686139d6f39 100644
+--- a/drivers/usb/misc/lvstest.c
++++ b/drivers/usb/misc/lvstest.c
+@@ -426,7 +426,7 @@ static int lvs_rh_probe(struct usb_interface *intf,
+ USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT);
+ if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) {
+ dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret);
+- return ret;
++ return ret < 0 ? ret : -EINVAL;
+ }
+
+ /* submit urb to poll interrupt endpoint */
+diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
+index fc8a5da4a07c9..0734e6dd93862 100644
+--- a/drivers/usb/misc/sisusbvga/sisusb.c
++++ b/drivers/usb/misc/sisusbvga/sisusb.c
+@@ -761,7 +761,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
+ u8 swap8, fromkern = kernbuffer ? 1 : 0;
+ u16 swap16;
+ u32 swap32, flag = (length >> 28) & 1;
+- char buf[4];
++ u8 buf[4];
+
+ /* if neither kernbuffer not userbuffer are given, assume
+ * data in obuf
+diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
+index be0505b8b5d4e..785080f790738 100644
+--- a/drivers/usb/misc/yurex.c
++++ b/drivers/usb/misc/yurex.c
+@@ -492,7 +492,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
+ prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
+ dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__,
+ dev->cntl_buffer[0]);
+- retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL);
++ retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC);
+ if (retval >= 0)
+ timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
+ finish_wait(&dev->waitq, &wait);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index f6c3681fa2e9e..88275842219ef 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2328,7 +2328,7 @@ UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+- US_FL_BROKEN_FUA ),
++ US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ),
+
+ /* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
+ UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 37157ed9a881a..dcdfcdfd2ad13 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -28,6 +28,13 @@
+ * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
+ */
+
++/* Reported-by: Till Dörges <doerges@pre-sense.de> */
++UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999,
++ "Sony",
++ "PSZ-HA*",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_REPORT_OPCODES),
++
+ /* Reported-by: Julian Groß <julian.g@posteo.de> */
+ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
+ "LaCie",
+@@ -80,6 +87,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA),
+
++/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
++UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
++ "PNY",
++ "Pro Elite SSD",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_ATA_1X),
++
+ /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
+ "VIA",
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 22070cfea1d06..31f00d72f1493 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2185,6 +2185,9 @@ static void updatescrollmode(struct fbcon_display *p,
+ }
+ }
+
++#define PITCH(w) (((w) + 7) >> 3)
++#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */
++
+ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ unsigned int height, unsigned int user)
+ {
+@@ -2194,6 +2197,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ struct fb_var_screeninfo var = info->var;
+ int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
+
++ if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) {
++ int size;
++ int pitch = PITCH(vc->vc_font.width);
++
++ /*
++ * If user font, ensure that a possible change to user font
++ * height or width will not allow a font data out-of-bounds access.
++ * NOTE: must use original charcount in calculation as font
++ * charcount can change and cannot be used to determine the
++ * font data allocated size.
++ */
++ if (pitch <= 0)
++ return -EINVAL;
++ size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data));
++ if (size > FNTSIZE(vc->vc_font.data))
++ return -EINVAL;
++ }
++
+ virt_w = FBCON_SWAP(ops->rotate, width, height);
+ virt_h = FBCON_SWAP(ops->rotate, height, width);
+ virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width,
+@@ -2645,7 +2666,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
+ int size;
+ int i, csum;
+ u8 *new_data, *data = font->data;
+- int pitch = (font->width+7) >> 3;
++ int pitch = PITCH(font->width);
+
+ /* Is there a reason why fbconsole couldn't handle any charcount >256?
+ * If not this check should be changed to charcount < 256 */
+@@ -2661,7 +2682,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
+ if (fbcon_invalid_charcount(info, charcount))
+ return -EINVAL;
+
+- size = h * pitch * charcount;
++ size = CALC_FONTSZ(h, pitch, charcount);
+
+ new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
+
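
The fbcon_resize() guard recomputes the user font's data footprint from its height, pitch, and the charcount recorded at set_font time, and rejects the resize if that would read past the buffer kmalloc'd back then; as the comment notes, the original charcount must be used because the live one can change. The check in isolation:

    #include <stdio.h>

    #define PITCH(w)             (((w) + 7) >> 3)
    #define CALC_FONTSZ(h, p, c) ((h) * (p) * (c))

    static int check_userfont(unsigned height, unsigned width,
                              unsigned charcount, int allocated)
    {
        int pitch = PITCH(width);
        int size;

        if (pitch <= 0)
            return -1;                  /* -EINVAL in the driver */
        size = CALC_FONTSZ(height, pitch, charcount);
        return size > allocated ? -1 : 0;
    }

    int main(void)
    {
        /* 8x16 font, 256 chars, allocation sized for exactly that: ok */
        printf("%d\n", check_userfont(16, 8, 256, 16 * 1 * 256));
        /* doubled height would read past the allocation: rejected */
        printf("%d\n", check_userfont(32, 8, 256, 16 * 1 * 256));
        return 0;
    }
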
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index e72738371ecbe..97abcd497c7e0 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -952,7 +952,6 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
+ int
+ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
+ {
+- int flags = info->flags;
+ int ret = 0;
+ u32 activate;
+ struct fb_var_screeninfo old_var;
+@@ -1047,9 +1046,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
+ event.data = &mode;
+ fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event);
+
+- if (flags & FBINFO_MISC_USEREVENT)
+- fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(fb_set_var);
+@@ -1100,9 +1096,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ return -EFAULT;
+ console_lock();
+ lock_fb_info(info);
+- info->flags |= FBINFO_MISC_USEREVENT;
+ ret = fb_set_var(info, &var);
+- info->flags &= ~FBINFO_MISC_USEREVENT;
++ if (!ret)
++ fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
+ unlock_fb_info(info);
+ console_unlock();
+ if (!ret && copy_to_user(argp, &var, sizeof(var)))
+diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
+index d54c88f88991d..65dae05fff8e6 100644
+--- a/drivers/video/fbdev/core/fbsysfs.c
++++ b/drivers/video/fbdev/core/fbsysfs.c
+@@ -91,9 +91,9 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
+
+ var->activate |= FB_ACTIVATE_FORCE;
+ console_lock();
+- fb_info->flags |= FBINFO_MISC_USEREVENT;
+ err = fb_set_var(fb_info, var);
+- fb_info->flags &= ~FBINFO_MISC_USEREVENT;
++ if (!err)
++ fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL);
+ console_unlock();
+ if (err)
+ return err;
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+index 376ee5bc3ddc9..34e8171856e95 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+@@ -520,8 +520,11 @@ int dispc_runtime_get(void)
+ DSSDBG("dispc_runtime_get\n");
+
+ r = pm_runtime_get_sync(&dispc.pdev->dev);
+- WARN_ON(r < 0);
+- return r < 0 ? r : 0;
++ if (WARN_ON(r < 0)) {
++ pm_runtime_put_sync(&dispc.pdev->dev);
++ return r;
++ }
++ return 0;
+ }
+ EXPORT_SYMBOL(dispc_runtime_get);
+
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+index d620376216e1d..6f9c25fec9946 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+@@ -1137,8 +1137,11 @@ static int dsi_runtime_get(struct platform_device *dsidev)
+ DSSDBG("dsi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&dsi->pdev->dev);
+- WARN_ON(r < 0);
+- return r < 0 ? r : 0;
++ if (WARN_ON(r < 0)) {
++ pm_runtime_put_sync(&dsi->pdev->dev);
++ return r;
++ }
++ return 0;
+ }
+
+ static void dsi_runtime_put(struct platform_device *dsidev)
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+index bfc5c4c5a26ad..a6b1c1598040d 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+@@ -768,8 +768,11 @@ int dss_runtime_get(void)
+ DSSDBG("dss_runtime_get\n");
+
+ r = pm_runtime_get_sync(&dss.pdev->dev);
+- WARN_ON(r < 0);
+- return r < 0 ? r : 0;
++ if (WARN_ON(r < 0)) {
++ pm_runtime_put_sync(&dss.pdev->dev);
++ return r;
++ }
++ return 0;
+ }
+
+ void dss_runtime_put(void)
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+index 7060ae56c062c..4804aab342981 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+@@ -39,9 +39,10 @@ static int hdmi_runtime_get(void)
+ DSSDBG("hdmi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&hdmi.pdev->dev);
+- WARN_ON(r < 0);
+- if (r < 0)
++ if (WARN_ON(r < 0)) {
++ pm_runtime_put_sync(&hdmi.pdev->dev);
+ return r;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+index ac49531e47327..a06b6f1355bdb 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+@@ -43,9 +43,10 @@ static int hdmi_runtime_get(void)
+ DSSDBG("hdmi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&hdmi.pdev->dev);
+- WARN_ON(r < 0);
+- if (r < 0)
++ if (WARN_ON(r < 0)) {
++ pm_runtime_put_sync(&hdmi.pdev->dev);
+ return r;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+index f81e2a46366dd..3717dac3dcc83 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+@@ -391,8 +391,11 @@ static int venc_runtime_get(void)
+ DSSDBG("venc_runtime_get\n");
+
+ r = pm_runtime_get_sync(&venc.pdev->dev);
+- WARN_ON(r < 0);
+- return r < 0 ? r : 0;
++ if (WARN_ON(r < 0)) {
++ pm_runtime_put_sync(&venc.pdev->dev);
++ return r;
++ }
++ return 0;
+ }
+
+ static void venc_runtime_put(void)
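
The five omapfb hunks above apply one pattern: pm_runtime_get_sync() increments the device usage counter even when it returns an error, so the failure path must call pm_runtime_put_sync() or the counter leaks and the device can never runtime-suspend again. The counter balance, modeled without the real PM API:

    #include <stdio.h>

    static int usage_count;

    static int fake_get_sync(int should_fail)
    {
        usage_count++;              /* incremented even on failure */
        return should_fail ? -19 /* -ENODEV */ : 0;
    }

    static void fake_put_sync(void) { usage_count--; }

    static int runtime_get(int should_fail)
    {
        int r = fake_get_sync(should_fail);

        if (r < 0) {
            fake_put_sync();        /* rebalance the counter */
            return r;
        }
        return 0;
    }

    int main(void)
    {
        runtime_get(1);
        printf("usage_count=%d (0 means balanced)\n", usage_count);
        return 0;
    }
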
+diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
+index 5ed2db39d8236..ce90483c50209 100644
+--- a/drivers/video/fbdev/ps3fb.c
++++ b/drivers/video/fbdev/ps3fb.c
+@@ -29,6 +29,7 @@
+ #include <linux/freezer.h>
+ #include <linux/uaccess.h>
+ #include <linux/fb.h>
++#include <linux/fbcon.h>
+ #include <linux/init.h>
+
+ #include <asm/cell-regs.h>
+@@ -824,12 +825,12 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
+ var = info->var;
+ fb_videomode_to_var(&var, vmode);
+ console_lock();
+- info->flags |= FBINFO_MISC_USEREVENT;
+ /* Force, in case only special bits changed */
+ var.activate |= FB_ACTIVATE_FORCE;
+ par->new_mode_id = val;
+ retval = fb_set_var(info, &var);
+- info->flags &= ~FBINFO_MISC_USEREVENT;
++ if (!retval)
++ fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
+ console_unlock();
+ }
+ break;
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 6c8843968a52d..55f2b834cf130 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -155,7 +155,7 @@ int get_evtchn_to_irq(unsigned evtchn)
+ /* Get info for IRQ */
+ struct irq_info *info_for_irq(unsigned irq)
+ {
+- return irq_get_handler_data(irq);
++ return irq_get_chip_data(irq);
+ }
+
+ /* Constructors for packed IRQ information. */
+@@ -376,7 +376,7 @@ static void xen_irq_init(unsigned irq)
+ info->type = IRQT_UNBOUND;
+ info->refcnt = -1;
+
+- irq_set_handler_data(irq, info);
++ irq_set_chip_data(irq, info);
+
+ list_add_tail(&info->list, &xen_irq_list_head);
+ }
+@@ -425,14 +425,14 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
+
+ static void xen_free_irq(unsigned irq)
+ {
+- struct irq_info *info = irq_get_handler_data(irq);
++ struct irq_info *info = irq_get_chip_data(irq);
+
+ if (WARN_ON(!info))
+ return;
+
+ list_del(&info->list);
+
+- irq_set_handler_data(irq, NULL);
++ irq_set_chip_data(irq, NULL);
+
+ WARN_ON(info->refcnt > 0);
+
+@@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
+ static void __unbind_from_irq(unsigned int irq)
+ {
+ int evtchn = evtchn_from_irq(irq);
+- struct irq_info *info = irq_get_handler_data(irq);
++ struct irq_info *info = irq_get_chip_data(irq);
+
+ if (info->refcnt > 0) {
+ info->refcnt--;
+@@ -1106,7 +1106,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+
+ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
+ {
+- struct irq_info *info = irq_get_handler_data(irq);
++ struct irq_info *info = irq_get_chip_data(irq);
+
+ if (WARN_ON(!info))
+ return;
+@@ -1140,7 +1140,7 @@ int evtchn_make_refcounted(unsigned int evtchn)
+ if (irq == -1)
+ return -ENOENT;
+
+- info = irq_get_handler_data(irq);
++ info = irq_get_chip_data(irq);
+
+ if (!info)
+ return -ENOENT;
+@@ -1168,7 +1168,7 @@ int evtchn_get(unsigned int evtchn)
+ if (irq == -1)
+ goto done;
+
+- info = irq_get_handler_data(irq);
++ info = irq_get_chip_data(irq);
+
+ if (!info)
+ goto done;
+diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
+index 831a2b25ba79f..196f9f64d075c 100644
+--- a/fs/binfmt_flat.c
++++ b/fs/binfmt_flat.c
+@@ -571,7 +571,7 @@ static int load_flat_file(struct linux_binprm *bprm,
+ goto err;
+ }
+
+- len = data_len + extra;
++ len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
+ len = PAGE_ALIGN(len);
+ realdatastart = vm_mmap(NULL, 0, len,
+ PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
+@@ -585,7 +585,9 @@ static int load_flat_file(struct linux_binprm *bprm,
+ vm_munmap(textpos, text_len);
+ goto err;
+ }
+- datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN);
++ datapos = ALIGN(realdatastart +
++ MAX_SHARED_LIBS * sizeof(unsigned long),
++ FLAT_DATA_ALIGN);
+
+ pr_debug("Allocated data+bss+stack (%u bytes): %lx\n",
+ data_len + bss_len + stack_len, datapos);
+@@ -615,7 +617,7 @@ static int load_flat_file(struct linux_binprm *bprm,
+ memp_size = len;
+ } else {
+
+- len = text_len + data_len + extra;
++ len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32);
+ len = PAGE_ALIGN(len);
+ textpos = vm_mmap(NULL, 0, len,
+ PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
+@@ -630,7 +632,9 @@ static int load_flat_file(struct linux_binprm *bprm,
+ }
+
+ realdatastart = textpos + ntohl(hdr->data_start);
+- datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN);
++ datapos = ALIGN(realdatastart +
++ MAX_SHARED_LIBS * sizeof(u32),
++ FLAT_DATA_ALIGN);
+
+ reloc = (__be32 __user *)
+ (datapos + (ntohl(hdr->reloc_start) - text_len));
+@@ -647,9 +651,8 @@ static int load_flat_file(struct linux_binprm *bprm,
+ (text_len + full_data
+ - sizeof(struct flat_hdr)),
+ 0);
+- if (datapos != realdatastart)
+- memmove((void *)datapos, (void *)realdatastart,
+- full_data);
++ memmove((void *) datapos, (void *) realdatastart,
++ full_data);
+ #else
+ /*
+ * This is used on MMU systems mainly for testing.
+@@ -705,7 +708,8 @@ static int load_flat_file(struct linux_binprm *bprm,
+ if (IS_ERR_VALUE(result)) {
+ ret = result;
+ pr_err("Unable to read code+data+bss, errno %d\n", ret);
+- vm_munmap(textpos, text_len + data_len + extra);
++ vm_munmap(textpos, text_len + data_len + extra +
++ MAX_SHARED_LIBS * sizeof(u32));
+ goto err;
+ }
+ }
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 18357b054a91e..9a690c10afaa0 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2415,7 +2415,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
+ u64 bytenr, u64 num_bytes);
+ int btrfs_exclude_logged_extents(struct extent_buffer *eb);
+ int btrfs_cross_ref_exist(struct btrfs_root *root,
+- u64 objectid, u64 offset, u64 bytenr);
++ u64 objectid, u64 offset, u64 bytenr, bool strict);
+ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 parent, u64 root_objectid,
+@@ -2821,7 +2821,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
+ u64 start, u64 len);
+ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+ u64 *orig_start, u64 *orig_block_len,
+- u64 *ram_bytes);
++ u64 *ram_bytes, bool strict);
+
+ void __btrfs_del_delalloc_inode(struct btrfs_root *root,
+ struct btrfs_inode *inode);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index ad1c8e3b8133a..dd6fb2ee80409 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4477,6 +4477,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
+ cache->io_ctl.inode = NULL;
+ iput(inode);
+ }
++ ASSERT(cache->io_ctl.pages == NULL);
+ btrfs_put_block_group(cache);
+ }
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index a36bd4507bacd..ef05cbacef73f 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2320,7 +2320,8 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+
+ static noinline int check_committed_ref(struct btrfs_root *root,
+ struct btrfs_path *path,
+- u64 objectid, u64 offset, u64 bytenr)
++ u64 objectid, u64 offset, u64 bytenr,
++ bool strict)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_root *extent_root = fs_info->extent_root;
+@@ -2362,9 +2363,13 @@ static noinline int check_committed_ref(struct btrfs_root *root,
+ btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
+ goto out;
+
+- /* If extent created before last snapshot => it's definitely shared */
+- if (btrfs_extent_generation(leaf, ei) <=
+- btrfs_root_last_snapshot(&root->root_item))
++ /*
++ * If extent created before last snapshot => it's shared unless the
++ * snapshot has been deleted. Use the heuristic if strict is false.
++ */
++ if (!strict &&
++ (btrfs_extent_generation(leaf, ei) <=
++ btrfs_root_last_snapshot(&root->root_item)))
+ goto out;
+
+ iref = (struct btrfs_extent_inline_ref *)(ei + 1);
+@@ -2389,7 +2394,7 @@ out:
+ }
+
+ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
+- u64 bytenr)
++ u64 bytenr, bool strict)
+ {
+ struct btrfs_path *path;
+ int ret;
+@@ -2400,7 +2405,7 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
+
+ do {
+ ret = check_committed_ref(root, path, objectid,
+- offset, bytenr);
++ offset, bytenr, strict);
+ if (ret && ret != -ENOENT)
+ goto out;
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 3cfbccacef7fd..4e4ddd5629e55 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1568,7 +1568,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
+
+ num_bytes = lockend - lockstart + 1;
+ ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
+- NULL, NULL, NULL);
++ NULL, NULL, NULL, false);
+ if (ret <= 0) {
+ ret = 0;
+ btrfs_end_write_no_snapshotting(root);
+@@ -3130,14 +3130,14 @@ reserve_space:
+ if (ret < 0)
+ goto out;
+ space_reserved = true;
+- ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
+- alloc_start, bytes_to_reserve);
+- if (ret)
+- goto out;
+ ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
+ &cached_state);
+ if (ret)
+ goto out;
++ ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
++ alloc_start, bytes_to_reserve);
++ if (ret)
++ goto out;
+ ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
+ alloc_end - alloc_start,
+ i_blocksize(inode),
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 8bfc0f348ad55..6e6be922b937d 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -1166,7 +1166,6 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root,
+ ret = update_cache_item(trans, root, inode, path, offset,
+ io_ctl->entries, io_ctl->bitmaps);
+ out:
+- io_ctl_free(io_ctl);
+ if (ret) {
+ invalidate_inode_pages2(inode->i_mapping);
+ BTRFS_I(inode)->generation = 0;
+@@ -1329,6 +1328,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+ * them out later
+ */
+ io_ctl_drop_pages(io_ctl);
++ io_ctl_free(io_ctl);
+
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+ i_size_read(inode) - 1, &cached_state);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index fa7f3a59813ea..9ac40991a6405 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1578,7 +1578,7 @@ next_slot:
+ goto out_check;
+ ret = btrfs_cross_ref_exist(root, ino,
+ found_key.offset -
+- extent_offset, disk_bytenr);
++ extent_offset, disk_bytenr, false);
+ if (ret) {
+ /*
+ * ret could be -EIO if the above fails to read
+@@ -7529,7 +7529,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
+ */
+ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+ u64 *orig_start, u64 *orig_block_len,
+- u64 *ram_bytes)
++ u64 *ram_bytes, bool strict)
+ {
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct btrfs_path *path;
+@@ -7607,8 +7607,9 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+ * Do the same check as in btrfs_cross_ref_exist but without the
+ * unnecessary search.
+ */
+- if (btrfs_file_extent_generation(leaf, fi) <=
+- btrfs_root_last_snapshot(&root->root_item))
++ if (!strict &&
++ (btrfs_file_extent_generation(leaf, fi) <=
++ btrfs_root_last_snapshot(&root->root_item)))
+ goto out;
+
+ backref_offset = btrfs_file_extent_offset(leaf, fi);
+@@ -7644,7 +7645,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+ */
+
+ ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
+- key.offset - backref_offset, disk_bytenr);
++ key.offset - backref_offset, disk_bytenr,
++ strict);
+ if (ret) {
+ ret = 0;
+ goto out;
+@@ -7865,7 +7867,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
+ block_start = em->block_start + (start - em->start);
+
+ if (can_nocow_extent(inode, start, &len, &orig_start,
+- &orig_block_len, &ram_bytes) == 1 &&
++ &orig_block_len, &ram_bytes, false) == 1 &&
+ btrfs_inc_nocow_writers(fs_info, block_start)) {
+ struct extent_map *em2;
+
+@@ -9568,7 +9570,7 @@ void btrfs_destroy_inode(struct inode *inode)
+ btrfs_put_ordered_extent(ordered);
+ }
+ }
+- btrfs_qgroup_check_reserved_leak(inode);
++ btrfs_qgroup_check_reserved_leak(BTRFS_I(inode));
+ inode_tree_del(inode);
+ btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
+ }
+@@ -11030,7 +11032,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ free_extent_map(em);
+ em = NULL;
+
+- ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL);
++ ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index b94f6f99e90d0..04fd02e6124dd 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3769,7 +3769,7 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
+ * Check qgroup reserved space leaking, normally at destroy inode
+ * time
+ */
+-void btrfs_qgroup_check_reserved_leak(struct inode *inode)
++void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
+ {
+ struct extent_changeset changeset;
+ struct ulist_node *unode;
+@@ -3777,19 +3777,19 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
+ int ret;
+
+ extent_changeset_init(&changeset);
+- ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
++ ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
+ EXTENT_QGROUP_RESERVED, &changeset);
+
+ WARN_ON(ret < 0);
+ if (WARN_ON(changeset.bytes_changed)) {
+ ULIST_ITER_INIT(&iter);
+ while ((unode = ulist_next(&changeset.range_changed, &iter))) {
+- btrfs_warn(BTRFS_I(inode)->root->fs_info,
+- "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
+- inode->i_ino, unode->val, unode->aux);
++ btrfs_warn(inode->root->fs_info,
++ "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
++ btrfs_ino(inode), unode->val, unode->aux);
+ }
+- btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
+- BTRFS_I(inode)->root->root_key.objectid,
++ btrfs_qgroup_free_refroot(inode->root->fs_info,
++ inode->root->root_key.objectid,
+ changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
+
+ }
+diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
+index 17e8ac992c502..b0420c4f5d0ef 100644
+--- a/fs/btrfs/qgroup.h
++++ b/fs/btrfs/qgroup.h
+@@ -399,7 +399,7 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);
+ */
+ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
+
+-void btrfs_qgroup_check_reserved_leak(struct inode *inode);
++void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);
+
+ /* btrfs_qgroup_swapped_blocks related functions */
+ void btrfs_qgroup_init_swapped_blocks(
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index a1498df419b4f..6a2ae208ff80a 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -544,6 +544,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ } else if (strncmp(args[0].from, "lzo", 3) == 0) {
+ compress_type = "lzo";
+ info->compress_type = BTRFS_COMPRESS_LZO;
++ info->compress_level = 0;
+ btrfs_set_opt(info->mount_opt, COMPRESS);
+ btrfs_clear_opt(info->mount_opt, NODATACOW);
+ btrfs_clear_opt(info->mount_opt, NODATASUM);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 3c090549ed07d..7042b84edc89d 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3473,11 +3473,13 @@ fail:
+ btrfs_free_path(path);
+ out_unlock:
+ mutex_unlock(&dir->log_mutex);
+- if (ret == -ENOSPC) {
++ if (err == -ENOSPC) {
+ btrfs_set_log_full_commit(trans);
+- ret = 0;
+- } else if (ret < 0)
+- btrfs_abort_transaction(trans, ret);
++ err = 0;
++ } else if (err < 0 && err != -ENOENT) {
++ /* ENOENT can be returned if the entry hasn't been fsynced yet */
++ btrfs_abort_transaction(trans, err);
++ }
+
+ btrfs_end_log_trans(root);
+
+@@ -4994,6 +4996,138 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+
++static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
++ struct btrfs_inode *inode,
++ struct btrfs_key *min_key,
++ const struct btrfs_key *max_key,
++ struct btrfs_path *path,
++ struct btrfs_path *dst_path,
++ const u64 logged_isize,
++ const bool recursive_logging,
++ const int inode_only,
++ struct btrfs_log_ctx *ctx,
++ bool *need_log_inode_item)
++{
++ struct btrfs_root *root = inode->root;
++ int ins_start_slot = 0;
++ int ins_nr = 0;
++ int ret;
++
++ while (1) {
++ ret = btrfs_search_forward(root, min_key, path, trans->transid);
++ if (ret < 0)
++ return ret;
++ if (ret > 0) {
++ ret = 0;
++ break;
++ }
++again:
++ /* Note, ins_nr might be > 0 here, cleanup outside the loop */
++ if (min_key->objectid != max_key->objectid)
++ break;
++ if (min_key->type > max_key->type)
++ break;
++
++ if (min_key->type == BTRFS_INODE_ITEM_KEY)
++ *need_log_inode_item = false;
++
++ if ((min_key->type == BTRFS_INODE_REF_KEY ||
++ min_key->type == BTRFS_INODE_EXTREF_KEY) &&
++ inode->generation == trans->transid &&
++ !recursive_logging) {
++ u64 other_ino = 0;
++ u64 other_parent = 0;
++
++ ret = btrfs_check_ref_name_override(path->nodes[0],
++ path->slots[0], min_key, inode,
++ &other_ino, &other_parent);
++ if (ret < 0) {
++ return ret;
++ } else if (ret > 0 && ctx &&
++ other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
++ if (ins_nr > 0) {
++ ins_nr++;
++ } else {
++ ins_nr = 1;
++ ins_start_slot = path->slots[0];
++ }
++ ret = copy_items(trans, inode, dst_path, path,
++ ins_start_slot, ins_nr,
++ inode_only, logged_isize);
++ if (ret < 0)
++ return ret;
++ ins_nr = 0;
++
++ ret = log_conflicting_inodes(trans, root, path,
++ ctx, other_ino, other_parent);
++ if (ret)
++ return ret;
++ btrfs_release_path(path);
++ goto next_key;
++ }
++ }
++
++ /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
++ if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
++ if (ins_nr == 0)
++ goto next_slot;
++ ret = copy_items(trans, inode, dst_path, path,
++ ins_start_slot,
++ ins_nr, inode_only, logged_isize);
++ if (ret < 0)
++ return ret;
++ ins_nr = 0;
++ goto next_slot;
++ }
++
++ if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
++ ins_nr++;
++ goto next_slot;
++ } else if (!ins_nr) {
++ ins_start_slot = path->slots[0];
++ ins_nr = 1;
++ goto next_slot;
++ }
++
++ ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
++ ins_nr, inode_only, logged_isize);
++ if (ret < 0)
++ return ret;
++ ins_nr = 1;
++ ins_start_slot = path->slots[0];
++next_slot:
++ path->slots[0]++;
++ if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
++ btrfs_item_key_to_cpu(path->nodes[0], min_key,
++ path->slots[0]);
++ goto again;
++ }
++ if (ins_nr) {
++ ret = copy_items(trans, inode, dst_path, path,
++ ins_start_slot, ins_nr, inode_only,
++ logged_isize);
++ if (ret < 0)
++ return ret;
++ ins_nr = 0;
++ }
++ btrfs_release_path(path);
++next_key:
++ if (min_key->offset < (u64)-1) {
++ min_key->offset++;
++ } else if (min_key->type < max_key->type) {
++ min_key->type++;
++ min_key->offset = 0;
++ } else {
++ break;
++ }
++ }
++ if (ins_nr)
++ ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
++ ins_nr, inode_only, logged_isize);
++
++ return ret;
++}
++
+ /* log a single inode in the tree log.
+ * At least one parent directory for this inode must exist in the tree
+ * or be logged already.
+@@ -5015,17 +5149,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ const loff_t end,
+ struct btrfs_log_ctx *ctx)
+ {
+- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_path *path;
+ struct btrfs_path *dst_path;
+ struct btrfs_key min_key;
+ struct btrfs_key max_key;
+ struct btrfs_root *log = root->log_root;
+ int err = 0;
+- int ret;
+- int nritems;
+- int ins_start_slot = 0;
+- int ins_nr;
++ int ret = 0;
+ bool fast_search = false;
+ u64 ino = btrfs_ino(inode);
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+@@ -5061,15 +5191,19 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ max_key.offset = (u64)-1;
+
+ /*
+- * Only run delayed items if we are a dir or a new file.
+- * Otherwise commit the delayed inode only, which is needed in
+- * order for the log replay code to mark inodes for link count
+- * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
++ * Only run delayed items if we are a directory. We want to make sure
++ * all directory indexes hit the fs/subvolume tree so we can find them
++ * and figure out which index ranges have to be logged.
++ *
++ * Otherwise commit the delayed inode only if the full sync flag is set,
++ * as we want to make sure an up-to-date version is in the subvolume
++ * tree so copy_inode_items_to_log() / copy_items() can find it and copy
++ * it to the log tree. For a non full sync, we always log the inode item
++ * based on the in-memory struct btrfs_inode which is always up to date.
+ */
+- if (S_ISDIR(inode->vfs_inode.i_mode) ||
+- inode->generation > fs_info->last_trans_committed)
++ if (S_ISDIR(inode->vfs_inode.i_mode))
+ ret = btrfs_commit_inode_delayed_items(trans, inode);
+- else
++ else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
+ ret = btrfs_commit_inode_delayed_inode(inode);
+
+ if (ret) {
+@@ -5156,139 +5290,12 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ goto out_unlock;
+ }
+
+- while (1) {
+- ins_nr = 0;
+- ret = btrfs_search_forward(root, &min_key,
+- path, trans->transid);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- if (ret != 0)
+- break;
+-again:
+- /* note, ins_nr might be > 0 here, cleanup outside the loop */
+- if (min_key.objectid != ino)
+- break;
+- if (min_key.type > max_key.type)
+- break;
+-
+- if (min_key.type == BTRFS_INODE_ITEM_KEY)
+- need_log_inode_item = false;
+-
+- if ((min_key.type == BTRFS_INODE_REF_KEY ||
+- min_key.type == BTRFS_INODE_EXTREF_KEY) &&
+- inode->generation == trans->transid &&
+- !recursive_logging) {
+- u64 other_ino = 0;
+- u64 other_parent = 0;
+-
+- ret = btrfs_check_ref_name_override(path->nodes[0],
+- path->slots[0], &min_key, inode,
+- &other_ino, &other_parent);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- } else if (ret > 0 && ctx &&
+- other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
+- if (ins_nr > 0) {
+- ins_nr++;
+- } else {
+- ins_nr = 1;
+- ins_start_slot = path->slots[0];
+- }
+- ret = copy_items(trans, inode, dst_path, path,
+- ins_start_slot,
+- ins_nr, inode_only,
+- logged_isize);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- ins_nr = 0;
+-
+- err = log_conflicting_inodes(trans, root, path,
+- ctx, other_ino, other_parent);
+- if (err)
+- goto out_unlock;
+- btrfs_release_path(path);
+- goto next_key;
+- }
+- }
+-
+- /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
+- if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
+- if (ins_nr == 0)
+- goto next_slot;
+- ret = copy_items(trans, inode, dst_path, path,
+- ins_start_slot,
+- ins_nr, inode_only, logged_isize);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- ins_nr = 0;
+- goto next_slot;
+- }
+-
+- if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
+- ins_nr++;
+- goto next_slot;
+- } else if (!ins_nr) {
+- ins_start_slot = path->slots[0];
+- ins_nr = 1;
+- goto next_slot;
+- }
+-
+- ret = copy_items(trans, inode, dst_path, path,
+- ins_start_slot, ins_nr, inode_only,
+- logged_isize);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- ins_nr = 1;
+- ins_start_slot = path->slots[0];
+-next_slot:
+-
+- nritems = btrfs_header_nritems(path->nodes[0]);
+- path->slots[0]++;
+- if (path->slots[0] < nritems) {
+- btrfs_item_key_to_cpu(path->nodes[0], &min_key,
+- path->slots[0]);
+- goto again;
+- }
+- if (ins_nr) {
+- ret = copy_items(trans, inode, dst_path, path,
+- ins_start_slot,
+- ins_nr, inode_only, logged_isize);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- ins_nr = 0;
+- }
+- btrfs_release_path(path);
+-next_key:
+- if (min_key.offset < (u64)-1) {
+- min_key.offset++;
+- } else if (min_key.type < max_key.type) {
+- min_key.type++;
+- min_key.offset = 0;
+- } else {
+- break;
+- }
+- }
+- if (ins_nr) {
+- ret = copy_items(trans, inode, dst_path, path,
+- ins_start_slot, ins_nr, inode_only,
+- logged_isize);
+- if (ret < 0) {
+- err = ret;
+- goto out_unlock;
+- }
+- ins_nr = 0;
+- }
++ err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
++ path, dst_path, logged_isize,
++ recursive_logging, inode_only, ctx,
++ &need_log_inode_item);
++ if (err)
++ goto out_unlock;
+
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 79c9562434a8d..22d8ac4a8c40a 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -3170,6 +3170,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
+ WARN_ON(atomic_read(&bh->b_count) < 1);
+ lock_buffer(bh);
+ if (test_clear_buffer_dirty(bh)) {
++ /*
++ * The bh should be mapped, but it might not be if the
++ * device was hot-removed. Not much we can do but fail the I/O.
++ */
++ if (!buffer_mapped(bh)) {
++ unlock_buffer(bh);
++ return -EIO;
++ }
++
+ get_bh(bh);
+ bh->b_end_io = end_buffer_write_sync;
+ ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index ce54a1b12819b..4a6b14a2bd7f9 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -1260,6 +1260,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ struct inode *inode = file_inode(filp);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct page *pinned_page = NULL;
++ bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
+ ssize_t ret;
+ int want, got = 0;
+ int retry_op = 0, read = 0;
+@@ -1268,7 +1269,7 @@ again:
+ dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
+ inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+
+- if (iocb->ki_flags & IOCB_DIRECT)
++ if (direct_lock)
+ ceph_start_io_direct(inode);
+ else
+ ceph_start_io_read(inode);
+@@ -1325,7 +1326,7 @@ again:
+ }
+ ceph_put_cap_refs(ci, got);
+
+- if (iocb->ki_flags & IOCB_DIRECT)
++ if (direct_lock)
+ ceph_end_io_direct(inode);
+ else
+ ceph_end_io_read(inode);
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index b0077f5a31688..0f21073a51a1b 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -4068,6 +4068,9 @@ static void delayed_work(struct work_struct *work)
+
+ dout("mdsc delayed_work\n");
+
++ if (mdsc->stopping)
++ return;
++
+ mutex_lock(&mdsc->mutex);
+ renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
+ renew_caps = time_after_eq(jiffies, HZ*renew_interval +
+@@ -4433,7 +4436,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
+ static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
+ {
+ dout("stop\n");
+- cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
++ /*
++ * Make sure the delayed work has stopped before releasing
++ * the resources.
++ *
++ * cancel_delayed_work_sync() only guarantees that the
++ * work finishes executing, but the delayed work can
++ * re-arm itself again after that.
++ */
++ flush_delayed_work(&mdsc->delayed_work);
++
+ if (mdsc->mdsmap)
+ ceph_mdsmap_destroy(mdsc->mdsmap);
+ kfree(mdsc->sessions);
+diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
+index ceb54ccc937e9..97c56d061e615 100644
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -250,14 +250,6 @@ int ext4_setup_system_zone(struct super_block *sb)
+ int flex_size = ext4_flex_bg_size(sbi);
+ int ret;
+
+- if (!test_opt(sb, BLOCK_VALIDITY)) {
+- if (sbi->system_blks)
+- ext4_release_system_zone(sb);
+- return 0;
+- }
+- if (sbi->system_blks)
+- return 0;
+-
+ system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
+ if (!system_blks)
+ return -ENOMEM;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index f7c20bb20da37..4aae7e3e89a12 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -66,10 +66,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
+ unsigned long journal_devnum);
+ static int ext4_show_options(struct seq_file *seq, struct dentry *root);
+ static int ext4_commit_super(struct super_block *sb, int sync);
+-static void ext4_mark_recovery_complete(struct super_block *sb,
++static int ext4_mark_recovery_complete(struct super_block *sb,
+ struct ext4_super_block *es);
+-static void ext4_clear_journal_err(struct super_block *sb,
+- struct ext4_super_block *es);
++static int ext4_clear_journal_err(struct super_block *sb,
++ struct ext4_super_block *es);
+ static int ext4_sync_fs(struct super_block *sb, int wait);
+ static int ext4_remount(struct super_block *sb, int *flags, char *data);
+ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
+@@ -4563,11 +4563,13 @@ no_journal:
+
+ ext4_set_resv_clusters(sb);
+
+- err = ext4_setup_system_zone(sb);
+- if (err) {
+- ext4_msg(sb, KERN_ERR, "failed to initialize system "
+- "zone (%d)", err);
+- goto failed_mount4a;
++ if (test_opt(sb, BLOCK_VALIDITY)) {
++ err = ext4_setup_system_zone(sb);
++ if (err) {
++ ext4_msg(sb, KERN_ERR, "failed to initialize system "
++ "zone (%d)", err);
++ goto failed_mount4a;
++ }
+ }
+
+ ext4_ext_init(sb);
+@@ -4635,7 +4637,9 @@ no_journal:
+ EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
+ if (needs_recovery) {
+ ext4_msg(sb, KERN_INFO, "recovery complete");
+- ext4_mark_recovery_complete(sb, es);
++ err = ext4_mark_recovery_complete(sb, es);
++ if (err)
++ goto failed_mount8;
+ }
+ if (EXT4_SB(sb)->s_journal) {
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+@@ -4678,10 +4682,8 @@ cantfind_ext4:
+ ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
+ goto failed_mount;
+
+-#ifdef CONFIG_QUOTA
+ failed_mount8:
+ ext4_unregister_sysfs(sb);
+-#endif
+ failed_mount7:
+ ext4_unregister_li_request(sb);
+ failed_mount6:
+@@ -4820,7 +4822,8 @@ static journal_t *ext4_get_journal(struct super_block *sb,
+ struct inode *journal_inode;
+ journal_t *journal;
+
+- BUG_ON(!ext4_has_feature_journal(sb));
++ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
++ return NULL;
+
+ journal_inode = ext4_get_journal_inode(sb, journal_inum);
+ if (!journal_inode)
+@@ -4850,7 +4853,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
+ struct ext4_super_block *es;
+ struct block_device *bdev;
+
+- BUG_ON(!ext4_has_feature_journal(sb));
++ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
++ return NULL;
+
+ bdev = ext4_blkdev_get(j_dev, sb);
+ if (bdev == NULL)
+@@ -4941,8 +4945,10 @@ static int ext4_load_journal(struct super_block *sb,
+ dev_t journal_dev;
+ int err = 0;
+ int really_read_only;
++ int journal_dev_ro;
+
+- BUG_ON(!ext4_has_feature_journal(sb));
++ if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
++ return -EFSCORRUPTED;
+
+ if (journal_devnum &&
+ journal_devnum != le32_to_cpu(es->s_journal_dev)) {
+@@ -4952,7 +4958,31 @@ static int ext4_load_journal(struct super_block *sb,
+ } else
+ journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
+
+- really_read_only = bdev_read_only(sb->s_bdev);
++ if (journal_inum && journal_dev) {
++ ext4_msg(sb, KERN_ERR,
++ "filesystem has both journal inode and journal device!");
++ return -EINVAL;
++ }
++
++ if (journal_inum) {
++ journal = ext4_get_journal(sb, journal_inum);
++ if (!journal)
++ return -EINVAL;
++ } else {
++ journal = ext4_get_dev_journal(sb, journal_dev);
++ if (!journal)
++ return -EINVAL;
++ }
++
++ journal_dev_ro = bdev_read_only(journal->j_dev);
++ really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
++
++ if (journal_dev_ro && !sb_rdonly(sb)) {
++ ext4_msg(sb, KERN_ERR,
++ "journal device read-only, try mounting with '-o ro'");
++ err = -EROFS;
++ goto err_out;
++ }
+
+ /*
+ * Are we loading a blank journal or performing recovery after a
+@@ -4967,27 +4997,14 @@ static int ext4_load_journal(struct super_block *sb,
+ ext4_msg(sb, KERN_ERR, "write access "
+ "unavailable, cannot proceed "
+ "(try mounting with noload)");
+- return -EROFS;
++ err = -EROFS;
++ goto err_out;
+ }
+ ext4_msg(sb, KERN_INFO, "write access will "
+ "be enabled during recovery");
+ }
+ }
+
+- if (journal_inum && journal_dev) {
+- ext4_msg(sb, KERN_ERR, "filesystem has both journal "
+- "and inode journals!");
+- return -EINVAL;
+- }
+-
+- if (journal_inum) {
+- if (!(journal = ext4_get_journal(sb, journal_inum)))
+- return -EINVAL;
+- } else {
+- if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
+- return -EINVAL;
+- }
+-
+ if (!(journal->j_flags & JBD2_BARRIER))
+ ext4_msg(sb, KERN_INFO, "barriers disabled");
+
+@@ -5007,12 +5024,16 @@ static int ext4_load_journal(struct super_block *sb,
+
+ if (err) {
+ ext4_msg(sb, KERN_ERR, "error loading journal");
+- jbd2_journal_destroy(journal);
+- return err;
++ goto err_out;
+ }
+
+ EXT4_SB(sb)->s_journal = journal;
+- ext4_clear_journal_err(sb, es);
++ err = ext4_clear_journal_err(sb, es);
++ if (err) {
++ EXT4_SB(sb)->s_journal = NULL;
++ jbd2_journal_destroy(journal);
++ return err;
++ }
+
+ if (!really_read_only && journal_devnum &&
+ journal_devnum != le32_to_cpu(es->s_journal_dev)) {
+@@ -5023,6 +5044,10 @@ static int ext4_load_journal(struct super_block *sb,
+ }
+
+ return 0;
++
++err_out:
++ jbd2_journal_destroy(journal);
++ return err;
+ }
+
+ static int ext4_commit_super(struct super_block *sb, int sync)
+@@ -5034,13 +5059,6 @@ static int ext4_commit_super(struct super_block *sb, int sync)
+ if (!sbh || block_device_ejected(sb))
+ return error;
+
+- /*
+- * The superblock bh should be mapped, but it might not be if the
+- * device was hot-removed. Not much we can do but fail the I/O.
+- */
+- if (!buffer_mapped(sbh))
+- return error;
+-
+ /*
+ * If the file system is mounted read-only, don't update the
+ * superblock write time. This avoids updating the superblock
+@@ -5108,26 +5126,32 @@ static int ext4_commit_super(struct super_block *sb, int sync)
+ * remounting) the filesystem readonly, then we will end up with a
+ * consistent fs on disk. Record that fact.
+ */
+-static void ext4_mark_recovery_complete(struct super_block *sb,
+- struct ext4_super_block *es)
++static int ext4_mark_recovery_complete(struct super_block *sb,
++ struct ext4_super_block *es)
+ {
++ int err;
+ journal_t *journal = EXT4_SB(sb)->s_journal;
+
+ if (!ext4_has_feature_journal(sb)) {
+- BUG_ON(journal != NULL);
+- return;
++ if (journal != NULL) {
++ ext4_error(sb, "Journal got removed while the fs was "
++ "mounted!");
++ return -EFSCORRUPTED;
++ }
++ return 0;
+ }
+ jbd2_journal_lock_updates(journal);
+- if (jbd2_journal_flush(journal) < 0)
++ err = jbd2_journal_flush(journal);
++ if (err < 0)
+ goto out;
+
+ if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
+ ext4_clear_feature_journal_needs_recovery(sb);
+ ext4_commit_super(sb, 1);
+ }
+-
+ out:
+ jbd2_journal_unlock_updates(journal);
++ return err;
+ }
+
+ /*
+@@ -5135,14 +5159,17 @@ out:
+ * has recorded an error from a previous lifetime, move that error to the
+ * main filesystem now.
+ */
+-static void ext4_clear_journal_err(struct super_block *sb,
++static int ext4_clear_journal_err(struct super_block *sb,
+ struct ext4_super_block *es)
+ {
+ journal_t *journal;
+ int j_errno;
+ const char *errstr;
+
+- BUG_ON(!ext4_has_feature_journal(sb));
++ if (!ext4_has_feature_journal(sb)) {
++ ext4_error(sb, "Journal got removed while the fs was mounted!");
++ return -EFSCORRUPTED;
++ }
+
+ journal = EXT4_SB(sb)->s_journal;
+
+@@ -5167,6 +5194,7 @@ static void ext4_clear_journal_err(struct super_block *sb,
+ jbd2_journal_clear_err(journal);
+ jbd2_journal_update_sb_errno(journal);
+ }
++ return 0;
+ }
+
+ /*
+@@ -5309,7 +5337,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ {
+ struct ext4_super_block *es;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+- unsigned long old_sb_flags;
++ unsigned long old_sb_flags, vfs_flags;
+ struct ext4_mount_options old_opts;
+ int enable_quota = 0;
+ ext4_group_t g;
+@@ -5352,6 +5380,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ if (sbi->s_journal && sbi->s_journal->j_task->io_context)
+ journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
+
++ /*
++ * Some options can be enabled by ext4 and/or by a VFS mount flag;
++ * either way we need to make sure it matches in both *flags and
++ * s_flags. Copy those selected flags from *flags to s_flags.
++ */
++ vfs_flags = SB_LAZYTIME | SB_I_VERSION;
++ sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);
++
+ if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
+ err = -EINVAL;
+ goto restore_opts;
+@@ -5405,9 +5441,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
+ }
+
+- if (*flags & SB_LAZYTIME)
+- sb->s_flags |= SB_LAZYTIME;
+-
+ if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
+ if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
+ err = -EROFS;
+@@ -5437,8 +5470,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ (sbi->s_mount_state & EXT4_VALID_FS))
+ es->s_state = cpu_to_le16(sbi->s_mount_state);
+
+- if (sbi->s_journal)
++ if (sbi->s_journal) {
++ /*
++ * We let remount-ro finish even if marking fs
++ * as clean failed...
++ */
+ ext4_mark_recovery_complete(sb, es);
++ }
+ if (sbi->s_mmp_tsk)
+ kthread_stop(sbi->s_mmp_tsk);
+ } else {
+@@ -5486,8 +5524,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ * been changed by e2fsck since we originally mounted
+ * the partition.)
+ */
+- if (sbi->s_journal)
+- ext4_clear_journal_err(sb, es);
++ if (sbi->s_journal) {
++ err = ext4_clear_journal_err(sb, es);
++ if (err)
++ goto restore_opts;
++ }
+ sbi->s_mount_state = le16_to_cpu(es->s_state);
+
+ err = ext4_setup_super(sb, es, 0);
+@@ -5517,7 +5558,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ ext4_register_li_request(sb, first_not_zeroed);
+ }
+
+- ext4_setup_system_zone(sb);
++ /*
++ * Handle creation of system zone data early because it can fail.
++ * Releasing existing data is done when we are sure the remount will
++ * succeed.
++ */
++ if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) {
++ err = ext4_setup_system_zone(sb);
++ if (err)
++ goto restore_opts;
++ }
++
+ if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
+ err = ext4_commit_super(sb, 1);
+ if (err)
+@@ -5538,8 +5589,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ }
+ }
+ #endif
++ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
++ ext4_release_system_zone(sb);
++
++ /*
++ * Some options can be enabled by ext4 and/or by a VFS mount flag;
++ * either way we need to make sure it matches in both *flags and
++ * s_flags. Copy those selected flags from s_flags to *flags.
++ */
++ *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
+
+- *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
+ ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
+ kfree(orig_data);
+ return 0;
+@@ -5553,6 +5612,8 @@ restore_opts:
+ sbi->s_commit_interval = old_opts.s_commit_interval;
+ sbi->s_min_batch_time = old_opts.s_min_batch_time;
+ sbi->s_max_batch_time = old_opts.s_max_batch_time;
++ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
++ ext4_release_system_zone(sb);
+ #ifdef CONFIG_QUOTA
+ sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
+ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 03693d6b1c104..b3b7e63394be7 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3061,7 +3061,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
+ void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
+ void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
+ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
+-void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
++int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
+ int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
+ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
+ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+@@ -3487,7 +3487,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page);
+ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
+ int f2fs_convert_inline_inode(struct inode *inode);
+ int f2fs_write_inline_data(struct inode *inode, struct page *page);
+-bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
++int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
+ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
+ struct fscrypt_name *fname, struct page **res_page);
+ int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 896db0416f0e6..183388393c6a8 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -252,7 +252,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
+ return 0;
+ }
+
+-bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
++int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
+ {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode *ri = NULL;
+@@ -274,7 +274,8 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
+ ri && (ri->i_inline & F2FS_INLINE_DATA)) {
+ process_inline:
+ ipage = f2fs_get_node_page(sbi, inode->i_ino);
+- f2fs_bug_on(sbi, IS_ERR(ipage));
++ if (IS_ERR(ipage))
++ return PTR_ERR(ipage);
+
+ f2fs_wait_on_page_writeback(ipage, NODE, true, true);
+
+@@ -287,21 +288,25 @@ process_inline:
+
+ set_page_dirty(ipage);
+ f2fs_put_page(ipage, 1);
+- return true;
++ return 1;
+ }
+
+ if (f2fs_has_inline_data(inode)) {
+ ipage = f2fs_get_node_page(sbi, inode->i_ino);
+- f2fs_bug_on(sbi, IS_ERR(ipage));
++ if (IS_ERR(ipage))
++ return PTR_ERR(ipage);
+ f2fs_truncate_inline_inode(inode, ipage, 0);
+ clear_inode_flag(inode, FI_INLINE_DATA);
+ f2fs_put_page(ipage, 1);
+ } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
+- if (f2fs_truncate_blocks(inode, 0, false))
+- return false;
++ int ret;
++
++ ret = f2fs_truncate_blocks(inode, 0, false);
++ if (ret)
++ return ret;
+ goto process_inline;
+ }
+- return false;
++ return 0;
+ }
+
+ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 90a20bd129614..daeac4268c1ab 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -2512,7 +2512,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
+ return nr - nr_shrink;
+ }
+
+-void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
++int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
+ {
+ void *src_addr, *dst_addr;
+ size_t inline_size;
+@@ -2520,7 +2520,8 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
+ struct f2fs_inode *ri;
+
+ ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
+- f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
++ if (IS_ERR(ipage))
++ return PTR_ERR(ipage);
+
+ ri = F2FS_INODE(page);
+ if (ri->i_inline & F2FS_INLINE_XATTR) {
+@@ -2539,6 +2540,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
+ update_inode:
+ f2fs_update_inode(inode, ipage);
+ f2fs_put_page(ipage, 1);
++ return 0;
+ }
+
+ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index 783773e4560de..5f230e981c483 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -514,7 +514,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+
+ /* step 1: recover xattr */
+ if (IS_INODE(page)) {
+- f2fs_recover_inline_xattr(inode, page);
++ err = f2fs_recover_inline_xattr(inode, page);
++ if (err)
++ goto out;
+ } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
+ err = f2fs_recover_xattr_data(inode, page);
+ if (!err)
+@@ -523,8 +525,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
+ }
+
+ /* step 2: recover inline data */
+- if (f2fs_recover_inline_data(inode, page))
++ err = f2fs_recover_inline_data(inode, page);
++ if (err) {
++ if (err == 1)
++ err = 0;
+ goto out;
++ }
+
+ /* step 3: recover data indices */
+ start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index f4b882ee48ddf..fa461db696e79 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1075,6 +1075,9 @@ static void f2fs_put_super(struct super_block *sb)
+ int i;
+ bool dropped;
+
++ /* unregister procfs/sysfs entries in advance to avoid a race */
++ f2fs_unregister_sysfs(sbi);
++
+ f2fs_quota_off_umount(sb);
+
+ /* prevent remaining shrinker jobs */
+@@ -1138,8 +1141,6 @@ static void f2fs_put_super(struct super_block *sb)
+
+ kvfree(sbi->ckpt);
+
+- f2fs_unregister_sysfs(sbi);
+-
+ sb->s_fs_info = NULL;
+ if (sbi->s_chksum_driver)
+ crypto_free_shash(sbi->s_chksum_driver);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 76ac9c7d32ec7..5f6400ba82c00 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -42,7 +42,6 @@
+ struct wb_writeback_work {
+ long nr_pages;
+ struct super_block *sb;
+- unsigned long *older_than_this;
+ enum writeback_sync_modes sync_mode;
+ unsigned int tagged_writepages:1;
+ unsigned int for_kupdate:1;
+@@ -144,7 +143,9 @@ static void inode_io_list_del_locked(struct inode *inode,
+ struct bdi_writeback *wb)
+ {
+ assert_spin_locked(&wb->list_lock);
++ assert_spin_locked(&inode->i_lock);
+
++ inode->i_state &= ~I_SYNC_QUEUED;
+ list_del_init(&inode->i_io_list);
+ wb_io_lists_depopulated(wb);
+ }
+@@ -1123,7 +1124,9 @@ void inode_io_list_del(struct inode *inode)
+ struct bdi_writeback *wb;
+
+ wb = inode_to_wb_and_lock_list(inode);
++ spin_lock(&inode->i_lock);
+ inode_io_list_del_locked(inode, wb);
++ spin_unlock(&inode->i_lock);
+ spin_unlock(&wb->list_lock);
+ }
+
+@@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode)
+ * the case then the inode must have been redirtied while it was being written
+ * out and we don't reset its dirtied_when.
+ */
+-static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
++static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
+ {
++ assert_spin_locked(&inode->i_lock);
++
+ if (!list_empty(&wb->b_dirty)) {
+ struct inode *tail;
+
+@@ -1182,6 +1187,14 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+ inode->dirtied_when = jiffies;
+ }
+ inode_io_list_move_locked(inode, wb, &wb->b_dirty);
++ inode->i_state &= ~I_SYNC_QUEUED;
++}
++
++static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
++{
++ spin_lock(&inode->i_lock);
++ redirty_tail_locked(inode, wb);
++ spin_unlock(&inode->i_lock);
+ }
+
+ /*
+@@ -1220,16 +1233,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
+ #define EXPIRE_DIRTY_ATIME 0x0001
+
+ /*
+- * Move expired (dirtied before work->older_than_this) dirty inodes from
++ * Move expired (dirtied before dirtied_before) dirty inodes from
+ * @delaying_queue to @dispatch_queue.
+ */
+ static int move_expired_inodes(struct list_head *delaying_queue,
+ struct list_head *dispatch_queue,
+- int flags,
+- struct wb_writeback_work *work)
++ int flags, unsigned long dirtied_before)
+ {
+- unsigned long *older_than_this = NULL;
+- unsigned long expire_time;
+ LIST_HEAD(tmp);
+ struct list_head *pos, *node;
+ struct super_block *sb = NULL;
+@@ -1237,21 +1247,17 @@ static int move_expired_inodes(struct list_head *delaying_queue,
+ int do_sb_sort = 0;
+ int moved = 0;
+
+- if ((flags & EXPIRE_DIRTY_ATIME) == 0)
+- older_than_this = work->older_than_this;
+- else if (!work->for_sync) {
+- expire_time = jiffies - (dirtytime_expire_interval * HZ);
+- older_than_this = &expire_time;
+- }
+ while (!list_empty(delaying_queue)) {
+ inode = wb_inode(delaying_queue->prev);
+- if (older_than_this &&
+- inode_dirtied_after(inode, *older_than_this))
++ if (inode_dirtied_after(inode, dirtied_before))
+ break;
+ list_move(&inode->i_io_list, &tmp);
+ moved++;
++ spin_lock(&inode->i_lock);
+ if (flags & EXPIRE_DIRTY_ATIME)
+- set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
++ inode->i_state |= I_DIRTY_TIME_EXPIRED;
++ inode->i_state |= I_SYNC_QUEUED;
++ spin_unlock(&inode->i_lock);
+ if (sb_is_blkdev_sb(inode->i_sb))
+ continue;
+ if (sb && sb != inode->i_sb)
+@@ -1289,18 +1295,22 @@ out:
+ * |
+ * +--> dequeue for IO
+ */
+-static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
++static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
++ unsigned long dirtied_before)
+ {
+ int moved;
++ unsigned long time_expire_jif = dirtied_before;
+
+ assert_spin_locked(&wb->list_lock);
+ list_splice_init(&wb->b_more_io, &wb->b_io);
+- moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
++ moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before);
++ if (!work->for_sync)
++ time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
+ moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
+- EXPIRE_DIRTY_ATIME, work);
++ EXPIRE_DIRTY_ATIME, time_expire_jif);
+ if (moved)
+ wb_io_lists_populated(wb);
+- trace_writeback_queue_io(wb, work, moved);
++ trace_writeback_queue_io(wb, work, dirtied_before, moved);
+ }
+
+ static int write_inode(struct inode *inode, struct writeback_control *wbc)
+@@ -1394,7 +1404,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+ * writeback is not making progress due to locked
+ * buffers. Skip this inode for now.
+ */
+- redirty_tail(inode, wb);
++ redirty_tail_locked(inode, wb);
+ return;
+ }
+
+@@ -1414,7 +1424,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+ * retrying writeback of the dirty page/inode
+ * that cannot be performed immediately.
+ */
+- redirty_tail(inode, wb);
++ redirty_tail_locked(inode, wb);
+ }
+ } else if (inode->i_state & I_DIRTY) {
+ /*
+@@ -1422,10 +1432,11 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+ * such as delayed allocation during submission or metadata
+ * updates after data IO completion.
+ */
+- redirty_tail(inode, wb);
++ redirty_tail_locked(inode, wb);
+ } else if (inode->i_state & I_DIRTY_TIME) {
+ inode->dirtied_when = jiffies;
+ inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
++ inode->i_state &= ~I_SYNC_QUEUED;
+ } else {
+ /* The inode is clean. Remove from writeback lists. */
+ inode_io_list_del_locked(inode, wb);
+@@ -1669,8 +1680,8 @@ static long writeback_sb_inodes(struct super_block *sb,
+ */
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
++ redirty_tail_locked(inode, wb);
+ spin_unlock(&inode->i_lock);
+- redirty_tail(inode, wb);
+ continue;
+ }
+ if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
+@@ -1811,7 +1822,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+ blk_start_plug(&plug);
+ spin_lock(&wb->list_lock);
+ if (list_empty(&wb->b_io))
+- queue_io(wb, &work);
++ queue_io(wb, &work, jiffies);
+ __writeback_inodes_wb(wb, &work);
+ spin_unlock(&wb->list_lock);
+ blk_finish_plug(&plug);
+@@ -1831,7 +1842,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+ * takes longer than a dirty_writeback_interval interval, then leave a
+ * one-second gap.
+ *
+- * older_than_this takes precedence over nr_to_write. So we'll only write back
++ * dirtied_before takes precedence over nr_to_write. So we'll only write back
+ * all dirty pages if they are all attached to "old" mappings.
+ */
+ static long wb_writeback(struct bdi_writeback *wb,
+@@ -1839,14 +1850,11 @@ static long wb_writeback(struct bdi_writeback *wb,
+ {
+ unsigned long wb_start = jiffies;
+ long nr_pages = work->nr_pages;
+- unsigned long oldest_jif;
++ unsigned long dirtied_before = jiffies;
+ struct inode *inode;
+ long progress;
+ struct blk_plug plug;
+
+- oldest_jif = jiffies;
+- work->older_than_this = &oldest_jif;
+-
+ blk_start_plug(&plug);
+ spin_lock(&wb->list_lock);
+ for (;;) {
+@@ -1880,14 +1888,14 @@ static long wb_writeback(struct bdi_writeback *wb,
+ * safe.
+ */
+ if (work->for_kupdate) {
+- oldest_jif = jiffies -
++ dirtied_before = jiffies -
+ msecs_to_jiffies(dirty_expire_interval * 10);
+ } else if (work->for_background)
+- oldest_jif = jiffies;
++ dirtied_before = jiffies;
+
+ trace_writeback_start(wb, work);
+ if (list_empty(&wb->b_io))
+- queue_io(wb, work);
++ queue_io(wb, work, dirtied_before);
+ if (work->sb)
+ progress = writeback_sb_inodes(work->sb, wb, work);
+ else
+@@ -2289,11 +2297,12 @@ void __mark_inode_dirty(struct inode *inode, int flags)
+ inode->i_state |= flags;
+
+ /*
+- * If the inode is being synced, just update its dirty state.
+- * The unlocker will place the inode on the appropriate
+- * superblock list, based upon its state.
++ * If the inode is queued for writeback by the flush worker, just
++ * update its dirty state. Once the flush worker is done with
++ * the inode, it will place it on the appropriate superblock
++ * list, based upon its state.
+ */
+- if (inode->i_state & I_SYNC)
++ if (inode->i_state & I_SYNC_QUEUED)
+ goto out_unlock_inode;
+
+ /*
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 40306c1eab07c..5fff7cb3582f0 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -1284,6 +1284,12 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ sb->s_magic = HUGETLBFS_MAGIC;
+ sb->s_op = &hugetlbfs_ops;
+ sb->s_time_gran = 1;
++
++ /*
++ * Due to the special and limited functionality of hugetlbfs, it does
++ * not work well as a stacking filesystem.
++ */
++ sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
+ sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
+ if (!sb->s_root)
+ goto out_free;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index fada14ee1cdcb..2a539b794f3b0 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2378,6 +2378,15 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
+ list_del_init(&req->list);
+ ret = false;
+ }
++
++ if (ret) {
++ struct io_ring_ctx *ctx = req->ctx;
++
++ spin_lock_irq(&ctx->task_lock);
++ list_add(&req->task_list, &ctx->task_list);
++ req->work_task = NULL;
++ spin_unlock_irq(&ctx->task_lock);
++ }
+ spin_unlock(&list->lock);
+ return ret;
+ }
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index de992a70ddfef..90453309345d5 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1983,6 +1983,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
+ */
+ static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
+ {
++ J_ASSERT_JH(jh, jh->b_transaction != NULL);
++ J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
++
+ __jbd2_journal_temp_unlink_buffer(jh);
+ jh->b_transaction = NULL;
+ jbd2_journal_put_journal_head(jh);
+@@ -2074,6 +2077,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
+ {
+ struct buffer_head *head;
+ struct buffer_head *bh;
++ bool has_write_io_error = false;
+ int ret = 0;
+
+ J_ASSERT(PageLocked(page));
+@@ -2098,11 +2102,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
+ jbd_unlock_bh_state(bh);
+ if (buffer_jbd(bh))
+ goto busy;
++
++ /*
++ * If we free a metadata buffer which failed to be written
++ * out, the jbd2 checkpoint procedure will not detect this
++ * failure and may lead to filesystem inconsistency after
++ * the journal tail is cleaned up.
++ */
++ if (buffer_write_io_error(bh)) {
++ pr_err("JBD2: Error while async write back metadata bh %llu.",
++ (unsigned long long)bh->b_blocknr);
++ has_write_io_error = true;
++ }
+ } while ((bh = bh->b_this_page) != head);
+
+ ret = try_to_free_buffers(page);
+
+ busy:
++ if (has_write_io_error)
++ jbd2_journal_abort(journal, -EIO);
++
+ return ret;
+ }
+
+@@ -2530,6 +2549,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
+
+ was_dirty = test_clear_buffer_jbddirty(bh);
+ __jbd2_journal_temp_unlink_buffer(jh);
++
++ /*
++ * b_transaction must be set, otherwise the new b_transaction won't
++ * be holding the jh reference.
++ */
++ J_ASSERT_JH(jh, jh->b_transaction != NULL);
++
+ /*
+ * We set b_transaction here because b_next_transaction will inherit
+ * our jh reference and thus __jbd2_journal_file_buffer() must not
+diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
+index a9ad90926b873..6c7354abd0aea 100644
+--- a/fs/xfs/libxfs/xfs_trans_inode.c
++++ b/fs/xfs/libxfs/xfs_trans_inode.c
+@@ -36,6 +36,7 @@ xfs_trans_ijoin(
+
+ ASSERT(iip->ili_lock_flags == 0);
+ iip->ili_lock_flags = lock_flags;
++ ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
+
+ /*
+ * Get a log_item_desc to point at the new item.
+@@ -91,6 +92,7 @@ xfs_trans_log_inode(
+
+ ASSERT(ip->i_itemp != NULL);
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
++ ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
+
+ /*
+ * Don't bother with i_lock for the I_DIRTY_TIME check here, as races
+diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
+index d95dc9b0f0bba..a1135b86e79f9 100644
+--- a/fs/xfs/xfs_icache.c
++++ b/fs/xfs/xfs_icache.c
+@@ -1132,7 +1132,7 @@ restart:
+ goto out_ifunlock;
+ xfs_iunpin_wait(ip);
+ }
+- if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
++ if (xfs_inode_clean(ip)) {
+ xfs_ifunlock(ip);
+ goto reclaim;
+ }
+@@ -1219,6 +1219,7 @@ reclaim:
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_qm_dqdetach(ip);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
++ ASSERT(xfs_inode_clean(ip));
+
+ __xfs_inode_free(ip);
+ return error;
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 18f4b262e61ce..b339ff93df997 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -1761,10 +1761,31 @@ xfs_inactive_ifree(
+ return error;
+ }
+
++ /*
++ * We do not hold the inode locked across the entire rolling transaction
++ * here. We only need to hold it for the first transaction that
++ * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
++ * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
++ * here breaks the relationship between cluster buffer invalidation and
++ * stale inode invalidation on cluster buffer item journal commit
++ * completion, and can result in leaving dirty stale inodes hanging
++ * around in memory.
++ *
++ * We have no need for serialising this inode operation against other
++ * operations - we freed the inode and hence reallocation is required
++ * and that will serialise on reallocating the space the deferops need
++ * to free. Hence we can unlock the inode on the first commit of
++ * the transaction rather than roll it right through the deferops. This
++ * avoids relogging the XFS_ISTALE inode.
++ *
++ * We check that xfs_ifree() hasn't grown an internal transaction roll
++ * by asserting that the inode is still locked when it returns.
++ */
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+- xfs_trans_ijoin(tp, ip, 0);
++ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ error = xfs_ifree(tp, ip);
++ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ if (error) {
+ /*
+ * If we fail to free the inode, shut down. The cancel
+@@ -1777,7 +1798,6 @@ xfs_inactive_ifree(
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+ }
+ xfs_trans_cancel(tp);
+- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+ }
+
+@@ -1795,7 +1815,6 @@ xfs_inactive_ifree(
+ xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
+ __func__, error);
+
+- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return 0;
+ }
+
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index d87acf62958e2..13ed2c6b13f8b 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1039,7 +1039,11 @@ extern void *efi_get_pal_addr (void);
+ extern void efi_map_pal_code (void);
+ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
+ extern void efi_gettimeofday (struct timespec64 *ts);
++#ifdef CONFIG_EFI
+ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
++#else
++static inline void efi_enter_virtual_mode (void) {}
++#endif
+ #ifdef CONFIG_X86
+ extern efi_status_t efi_query_variable_store(u32 attributes,
+ unsigned long size,
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index 756706b666a10..8221838fefd98 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -400,8 +400,6 @@ struct fb_tile_ops {
+ #define FBINFO_HWACCEL_YPAN 0x2000 /* optional */
+ #define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */
+
+-#define FBINFO_MISC_USEREVENT 0x10000 /* event request
+- from userspace */
+ #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
+
+ /* A driver may set this flag to indicate that it does want a set_par to be
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 5bd384dbdca58..4c82683e034a7 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2140,6 +2140,10 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+ *
+ * I_CREATING New object's inode in the middle of setting up.
+ *
++ * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
++ * Used to detect that mark_inode_dirty() should not move
++ *			the inode between dirty lists.
++ *
+ * Q: What is the difference between I_WILL_FREE and I_FREEING?
+ */
+ #define I_DIRTY_SYNC (1 << 0)
+@@ -2157,11 +2161,11 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+ #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
+ #define I_LINKABLE (1 << 10)
+ #define I_DIRTY_TIME (1 << 11)
+-#define __I_DIRTY_TIME_EXPIRED 12
+-#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
++#define I_DIRTY_TIME_EXPIRED (1 << 12)
+ #define I_WB_SWITCH (1 << 13)
+ #define I_OVL_INUSE (1 << 14)
+ #define I_CREATING (1 << 15)
++#define I_SYNC_QUEUED (1 << 17)
+
+ #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
+ #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
+diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
+index aac42c28fe62d..9b67394471e1c 100644
+--- a/include/linux/netfilter_ipv6.h
++++ b/include/linux/netfilter_ipv6.h
+@@ -58,7 +58,6 @@ struct nf_ipv6_ops {
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
+ int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
+ #if IS_MODULE(CONFIG_IPV6)
+- int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
+ int (*br_fragment)(struct net *net, struct sock *sk,
+ struct sk_buff *skb,
+ struct nf_bridge_frag_data *data,
+@@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
+
+ #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+
+-static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
+- u32 user)
+-{
+-#if IS_MODULE(CONFIG_IPV6)
+- const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+-
+- if (!v6_ops)
+- return 1;
+-
+- return v6_ops->br_defrag(net, skb, user);
+-#elif IS_BUILTIN(CONFIG_IPV6)
+- return nf_ct_frag6_gather(net, skb, user);
+-#else
+- return 1;
+-#endif
+-}
+-
+ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ struct nf_bridge_frag_data *data,
+ int (*output)(struct net *, struct sock *sk,
+diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
+index 66282552db207..67434278b81dd 100644
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -499,8 +499,9 @@ DEFINE_WBC_EVENT(wbc_writepage);
+ TRACE_EVENT(writeback_queue_io,
+ TP_PROTO(struct bdi_writeback *wb,
+ struct wb_writeback_work *work,
++ unsigned long dirtied_before,
+ int moved),
+- TP_ARGS(wb, work, moved),
++ TP_ARGS(wb, work, dirtied_before, moved),
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(unsigned long, older)
+@@ -510,19 +511,17 @@ TRACE_EVENT(writeback_queue_io,
+ __field(unsigned int, cgroup_ino)
+ ),
+ TP_fast_assign(
+- unsigned long *older_than_this = work->older_than_this;
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+- __entry->older = older_than_this ? *older_than_this : 0;
+- __entry->age = older_than_this ?
+- (jiffies - *older_than_this) * 1000 / HZ : -1;
++ __entry->older = dirtied_before;
++ __entry->age = (jiffies - dirtied_before) * 1000 / HZ;
+ __entry->moved = moved;
+ __entry->reason = work->reason;
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
+ ),
+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
+ __entry->name,
+- __entry->older, /* older_than_this in jiffies */
+- __entry->age, /* older_than_this in relative milliseconds */
++ __entry->older, /* dirtied_before in jiffies */
++ __entry->age, /* dirtied_before in relative milliseconds */
+ __entry->moved,
+ __print_symbolic(__entry->reason, WB_WORK_REASON),
+ __entry->cgroup_ino
+diff --git a/kernel/Makefile b/kernel/Makefile
+index daad787fb795d..42557f251fea6 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -128,7 +128,7 @@ $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
+ $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
+
+ quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz
+- cmd_genikh = $(BASH) $(srctree)/kernel/gen_kheaders.sh $@
++ cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_kheaders.sh $@
+ $(obj)/kheaders_data.tar.xz: FORCE
+ $(call cmd,genikh)
+
+diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
+index 5a0fc0b0403a6..c1510f0ab3ea5 100755
+--- a/kernel/gen_kheaders.sh
++++ b/kernel/gen_kheaders.sh
+@@ -1,4 +1,4 @@
+-#!/bin/bash
++#!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+
+ # This script generates an archive consisting of kernel headers
+@@ -21,30 +21,38 @@ arch/$SRCARCH/include/
+ # Uncomment it for debugging.
+ # if [ ! -f /tmp/iter ]; then iter=1; echo 1 > /tmp/iter;
+ # else iter=$(($(cat /tmp/iter) + 1)); echo $iter > /tmp/iter; fi
+-# find $src_file_list -name "*.h" | xargs ls -l > /tmp/src-ls-$iter
+-# find $obj_file_list -name "*.h" | xargs ls -l > /tmp/obj-ls-$iter
++# find $all_dirs -name "*.h" | xargs ls -l > /tmp/ls-$iter
++
++all_dirs=
++if [ "$building_out_of_srctree" ]; then
++ for d in $dir_list; do
++ all_dirs="$all_dirs $srctree/$d"
++ done
++fi
++all_dirs="$all_dirs $dir_list"
+
+ # include/generated/compile.h is ignored because it is touched even when none
+-# of the source files changed. This causes pointless regeneration, so let us
+-# ignore them for md5 calculation.
+-pushd $srctree > /dev/null
+-src_files_md5="$(find $dir_list -name "*.h" |
+- grep -v "include/generated/compile.h" |
+- grep -v "include/generated/autoconf.h" |
+- xargs ls -l | md5sum | cut -d ' ' -f1)"
+-popd > /dev/null
+-obj_files_md5="$(find $dir_list -name "*.h" |
+- grep -v "include/generated/compile.h" |
+- grep -v "include/generated/autoconf.h" |
++# of the source files changed.
++#
++# When Kconfig regenerates include/generated/autoconf.h, its timestamp is
++# updated, but the contents might be still the same. When any CONFIG option is
++# changed, Kconfig touches the corresponding timestamp file include/config/*.h.
++# Hence, the md5sum detects the configuration change anyway. We do not need to
++# check include/generated/autoconf.h explicitly.
++#
++# Ignore them for md5 calculation to avoid pointless regeneration.
++headers_md5="$(find $all_dirs -name "*.h" |
++ grep -v "include/generated/compile.h" |
++ grep -v "include/generated/autoconf.h" |
+ xargs ls -l | md5sum | cut -d ' ' -f1)"
++
+ # Any changes to this script will also cause a rebuild of the archive.
+ this_file_md5="$(ls -l $sfile | md5sum | cut -d ' ' -f1)"
+ if [ -f $tarfile ]; then tarfile_md5="$(md5sum $tarfile | cut -d ' ' -f1)"; fi
+ if [ -f kernel/kheaders.md5 ] &&
+- [ "$(cat kernel/kheaders.md5|head -1)" == "$src_files_md5" ] &&
+- [ "$(cat kernel/kheaders.md5|head -2|tail -1)" == "$obj_files_md5" ] &&
+- [ "$(cat kernel/kheaders.md5|head -3|tail -1)" == "$this_file_md5" ] &&
+- [ "$(cat kernel/kheaders.md5|tail -1)" == "$tarfile_md5" ]; then
++ [ "$(head -n 1 kernel/kheaders.md5)" = "$headers_md5" ] &&
++ [ "$(head -n 2 kernel/kheaders.md5 | tail -n 1)" = "$this_file_md5" ] &&
++ [ "$(tail -n 1 kernel/kheaders.md5)" = "$tarfile_md5" ]; then
+ exit
+ fi
+
+@@ -55,14 +63,17 @@ fi
+ rm -rf $cpio_dir
+ mkdir $cpio_dir
+
+-pushd $srctree > /dev/null
+-for f in $dir_list;
+- do find "$f" -name "*.h";
+-done | cpio --quiet -pd $cpio_dir
+-popd > /dev/null
++if [ "$building_out_of_srctree" ]; then
++ (
++ cd $srctree
++ for f in $dir_list
++ do find "$f" -name "*.h";
++ done | cpio --quiet -pd $cpio_dir
++ )
++fi
+
+-# The second CPIO can complain if files already exist which can
+-# happen with out of tree builds. Just silence CPIO for now.
++# The second CPIO can complain if files already exist, which can happen with
++# out-of-tree builds having stale headers in srctree. Just silence CPIO for now.
+ for f in $dir_list;
+ do find "$f" -name "*.h";
+ done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1
+@@ -77,10 +88,9 @@ find $cpio_dir -type f -print0 |
+ find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
+ tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+ --owner=0 --group=0 --numeric-owner --no-recursion \
+- -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null
++ -I $XZ -cf $tarfile -C $cpio_dir/ -T - > /dev/null
+
+-echo "$src_files_md5" > kernel/kheaders.md5
+-echo "$obj_files_md5" >> kernel/kheaders.md5
++echo "$headers_md5" > kernel/kheaders.md5
+ echo "$this_file_md5" >> kernel/kheaders.md5
+ echo "$(md5sum $tarfile | cut -d ' ' -f1)" >> kernel/kheaders.md5
+
+diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
+index 30cc217b86318..651a4ad6d711f 100644
+--- a/kernel/irq/matrix.c
++++ b/kernel/irq/matrix.c
+@@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
+ unsigned int cpu, bit;
+ struct cpumap *cm;
+
++ /*
++ * Not required in theory, but matrix_find_best_cpu() uses
++ * for_each_cpu() which ignores the cpumask on UP.
++ */
++ if (cpumask_empty(msk))
++ return -EINVAL;
++
+ cpu = matrix_find_best_cpu(m, msk);
+ if (cpu == UINT_MAX)
+ return -ENOSPC;
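
The guard added above matters because for_each_cpu() on UP kernels ignores
its mask argument, so an empty cpumask would otherwise still yield CPU 0.
A minimal sketch of the same defensive pattern (function name hypothetical):

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static int pick_cpu_checked(const struct cpumask *msk)
    {
        /* Reject an empty mask up front; on UP, mask-driven iterators
         * would otherwise fall through to CPU 0 regardless. */
        if (cpumask_empty(msk))
            return -EINVAL;

        return cpumask_first(msk);
    }
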
+diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
+index 9bb6d2497b040..581f818181386 100644
+--- a/kernel/locking/lockdep_proc.c
++++ b/kernel/locking/lockdep_proc.c
+@@ -400,7 +400,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
+ seq_time(m, lt->min);
+ seq_time(m, lt->max);
+ seq_time(m, lt->total);
+- seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
++ seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
+ }
+
+ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
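
The div_s64() to div64_u64() change above avoids truncating the divisor:
div_s64() takes an s32 divisor, so a sample count above 2^31 - 1 would be
silently narrowed, while div64_u64() divides full 64-bit operands. A short
illustration (helper name hypothetical):

    #include <linux/math64.h>
    #include <linux/types.h>

    static u64 avg_ns(u64 total_ns, u64 nr_samples)
    {
        /* div64_u64(u64, u64): no narrowing of either operand */
        return nr_samples ? div64_u64(total_ns, nr_samples) : 0;
    }
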
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index b34b5c6e25248..352239c411a44 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -794,6 +794,26 @@ unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
+ /* All clamps are required to be less or equal than these values */
+ static struct uclamp_se uclamp_default[UCLAMP_CNT];
+
++/*
++ * This static key is used to reduce the uclamp overhead in the fast path. It
++ * primarily disables the call to uclamp_rq_{inc, dec}() in
++ * enqueue/dequeue_task().
++ *
++ * This allows users to continue to enable uclamp in their kernel config with
++ * minimum uclamp overhead in the fast path.
++ *
++ * As soon as userspace modifies any of the uclamp knobs, the static key is
++ * enabled, since we have actual users that make use of uclamp
++ * functionality.
++ *
++ * The knobs that would enable this static key are:
++ *
++ * * A task modifying its uclamp value with sched_setattr().
++ * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
++ * * An admin modifying the cgroup cpu.uclamp.{min, max}
++ */
++DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
++
+ /* Integer rounded range for each bucket */
+ #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
+
+@@ -990,10 +1010,38 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
+
+ lockdep_assert_held(&rq->lock);
+
++ /*
++ * If sched_uclamp_used was enabled after task @p was enqueued,
++ * we could end up with unbalanced call to uclamp_rq_dec_id().
++ *
++ * In this case the uc_se->active flag should be false since no uclamp
++ * accounting was performed at enqueue time and we can just return
++ * here.
++ *
++ * Need to be careful of the following enqueue/dequeue ordering
++ * problem too
++ *
++ * enqueue(taskA)
++ * // sched_uclamp_used gets enabled
++ * enqueue(taskB)
++ * dequeue(taskA)
++ * // Must not decrement bucket->tasks here
++ * dequeue(taskB)
++ *
++ * where we could end up with stale data in uc_se and
++ * bucket[uc_se->bucket_id].
++ *
++ * The check here eliminates the possibility of such a race.
++ */
++ if (unlikely(!uc_se->active))
++ return;
++
+ bucket = &uc_rq->bucket[uc_se->bucket_id];
++
+ SCHED_WARN_ON(!bucket->tasks);
+ if (likely(bucket->tasks))
+ bucket->tasks--;
++
+ uc_se->active = false;
+
+ /*
+@@ -1021,6 +1069,15 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
+ {
+ enum uclamp_id clamp_id;
+
++ /*
++ * Avoid any overhead until uclamp is actually used by userspace.
++ *
++ * The condition is constructed such that a NOP is generated when
++ * sched_uclamp_used is disabled.
++ */
++ if (!static_branch_unlikely(&sched_uclamp_used))
++ return;
++
+ if (unlikely(!p->sched_class->uclamp_enabled))
+ return;
+
+@@ -1036,6 +1093,15 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
+ {
+ enum uclamp_id clamp_id;
+
++ /*
++ * Avoid any overhead until uclamp is actually used by userspace.
++ *
++ * The condition is constructed such that a NOP is generated when
++ * sched_uclamp_used is disabled.
++ */
++ if (!static_branch_unlikely(&sched_uclamp_used))
++ return;
++
+ if (unlikely(!p->sched_class->uclamp_enabled))
+ return;
+
+@@ -1145,8 +1211,10 @@ int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
+ update_root_tg = true;
+ }
+
+- if (update_root_tg)
++ if (update_root_tg) {
++ static_branch_enable(&sched_uclamp_used);
+ uclamp_update_root_tg();
++ }
+
+ /*
+ * We update all RUNNABLE tasks only when task groups are in use.
+@@ -1181,6 +1249,15 @@ static int uclamp_validate(struct task_struct *p,
+ if (upper_bound > SCHED_CAPACITY_SCALE)
+ return -EINVAL;
+
++ /*
++ * We have valid uclamp attributes; make sure uclamp is enabled.
++ *
++ * We need to do that here, because enabling static branches is a
++ * blocking operation which obviously cannot be done while holding
++ * scheduler locks.
++ */
++ static_branch_enable(&sched_uclamp_used);
++
+ return 0;
+ }
+
+@@ -7294,6 +7371,8 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
+ if (req.ret)
+ return req.ret;
+
++ static_branch_enable(&sched_uclamp_used);
++
+ mutex_lock(&uclamp_mutex);
+ rcu_read_lock();
+
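
The sched/core.c changes above follow a common lazy-enable pattern: a
static key keeps the fast path a patched-out NOP until userspace first
touches a uclamp knob, and the key is enabled outside scheduler locks
because enabling it can block. A minimal sketch with hypothetical names
(feature_used, feature_fast_path, feature_enable):

    #include <linux/jump_label.h>

    static DEFINE_STATIC_KEY_FALSE(feature_used);

    static inline void feature_fast_path(void)
    {
        /* Compiles to a no-op branch while the key is disabled */
        if (!static_branch_unlikely(&feature_used))
            return;
        /* ... accounting that only matters once opted in ... */
    }

    static void feature_enable(void)
    {
        /* May block, so never call under a raw spinlock */
        static_branch_enable(&feature_used);
    }
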
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index b6f56e7c8dd16..4cb80e6042c4f 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -210,7 +210,7 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
+ unsigned long dl_util, util, irq;
+ struct rq *rq = cpu_rq(cpu);
+
+- if (!IS_BUILTIN(CONFIG_UCLAMP_TASK) &&
++ if (!uclamp_is_used() &&
+ type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
+ return max;
+ }
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 570659f1c6e22..9f2a9e34a78d5 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -841,6 +841,8 @@ struct uclamp_rq {
+ unsigned int value;
+ struct uclamp_bucket bucket[UCLAMP_BUCKETS];
+ };
++
++DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
+ #endif /* CONFIG_UCLAMP_TASK */
+
+ /*
+@@ -2319,12 +2321,35 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
+ #ifdef CONFIG_UCLAMP_TASK
+ unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
+
++/**
++ * uclamp_util_with - clamp @util with @rq and @p effective uclamp values.
++ * @rq: The rq to clamp against. Must not be NULL.
++ * @util: The util value to clamp.
++ * @p: The task to clamp against. Can be NULL if you want to clamp
++ * against @rq only.
++ *
++ * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
++ *
++ * If sched_uclamp_used static key is disabled, then just return the util
++ * without any clamping since uclamp aggregation at the rq level in the fast
++ * path is disabled, rendering this operation a NOP.
++ *
++ * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
++ * will return the correct effective uclamp value of the task even if the
++ * static key is disabled.
++ */
+ static __always_inline
+ unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
+ struct task_struct *p)
+ {
+- unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
+- unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
++ unsigned int min_util;
++ unsigned int max_util;
++
++ if (!static_branch_likely(&sched_uclamp_used))
++ return util;
++
++ min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
++ max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
+
+ if (p) {
+ min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
+@@ -2346,6 +2371,19 @@ static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
+ {
+ return uclamp_util_with(rq, util, NULL);
+ }
++
++/*
++ * When uclamp is compiled in, the aggregation at rq level is 'turned off'
++ * by default in the fast path and only gets turned on once userspace performs
++ * an operation that requires it.
++ *
++ * Returns true if userspace opted in to uclamp, in which case aggregation
++ * at the rq level is active.
++ */
++static inline bool uclamp_is_used(void)
++{
++ return static_branch_likely(&sched_uclamp_used);
++}
+ #else /* CONFIG_UCLAMP_TASK */
+ static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
+ struct task_struct *p)
+@@ -2356,6 +2394,11 @@ static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
+ {
+ return util;
+ }
++
++static inline bool uclamp_is_used(void)
++{
++ return false;
++}
+ #endif /* CONFIG_UCLAMP_TASK */
+
+ #ifdef arch_scale_freq_capacity
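
uclamp_util_with() above ultimately bounds the utilization to the
aggregated [min, max] window; the final step reduces to the kernel's
clamp() helper. A sketch under that assumption (names hypothetical):

    #include <linux/kernel.h>

    static unsigned int bound_util(unsigned int util,
                                   unsigned int min_util,
                                   unsigned int max_util)
    {
        /* clamp() bounds util to [min_util, max_util] */
        return clamp(util, min_util, max_util);
    }
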
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index a4c8f9d9522e4..884333b9fc767 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -535,6 +535,18 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ #endif
+ bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
+
++ /*
++ * As blktrace relies on debugfs for its interface, the debugfs directory
++ * is required, contrary to the usual mantra of not checking for debugfs
++ * files or directories.
++ */
++ if (IS_ERR_OR_NULL(dir)) {
++ pr_warn("debugfs_dir not present for %s so skipping\n",
++ buts->name);
++ ret = -ENOENT;
++ goto err;
++ }
++
+ bt->dev = dev;
+ atomic_set(&bt->dropped, 0);
+ INIT_LIST_HEAD(&bt->running_list);
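
The IS_ERR_OR_NULL() check above is needed because debugfs_create_dir()
can report failure either as NULL or as an ERR_PTR depending on
configuration. A sketch of a caller that, like blktrace, genuinely
depends on the directory (wrapper name hypothetical):

    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *require_debugfs_dir(const char *name)
    {
        struct dentry *dir = debugfs_create_dir(name, NULL);

        /* Handle both NULL and ERR_PTR() returns */
        if (IS_ERR_OR_NULL(dir))
            return NULL;
        return dir;
    }
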
+diff --git a/mm/cma.c b/mm/cma.c
+index 7fe0b8356775f..7de520c0a1db6 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -93,19 +93,15 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
+ mutex_unlock(&cma->lock);
+ }
+
+-static int __init cma_activate_area(struct cma *cma)
++static void __init cma_activate_area(struct cma *cma)
+ {
+- int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
+ unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+ unsigned i = cma->count >> pageblock_order;
+ struct zone *zone;
+
+- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+-
+- if (!cma->bitmap) {
+- cma->count = 0;
+- return -ENOMEM;
+- }
++ cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
++ if (!cma->bitmap)
++ goto out_error;
+
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ zone = page_zone(pfn_to_page(pfn));
+@@ -135,25 +131,22 @@ static int __init cma_activate_area(struct cma *cma)
+ spin_lock_init(&cma->mem_head_lock);
+ #endif
+
+- return 0;
++ return;
+
+ not_in_zone:
+- pr_err("CMA area %s could not be activated\n", cma->name);
+- kfree(cma->bitmap);
++ bitmap_free(cma->bitmap);
++out_error:
+ cma->count = 0;
+- return -EINVAL;
++ pr_err("CMA area %s could not be activated\n", cma->name);
++ return;
+ }
+
+ static int __init cma_init_reserved_areas(void)
+ {
+ int i;
+
+- for (i = 0; i < cma_area_count; i++) {
+- int ret = cma_activate_area(&cma_areas[i]);
+-
+- if (ret)
+- return ret;
+- }
++ for (i = 0; i < cma_area_count; i++)
++ cma_activate_area(&cma_areas[i]);
+
+ return 0;
+ }
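
The cma.c hunk swaps an open-coded BITS_TO_LONGS() allocation for the
dedicated bitmap helpers, which size the buffer from a bit count and
pair allocation with bitmap_free(). A sketch (function name hypothetical):

    #include <linux/bitmap.h>
    #include <linux/gfp.h>

    static unsigned long *alloc_region_bitmap(unsigned int nbits)
    {
        /* bitmap_zalloc() sizes and zeroes the map for nbits bits;
         * release it with bitmap_free(), not a bare kfree(). */
        return bitmap_zalloc(nbits, GFP_KERNEL);
    }
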
+diff --git a/mm/mmu_context.c b/mm/mmu_context.c
+index 3e612ae748e96..a1da47e027479 100644
+--- a/mm/mmu_context.c
++++ b/mm/mmu_context.c
+@@ -25,13 +25,16 @@ void use_mm(struct mm_struct *mm)
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
++ /* Hold off tlb flush IPIs while switching mm's */
++ local_irq_disable();
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ mmgrab(mm);
+ tsk->active_mm = mm;
+ }
+ tsk->mm = mm;
+- switch_mm(active_mm, mm, tsk);
++ switch_mm_irqs_off(active_mm, mm, tsk);
++ local_irq_enable();
+ task_unlock(tsk);
+ #ifdef finish_arch_post_lock_switch
+ finish_arch_post_lock_switch();
+@@ -56,9 +59,11 @@ void unuse_mm(struct mm_struct *mm)
+
+ task_lock(tsk);
+ sync_mm_rss(mm);
++ local_irq_disable();
+ tsk->mm = NULL;
+ /* active_mm is still 'mm' */
+ enter_lazy_tlb(mm, tsk);
++ local_irq_enable();
+ task_unlock(tsk);
+ }
+ EXPORT_SYMBOL_GPL(unuse_mm);
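
The mmu_context.c change closes a window where a TLB-flush IPI could
observe the task's mm pointers mid-update, hence the switch now runs
with local interrupts masked via switch_mm_irqs_off(). The shape of the
fix, as a hypothetical wrapper:

    #include <linux/irqflags.h>
    #include <linux/mmu_context.h>
    #include <linux/sched.h>

    static void switch_mm_atomically(struct mm_struct *prev,
                                     struct mm_struct *next,
                                     struct task_struct *tsk)
    {
        local_irq_disable();
        /* No flush IPI can interleave with the switch here */
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_enable();
    }
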
+diff --git a/mm/shuffle.c b/mm/shuffle.c
+index b3fe97fd66541..56958ffa5a3a9 100644
+--- a/mm/shuffle.c
++++ b/mm/shuffle.c
+@@ -58,25 +58,25 @@ module_param_call(shuffle, shuffle_store, shuffle_show, &shuffle_param, 0400);
+ * For two pages to be swapped in the shuffle, they must be free (on a
+ * 'free_area' lru), have the same order, and have the same migratetype.
+ */
+-static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order)
++static struct page * __meminit shuffle_valid_page(struct zone *zone,
++ unsigned long pfn, int order)
+ {
+- struct page *page;
++ struct page *page = pfn_to_online_page(pfn);
+
+ /*
+ * Given we're dealing with randomly selected pfns in a zone we
+ * need to ask questions like...
+ */
+
+- /* ...is the pfn even in the memmap? */
+- if (!pfn_valid_within(pfn))
++ /* ... is the page managed by the buddy? */
++ if (!page)
+ return NULL;
+
+- /* ...is the pfn in a present section or a hole? */
+- if (!pfn_present(pfn))
++ /* ... is the page assigned to the same zone? */
++ if (page_zone(page) != zone)
+ return NULL;
+
+ /* ...is the page free and currently on a free_area list? */
+- page = pfn_to_page(pfn);
+ if (!PageBuddy(page))
+ return NULL;
+
+@@ -123,7 +123,7 @@ void __meminit __shuffle_zone(struct zone *z)
+ * page_j randomly selected in the span @zone_start_pfn to
+ * @spanned_pages.
+ */
+- page_i = shuffle_valid_page(i, order);
++ page_i = shuffle_valid_page(z, i, order);
+ if (!page_i)
+ continue;
+
+@@ -137,7 +137,7 @@ void __meminit __shuffle_zone(struct zone *z)
+ j = z->zone_start_pfn +
+ ALIGN_DOWN(get_random_long() % z->spanned_pages,
+ order_pages);
+- page_j = shuffle_valid_page(j, order);
++ page_j = shuffle_valid_page(z, j, order);
+ if (page_j && page_j != page_i)
+ break;
+ }
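
shuffle_valid_page() now validates a randomly chosen pfn in two steps:
pfn_to_online_page() filters holes and offline sections, and a zone
comparison rejects pfns that strayed outside the zone being shuffled.
A condensed sketch (function name hypothetical):

    #include <linux/memory_hotplug.h>
    #include <linux/mm.h>

    static struct page *page_in_zone(struct zone *zone, unsigned long pfn)
    {
        struct page *page = pfn_to_online_page(pfn);

        if (!page)                      /* hole or offline section */
            return NULL;
        if (page_zone(page) != zone)    /* wrong zone for this pass */
            return NULL;
        return page;
    }
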
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index ad4d00bd79147..5797e1eeaa7e6 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -85,6 +85,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ vunmap_pte_range(pmd, addr, next);
++
++ cond_resched();
+ } while (pmd++, addr = next, addr != end);
+ }
+
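
The single cond_resched() added to vunmap_pmd_range() bounds how long
the unmap loop can run without yielding, which matters for very large
mappings. The general pattern, as a sketch:

    #include <linux/sched.h>

    static void teardown_many(unsigned long n)
    {
        unsigned long i;

        for (i = 0; i < n; i++) {
            /* ... per-entry teardown work ... */
            cond_resched(); /* only valid in sleepable context */
        }
    }
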
+diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
+index 8096732223828..8d033a75a766e 100644
+--- a/net/bridge/netfilter/nf_conntrack_bridge.c
++++ b/net/bridge/netfilter/nf_conntrack_bridge.c
+@@ -168,6 +168,7 @@ static unsigned int nf_ct_br_defrag4(struct sk_buff *skb,
+ static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
+ const struct nf_hook_state *state)
+ {
++#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
+ enum ip_conntrack_info ctinfo;
+ struct br_input_skb_cb cb;
+@@ -180,14 +181,17 @@ static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
+
+ br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm));
+
+- err = nf_ipv6_br_defrag(state->net, skb,
+- IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
++ err = nf_ct_frag6_gather(state->net, skb,
++ IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
+ /* queued */
+ if (err == -EINPROGRESS)
+ return NF_STOLEN;
+
+ br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size);
+ return err == 0 ? NF_ACCEPT : NF_DROP;
++#else
++ return NF_ACCEPT;
++#endif
+ }
+
+ static int nf_ct_br_ip_check(const struct sk_buff *skb)
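
Calling nf_ct_frag6_gather() directly requires guarding the whole
function body with IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) and falling back
to a benign verdict when the defrag code is compiled out. The pattern
in isolation (config and function names hypothetical):

    #include <linux/kconfig.h>

    static unsigned int demo_hook(void)
    {
    #if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
        /* full handling when built-in or modular */
        return 0;
    #else
        /* feature compiled out: accept and move on */
        return 1;
    #endif
    }
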
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index dbd215cbc53d8..a8dd956b5e8e1 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -1803,7 +1803,20 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
+ }
+
+ tpdat = se_skb->data;
+- memcpy(&tpdat[offset], &dat[1], nbytes);
++ if (!session->transmission) {
++ memcpy(&tpdat[offset], &dat[1], nbytes);
++ } else {
++ int err;
++
++ err = memcmp(&tpdat[offset], &dat[1], nbytes);
++ if (err)
++ netdev_err_once(priv->ndev,
++ "%s: 0x%p: Data of RX-looped back packet (%*ph) doesn't match TX data (%*ph)!\n",
++ __func__, session,
++ nbytes, &dat[1],
++ nbytes, &tpdat[offset]);
++ }
++
+ if (packet == session->pkt.rx)
+ session->pkt.rx++;
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 973a71f4bc898..f80b6999ca1cb 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5317,8 +5317,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto err_free;
+-
+- if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
++ /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
++ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
+ goto err_free;
+
+ vhdr = (struct vlan_hdr *)skb->data;
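
The skbuff.c hunk widens the pull because vlan_set_encap_proto() reads
the two bytes immediately after the VLAN header, so VLAN_HLEN alone does
not guarantee those bytes sit in the linear area. The check in isolation
(helper name hypothetical):

    #include <linux/if_vlan.h>
    #include <linux/skbuff.h>
    #include <linux/types.h>

    static bool vlan_hdr_readable(struct sk_buff *skb)
    {
        /* VLAN_HLEN (4) + sizeof(unsigned short) (2) = 6 bytes */
        return pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short));
    }
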
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index a01f500d6a6ba..afa2c5049845f 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -403,7 +403,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
+ struct nexthop_grp *nhg;
+ unsigned int i, j;
+
+- if (len & (sizeof(struct nexthop_grp) - 1)) {
++ if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid length for nexthop group attribute");
+ return -EINVAL;
+@@ -1105,6 +1105,9 @@ static struct nexthop *nexthop_create_group(struct net *net,
+ struct nexthop *nh;
+ int i;
+
++ if (WARN_ON(!num_nh))
++ return ERR_PTR(-EINVAL);
++
+ nh = nexthop_alloc();
+ if (!nh)
+ return ERR_PTR(-ENOMEM);
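
The nexthop validation now rejects a zero-length group attribute as
well as lengths that are not a multiple of the entry size, and
nexthop_create_group() gains a matching WARN_ON for a zero entry count.
A generalized sketch of the length check (names hypothetical; the
kernel code uses a bitmask instead, which assumes the entry size is a
power of two):

    #include <linux/types.h>

    static bool grp_attr_len_ok(size_t len, size_t entry_size)
    {
        /* non-empty and an exact multiple of one entry */
        return len != 0 && (len % entry_size) == 0;
    }
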
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index b5dd20c4599bb..8dcf7bacc99a6 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -860,7 +860,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
+ struct metadata_dst *tun_dst,
+ bool log_ecn_err)
+ {
+- return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
++ int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
++ const struct ipv6hdr *ipv6h,
++ struct sk_buff *skb);
++
++ dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
++ if (tpi->proto == htons(ETH_P_IP))
++ dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;
++
++ return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
+ log_ecn_err);
+ }
+ EXPORT_SYMBOL(ip6_tnl_rcv);
+diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
+index 409e79b84a830..6d0e942d082d4 100644
+--- a/net/ipv6/netfilter.c
++++ b/net/ipv6/netfilter.c
+@@ -245,9 +245,6 @@ static const struct nf_ipv6_ops ipv6ops = {
+ .route_input = ip6_route_input,
+ .fragment = ip6_fragment,
+ .reroute = nf_ip6_reroute,
+-#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+- .br_defrag = nf_ct_frag6_gather,
+-#endif
+ #if IS_MODULE(CONFIG_IPV6)
+ .br_fragment = br_ip6_fragment,
+ #endif
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 1ce1e710d0252..a699e318b9a01 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -547,23 +547,25 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
+ */
+ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
+ {
++ u32 min_port;
+ int rc;
+
+ mutex_lock(&qrtr_port_lock);
+ if (!*port) {
+- rc = idr_alloc(&qrtr_ports, ipc,
+- QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
+- GFP_ATOMIC);
+- if (rc >= 0)
+- *port = rc;
++ min_port = QRTR_MIN_EPH_SOCKET;
++ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC);
++ if (!rc)
++ *port = min_port;
+ } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
+ rc = -EACCES;
+ } else if (*port == QRTR_PORT_CTRL) {
+- rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
++ min_port = 0;
++ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC);
+ } else {
+- rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
+- if (rc >= 0)
+- *port = rc;
++ min_port = *port;
++ rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC);
++ if (!rc)
++ *port = min_port;
+ }
+ mutex_unlock(&qrtr_port_lock);
+
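
qrtr switches to idr_alloc_u32() because idr_alloc() traffics in signed
int: the allocated id is its return value, so ids above INT_MAX cannot
be represented. With idr_alloc_u32() the id travels through a u32
pointer and the return value only carries success or an errno. Usage in
isolation (wrapper name hypothetical):

    #include <linux/gfp.h>
    #include <linux/idr.h>

    static int assign_port(struct idr *idr, void *ptr, u32 *id, u32 max)
    {
        /* *id holds the minimum on entry and the allocated id on
         * return; 0 on success, -ENOSPC/-ENOMEM on failure. */
        return idr_alloc_u32(idr, ptr, id, max, GFP_ATOMIC);
    }
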
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index e0060aefbf9d8..e32c4732ddf83 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -186,7 +186,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
+ memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+ err = nf_ct_frag6_gather(net, skb, user);
+ if (err && err != -EINPROGRESS)
+- goto out_free;
++ return err;
+ #else
+ err = -EOPNOTSUPP;
+ goto out_free;
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index e13cbd5c01932..cd20638b61514 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -88,12 +88,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
+ int ret;
+
+ if (outcnt <= stream->outcnt)
+- return 0;
++ goto out;
+
+ ret = genradix_prealloc(&stream->out, outcnt, gfp);
+ if (ret)
+ return ret;
+
++out:
+ stream->outcnt = outcnt;
+ return 0;
+ }
+@@ -104,12 +105,13 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
+ int ret;
+
+ if (incnt <= stream->incnt)
+- return 0;
++ goto out;
+
+ ret = genradix_prealloc(&stream->in, incnt, gfp);
+ if (ret)
+ return ret;
+
++out:
+ stream->incnt = incnt;
+ return 0;
+ }
+diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
+index e1f64f4ba2361..da9ba6d1679b7 100644
+--- a/net/smc/smc_diag.c
++++ b/net/smc/smc_diag.c
+@@ -170,13 +170,15 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
+ (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
+ !list_empty(&smc->conn.lgr->list)) {
+ struct smc_connection *conn = &smc->conn;
+- struct smcd_diag_dmbinfo dinfo = {
+- .linkid = *((u32 *)conn->lgr->id),
+- .peer_gid = conn->lgr->peer_gid,
+- .my_gid = conn->lgr->smcd->local_gid,
+- .token = conn->rmb_desc->token,
+- .peer_token = conn->peer_token
+- };
++ struct smcd_diag_dmbinfo dinfo;
++
++ memset(&dinfo, 0, sizeof(dinfo));
++
++ dinfo.linkid = *((u32 *)conn->lgr->id);
++ dinfo.peer_gid = conn->lgr->peer_gid;
++ dinfo.my_gid = conn->lgr->smcd->local_gid;
++ dinfo.token = conn->rmb_desc->token;
++ dinfo.peer_token = conn->peer_token;
+
+ if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
+ goto errout;
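
The smc_diag change replaces a designated initializer with memset() plus
field assignments: initializers zero the named members but leave padding
bytes unspecified, and since the structure is copied to userspace via
netlink, stale padding would leak kernel memory. The pattern with a
hypothetical structure:

    #include <linux/string.h>
    #include <linux/types.h>

    struct demo_info {
        u32 linkid;     /* 4 bytes of padding follow on 64-bit */
        u64 token;
    };

    static void fill_demo_info(struct demo_info *di)
    {
        memset(di, 0, sizeof(*di));     /* clears padding too */
        di->linkid = 1;
        di->token  = 2;
    }
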
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index d4d2928424e2f..11be9a84f8de9 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -255,8 +255,9 @@ err_out:
+ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ struct tipc_nl_compat_msg *msg)
+ {
+- int err;
++ struct nlmsghdr *nlh;
+ struct sk_buff *arg;
++ int err;
+
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
+@@ -285,6 +286,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ return -ENOMEM;
+ }
+
++ nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI);
++ if (!nlh) {
++ kfree_skb(arg);
++ kfree_skb(msg->rep);
++ msg->rep = NULL;
++ return -EMSGSIZE;
++ }
++ nlmsg_end(arg, nlh);
++
+ err = __tipc_nl_compat_dumpit(cmd, msg, arg);
+ if (err) {
+ kfree_skb(msg->rep);
+diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
+index 342618a2bccb4..a6d0044328b1f 100644
+--- a/scripts/Makefile.lib
++++ b/scripts/Makefile.lib
+@@ -230,7 +230,7 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
+ # ---------------------------------------------------------------------------
+
+ quiet_cmd_gzip = GZIP $@
+- cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@
++ cmd_gzip = cat $(real-prereqs) | $(KGZIP) -n -f -9 > $@
+
+ # DTC
+ # ---------------------------------------------------------------------------
+@@ -322,19 +322,19 @@ printf "%08x\n" $$dec_size | \
+ )
+
+ quiet_cmd_bzip2 = BZIP2 $@
+- cmd_bzip2 = { cat $(real-prereqs) | bzip2 -9; $(size_append); } > $@
++ cmd_bzip2 = { cat $(real-prereqs) | $(KBZIP2) -9; $(size_append); } > $@
+
+ # Lzma
+ # ---------------------------------------------------------------------------
+
+ quiet_cmd_lzma = LZMA $@
+- cmd_lzma = { cat $(real-prereqs) | lzma -9; $(size_append); } > $@
++ cmd_lzma = { cat $(real-prereqs) | $(LZMA) -9; $(size_append); } > $@
+
+ quiet_cmd_lzo = LZO $@
+- cmd_lzo = { cat $(real-prereqs) | lzop -9; $(size_append); } > $@
++ cmd_lzo = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@
+
+ quiet_cmd_lz4 = LZ4 $@
+- cmd_lz4 = { cat $(real-prereqs) | lz4c -l -c1 stdin stdout; \
++ cmd_lz4 = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \
+ $(size_append); } > $@
+
+ # U-Boot mkimage
+@@ -381,7 +381,7 @@ quiet_cmd_xzkern = XZKERN $@
+ $(size_append); } > $@
+
+ quiet_cmd_xzmisc = XZMISC $@
+- cmd_xzmisc = cat $(real-prereqs) | xz --check=crc32 --lzma2=dict=1MiB > $@
++ cmd_xzmisc = cat $(real-prereqs) | $(XZ) --check=crc32 --lzma2=dict=1MiB > $@
+
+ # ASM offsets
+ # ---------------------------------------------------------------------------
+diff --git a/scripts/Makefile.package b/scripts/Makefile.package
+index 56eadcc48d46d..35a617c296115 100644
+--- a/scripts/Makefile.package
++++ b/scripts/Makefile.package
+@@ -45,7 +45,7 @@ if test "$(objtree)" != "$(srctree)"; then \
+ false; \
+ fi ; \
+ $(srctree)/scripts/setlocalversion --save-scmversion; \
+-tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
++tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \
+ --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
+ rm -f $(objtree)/.scmversion
+
+@@ -127,9 +127,9 @@ util/PERF-VERSION-GEN $(CURDIR)/$(perf-tar)/); \
+ tar rf $(perf-tar).tar $(perf-tar)/HEAD $(perf-tar)/PERF-VERSION-FILE; \
+ rm -r $(perf-tar); \
+ $(if $(findstring tar-src,$@),, \
+-$(if $(findstring bz2,$@),bzip2, \
+-$(if $(findstring gz,$@),gzip, \
+-$(if $(findstring xz,$@),xz, \
++$(if $(findstring bz2,$@),$(KBZIP2), \
++$(if $(findstring gz,$@),$(KGZIP), \
++$(if $(findstring xz,$@),$(XZ), \
+ $(error unknown target $@)))) \
+ -f -9 $(perf-tar).tar)
+
+diff --git a/scripts/package/buildtar b/scripts/package/buildtar
+index 2f66c81e4021b..3d541cee16ed0 100755
+--- a/scripts/package/buildtar
++++ b/scripts/package/buildtar
+@@ -28,15 +28,15 @@ case "${1}" in
+ opts=
+ ;;
+ targz-pkg)
+- opts=--gzip
++ opts="-I ${KGZIP}"
+ tarball=${tarball}.gz
+ ;;
+ tarbz2-pkg)
+- opts=--bzip2
++ opts="-I ${KBZIP2}"
+ tarball=${tarball}.bz2
+ ;;
+ tarxz-pkg)
+- opts=--xz
++ opts="-I ${XZ}"
+ tarball=${tarball}.xz
+ ;;
+ *)
+diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
+index 7a2d372f4885a..76e9cbcfbeab4 100755
+--- a/scripts/xz_wrap.sh
++++ b/scripts/xz_wrap.sh
+@@ -20,4 +20,4 @@ case $SRCARCH in
+ sparc) BCJ=--sparc ;;
+ esac
+
+-exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
++exec $XZ --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
+diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
+index 5b888b795f7ee..c07a9e735733a 100644
+--- a/sound/pci/cs46xx/cs46xx_lib.c
++++ b/sound/pci/cs46xx/cs46xx_lib.c
+@@ -766,7 +766,7 @@ static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned
+ rate = 48000 / 9;
+
+ /*
+- * We can not capture at at rate greater than the Input Rate (48000).
++ * We can not capture at a rate greater than the Input Rate (48000).
+ * Return an error if an attempt is made to stray outside that limit.
+ */
+ if (rate > 48000)
+diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c
+index 715ead59613da..0bef823c5f61f 100644
+--- a/sound/pci/cs46xx/dsp_spos_scb_lib.c
++++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c
+@@ -1716,7 +1716,7 @@ int cs46xx_iec958_pre_open (struct snd_cs46xx *chip)
+ struct dsp_spos_instance * ins = chip->dsp_spos_instance;
+
+ if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) {
+- /* remove AsynchFGTxSCB and and PCMSerialInput_II */
++ /* remove AsynchFGTxSCB and PCMSerialInput_II */
+ cs46xx_dsp_disable_spdif_out (chip);
+
+ /* save state */
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 801abf0fc98b3..103011e7285a3 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -3420,7 +3420,7 @@ EXPORT_SYMBOL_GPL(snd_hda_set_power_save);
+ * @nid: NID to check / update
+ *
+ * Check whether the given NID is in the amp list. If it's in the list,
+- * check the current AMP status, and update the the power-status according
++ * check the current AMP status, and update the power-status according
+ * to the mute status.
+ *
+ * This function is supposed to be set or called from the check_power_status
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 6815f9dc8545d..e1750bdbe51f6 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -813,7 +813,7 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
+ }
+ }
+
+-/* sync power of each widget in the the given path */
++/* sync power of each widget in the given path */
+ static hda_nid_t path_power_update(struct hda_codec *codec,
+ struct nid_path *path,
+ bool allow_powerdown)
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 7353d2ec359ae..3a456410937b5 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2671,6 +2671,8 @@ static const struct pci_device_id azx_ids[] = {
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
+ /* Zhaoxin */
+ { PCI_DEVICE(0x1d17, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN },
++ /* Loongson */
++ { PCI_DEVICE(0x0014, 0x7a07), .driver_data = AZX_DRIVER_GENERIC },
+ { 0, }
+ };
+ MODULE_DEVICE_TABLE(pci, azx_ids);
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 908b68fda24c9..ec9460f3a288e 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -176,6 +176,7 @@ struct hdmi_spec {
+ bool use_jack_detect; /* jack detection enabled */
+ bool use_acomp_notifier; /* use eld_notify callback for hotplug */
+ bool acomp_registered; /* audio component registered in this driver */
++ bool force_connect; /* force connectivity */
+ struct drm_audio_component_audio_ops drm_audio_ops;
+ int (*port2pin)(struct hda_codec *, int); /* reverse port/pin mapping */
+
+@@ -1711,7 +1712,8 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
+ * all device entries on the same pin
+ */
+ config = snd_hda_codec_get_pincfg(codec, pin_nid);
+- if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
++ if (get_defcfg_connect(config) == AC_JACK_PORT_NONE &&
++ !spec->force_connect)
+ return 0;
+
+ /*
+@@ -1815,11 +1817,19 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
+ return 0;
+ }
+
++static const struct snd_pci_quirk force_connect_list[] = {
++ SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
++ SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
++ {}
++};
++
+ static int hdmi_parse_codec(struct hda_codec *codec)
+ {
++ struct hdmi_spec *spec = codec->spec;
+ hda_nid_t start_nid;
+ unsigned int caps;
+ int i, nodes;
++ const struct snd_pci_quirk *q;
+
+ nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid);
+ if (!start_nid || nodes < 0) {
+@@ -1827,6 +1837,11 @@ static int hdmi_parse_codec(struct hda_codec *codec)
+ return -EINVAL;
+ }
+
++ q = snd_pci_quirk_lookup(codec->bus->pci, force_connect_list);
++
++ if (q && q->value)
++ spec->force_connect = true;
++
+ /*
+ * hdmi_add_pin() assumes total amount of converters to
+ * be known, so first discover all converters
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 06bbcfbb28153..d1b74c7cacd76 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6137,6 +6137,7 @@ enum {
+ ALC269_FIXUP_CZC_L101,
+ ALC269_FIXUP_LEMOTE_A1802,
+ ALC269_FIXUP_LEMOTE_A190X,
++ ALC256_FIXUP_INTEL_NUC8_RUGGED,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7458,6 +7459,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ },
+ .chain_id = ALC269_FIXUP_DMIC,
+ },
++ [ALC256_FIXUP_INTEL_NUC8_RUGGED] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x1b, 0x01a1913c }, /* use as headset mic, without its own jack detect */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_HEADSET_MODE
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7757,6 +7767,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+ SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
++ SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
+
+ #if 0
+ /* Below is a quirk table taken from the old code.
+@@ -7928,6 +7939,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
+ {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
+ {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
++ {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
+ {}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 4b9300babc7d0..bfd3fe5eff31c 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -832,7 +832,7 @@ static int stac_auto_create_beep_ctls(struct hda_codec *codec,
+ static struct snd_kcontrol_new beep_vol_ctl =
+ HDA_CODEC_VOLUME(NULL, 0, 0, 0);
+
+- /* check for mute support for the the amp */
++ /* check for mute support for the amp */
+ if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) {
+ const struct snd_kcontrol_new *temp;
+ if (spec->anabeep_nid == nid)
+diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c
+index 98f8ac6587962..243f757da3edb 100644
+--- a/sound/pci/ice1712/prodigy192.c
++++ b/sound/pci/ice1712/prodigy192.c
+@@ -32,7 +32,7 @@
+ * Experimentally I found out that only a combination of
+ * OCKS0=1, OCKS1=1 (128fs, 64fs output) and ice1724 -
+ * VT1724_MT_I2S_MCLK_128X=0 (256fs input) yields correct
+- * sampling rate. That means the the FPGA doubles the
++ * sampling rate. That means that the FPGA doubles the
+ * MCK01 rate.
+ *
+ * Copyright (c) 2003 Takashi Iwai <tiwai@suse.de>
+diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
+index c3f8721624cd4..b90421a1d909a 100644
+--- a/sound/pci/oxygen/xonar_dg.c
++++ b/sound/pci/oxygen/xonar_dg.c
+@@ -29,7 +29,7 @@
+ * GPIO 4 <- headphone detect
+ * GPIO 5 -> enable ADC analog circuit for the left channel
+ * GPIO 6 -> enable ADC analog circuit for the right channel
+- * GPIO 7 -> switch green rear output jack between CS4245 and and the first
++ * GPIO 7 -> switch green rear output jack between CS4245 and the first
+ * channel of CS4361 (mechanical relay)
+ * GPIO 8 -> enable output to speakers
+ *
+diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
+index 18535b326680a..04f23477039a5 100644
+--- a/sound/soc/codecs/wm8958-dsp2.c
++++ b/sound/soc/codecs/wm8958-dsp2.c
+@@ -416,8 +416,12 @@ int wm8958_aif_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+ {
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
++ struct wm8994 *control = dev_get_drvdata(component->dev->parent);
+ int i;
+
++ if (control->type != WM8958)
++ return 0;
++
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ case SND_SOC_DAPM_PRE_PMU:
+diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c
+index 869fe0068cbd3..bb668551dd4b2 100644
+--- a/sound/soc/img/img-i2s-in.c
++++ b/sound/soc/img/img-i2s-in.c
+@@ -343,8 +343,10 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ chan_control_mask = IMG_I2S_IN_CH_CTL_CLK_TRANS_MASK;
+
+ ret = pm_runtime_get_sync(i2s->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(i2s->dev);
+ return ret;
++ }
+
+ for (i = 0; i < i2s->active_channels; i++)
+ img_i2s_in_ch_disable(i2s, i);
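
This img-i2s-in fix, and the three hunks that follow for
img-parallel-out, tegra30_ahub, and tegra30_i2s, all correct the same
mistake: pm_runtime_get_sync() increments the device usage count even
when it fails, so the error path must drop the reference or the count
leaks. A minimal sketch of the corrected shape:

    #include <linux/pm_runtime.h>

    static int resume_and_use(struct device *dev)
    {
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0) {
            /* undo the usage-count bump taken even on failure */
            pm_runtime_put_noidle(dev);
            return ret;
        }
        /* ... touch the hardware ... */
        pm_runtime_put(dev);
        return 0;
    }
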
+diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c
+index 5ddbe3a31c2e9..4da49a42e8547 100644
+--- a/sound/soc/img/img-parallel-out.c
++++ b/sound/soc/img/img-parallel-out.c
+@@ -163,8 +163,10 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ }
+
+ ret = pm_runtime_get_sync(prl->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(prl->dev);
+ return ret;
++ }
+
+ reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL);
+ reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set;
+diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
+index 635eacbd28d47..156e3b9d613c6 100644
+--- a/sound/soc/tegra/tegra30_ahub.c
++++ b/sound/soc/tegra/tegra30_ahub.c
+@@ -643,8 +643,10 @@ static int tegra30_ahub_resume(struct device *dev)
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put(dev);
+ return ret;
++ }
+ ret = regcache_sync(ahub->regmap_ahub);
+ ret |= regcache_sync(ahub->regmap_apbif);
+ pm_runtime_put(dev);
+diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
+index e6d548fa980b6..8894b7c16a01a 100644
+--- a/sound/soc/tegra/tegra30_i2s.c
++++ b/sound/soc/tegra/tegra30_i2s.c
+@@ -538,8 +538,10 @@ static int tegra30_i2s_resume(struct device *dev)
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put(dev);
+ return ret;
++ }
+ ret = regcache_sync(i2s->regmap);
+ pm_runtime_put(dev);
+
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 1573229d8cf4c..8c3b3a291ddbf 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2695,6 +2695,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 0,
++ .type = QUIRK_AUDIO_STANDARD_MIXER,
++ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+@@ -2707,6 +2711,32 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x01,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC,
++ .datainterval = 1,
++ .maxpacksize = 0x024c,
++ .rates = SNDRV_PCM_RATE_44100 |
++ SNDRV_PCM_RATE_48000,
++ .rate_min = 44100,
++ .rate_max = 48000,
++ .nr_rates = 2,
++ .rate_table = (unsigned int[]) {
++ 44100, 48000
++ }
++ }
++ },
++ {
++ .ifnum = 0,
++ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
++ .data = &(const struct audioformat) {
++ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
++ .channels = 2,
++ .iface = 0,
++ .altsetting = 1,
++ .altset_idx = 1,
++ .attributes = 0,
++ .endpoint = 0x82,
++ .ep_attr = USB_ENDPOINT_XFER_ISOC,
++ .datainterval = 1,
++ .maxpacksize = 0x0126,
+ .rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000,
+ .rate_min = 44100,
+@@ -3675,8 +3705,8 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
+ * they pretend to be 96kHz mono as a workaround for stereo being broken
+ * by that...
+ *
+- * They also have swapped L-R channels, but that's for userspace to deal
+- * with.
++ * They also have an issue with initial stream alignment that causes the
++ * channels to be swapped and out of phase, which is dealt with in quirks.c.
+ */
+ {
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh
+index 18c5de53558af..bf361f30d6ef9 100755
+--- a/tools/testing/selftests/net/icmp_redirect.sh
++++ b/tools/testing/selftests/net/icmp_redirect.sh
+@@ -180,6 +180,8 @@ setup()
+ ;;
+ r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1
+ ip netns exec $ns sysctl -q -w net.ipv4.conf.all.send_redirects=1
++ ip netns exec $ns sysctl -q -w net.ipv4.conf.default.rp_filter=0
++ ip netns exec $ns sysctl -q -w net.ipv4.conf.all.rp_filter=0
+
+ ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1
+ ip netns exec $ns sysctl -q -w net.ipv6.route.mtu_expires=10
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
+index a2d7b0e3dca97..a26ac122c759f 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
+@@ -91,8 +91,6 @@ int back_to_back_ebbs(void)
+ ebb_global_disable();
+ ebb_freeze_pmcs();
+
+- count_pmc(1, sample_period);
+-
+ dump_ebb_state();
+
+ event_close(&event);
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
+index bc893813483ee..bb9f587fa76e8 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
+@@ -42,8 +42,6 @@ int cycles(void)
+ ebb_global_disable();
+ ebb_freeze_pmcs();
+
+- count_pmc(1, sample_period);
+-
+ dump_ebb_state();
+
+ event_close(&event);
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
+index dcd351d203289..9ae795ce314e6 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
+@@ -99,8 +99,6 @@ int cycles_with_freeze(void)
+ ebb_global_disable();
+ ebb_freeze_pmcs();
+
+- count_pmc(1, sample_period);
+-
+ dump_ebb_state();
+
+ printf("EBBs while frozen %d\n", ebbs_while_frozen);
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
+index 94c99c12c0f23..4b45a2e70f62b 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
+@@ -71,8 +71,6 @@ int cycles_with_mmcr2(void)
+ ebb_global_disable();
+ ebb_freeze_pmcs();
+
+- count_pmc(1, sample_period);
+-
+ dump_ebb_state();
+
+ event_close(&event);
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
+index dfbc5c3ad52d7..21537d6eb6b7d 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
+@@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe)
+ ebb_global_disable();
+ ebb_freeze_pmcs();
+
+- count_pmc(1, sample_period);
+-
+ dump_ebb_state();
+
+ event_close(&event);
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
+index ca2f7d729155b..b208bf6ad58d3 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
+@@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe)
+ ebb_global_disable();
+ ebb_freeze_pmcs();
+
+- count_pmc(1, sample_period);
+-
+ dump_ebb_state();
+
+ FAIL_IF(ebb_state.stats.ebb_count == 0);
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
+index ac3e6e182614a..ba2681a12cc7b 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
+@@ -75,7 +75,6 @@ static int test_body(void)
+ ebb_freeze_pmcs();
+ ebb_global_disable();
+
+- count_pmc(4, sample_period);
+ mtspr(SPRN_PMC4, 0xdead);
+
+ dump_summary_ebb_state();
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
+index b8242e9d97d2d..791d37ba327b5 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
+@@ -70,13 +70,6 @@ int multi_counter(void)
+ ebb_global_disable();
+ ebb_freeze_pmcs();
+
+- count_pmc(1, sample_period);
+- count_pmc(2, sample_period);
+- count_pmc(3, sample_period);
+- count_pmc(4, sample_period);
+- count_pmc(5, sample_period);
+- count_pmc(6, sample_period);
+-
+ dump_ebb_state();
+
+ for (i = 0; i < 6; i++)
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
+index a05c0e18ded63..9b0f70d597020 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
+@@ -61,8 +61,6 @@ static int cycles_child(void)
+ ebb_global_disable();
+ ebb_freeze_pmcs();
+
+- count_pmc(1, sample_period);
+-
+ dump_summary_ebb_state();
+
+ event_close(&event);
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
+index 153ebc92234fd..2904c741e04e5 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
+@@ -82,8 +82,6 @@ static int test_body(void)
+ ebb_global_disable();
+ ebb_freeze_pmcs();
+
+- count_pmc(1, sample_period);
+-
+ dump_ebb_state();
+
+ if (mmcr0_mismatch)
+diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+index eadad75ed7e6f..b29f8ba22d1e6 100644
+--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
++++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+@@ -76,8 +76,6 @@ int pmc56_overflow(void)
+ ebb_global_disable();
+ ebb_freeze_pmcs();
+
+- count_pmc(2, sample_period);
+-
+ dump_ebb_state();
+
+ printf("PMC5/6 overflow %d\n", pmc56_overflowed);