From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Thu, 12 Sep 2024 12:42:52 +0000 (UTC)
Message-ID: <1726144958.dbf9d5158e0707e771e47bd7a23f9fe3991c70bb.mpagano@gentoo>

commit:     dbf9d5158e0707e771e47bd7a23f9fe3991c70bb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 12 12:42:38 2024 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep 12 12:42:38 2024 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dbf9d515

Linux patch 5.10.226

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1225_linux-5.10.226.patch | 8691 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8695 insertions(+)

diff --git a/0000_README b/0000_README
index ad8e0d5b..695208ee 100644
--- a/0000_README
+++ b/0000_README
@@ -943,6 +943,10 @@ Patch:  1224_linux-5.10.225.patch
 From:   https://www.kernel.org
 Desc:   Linux 5.10.225
 
+Patch:  1225_linux-5.10.226.patch
+From:   https://www.kernel.org
+Desc:   Linux 5.10.226
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1225_linux-5.10.226.patch b/1225_linux-5.10.226.patch
new file mode 100644
index 00000000..7df9b67b
--- /dev/null
+++ b/1225_linux-5.10.226.patch
@@ -0,0 +1,8691 @@
+diff --git a/Documentation/locking/hwspinlock.rst b/Documentation/locking/hwspinlock.rst
+index 6f03713b70039..2ffaa3cbd63f1 100644
+--- a/Documentation/locking/hwspinlock.rst
++++ b/Documentation/locking/hwspinlock.rst
+@@ -85,6 +85,17 @@ is already free).
+ 
+ Should be called from a process context (might sleep).
+ 
++::
++
++  int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
++
++After verifying the owner of the hwspinlock, release a previously acquired
++hwspinlock; returns 0 on success, or an appropriate error code on failure
++(e.g. -EOPNOTSUPP if the bust operation is not defined for the specific
++hwspinlock).
++
++Should be called from a process context (might sleep).
++
+ ::
+ 
+   int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
+diff --git a/Makefile b/Makefile
+index 30918576f9de4..cf232897553bf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 225
++SUBLEVEL = 226
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
+index bd68e1b7f29f3..702587fda70cf 100644
+--- a/arch/arm64/include/asm/acpi.h
++++ b/arch/arm64/include/asm/acpi.h
+@@ -97,6 +97,18 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
+ 	return	acpi_cpu_get_madt_gicc(cpu)->uid;
+ }
+ 
++static inline int get_cpu_for_acpi_id(u32 uid)
++{
++	int cpu;
++
++	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
++		if (acpi_cpu_get_madt_gicc(cpu) &&
++		    uid == get_acpi_id_for_cpu(cpu))
++			return cpu;
++
++	return -EINVAL;
++}
++
+ static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+ void __init acpi_init_cpus(void);
+ int apei_claim_sea(struct pt_regs *regs);
+diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
+index 048b75cadd2fd..c5feac18c238a 100644
+--- a/arch/arm64/kernel/acpi_numa.c
++++ b/arch/arm64/kernel/acpi_numa.c
+@@ -34,17 +34,6 @@ int __init acpi_numa_get_nid(unsigned int cpu)
+ 	return acpi_early_node_map[cpu];
+ }
+ 
+-static inline int get_cpu_for_acpi_id(u32 uid)
+-{
+-	int cpu;
+-
+-	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+-		if (uid == get_acpi_id_for_cpu(cpu))
+-			return cpu;
+-
+-	return -EINVAL;
+-}
+-
+ static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
+ 				      const unsigned long end)
+ {
+diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
+index 995ad9e69ded3..23207516015cc 100644
+--- a/arch/mips/kernel/cevt-r4k.c
++++ b/arch/mips/kernel/cevt-r4k.c
+@@ -307,13 +307,6 @@ int r4k_clockevent_init(void)
+ 	if (!c0_compare_int_usable())
+ 		return -ENXIO;
+ 
+-	/*
+-	 * With vectored interrupts things are getting platform specific.
+-	 * get_c0_compare_int is a hook to allow a platform to return the
+-	 * interrupt number of its liking.
+-	 */
+-	irq = get_c0_compare_int();
+-
+ 	cd = &per_cpu(mips_clockevent_device, cpu);
+ 
+ 	cd->name		= "MIPS";
+@@ -324,7 +317,6 @@ int r4k_clockevent_init(void)
+ 	min_delta		= calculate_min_delta();
+ 
+ 	cd->rating		= 300;
+-	cd->irq			= irq;
+ 	cd->cpumask		= cpumask_of(cpu);
+ 	cd->set_next_event	= mips_next_event;
+ 	cd->event_handler	= mips_event_handler;
+@@ -336,6 +328,13 @@ int r4k_clockevent_init(void)
+ 
+ 	cp0_timer_irq_installed = 1;
+ 
++	/*
++	 * With vectored interrupts things are getting platform specific.
++	 * get_c0_compare_int is a hook to allow a platform to return the
++	 * interrupt number of its liking.
++	 */
++	irq = get_c0_compare_int();
++
+ 	if (request_irq(irq, c0_compare_interrupt, flags, "timer",
+ 			c0_compare_interrupt))
+ 		pr_err("Failed to request irq %d (timer)\n", irq);
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 1c65c38ec9a3e..c4bf95371f493 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -69,6 +69,15 @@ SECTIONS
+ 	. = ALIGN(PAGE_SIZE);
+ 	__end_ro_after_init = .;
+ 
++	.data.rel.ro : {
++		*(.data.rel.ro .data.rel.ro.*)
++	}
++	.got : {
++		__got_start = .;
++		*(.got)
++		__got_end = .;
++	}
++
+ 	RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
+ 	BOOT_DATA_PRESERVED
+ 
+diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
+index 37e96ba0f5fb1..d2beb4a497a2a 100644
+--- a/arch/um/drivers/line.c
++++ b/arch/um/drivers/line.c
+@@ -378,6 +378,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ 			parse_chan_pair(NULL, line, n, opts, error_out);
+ 			err = 0;
+ 		}
++		*error_out = "configured as 'none'";
+ 	} else {
+ 		char *new = kstrdup(init, GFP_KERNEL);
+ 		if (!new) {
+@@ -401,6 +402,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ 			}
+ 		}
+ 		if (err) {
++			*error_out = "failed to parse channel pair";
+ 			line->init_str = NULL;
+ 			line->valid = 0;
+ 			kfree(new);
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index 50e31d14351bf..85289c8f21db8 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+  *
+  * Returns a pointer to a PTE on success, or NULL on failure.
+  */
+-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
++static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
+ {
+ 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+ 	pmd_t *pmd;
+@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+ 	if (!pmd)
+ 		return NULL;
+ 
+-	/* We can't do anything sensible if we hit a large mapping. */
++	/* Large PMD mapping found */
+ 	if (pmd_large(*pmd)) {
+-		WARN_ON(1);
+-		return NULL;
++		/* Clear the PMD if we hit a large mapping from the first round */
++		if (late_text) {
++			set_pmd(pmd, __pmd(0));
++		} else {
++			WARN_ON_ONCE(1);
++			return NULL;
++		}
+ 	}
+ 
+ 	if (pmd_none(*pmd)) {
+@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void)
+ 	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+ 		return;
+ 
+-	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
++	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
+ 	if (WARN_ON(!target_pte))
+ 		return;
+ 
+@@ -301,7 +306,7 @@ enum pti_clone_level {
+ 
+ static void
+ pti_clone_pgtable(unsigned long start, unsigned long end,
+-		  enum pti_clone_level level)
++		  enum pti_clone_level level, bool late_text)
+ {
+ 	unsigned long addr;
+ 
+@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
+ 				return;
+ 
+ 			/* Allocate PTE in the user page-table */
+-			target_pte = pti_user_pagetable_walk_pte(addr);
++			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
+ 			if (WARN_ON(!target_pte))
+ 				return;
+ 
+@@ -453,7 +458,7 @@ static void __init pti_clone_user_shared(void)
+ 		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
+ 		pte_t *target_pte;
+ 
+-		target_pte = pti_user_pagetable_walk_pte(va);
++		target_pte = pti_user_pagetable_walk_pte(va, false);
+ 		if (WARN_ON(!target_pte))
+ 			return;
+ 
+@@ -476,7 +481,7 @@ static void __init pti_clone_user_shared(void)
+ 	start = CPU_ENTRY_AREA_BASE;
+ 	end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
+ 
+-	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
++	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
+ }
+ #endif /* CONFIG_X86_64 */
+ 
+@@ -493,11 +498,11 @@ static void __init pti_setup_espfix64(void)
+ /*
+  * Clone the populated PMDs of the entry text and force it RO.
+  */
+-static void pti_clone_entry_text(void)
++static void pti_clone_entry_text(bool late)
+ {
+ 	pti_clone_pgtable((unsigned long) __entry_text_start,
+ 			  (unsigned long) __entry_text_end,
+-			  PTI_LEVEL_KERNEL_IMAGE);
++			  PTI_LEVEL_KERNEL_IMAGE, late);
+ }
+ 
+ /*
+@@ -572,7 +577,7 @@ static void pti_clone_kernel_text(void)
+ 	 * pti_set_kernel_image_nonglobal() did to clear the
+ 	 * global bit.
+ 	 */
+-	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
++	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
+ 
+ 	/*
+ 	 * pti_clone_pgtable() will set the global bit in any PMDs
+@@ -639,8 +644,15 @@ void __init pti_init(void)
+ 
+ 	/* Undo all global bits from the init pagetables in head_64.S: */
+ 	pti_set_kernel_image_nonglobal();
++
+ 	/* Replace some of the global bits just for shared entry text: */
+-	pti_clone_entry_text();
++	/*
++	 * This is very early in boot. Device and Late initcalls can do
++	 * modprobe before free_initmem() and mark_readonly(). This
++	 * pti_clone_entry_text() allows those user-mode-helpers to function,
++	 * but notably the text is still RW.
++	 */
++	pti_clone_entry_text(false);
+ 	pti_setup_espfix64();
+ 	pti_setup_vsyscall();
+ }
+@@ -657,10 +669,11 @@ void pti_finalize(void)
+ 	if (!boot_cpu_has(X86_FEATURE_PTI))
+ 		return;
+ 	/*
+-	 * We need to clone everything (again) that maps parts of the
+-	 * kernel image.
++	 * This is after free_initmem() (all initcalls are done) and we've done
++	 * mark_readonly(). Text is now NX which might've split some PMDs
++	 * relative to the early clone.
+ 	 */
+-	pti_clone_entry_text();
++	pti_clone_entry_text(true);
+ 	pti_clone_kernel_text();
+ 
+ 	debug_checkwx_user();
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index a4cfc97275df6..a5fd04db5ae8e 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -216,6 +216,7 @@ bool bio_integrity_prep(struct bio *bio)
+ 	unsigned int bytes, offset, i;
+ 	unsigned int intervals;
+ 	blk_status_t status;
++	gfp_t gfp = GFP_NOIO;
+ 
+ 	if (!bi)
+ 		return true;
+@@ -238,12 +239,20 @@ bool bio_integrity_prep(struct bio *bio)
+ 		if (!bi->profile->generate_fn ||
+ 		    !(bi->flags & BLK_INTEGRITY_GENERATE))
+ 			return true;
++
++		/*
++		 * Zero the memory allocated to not leak uninitialized kernel
++		 * memory to disk.  For PI this only affects the app tag, but
++		 * for non-integrity metadata it affects the entire metadata
++		 * buffer.
++		 */
++		gfp |= __GFP_ZERO;
+ 	}
+ 	intervals = bio_integrity_intervals(bi, bio_sectors(bio));
+ 
+ 	/* Allocate kernel buffer for protection data */
+ 	len = intervals * bi->tuple_size;
+-	buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
++	buf = kmalloc(len, gfp | q->bounce_gfp);
+ 	status = BLK_STS_RESOURCE;
+ 	if (unlikely(buf == NULL)) {
+ 		printk(KERN_ERR "could not allocate integrity buffer\n");
+diff --git a/block/blk-integrity.c b/block/blk-integrity.c
+index 9e83159f5a527..2bcf3760538c2 100644
+--- a/block/blk-integrity.c
++++ b/block/blk-integrity.c
+@@ -431,8 +431,6 @@ void blk_integrity_unregister(struct gendisk *disk)
+ 	if (!bi->profile)
+ 		return;
+ 
+-	/* ensure all bios are off the integrity workqueue */
+-	blk_flush_integrity();
+ 	blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
+ 	memset(bi, 0, sizeof(*bi));
+ }
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 2ee5e05a0d69e..707b2c37e5ee6 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -387,7 +387,7 @@ static int acpi_processor_add(struct acpi_device *device,
+ 
+ 	result = acpi_processor_get_info(device);
+ 	if (result) /* Processor is not physically present or unavailable */
+-		return 0;
++		goto err_clear_driver_data;
+ 
+ 	BUG_ON(pr->id >= nr_cpu_ids);
+ 
+@@ -402,7 +402,7 @@ static int acpi_processor_add(struct acpi_device *device,
+ 			"BIOS reported wrong ACPI id %d for the processor\n",
+ 			pr->id);
+ 		/* Give up, but do not abort the namespace scan. */
+-		goto err;
++		goto err_clear_driver_data;
+ 	}
+ 	/*
+ 	 * processor_device_array is not cleared on errors to allow buggy BIOS
+@@ -414,12 +414,12 @@ static int acpi_processor_add(struct acpi_device *device,
+ 	dev = get_cpu_device(pr->id);
+ 	if (!dev) {
+ 		result = -ENODEV;
+-		goto err;
++		goto err_clear_per_cpu;
+ 	}
+ 
+ 	result = acpi_bind_one(dev, device);
+ 	if (result)
+-		goto err;
++		goto err_clear_per_cpu;
+ 
+ 	pr->dev = dev;
+ 
+@@ -430,10 +430,11 @@ static int acpi_processor_add(struct acpi_device *device,
+ 	dev_err(dev, "Processor driver could not be attached\n");
+ 	acpi_unbind_one(dev);
+ 
+- err:
+-	free_cpumask_var(pr->throttling.shared_cpu_map);
+-	device->driver_data = NULL;
++ err_clear_per_cpu:
+ 	per_cpu(processors, pr->id) = NULL;
++ err_clear_driver_data:
++	device->driver_data = NULL;
++	free_cpumask_var(pr->throttling.shared_cpu_map);
+  err_free_pr:
+ 	kfree(pr);
+ 	return result;
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index cd3de4ec17670..eabb4c9d4718b 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -3530,6 +3530,7 @@ static void binder_transaction(struct binder_proc *proc,
+ 		 */
+ 		copy_size = object_offset - user_offset;
+ 		if (copy_size && (user_offset > object_offset ||
++				object_offset > tr->data_size ||
+ 				binder_alloc_copy_user_to_buffer(
+ 					&target_proc->alloc,
+ 					t->buffer, user_offset,
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 467fc8002c447..107c28ec23b8a 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -5429,8 +5429,10 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
+ 	}
+ 
+ 	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
+-	if (!dr)
++	if (!dr) {
++		kfree(host);
+ 		goto err_out;
++	}
+ 
+ 	devres_add(dev, dr);
+ 	dev_set_drvdata(dev, host);
+diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
+index e47a28271f5bb..ba8f0084075bd 100644
+--- a/drivers/ata/pata_macio.c
++++ b/drivers/ata/pata_macio.c
+@@ -540,7 +540,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ 
+ 		while (sg_len) {
+ 			/* table overflow should never happen */
+-			BUG_ON (pi++ >= MAX_DCMDS);
++			if (WARN_ON_ONCE(pi >= MAX_DCMDS))
++				return AC_ERR_SYSTEM;
+ 
+ 			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
+ 			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
+@@ -552,11 +553,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
+ 			addr += len;
+ 			sg_len -= len;
+ 			++table;
++			++pi;
+ 		}
+ 	}
+ 
+ 	/* Should never happen according to Tejun */
+-	BUG_ON(!pi);
++	if (WARN_ON_ONCE(!pi))
++		return AC_ERR_SYSTEM;
+ 
+ 	/* Convert the last command to an input/output */
+ 	table--;
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index 8a74008c13c44..e3a735d0213a8 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -577,6 +577,7 @@ void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+ 	grp->id = grp;
+ 	if (id)
+ 		grp->id = id;
++	grp->color = 0;
+ 
+ 	spin_lock_irqsave(&dev->devres_lock, flags);
+ 	add_dr(dev, &grp->node[0]);
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index cf265ab035ea9..095ad50fd363e 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -38,7 +38,7 @@
+ 
+ #define PLL_USER_CTL(p)		((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
+ # define PLL_POST_DIV_SHIFT	8
+-# define PLL_POST_DIV_MASK(p)	GENMASK((p)->width, 0)
++# define PLL_POST_DIV_MASK(p)	GENMASK((p)->width - 1, 0)
+ # define PLL_ALPHA_EN		BIT(24)
+ # define PLL_ALPHA_MODE		BIT(25)
+ # define PLL_VCO_SHIFT		20
+@@ -1321,8 +1321,8 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	}
+ 
+ 	return regmap_update_bits(regmap, PLL_USER_CTL(pll),
+-				  PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
+-				  val << PLL_POST_DIV_SHIFT);
++				  PLL_POST_DIV_MASK(pll) << pll->post_div_shift,
++				  val << pll->post_div_shift);
+ }
+ 
+ const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
+diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
+index 2cdc077a39f5d..9f0aeda4031ff 100644
+--- a/drivers/clocksource/timer-imx-tpm.c
++++ b/drivers/clocksource/timer-imx-tpm.c
+@@ -83,20 +83,28 @@ static u64 notrace tpm_read_sched_clock(void)
+ static int tpm_set_next_event(unsigned long delta,
+ 				struct clock_event_device *evt)
+ {
+-	unsigned long next, now;
++	unsigned long next, prev, now;
+ 
+-	next = tpm_read_counter();
+-	next += delta;
++	prev = tpm_read_counter();
++	next = prev + delta;
+ 	writel(next, timer_base + TPM_C0V);
+ 	now = tpm_read_counter();
+ 
++	/*
++	 * Need to wait CNT increase at least 1 cycle to make sure
++	 * the C0V has been updated into HW.
++	 */
++	if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
++		while (now == tpm_read_counter())
++			;
++
+ 	/*
+ 	 * NOTE: We observed in a very small probability, the bus fabric
+ 	 * contention between GPU and A7 may results a few cycles delay
+ 	 * of writing CNT registers which may cause the min_delta event got
+ 	 * missed, so we need add a ETIME check here in case it happened.
+ 	 */
+-	return (int)(next - now) <= 0 ? -ETIME : 0;
++	return (now - prev) >= delta ? -ETIME : 0;
+ }
+ 
+ static int tpm_set_state_oneshot(struct clock_event_device *evt)
+diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
+index b965f20174e3a..411f16c4de05a 100644
+--- a/drivers/clocksource/timer-of.c
++++ b/drivers/clocksource/timer-of.c
+@@ -25,10 +25,7 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
+ 
+ 	struct clock_event_device *clkevt = &to->clkevt;
+ 
+-	if (of_irq->percpu)
+-		free_percpu_irq(of_irq->irq, clkevt);
+-	else
+-		free_irq(of_irq->irq, clkevt);
++	free_irq(of_irq->irq, clkevt);
+ }
+ 
+ /**
+@@ -42,9 +39,6 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
+  * - Get interrupt number by name
+  * - Get interrupt number by index
+  *
+- * When the interrupt is per CPU, 'request_percpu_irq()' is called,
+- * otherwise 'request_irq()' is used.
+- *
+  * Returns 0 on success, < 0 otherwise
+  */
+ static __init int timer_of_irq_init(struct device_node *np,
+@@ -69,12 +63,9 @@ static __init int timer_of_irq_init(struct device_node *np,
+ 		return -EINVAL;
+ 	}
+ 
+-	ret = of_irq->percpu ?
+-		request_percpu_irq(of_irq->irq, of_irq->handler,
+-				   np->full_name, clkevt) :
+-		request_irq(of_irq->irq, of_irq->handler,
+-			    of_irq->flags ? of_irq->flags : IRQF_TIMER,
+-			    np->full_name, clkevt);
++	ret = request_irq(of_irq->irq, of_irq->handler,
++			  of_irq->flags ? of_irq->flags : IRQF_TIMER,
++			  np->full_name, clkevt);
+ 	if (ret) {
+ 		pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
+ 		return ret;
+diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
+index a5478f3e8589d..01a2c6b7db065 100644
+--- a/drivers/clocksource/timer-of.h
++++ b/drivers/clocksource/timer-of.h
+@@ -11,7 +11,6 @@
+ struct of_timer_irq {
+ 	int irq;
+ 	int index;
+-	int percpu;
+ 	const char *name;
+ 	unsigned long flags;
+ 	irq_handler_t handler;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+index a4d65973bf7cf..80771b1480fff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+@@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
+ 	amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
+ 	amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
+ 	amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
++	res.clock = clock;
+ 
+ 	return res;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+index 469352e2d6ecf..436d436b2ea23 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+@@ -1626,6 +1626,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
+ 										(u32)le32_to_cpu(*((u32 *)reg_data + j));
+ 									j++;
+ 								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
++									if (i == 0)
++										continue;
+ 									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
+ 										reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
+ 								}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index 78ac6dbe70d84..854b218602574 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -213,6 +213,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ 		struct amdgpu_firmware_info *ucode;
+ 
+ 		id = fw_type_convert(cgs_device, type);
++		if (id >= AMDGPU_UCODE_ID_MAXIMUM)
++			return -EINVAL;
++
+ 		ucode = &adev->firmware.ucode[id];
+ 		if (ucode->fw == NULL)
+ 			return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 15ee13c3bd9e1..b78feb8ba01e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -260,7 +260,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
+ 	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
+ 	mutex_init(&ring->priority_mutex);
+ 
+-	if (!ring->no_scheduler) {
++	if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
+ 		hw_ip = ring->funcs->type;
+ 		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ 		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+@@ -368,8 +368,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
+ 					size_t size, loff_t *pos)
+ {
+ 	struct amdgpu_ring *ring = file_inode(f)->i_private;
+-	int r, i;
+ 	uint32_t value, result, early[3];
++	loff_t i;
++	int r;
+ 
+ 	if (*pos & 3 || size & 3)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+index d6f2951035959..ca4c915e3a6c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+@@ -404,6 +404,8 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
+ 	uint64_t retired_page;
+ 	uint32_t bp_idx, bp_cnt;
+ 
++	memset(&bp, 0, sizeof(bp));
++
+ 	if (bp_block_size) {
+ 		bp_cnt = bp_block_size / sizeof(uint64_t);
+ 		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
+@@ -550,7 +552,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
+ 
+ 	vf2pf_info->checksum =
+ 		amd_sriov_msg_checksum(
+-		vf2pf_info, vf2pf_info->header.size, 0, 0);
++		vf2pf_info, sizeof(*vf2pf_info), 0, 0);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+index d6aca1c080687..9587e8672a01c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
++++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+@@ -70,6 +70,8 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
+ 	int fb_channel_number;
+ 
+ 	fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
++	if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number))
++		fb_channel_number = 0;
+ 
+ 	return df_v1_7_channel_number[fb_channel_number];
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+index eadc9526d33fe..b81572dc115f7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+@@ -313,7 +313,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
+ 						RAS_CNTLR_INTERRUPT_CLEAR, 1);
+ 		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+ 
+-		if (!ras->disable_ras_err_cnt_harvest) {
++		if (ras && !ras->disable_ras_err_cnt_harvest && obj) {
+ 			/*
+ 			 * clear error status after ras_controller_intr
+ 			 * according to hw team and count ue number
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+index d54ceebd346b7..30c70b3ab17f1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+@@ -42,8 +42,6 @@
+ #define CRAT_OEMTABLEID_LENGTH	8
+ #define CRAT_RESERVED_LENGTH	6
+ 
+-#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
+-
+ /* Compute Unit flags */
+ #define COMPUTE_UNIT_CPU	(1 << 0)  /* Create Virtual CRAT for CPU */
+ #define COMPUTE_UNIT_GPU	(1 << 1)  /* Create Virtual CRAT for GPU */
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 2b31c3066aaae..b5738032237e3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -906,8 +906,7 @@ static void kfd_update_system_properties(void)
+ 	dev = list_last_entry(&topology_device_list,
+ 			struct kfd_topology_device, list);
+ 	if (dev) {
+-		sys_props.platform_id =
+-			(*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
++		sys_props.platform_id = dev->oem_id64;
+ 		sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
+ 		sys_props.platform_rev = dev->oem_revision;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+index 326d9b26b7aa7..22476a9390641 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+@@ -182,7 +182,10 @@ struct kfd_topology_device {
+ 	struct attribute		attr_gpuid;
+ 	struct attribute		attr_name;
+ 	struct attribute		attr_props;
+-	uint8_t				oem_id[CRAT_OEMID_LENGTH];
++	union {
++		uint8_t				oem_id[CRAT_OEMID_LENGTH];
++		uint64_t			oem_id64;
++	};
+ 	uint8_t				oem_table_id[CRAT_OEMTABLEID_LENGTH];
+ 	uint32_t			oem_revision;
+ };
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 29ef0ed44d5f4..50921b340b886 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3341,7 +3341,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+ 
+ 	/* There is one primary plane per CRTC */
+ 	primary_planes = dm->dc->caps.max_streams;
+-	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
++	if (primary_planes > AMDGPU_MAX_PLANES) {
++		DRM_ERROR("DM: Plane nums out of 6 planes\n");
++		return -EINVAL;
++	}
+ 
+ 	/*
+ 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+index 0eba391e597fd..40d03f8cde2cf 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+@@ -455,7 +455,8 @@ static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_sm
+ 			ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ 
+ 			/* Modify previous watermark range to cover up to max */
+-			ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
++			if (num_valid_sets > 0)
++				ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ 		}
+ 		num_valid_sets++;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+index 880954ac0b027..1b3cba5b1d749 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+@@ -690,6 +690,9 @@ static void wbscl_set_scaler_filter(
+ 	int pair;
+ 	uint16_t odd_coef, even_coef;
+ 
++	if (!filter)
++		return;
++
+ 	for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
+ 		for (pair = 0; pair < tap_pairs; pair++) {
+ 			even_coef = filter[phase * taps + 2 * pair];
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+index dae8e489c8cf4..a5de27908914c 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+@@ -58,7 +58,7 @@ struct gpio_service *dal_gpio_service_create(
+ 	struct dc_context *ctx)
+ {
+ 	struct gpio_service *service;
+-	uint32_t index_of_id;
++	int32_t index_of_id;
+ 
+ 	service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
+ 
+@@ -114,7 +114,7 @@ struct gpio_service *dal_gpio_service_create(
+ 	return service;
+ 
+ failure_2:
+-	while (index_of_id) {
++	while (index_of_id > 0) {
+ 		--index_of_id;
+ 		kfree(service->busyness[index_of_id]);
+ 	}
+@@ -241,6 +241,9 @@ static bool is_pin_busy(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
++	if (id == GPIO_ID_UNKNOWN)
++		return false;
++
+ 	return service->busyness[id][en];
+ }
+ 
+@@ -249,6 +252,9 @@ static void set_pin_busy(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
++	if (id == GPIO_ID_UNKNOWN)
++		return;
++
+ 	service->busyness[id][en] = true;
+ }
+ 
+@@ -257,6 +263,9 @@ static void set_pin_free(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
++	if (id == GPIO_ID_UNKNOWN)
++		return;
++
+ 	service->busyness[id][en] = false;
+ }
+ 
+@@ -265,7 +274,7 @@ enum gpio_result dal_gpio_service_lock(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
+-	if (!service->busyness[id]) {
++	if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
+ 		ASSERT_CRITICAL(false);
+ 		return GPIO_RESULT_OPEN_FAILED;
+ 	}
+@@ -279,7 +288,7 @@ enum gpio_result dal_gpio_service_unlock(
+ 	enum gpio_id id,
+ 	uint32_t en)
+ {
+-	if (!service->busyness[id]) {
++	if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
+ 		ASSERT_CRITICAL(false);
+ 		return GPIO_RESULT_OPEN_FAILED;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+index 51855a2624cf4..b1d5387195054 100644
+--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
++++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+@@ -130,13 +130,21 @@ static bool hdmi_14_process_transaction(
+ 	const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/
+ 	const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/
+ 	struct i2c_command i2c_command;
+-	uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
++	uint8_t offset;
+ 	struct i2c_payload i2c_payloads[] = {
+-		{ true, 0, 1, &offset },
++		{ true, 0, 1, 0 },
+ 		/* actual hdcp payload, will be filled later, zeroed for now*/
+ 		{ 0 }
+ 	};
+ 
++	if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
++		DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
++		return false;
++	}
++
++	offset = hdcp_i2c_offsets[message_info->msg_id];
++	i2c_payloads[0].data = &offset;
++
+ 	switch (message_info->link) {
+ 	case HDCP_LINK_SECONDARY:
+ 		i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
+@@ -310,6 +318,11 @@ static bool dp_11_process_transaction(
+ 	struct dc_link *link,
+ 	struct hdcp_protection_message *message_info)
+ {
++	if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
++		DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
++		return false;
++	}
++
+ 	return dpcd_access_helper(
+ 		link,
+ 		message_info->length,
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+index 8e9caae7c9559..1b2df97226a3f 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+@@ -156,11 +156,16 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
+-	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
++		msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
+ 		return MOD_HDCP_STATUS_DDC_FAILURE;
+-	}
+ 
+ 	if (is_dp_hdcp(hdcp)) {
++		int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
++			sizeof(hdcp_dpcd_addrs[0]);
++		if (msg_id >= num_dpcd_addrs)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ 			success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
+@@ -175,6 +180,11 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ 			data_offset += cur_size;
+ 		}
+ 	} else {
++		int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
++			sizeof(hdcp_i2c_offsets[0]);
++		if (msg_id >= num_i2c_offsets)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		success = hdcp->config.ddc.funcs.read_i2c(
+ 				hdcp->config.ddc.handle,
+ 				HDCP_I2C_ADDR,
+@@ -219,11 +229,16 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ 	uint32_t cur_size = 0;
+ 	uint32_t data_offset = 0;
+ 
+-	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
++	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
++		msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
+ 		return MOD_HDCP_STATUS_DDC_FAILURE;
+-	}
+ 
+ 	if (is_dp_hdcp(hdcp)) {
++		int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
++			sizeof(hdcp_dpcd_addrs[0]);
++		if (msg_id >= num_dpcd_addrs)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		while (buf_len > 0) {
+ 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ 			success = hdcp->config.ddc.funcs.write_dpcd(
+@@ -239,6 +254,11 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ 			data_offset += cur_size;
+ 		}
+ 	} else {
++		int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
++			sizeof(hdcp_i2c_offsets[0]);
++		if (msg_id >= num_i2c_offsets)
++			return MOD_HDCP_STATUS_DDC_FAILURE;
++
+ 		hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
+ 		memmove(&hdcp->buf[1], buf, buf_len);
+ 		success = hdcp->config.ddc.funcs.write_i2c(
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+index 31a32a79cfc20..fe70ab4e65bb5 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+@@ -30,9 +30,8 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+ {
+ 	int result;
+ 	unsigned int i;
+-	unsigned int table_entries;
+ 	struct pp_power_state *state;
+-	int size;
++	int size, table_entries;
+ 
+ 	if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
+ 		return 0;
+@@ -40,15 +39,19 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+ 	if (hwmgr->hwmgr_func->get_power_state_size == NULL)
+ 		return 0;
+ 
+-	hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
++	table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
+ 
+-	hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
++	size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
+ 					  sizeof(struct pp_power_state);
+ 
+-	if (table_entries == 0 || size == 0) {
++	if (table_entries <= 0 || size == 0) {
+ 		pr_warn("Please check whether power state management is supported on this asic\n");
++		hwmgr->num_ps = 0;
++		hwmgr->ps_size = 0;
+ 		return 0;
+ 	}
++	hwmgr->num_ps = table_entries;
++	hwmgr->ps_size = size;
+ 
+ 	hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
+ 	if (hwmgr->ps == NULL)
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+index 01dc46dc9c8a0..165af862d0542 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+@@ -73,8 +73,9 @@ static int atomctrl_retrieve_ac_timing(
+ 					j++;
+ 				} else if ((table->mc_reg_address[i].uc_pre_reg_data &
+ 							LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
+-					table->mc_reg_table_entry[num_ranges].mc_data[i] =
+-						table->mc_reg_table_entry[num_ranges].mc_data[i-1];
++					if (i)
++						table->mc_reg_table_entry[num_ranges].mc_data[i] =
++							table->mc_reg_table_entry[num_ranges].mc_data[i-1];
+ 				}
+ 			}
+ 			num_ranges++;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+index 5e72b7555edae..3673a9e7ba449 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+@@ -5190,7 +5190,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
+ 	mode = input[size];
+ 	switch (mode) {
+ 	case PP_SMC_POWER_PROFILE_CUSTOM:
+-		if (size < 8 && size != 0)
++		if (size != 8 && size != 0)
+ 			return -EINVAL;
+ 		/* If only CUSTOM is passed in, use the saved values. Check
+ 		 * that we actually have a CUSTOM profile by ensuring that
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+index 35d0ff57a5960..e85a90b989b59 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+@@ -584,6 +584,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
+ 				hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+ 	unsigned long clock = 0;
+ 	uint32_t level;
++	int ret;
+ 
+ 	if (NULL == table || table->count <= 0)
+ 		return -EINVAL;
+@@ -591,7 +592,9 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
+ 	data->uvd_dpm.soft_min_clk = 0;
+ 	data->uvd_dpm.hard_min_clk = 0;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
++	if (ret)
++		return ret;
+ 
+ 	if (level < table->count)
+ 		clock = table->entries[level].vclk;
+@@ -611,6 +614,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
+ 				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+ 	unsigned long clock = 0;
+ 	uint32_t level;
++	int ret;
+ 
+ 	if (NULL == table || table->count <= 0)
+ 		return -EINVAL;
+@@ -618,7 +622,9 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
+ 	data->vce_dpm.soft_min_clk = 0;
+ 	data->vce_dpm.hard_min_clk = 0;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
++	if (ret)
++		return ret;
+ 
+ 	if (level < table->count)
+ 		clock = table->entries[level].ecclk;
+@@ -638,6 +644,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
+ 				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+ 	unsigned long clock = 0;
+ 	uint32_t level;
++	int ret;
+ 
+ 	if (NULL == table || table->count <= 0)
+ 		return -EINVAL;
+@@ -645,7 +652,9 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
+ 	data->acp_dpm.soft_min_clk = 0;
+ 	data->acp_dpm.hard_min_clk = 0;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
++	if (ret)
++		return ret;
+ 
+ 	if (level < table->count)
+ 		clock = table->entries[level].acpclk;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+index 10678b5199957..79a41180adf13 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+@@ -355,13 +355,13 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
+ 	return 0;
+ }
+ 
+-static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
++static int vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega10_hwmgr *data = hwmgr->backend;
+-	int i;
+ 	uint32_t sub_vendor_id, hw_revision;
+ 	uint32_t top32, bottom32;
+ 	struct amdgpu_device *adev = hwmgr->adev;
++	int ret, i;
+ 
+ 	vega10_initialize_power_tune_defaults(hwmgr);
+ 
+@@ -486,9 +486,12 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	if (data->registry_data.vr0hot_enabled)
+ 		data->smu_features[GNLD_VR0HOT].supported = true;
+ 
+-	smum_send_msg_to_smc(hwmgr,
++	ret = smum_send_msg_to_smc(hwmgr,
+ 			PPSMC_MSG_GetSmuVersion,
+ 			&hwmgr->smu_version);
++	if (ret)
++		return ret;
++
+ 		/* ACG firmware has major version 5 */
+ 	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
+ 		data->smu_features[GNLD_ACG].supported = true;
+@@ -506,10 +509,16 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 		data->smu_features[GNLD_PCC_LIMIT].supported = true;
+ 
+ 	/* Get the SN to turn into a Unique ID */
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
++	if (ret)
++		return ret;
++
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
++	if (ret)
++		return ret;
+ 
+ 	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
++	return 0;
+ }
+ 
+ #ifdef PPLIB_VEGA10_EVV_SUPPORT
+@@ -883,7 +892,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+ 
+ 	vega10_set_features_platform_caps(hwmgr);
+ 
+-	vega10_init_dpm_defaults(hwmgr);
++	result = vega10_init_dpm_defaults(hwmgr);
++	if (result)
++		return result;
+ 
+ #ifdef PPLIB_VEGA10_EVV_SUPPORT
+ 	/* Get leakage voltage based on leakage ID. */
+@@ -2350,15 +2361,20 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
+ {
+ 	struct vega10_hwmgr *data = hwmgr->backend;
+ 	uint32_t agc_btc_response;
++	int ret;
+ 
+ 	if (data->smu_features[GNLD_ACG].supported) {
+ 		if (0 == vega10_enable_smc_features(hwmgr, true,
+ 					data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
+ 			data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
++		if (ret)
++			return ret;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
++		if (ret)
++			agc_btc_response = 0;
+ 
+ 		if (1 == agc_btc_response) {
+ 			if (1 == data->acg_loop_state)
+@@ -2572,8 +2588,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
+ 		}
+ 	}
+ 
+-	pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
++	result = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
+ 			VOLTAGE_OBJ_SVID2,  &voltage_table);
++	PP_ASSERT_WITH_CODE(!result,
++			"Failed to get voltage table!",
++			return result);
+ 	pp_table->MaxVidStep = voltage_table.max_vid_step;
+ 
+ 	pp_table->GfxDpmVoltageMode =
+@@ -3391,13 +3410,17 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
+ 	const struct vega10_power_state *vega10_ps =
+ 			cast_const_phw_vega10_power_state(states->pnew_state);
+ 	struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
+-	uint32_t sclk = vega10_ps->performance_levels
+-			[vega10_ps->performance_level_count - 1].gfx_clock;
+ 	struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+-	uint32_t mclk = vega10_ps->performance_levels
+-			[vega10_ps->performance_level_count - 1].mem_clock;
++	uint32_t sclk, mclk;
+ 	uint32_t i;
+ 
++	if (vega10_ps == NULL)
++		return -EINVAL;
++	sclk = vega10_ps->performance_levels
++			[vega10_ps->performance_level_count - 1].gfx_clock;
++	mclk = vega10_ps->performance_levels
++			[vega10_ps->performance_level_count - 1].mem_clock;
++
+ 	for (i = 0; i < sclk_table->count; i++) {
+ 		if (sclk == sclk_table->dpm_levels[i].value)
+ 			break;
+@@ -3704,6 +3727,9 @@ static int vega10_generate_dpm_level_enable_mask(
+ 			cast_const_phw_vega10_power_state(states->pnew_state);
+ 	int i;
+ 
++	if (vega10_ps == NULL)
++		return -EINVAL;
++
+ 	PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
+ 			"Attempt to Trim DPM States Failed!",
+ 			return -1);
+@@ -3876,11 +3902,14 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
+ 		uint32_t *query)
+ {
+ 	uint32_t value;
++	int ret;
+ 
+ 	if (!query)
+ 		return -EINVAL;
+ 
+-	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
++	ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
++	if (ret)
++		return ret;
+ 
+ 	/* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
+ 	*query = value << 8;
+@@ -4633,14 +4662,16 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+ 	PPTable_t *pptable = &(data->smc_state_table.pp_table);
+ 
+-	int i, now, size = 0, count = 0;
++	int i, ret, now,  size = 0, count = 0;
+ 
+ 	switch (type) {
+ 	case PP_SCLK:
+ 		if (data->registry_data.sclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
++		if (ret)
++			break;
+ 
+ 		if (hwmgr->pp_one_vf &&
+ 		    (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
+@@ -4656,7 +4687,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		if (data->registry_data.mclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
++		if (ret)
++			break;
+ 
+ 		for (i = 0; i < mclk_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -4667,7 +4700,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		if (data->registry_data.socclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
++		ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
++		if (ret)
++			break;
+ 
+ 		for (i = 0; i < soc_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -4678,8 +4713,10 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ 		if (data->registry_data.dcefclk_dpm_key_disabled)
+ 			break;
+ 
+-		smum_send_msg_to_smc_with_parameter(hwmgr,
++		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ 				PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
++		if (ret)
++			break;
+ 
+ 		for (i = 0; i < dcef_table->count; i++)
+ 			size += sprintf(buf + size, "%d: %uMhz %s\n",
+@@ -4828,6 +4865,9 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
+ 
+ 	psa = cast_const_phw_vega10_power_state(pstate1);
+ 	psb = cast_const_phw_vega10_power_state(pstate2);
++	if (psa == NULL || psb == NULL)
++		return -EINVAL;
++
+ 	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
+ 	if (psa->performance_level_count != psb->performance_level_count) {
+ 		*equal = false;
+@@ -4953,6 +4993,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+ 		return -EINVAL;
+ 
+ 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++	if (vega10_ps == NULL)
++		return -EINVAL;
+ 
+ 	vega10_ps->performance_levels
+ 	[vega10_ps->performance_level_count - 1].gfx_clock =
+@@ -5004,6 +5046,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+ 		return -EINVAL;
+ 
+ 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++	if (vega10_ps == NULL)
++		return -EINVAL;
+ 
+ 	vega10_ps->performance_levels
+ 	[vega10_ps->performance_level_count - 1].mem_clock =
+@@ -5239,6 +5283,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
+ 		return;
+ 
+ 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++	if (vega10_ps == NULL)
++		return;
++
+ 	max_level = vega10_ps->performance_level_count - 1;
+ 
+ 	if (vega10_ps->performance_levels[max_level].gfx_clock !=
+@@ -5261,6 +5308,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
+ 
+ 	ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
+ 	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
++	if (vega10_ps == NULL)
++		return;
++
+ 	max_level = vega10_ps->performance_level_count - 1;
+ 
+ 	if (vega10_ps->performance_levels[max_level].gfx_clock !=
+@@ -5451,6 +5501,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
+ 		return -EINVAL;
+ 
+ 	ps = cast_const_phw_vega10_power_state(state);
++	if (ps == NULL)
++		return -EINVAL;
+ 
+ 	i = index > ps->performance_level_count - 1 ?
+ 			ps->performance_level_count - 1 : index;
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+index 57a354a03e8ae..a55dc6ec4f766 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+@@ -4095,9 +4095,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ 	if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ 		struct vega20_hwmgr *data =
+ 			(struct vega20_hwmgr *)(hwmgr->backend);
+-		if (size == 0 && !data->is_custom_profile_set)
++
++		if (size != 10 && size != 0)
+ 			return -EINVAL;
+-		if (size < 10 && size != 0)
++
++		if (size == 0 && !data->is_custom_profile_set)
+ 			return -EINVAL;
+ 
+ 		result = vega20_get_activity_monitor_coeff(hwmgr,
+@@ -4159,6 +4161,8 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
+ 			activity_monitor.Fclk_PD_Data_error_coeff = input[8];
+ 			activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
+ 			break;
++		default:
++			return -EINVAL;
+ 		}
+ 
+ 		result = vega20_set_activity_monitor_coeff(hwmgr,
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+index daf122f24f230..ae8305a1ff05a 100644
+--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
++++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+@@ -131,13 +131,17 @@ int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+ 			    uint64_t *features_enabled)
+ {
+ 	uint32_t enabled_features;
++	int ret;
+ 
+ 	if (features_enabled == NULL)
+ 		return -EINVAL;
+ 
+-	smum_send_msg_to_smc(hwmgr,
++	ret = smum_send_msg_to_smc(hwmgr,
+ 			PPSMC_MSG_GetEnabledSmuFeatures,
+ 			&enabled_features);
++	if (ret)
++		return ret;
++
+ 	*features_enabled = enabled_features;
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 43de9dfcba19a..f1091cb87de0c 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -318,6 +318,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ 		},
+ 		.driver_data = (void *)&lcd1600x2560_leftside_up,
++	}, {	/* OrangePi Neo */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
++		},
++		.driver_data = (void *)&lcd1200x1920_rightside_up,
+ 	}, {	/* Samsung GalaxyBook 10.6 */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
+index 038d4c6884c5b..136a7163477da 100644
+--- a/drivers/gpu/drm/i915/i915_sw_fence.c
++++ b/drivers/gpu/drm/i915/i915_sw_fence.c
+@@ -44,7 +44,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
+ 	debug_object_init(fence, &i915_sw_fence_debug_descr);
+ }
+ 
+-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
+ {
+ 	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
+ }
+@@ -70,7 +70,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
+ 	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
+ }
+ 
+-static inline void debug_fence_free(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
+ {
+ 	debug_object_free(fence, &i915_sw_fence_debug_descr);
+ 	smp_wmb(); /* flush the change in state before reallocation */
+@@ -87,7 +87,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
+ {
+ }
+ 
+-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
+ {
+ }
+ 
+@@ -108,7 +108,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
+ {
+ }
+ 
+-static inline void debug_fence_free(struct i915_sw_fence *fence)
++static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
+ {
+ }
+ 
+diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
+index 255c6b863f8d2..6d54c565b34fa 100644
+--- a/drivers/gpu/drm/meson/meson_plane.c
++++ b/drivers/gpu/drm/meson/meson_plane.c
+@@ -529,6 +529,7 @@ int meson_plane_create(struct meson_drm *priv)
+ 	struct meson_plane *meson_plane;
+ 	struct drm_plane *plane;
+ 	const uint64_t *format_modifiers = format_modifiers_default;
++	int ret;
+ 
+ 	meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
+ 				   GFP_KERNEL);
+@@ -543,12 +544,16 @@ int meson_plane_create(struct meson_drm *priv)
+ 	else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ 		format_modifiers = format_modifiers_afbc_g12a;
+ 
+-	drm_universal_plane_init(priv->drm, plane, 0xFF,
+-				 &meson_plane_funcs,
+-				 supported_drm_formats,
+-				 ARRAY_SIZE(supported_drm_formats),
+-				 format_modifiers,
+-				 DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
++	ret = drm_universal_plane_init(priv->drm, plane, 0xFF,
++					&meson_plane_funcs,
++					supported_drm_formats,
++					ARRAY_SIZE(supported_drm_formats),
++					format_modifiers,
++					DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
++	if (ret) {
++		devm_kfree(priv->drm->dev, meson_plane);
++		return ret;
++	}
+ 
+ 	drm_plane_helper_add(plane, &meson_plane_helper_funcs);
+ 
+diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
+index 28d671c5e0cac..d173b13ff1983 100644
+--- a/drivers/hid/hid-cougar.c
++++ b/drivers/hid/hid-cougar.c
+@@ -106,7 +106,7 @@ static void cougar_fix_g6_mapping(void)
+ static __u8 *cougar_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 				 unsigned int *rsize)
+ {
+-	if (rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
++	if (*rsize >= 117 && rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
+ 	    (rdesc[115] | rdesc[116] << 8) >= HID_MAX_USAGES) {
+ 		hid_info(hdev,
+ 			"usage count exceeds max: fixing up report descriptor\n");
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index e99400f3ae1d1..39339b152b8ba 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1965,6 +1965,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(vmbus_device_unregister);
+ 
+ /*
+  * vmbus_remove_channel_attr_group - remove the channel's attribute group
+diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
+index 6c9a906631b89..e73c4de9471fa 100644
+--- a/drivers/hwmon/adc128d818.c
++++ b/drivers/hwmon/adc128d818.c
+@@ -176,7 +176,7 @@ static ssize_t adc128_in_store(struct device *dev,
+ 
+ 	mutex_lock(&data->update_lock);
+ 	/* 10 mV LSB on limit registers */
+-	regval = clamp_val(DIV_ROUND_CLOSEST(val, 10), 0, 255);
++	regval = DIV_ROUND_CLOSEST(clamp_val(val, 0, 2550), 10);
+ 	data->in[index][nr] = regval << 4;
+ 	reg = index == 1 ? ADC128_REG_IN_MIN(nr) : ADC128_REG_IN_MAX(nr);
+ 	i2c_smbus_write_byte_data(data->client, reg, regval);
+@@ -214,7 +214,7 @@ static ssize_t adc128_temp_store(struct device *dev,
+ 		return err;
+ 
+ 	mutex_lock(&data->update_lock);
+-	regval = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
++	regval = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
+ 	data->temp[index] = regval << 1;
+ 	i2c_smbus_write_byte_data(data->client,
+ 				  index == 1 ? ADC128_REG_TEMP_MAX
+diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
+index ac169a994ae00..db2aecdfbd17c 100644
+--- a/drivers/hwmon/lm95234.c
++++ b/drivers/hwmon/lm95234.c
+@@ -301,7 +301,8 @@ static ssize_t tcrit2_store(struct device *dev, struct device_attribute *attr,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, index ? 255 : 127);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, (index ? 255 : 127) * 1000),
++				1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->tcrit2[index] = val;
+@@ -350,7 +351,7 @@ static ssize_t tcrit1_store(struct device *dev, struct device_attribute *attr,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->tcrit1[index] = val;
+@@ -391,7 +392,7 @@ static ssize_t tcrit1_hyst_store(struct device *dev,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	val = DIV_ROUND_CLOSEST(val, 1000);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
+ 	val = clamp_val((int)data->tcrit1[index] - val, 0, 31);
+ 
+ 	mutex_lock(&data->update_lock);
+@@ -431,7 +432,7 @@ static ssize_t offset_store(struct device *dev, struct device_attribute *attr,
+ 		return ret;
+ 
+ 	/* Accuracy is 1/2 degrees C */
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 500), -128, 127);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, -64000, 63500), 500);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->toffset[index] = val;
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index 5bd15622a85f9..3645a19cdaf4d 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -2374,7 +2374,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
+ 	if (err < 0)
+ 		return err;
+ 
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->temp_offset[nr] = val;
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index 3964ceab2817c..acf36862851ad 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -897,7 +897,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
+ 	if (err < 0)
+ 		return err;
+ 
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	data->target_temp[nr] = val;
+@@ -922,7 +922,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
+ 		return err;
+ 
+ 	/* Limit the temp to 0C - 15C */
+-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
++	val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 15000), 1000);
+ 
+ 	mutex_lock(&data->update_lock);
+ 	reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
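
The four hwmon fixes in this patch (adc128d818, lm95234, nct6775, w83627ehf) are the same transform: clamp the user-supplied value to its representable range first, then divide, so DIV_ROUND_CLOSEST() never sees an input large enough to overflow while rounding. A minimal userspace sketch of the failure mode, using simplified stand-ins for the kernel macros (not part of the patch):

  #include <stdio.h>
  #include <limits.h>

  /* simplified, signed-only stand-ins for the kernel macros */
  #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
  #define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

  int main(void)
  {
          long val = LONG_MAX; /* hostile sysfs input, already parsed */

          /* old order: (val + 500) overflows long before the clamp runs */
          long bad = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255);
          /* new order: clamp first, then divide a value known to fit */
          long good = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000);

          printf("old order: %ld, new order: %ld\n", bad, good);
          return 0;
  }

With the old order the intermediate sum is signed overflow (undefined behaviour); on two's-complement machines it typically wraps negative, so the clamp then stores a bogus limit of 0 instead of 255.
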
+diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
+index fd5f5c5a5244d..425597151dd3e 100644
+--- a/drivers/hwspinlock/hwspinlock_core.c
++++ b/drivers/hwspinlock/hwspinlock_core.c
+@@ -302,6 +302,34 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+ }
+ EXPORT_SYMBOL_GPL(__hwspin_unlock);
+ 
++/**
++ * hwspin_lock_bust() - bust a specific hwspinlock
++ * @hwlock: a previously-acquired hwspinlock which we want to bust
++ * @id: identifier of the remote lock holder, if applicable
++ *
++ * This function will bust a hwspinlock that was previously acquired as
++ * long as the current owner of the lock matches the id given by the caller.
++ *
++ * Context: Process context.
++ *
++ * Returns: 0 on success, -EINVAL if the hwspinlock does not exist or the
++ * bust operation fails, and -EOPNOTSUPP if the bust operation is not
++ * defined for the hwspinlock.
++ */
++int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
++{
++	if (WARN_ON(!hwlock))
++		return -EINVAL;
++
++	if (!hwlock->bank->ops->bust) {
++		pr_err("bust operation not defined\n");
++		return -EOPNOTSUPP;
++	}
++
++	return hwlock->bank->ops->bust(hwlock, id);
++}
++EXPORT_SYMBOL_GPL(hwspin_lock_bust);
++
+ /**
+  * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
+  * @bank: the hwspinlock device bank
+diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h
+index 29892767bb7a0..f298fc0ee5adb 100644
+--- a/drivers/hwspinlock/hwspinlock_internal.h
++++ b/drivers/hwspinlock/hwspinlock_internal.h
+@@ -21,6 +21,8 @@ struct hwspinlock_device;
+  * @trylock: make a single attempt to take the lock. returns 0 on
+  *	     failure and true on success. may _not_ sleep.
+  * @unlock:  release the lock. always succeed. may _not_ sleep.
++ * @bust:    optional, platform-specific bust handler, called by hwspinlock
++ *	     core to bust a specific lock.
+  * @relax:   optional, platform-specific relax handler, called by hwspinlock
+  *	     core while spinning on a lock, between two successive
+  *	     invocations of @trylock. may _not_ sleep.
+@@ -28,6 +30,7 @@ struct hwspinlock_device;
+ struct hwspinlock_ops {
+ 	int (*trylock)(struct hwspinlock *lock);
+ 	void (*unlock)(struct hwspinlock *lock);
++	int (*bust)(struct hwspinlock *lock, unsigned int id);
+ 	void (*relax)(struct hwspinlock *lock);
+ };
+ 
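
For context, a hedged sketch of how the two halves of the new bust API fit together: a provider filling in the optional .bust op and a consumer reclaiming a lock that a crashed remote core will never release. All my_/demo_ names are hypothetical, and the trylock/unlock ops are assumed to be defined elsewhere in the provider:

  #include <linux/hwspinlock.h>
  #include "hwspinlock_internal.h"	/* struct hwspinlock_ops (provider side) */

  /* hypothetical provider op: verify 'id' owns the lock, then force-release */
  static int my_hwlock_bust(struct hwspinlock *lock, unsigned int id)
  {
          /* platform-specific owner check and forced release go here */
          return 0;
  }

  static const struct hwspinlock_ops my_hwlock_ops = {
          .trylock = my_hwlock_trylock,	/* assumed defined elsewhere */
          .unlock  = my_hwlock_unlock,	/* assumed defined elsewhere */
          .bust    = my_hwlock_bust,
  };

  /* consumer: reclaim a lock held by a peer that died while holding it */
  static int demo_recover(struct hwspinlock *hwlock, unsigned int dead_peer)
  {
          return hwspin_lock_bust(hwlock, dead_peer);	/* may sleep */
  }
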
+diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
+index 19ab7d7251bcb..99d1288e66828 100644
+--- a/drivers/iio/adc/ad7124.c
++++ b/drivers/iio/adc/ad7124.c
+@@ -500,6 +500,7 @@ static int ad7124_soft_reset(struct ad7124_state *st)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	fsleep(200);
+ 	timeout = 100;
+ 	do {
+ 		ret = ad_sd_read_reg(&st->sd, AD7124_STATUS, 1, &readval);
+diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+index 93b4e9e6bb551..8aa6e12320e72 100644
+--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
++++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+@@ -180,7 +180,7 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+ 
+ 	ret = dma_get_slave_caps(chan, &caps);
+ 	if (ret < 0)
+-		goto err_free;
++		goto err_release;
+ 
+ 	/* Needs to be aligned to the maximum of the minimums */
+ 	if (caps.src_addr_widths)
+@@ -207,6 +207,8 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+ 
+ 	return &dmaengine_buffer->queue.buffer;
+ 
++err_release:
++	dma_release_channel(chan);
+ err_free:
+ 	kfree(dmaengine_buffer);
+ 	return ERR_PTR(ret);
+diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
+index c32b2577dd991..6e64ffde6c82d 100644
+--- a/drivers/iio/inkern.c
++++ b/drivers/iio/inkern.c
+@@ -610,17 +610,17 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
+ 		break;
+ 	case IIO_VAL_INT_PLUS_MICRO:
+ 		if (scale_val2 < 0)
+-			*processed = -raw64 * scale_val;
++			*processed = -raw64 * scale_val * scale;
+ 		else
+-			*processed = raw64 * scale_val;
++			*processed = raw64 * scale_val * scale;
+ 		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
+ 				      1000000LL);
+ 		break;
+ 	case IIO_VAL_INT_PLUS_NANO:
+ 		if (scale_val2 < 0)
+-			*processed = -raw64 * scale_val;
++			*processed = -raw64 * scale_val * scale;
+ 		else
+-			*processed = raw64 * scale_val;
++			*processed = raw64 * scale_val * scale;
+ 		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
+ 				      1000000000LL);
+ 		break;
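
The inkern change multiplies the integer half of an IIO_VAL_INT_PLUS_MICRO/_NANO scale by the caller's scale factor, which the code already applied to the fractional half; before the fix the two halves of *processed were in different units. A quick numeric check (raw 100, scale 1.5, caller asking for milli-units):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          int64_t raw64 = 100;
          int64_t scale_val = 1, scale_val2 = 500000; /* 1.5 as INT_PLUS_MICRO */
          int scale = 1000;                           /* caller wants 1/1000 units */

          int64_t old = raw64 * scale_val
                      + raw64 * scale_val2 * scale / 1000000;
          int64_t fix = raw64 * scale_val * scale
                      + raw64 * scale_val2 * scale / 1000000;

          /* expected: 100 * 1.5 = 150 units = 150000 milli-units */
          printf("old=%lld fixed=%lld\n", (long long)old, (long long)fix);
          return 0;
  }

The old expression yields 50100: a scaled fractional half (50000) added to an unscaled integer half (100).
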
+diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
+index f2593133e5247..790db3ceb2083 100644
+--- a/drivers/input/misc/uinput.c
++++ b/drivers/input/misc/uinput.c
+@@ -416,6 +416,20 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * Limit number of contacts to a reasonable value (100). This
++	 * ensures that we need less than 2 pages for struct input_mt
++	 * (we are not using in-kernel slot assignment so not going to
++	 * allocate memory for the "red" table), and we should have no
++	 * trouble getting this much memory.
++	 */
++	if (code == ABS_MT_SLOT && max > 99) {
++		printk(KERN_DEBUG
++		       "%s: unreasonably large number of slots requested: %d\n",
++		       UINPUT_NAME, max);
++		return -EINVAL;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index a27765a7f6b75..72b380e17a1b0 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1333,7 +1333,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ 	 */
+ 	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
+ 
+-	while (qi->desc_status[wait_index] != QI_DONE) {
++	while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
+ 		/*
+ 		 * We will leave the interrupts disabled, to prevent interrupt
+ 		 * context to queue another cmd while a cmd is already submitted
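
The dmar change is the standard cure for a flag polled in a loop: without READ_ONCE() the compiler may load desc_status[wait_index] once and spin on the cached value, since nothing in the loop body tells it the memory can change underneath it. A minimal sketch of the idiom (the helper name is hypothetical):

  #include <linux/compiler.h>	/* READ_ONCE() */
  #include <linux/processor.h>	/* cpu_relax() */

  /* Poll *status until another context sets it to 'done'. READ_ONCE()
   * forces a fresh load on every iteration; the writer side should pair
   * this with WRITE_ONCE(*status, done). */
  static void demo_poll_done(const int *status, int done)
  {
          while (READ_ONCE(*status) != done)
                  cpu_relax();
  }
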
+diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
+index 65aa30d55d3ab..f31f66b123666 100644
+--- a/drivers/iommu/sun50i-iommu.c
++++ b/drivers/iommu/sun50i-iommu.c
+@@ -380,6 +380,7 @@ static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
+ 		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
+ 		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
+ 		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
++	iommu_write(iommu, IOMMU_BYPASS_REG, 0);
+ 	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
+ 	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
+ 		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
+diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
+index c76fb70c70bb6..e865a43428b83 100644
+--- a/drivers/irqchip/irq-armada-370-xp.c
++++ b/drivers/irqchip/irq-armada-370-xp.c
+@@ -546,6 +546,10 @@ static struct irq_chip armada_370_xp_irq_chip = {
+ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
+ 				      unsigned int virq, irq_hw_number_t hw)
+ {
++	/* IRQs 0 and 1 cannot be mapped, they are handled internally */
++	if (hw <= 1)
++		return -EINVAL;
++
+ 	armada_370_xp_irq_mask(irq_get_irq_data(virq));
+ 	if (!is_percpu_irq(hw))
+ 		writel(hw, per_cpu_int_base +
+diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
+index 4116b48e60aff..205a275196074 100644
+--- a/drivers/irqchip/irq-gic-v2m.c
++++ b/drivers/irqchip/irq-gic-v2m.c
+@@ -442,12 +442,12 @@ static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
+ 
+ 		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
+ 				      &res, 0);
+-		if (ret) {
+-			of_node_put(child);
++		if (ret)
+ 			break;
+-		}
+ 	}
+ 
++	if (ret && child)
++		of_node_put(child);
+ 	if (!ret)
+ 		ret = gicv2m_allocate_domains(parent);
+ 	if (ret)
+diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
+index f1964c96fb159..82696e0607a53 100644
+--- a/drivers/leds/leds-spi-byte.c
++++ b/drivers/leds/leds-spi-byte.c
+@@ -91,7 +91,6 @@ static int spi_byte_probe(struct spi_device *spi)
+ 		dev_err(dev, "Device must have exactly one LED sub-node.");
+ 		return -EINVAL;
+ 	}
+-	child = of_get_next_available_child(dev_of_node(dev), NULL);
+ 
+ 	led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
+ 	if (!led)
+@@ -107,11 +106,13 @@ static int spi_byte_probe(struct spi_device *spi)
+ 	led->ldev.max_brightness = led->cdef->max_value - led->cdef->off_value;
+ 	led->ldev.brightness_set_blocking = spi_byte_brightness_set_blocking;
+ 
++	child = of_get_next_available_child(dev_of_node(dev), NULL);
+ 	state = of_get_property(child, "default-state", NULL);
+ 	if (state) {
+ 		if (!strcmp(state, "on")) {
+ 			led->ldev.brightness = led->ldev.max_brightness;
+ 		} else if (strcmp(state, "off")) {
++			of_node_put(child);
+ 			/* all other cases except "off" */
+ 			dev_err(dev, "default-state can only be 'on' or 'off'");
+ 			return -EINVAL;
+@@ -122,9 +123,12 @@ static int spi_byte_probe(struct spi_device *spi)
+ 
+ 	ret = devm_led_classdev_register(&spi->dev, &led->ldev);
+ 	if (ret) {
++		of_node_put(child);
+ 		mutex_destroy(&led->mutex);
+ 		return ret;
+ 	}
++
++	of_node_put(child);
+ 	spi_set_drvdata(spi, led);
+ 
+ 	return 0;
+diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
+index b0c45c6ebe0bf..f76477044ec1e 100644
+--- a/drivers/md/dm-init.c
++++ b/drivers/md/dm-init.c
+@@ -207,8 +207,10 @@ static char __init *dm_parse_device_entry(struct dm_device *dev, char *str)
+ 	strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid));
+ 	/* minor */
+ 	if (strlen(field[2])) {
+-		if (kstrtoull(field[2], 0, &dev->dmi.dev))
++		if (kstrtoull(field[2], 0, &dev->dmi.dev) ||
++		    dev->dmi.dev >= (1 << MINORBITS))
+ 			return ERR_PTR(-EINVAL);
++		dev->dmi.dev = huge_encode_dev((dev_t)dev->dmi.dev);
+ 		dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG;
+ 	}
+ 	/* flags */
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 9186881afc981..d074f426980dd 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -431,8 +431,11 @@ static int camss_of_parse_endpoint_node(struct device *dev,
+ 	struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2;
+ 	struct v4l2_fwnode_endpoint vep = { { 0 } };
+ 	unsigned int i;
++	int ret;
+ 
+-	v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
++	ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
++	if (ret)
++		return ret;
+ 
+ 	csd->interface.csiphy_id = vep.base.port;
+ 
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+index 437889e51ca05..2ce7f5567f512 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+@@ -113,8 +113,9 @@ static int vid_cap_queue_setup(struct vb2_queue *vq,
+ 		if (*nplanes != buffers)
+ 			return -EINVAL;
+ 		for (p = 0; p < buffers; p++) {
+-			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
+-						dev->fmt_cap->data_offset[p])
++			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h /
++					dev->fmt_cap->vdownsampling[p] +
++					dev->fmt_cap->data_offset[p])
+ 				return -EINVAL;
+ 		}
+ 	} else {
+@@ -1801,8 +1802,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
+ 		return -EINVAL;
+ 	if (edid->blocks == 0) {
+ 		dev->edid_blocks = 0;
+-		v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
+-		v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
++		if (dev->num_outputs) {
++			v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
++			v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
++		}
+ 		phys_addr = CEC_PHYS_ADDR_INVALID;
+ 		goto set_phys_addr;
+ 	}
+@@ -1826,8 +1829,10 @@ int vidioc_s_edid(struct file *file, void *_fh,
+ 			display_present |=
+ 				dev->display_present[i] << j++;
+ 
+-	v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
+-	v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
++	if (dev->num_outputs) {
++		v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
++		v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
++	}
+ 
+ set_phys_addr:
+ 	/* TODO: a proper hotplug detect cycle should be emulated here */
+diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+index cd6c247547d66..9038be90ab35d 100644
+--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
++++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
+@@ -63,14 +63,16 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
+ 		if (sizes[0] < size)
+ 			return -EINVAL;
+ 		for (p = 1; p < planes; p++) {
+-			if (sizes[p] < dev->bytesperline_out[p] * h +
+-				       vfmt->data_offset[p])
++			if (sizes[p] < dev->bytesperline_out[p] * h /
++					vfmt->vdownsampling[p] +
++					vfmt->data_offset[p])
+ 				return -EINVAL;
+ 		}
+ 	} else {
+ 		for (p = 0; p < planes; p++)
+-			sizes[p] = p ? dev->bytesperline_out[p] * h +
+-				       vfmt->data_offset[p] : size;
++			sizes[p] = p ? dev->bytesperline_out[p] * h /
++					vfmt->vdownsampling[p] +
++					vfmt->data_offset[p] : size;
+ 	}
+ 
+ 	if (vq->num_buffers + *nbuffers < 2)
+@@ -127,7 +129,7 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb)
+ 
+ 	for (p = 0; p < planes; p++) {
+ 		if (p)
+-			size = dev->bytesperline_out[p] * h;
++			size = dev->bytesperline_out[p] * h / vfmt->vdownsampling[p];
+ 		size += vb->planes[p].data_offset;
+ 
+ 		if (vb2_get_plane_payload(vb, p) < size) {
+@@ -334,8 +336,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv,
+ 	for (p = 0; p < mp->num_planes; p++) {
+ 		mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p];
+ 		mp->plane_fmt[p].sizeimage =
+-			mp->plane_fmt[p].bytesperline * mp->height +
+-			fmt->data_offset[p];
++			mp->plane_fmt[p].bytesperline * mp->height /
++			fmt->vdownsampling[p] + fmt->data_offset[p];
+ 	}
+ 	for (p = fmt->buffers; p < fmt->planes; p++) {
+ 		unsigned stride = dev->bytesperline_out[p];
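
The vivid sizing hunks (capture and output queue setup, buffer prepare, g_fmt) all apply one corrected per-plane rule: plane p needs bytesperline[p] * height / vdownsampling[p] + data_offset[p] bytes. Worked example for an NV12-style layout at 640x480: the luma plane needs 640 * 480 = 307200 bytes, while a chroma plane with vdownsampling 2 needs 640 * 480 / 2 = 153600; without the division the driver demanded, and advertised, double the correct chroma size and rejected valid buffers.
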
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 6334f99f1854d..cfbc7595cd0b8 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -948,16 +948,26 @@ static int uvc_parse_streaming(struct uvc_device *dev,
+ 		goto error;
+ 	}
+ 
+-	size = nformats * sizeof(*format) + nframes * sizeof(*frame)
++	/*
++	 * Allocate memory for the formats, the frames and the intervals,
++	 * plus any required padding to guarantee that everything has the
++	 * correct alignment.
++	 */
++	size = nformats * sizeof(*format);
++	size = ALIGN(size, __alignof__(*frame)) + nframes * sizeof(*frame);
++	size = ALIGN(size, __alignof__(*interval))
+ 	     + nintervals * sizeof(*interval);
++
+ 	format = kzalloc(size, GFP_KERNEL);
+-	if (format == NULL) {
++	if (!format) {
+ 		ret = -ENOMEM;
+ 		goto error;
+ 	}
+ 
+-	frame = (struct uvc_frame *)&format[nformats];
+-	interval = (u32 *)&frame[nframes];
++	frame = (void *)format + nformats * sizeof(*format);
++	frame = PTR_ALIGN(frame, __alignof__(*frame));
++	interval = (void *)frame + nframes * sizeof(*frame);
++	interval = PTR_ALIGN(interval, __alignof__(*interval));
+ 
+ 	streaming->format = format;
+ 	streaming->nformats = nformats;
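
The uvc_parse_streaming() fix keeps the single-allocation scheme but pads between the three regions and recomputes aligned start pointers, so the frame and interval arrays stay correctly aligned whatever the struct sizes are. A standalone userspace sketch of the layout technique, with stand-in types; ALIGN_UP/PTR_ALIGN_UP mirror the kernel's ALIGN()/PTR_ALIGN():

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  #define ALIGN_UP(x, a)     (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))
  #define PTR_ALIGN_UP(p, a) ((void *)ALIGN_UP((uintptr_t)(p), (a)))

  struct fmt { double d; };  /* stand-ins with different alignments */
  struct frm { uint64_t q; };

  int main(void)
  {
          size_t nfmt = 3, nfrm = 5, nint = 7;

          /* size each region, padding so the next one starts aligned */
          size_t size = nfmt * sizeof(struct fmt);
          size = ALIGN_UP(size, _Alignof(struct frm)) + nfrm * sizeof(struct frm);
          size = ALIGN_UP(size, _Alignof(uint32_t)) + nint * sizeof(uint32_t);

          struct fmt *fmt = calloc(1, size);
          if (!fmt)
                  return 1;

          /* re-derive the aligned sub-array pointers, as the fix does */
          struct frm *frm = PTR_ALIGN_UP((char *)fmt + nfmt * sizeof(*fmt),
                                         _Alignof(struct frm));
          uint32_t *itv = PTR_ALIGN_UP((char *)frm + nfrm * sizeof(*frm),
                                       _Alignof(uint32_t));

          printf("fmt=%p frm=%p itv=%p\n", (void *)fmt, (void *)frm, (void *)itv);
          free(fmt);
          return 0;
  }
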
+diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
+index 692daa9eff341..19c9d2cdd277b 100644
+--- a/drivers/misc/vmw_vmci/vmci_resource.c
++++ b/drivers/misc/vmw_vmci/vmci_resource.c
+@@ -144,7 +144,8 @@ void vmci_resource_remove(struct vmci_resource *resource)
+ 	spin_lock(&vmci_resource_table.lock);
+ 
+ 	hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
+-		if (vmci_handle_is_equal(r->handle, resource->handle)) {
++		if (vmci_handle_is_equal(r->handle, resource->handle) &&
++		    resource->type == r->type) {
+ 			hlist_del_init_rcu(&r->node);
+ 			break;
+ 		}
+diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
+index 23cf7912c1ba3..6a350f4953528 100644
+--- a/drivers/mmc/host/cqhci.c
++++ b/drivers/mmc/host/cqhci.c
+@@ -592,7 +592,7 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ 		cqhci_writel(cq_host, 0, CQHCI_CTL);
+ 		mmc->cqe_on = true;
+ 		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
+-		if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) {
++		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
+ 			pr_err("%s: cqhci: CQE failed to exit halt state\n",
+ 			       mmc_hostname(mmc));
+ 		}
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index 4da525f9c11f0..dc7a5ad41c420 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2826,8 +2826,8 @@ static int dw_mci_init_slot(struct dw_mci *host)
+ 	if (host->use_dma == TRANS_MODE_IDMAC) {
+ 		mmc->max_segs = host->ring_size;
+ 		mmc->max_blk_size = 65535;
+-		mmc->max_seg_size = 0x1000;
+-		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
++		mmc->max_req_size = DW_MCI_DESC_DATA_LENGTH * host->ring_size;
++		mmc->max_seg_size = mmc->max_req_size;
+ 		mmc->max_blk_count = mmc->max_req_size / 512;
+ 	} else if (host->use_dma == TRANS_MODE_EDMAC) {
+ 		mmc->max_segs = 64;
+diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
+index 4f008ba3280eb..60810de52d4db 100644
+--- a/drivers/mmc/host/sdhci-of-aspeed.c
++++ b/drivers/mmc/host/sdhci-of-aspeed.c
+@@ -236,6 +236,7 @@ static const struct of_device_id aspeed_sdhci_of_match[] = {
+ 	{ .compatible = "aspeed,ast2600-sdhci", },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(of, aspeed_sdhci_of_match);
+ 
+ static struct platform_driver aspeed_sdhci_driver = {
+ 	.driver		= {
+diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
+index 53ef48588e59a..d9917120b8fac 100644
+--- a/drivers/net/bareudp.c
++++ b/drivers/net/bareudp.c
+@@ -75,7 +75,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 
+ 		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
+ 				  sizeof(ipversion))) {
+-			bareudp->dev->stats.rx_dropped++;
++			DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 			goto drop;
+ 		}
+ 		ipversion >>= 4;
+@@ -85,7 +85,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
+ 			proto = htons(ETH_P_IPV6);
+ 		} else {
+-			bareudp->dev->stats.rx_dropped++;
++			DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 			goto drop;
+ 		}
+ 	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
+@@ -99,7 +99,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 				   ipv4_is_multicast(tunnel_hdr->daddr)) {
+ 				proto = htons(ETH_P_MPLS_MC);
+ 			} else {
+-				bareudp->dev->stats.rx_dropped++;
++				DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 				goto drop;
+ 			}
+ 		} else {
+@@ -115,7 +115,7 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 				   (addr_type & IPV6_ADDR_MULTICAST)) {
+ 				proto = htons(ETH_P_MPLS_MC);
+ 			} else {
+-				bareudp->dev->stats.rx_dropped++;
++				DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 				goto drop;
+ 			}
+ 		}
+@@ -127,12 +127,12 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 				 proto,
+ 				 !net_eq(bareudp->net,
+ 				 dev_net(bareudp->dev)))) {
+-		bareudp->dev->stats.rx_dropped++;
++		DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 		goto drop;
+ 	}
+ 	tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
+ 	if (!tun_dst) {
+-		bareudp->dev->stats.rx_dropped++;
++		DEV_STATS_INC(bareudp->dev, rx_dropped);
+ 		goto drop;
+ 	}
+ 	skb_dst_set(skb, &tun_dst->dst);
+@@ -157,8 +157,8 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 						     &((struct ipv6hdr *)oiph)->saddr);
+ 		}
+ 		if (err > 1) {
+-			++bareudp->dev->stats.rx_frame_errors;
+-			++bareudp->dev->stats.rx_errors;
++			DEV_STATS_INC(bareudp->dev, rx_frame_errors);
++			DEV_STATS_INC(bareudp->dev, rx_errors);
+ 			goto drop;
+ 		}
+ 	}
+@@ -453,11 +453,11 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	dev_kfree_skb(skb);
+ 
+ 	if (err == -ELOOP)
+-		dev->stats.collisions++;
++		DEV_STATS_INC(dev, collisions);
+ 	else if (err == -ENETUNREACH)
+-		dev->stats.tx_carrier_errors++;
++		DEV_STATS_INC(dev, tx_carrier_errors);
+ 
+-	dev->stats.tx_errors++;
++	DEV_STATS_INC(dev, tx_errors);
+ 	return NETDEV_TX_OK;
+ }
+ 
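
The bareudp conversion swaps every plain dev->stats counter bump for DEV_STATS_INC(), an atomic_long increment, so concurrent datapath CPUs cannot lose counts to a non-atomic read-modify-write. Hedged sketch of the pattern in a hypothetical drop path:

  #include <linux/netdevice.h>	/* DEV_STATS_INC() */
  #include <linux/skbuff.h>

  /* hypothetical rx drop path: the increment is atomic, unlike the old
   * dev->stats.rx_dropped++ which could race on SMP */
  static void demo_rx_drop(struct net_device *dev, struct sk_buff *skb)
  {
          DEV_STATS_INC(dev, rx_dropped);
          kfree_skb(skb);
  }
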
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index ffcb04aac9729..88d065718e990 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -755,7 +755,7 @@ static int mcp251x_hw_wake(struct spi_device *spi)
+ 	int ret;
+ 
+ 	/* Force wakeup interrupt to wake device, but don't execute IST */
+-	disable_irq(spi->irq);
++	disable_irq_nosync(spi->irq);
+ 	mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF);
+ 
+ 	/* Wait for oscillator startup timer after wake up */
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index 8a21902212e04..7c2780ccf9d6f 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -35,7 +35,7 @@
+ #define VSC73XX_BLOCK_ANALYZER	0x2 /* Only subblock 0 */
+ #define VSC73XX_BLOCK_MII	0x3 /* Subblocks 0 and 1 */
+ #define VSC73XX_BLOCK_MEMINIT	0x3 /* Only subblock 2 */
+-#define VSC73XX_BLOCK_CAPTURE	0x4 /* Only subblock 2 */
++#define VSC73XX_BLOCK_CAPTURE	0x4 /* Subblocks 0-4, 6, 7 */
+ #define VSC73XX_BLOCK_ARBITER	0x5 /* Only subblock 0 */
+ #define VSC73XX_BLOCK_SYSTEM	0x7 /* Only subblock 0 */
+ 
+@@ -371,13 +371,19 @@ int vsc73xx_is_addr_valid(u8 block, u8 subblock)
+ 		break;
+ 
+ 	case VSC73XX_BLOCK_MII:
+-	case VSC73XX_BLOCK_CAPTURE:
+ 	case VSC73XX_BLOCK_ARBITER:
+ 		switch (subblock) {
+ 		case 0 ... 1:
+ 			return 1;
+ 		}
+ 		break;
++	case VSC73XX_BLOCK_CAPTURE:
++		switch (subblock) {
++		case 0 ... 4:
++		case 6 ... 7:
++			return 1;
++		}
++		break;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index cb7c028b1bf5a..90bd5583ac347 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -908,14 +908,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
+ 	}
+ }
+ 
+-static void dpaa_fq_setup(struct dpaa_priv *priv,
+-			  const struct dpaa_fq_cbs *fq_cbs,
+-			  struct fman_port *tx_port)
++static int dpaa_fq_setup(struct dpaa_priv *priv,
++			 const struct dpaa_fq_cbs *fq_cbs,
++			 struct fman_port *tx_port)
+ {
+ 	int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
+ 	const cpumask_t *affine_cpus = qman_affine_cpus();
+-	u16 channels[NR_CPUS];
+ 	struct dpaa_fq *fq;
++	u16 *channels;
++
++	channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
++	if (!channels)
++		return -ENOMEM;
+ 
+ 	for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
+ 		channels[num_portals++] = qman_affine_channel(cpu);
+@@ -974,6 +978,10 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
+ 				break;
+ 		}
+ 	}
++
++	kfree(channels);
++
++	return 0;
+ }
+ 
+ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
+@@ -3015,7 +3023,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
+ 	 */
+ 	dpaa_eth_add_channel(priv->channel, &pdev->dev);
+ 
+-	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
++	err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
++	if (err)
++		goto free_dpaa_bps;
+ 
+ 	/* Create a congestion group for this netdev, with
+ 	 * dynamically-allocated CGR ID.
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+index 2f9075429c43e..d8cb0b99684ad 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+@@ -537,12 +537,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ 			     struct ethtool_coalesce *c)
+ {
+ 	const cpumask_t *cpus = qman_affine_cpus();
+-	bool needs_revert[NR_CPUS] = {false};
+ 	struct qman_portal *portal;
+ 	u32 period, prev_period;
+ 	u8 thresh, prev_thresh;
++	bool *needs_revert;
+ 	int cpu, res;
+ 
++	needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
++	if (!needs_revert)
++		return -ENOMEM;
++
+ 	period = c->rx_coalesce_usecs;
+ 	thresh = c->rx_max_coalesced_frames;
+ 
+@@ -565,6 +569,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ 		needs_revert[cpu] = true;
+ 	}
+ 
++	kfree(needs_revert);
++
+ 	return 0;
+ 
+ revert_values:
+@@ -578,6 +584,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
+ 		qman_dqrr_set_ithresh(portal, prev_thresh);
+ 	}
+ 
++	kfree(needs_revert);
++
+ 	return res;
+ }
+ 
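
Both dpaa changes trade a NR_CPUS-sized on-stack array for a num_possible_cpus()-sized heap allocation; with NR_CPUS configured up to 8192 the old u16 array alone was 16 KiB, more than a whole kernel stack on many configs. Minimal sketch of the pattern:

  #include <linux/slab.h>	/* kcalloc(), kfree() */
  #include <linux/cpumask.h>	/* num_possible_cpus() */

  /* one u16 per possible CPU on the heap instead of u16[NR_CPUS] on the stack */
  static int demo_per_cpu_scratch(void)
  {
          u16 *channels;

          channels = kcalloc(num_possible_cpus(), sizeof(*channels), GFP_KERNEL);
          if (!channels)
                  return -ENOMEM;

          /* ... fill and use channels[0..num_possible_cpus()-1] ... */

          kfree(channels);
          return 0;
  }
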
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 0848613c3f45a..e2c38e5232dc2 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6805,10 +6805,20 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+ 
+ static void igb_tsync_interrupt(struct igb_adapter *adapter)
+ {
++	const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
++			  TSINTR_TT0 | TSINTR_TT1 |
++			  TSINTR_AUTT0 | TSINTR_AUTT1);
+ 	struct e1000_hw *hw = &adapter->hw;
+ 	u32 tsicr = rd32(E1000_TSICR);
+ 	struct ptp_clock_event event;
+ 
++	if (hw->mac.type == e1000_82580) {
++		/* 82580 has a hardware bug that requires an explicit
++		 * write to clear the TimeSync interrupt cause.
++		 */
++		wr32(E1000_TSICR, tsicr & mask);
++	}
++
+ 	if (tsicr & TSINTR_SYS_WRAP) {
+ 		event.type = PTP_CLOCK_PPS;
+ 		if (adapter->ptp_caps.pps)
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 631ce793fb2ec..65cf7035b02d5 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -5740,6 +5740,7 @@ static void igc_io_resume(struct pci_dev *pdev)
+ 	rtnl_lock();
+ 	if (netif_running(netdev)) {
+ 		if (igc_open(netdev)) {
++			rtnl_unlock();
+ 			netdev_err(netdev, "igc_open failed after reset\n");
+ 			return;
+ 		}
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 324ef6990e9a7..f0c48f20d086d 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -210,7 +210,7 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
+ 		name = dev_name(dev);
+ 
+ 	snprintf(intr->name, sizeof(intr->name),
+-		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
++		 "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);
+ 
+ 	return devm_request_irq(dev, intr->vector, ionic_isr,
+ 				0, intr->name, &qcq->napi);
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index af35361a3dcee..08b479f04ed06 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -528,18 +528,15 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
+ 
+ 	type = gh->proto_type;
+ 
+-	rcu_read_lock();
+ 	ptype = gro_find_receive_by_type(type);
+ 	if (!ptype)
+-		goto out_unlock;
++		goto out;
+ 
+ 	skb_gro_pull(skb, gh_len);
+ 	skb_gro_postpull_rcsum(skb, gh, gh_len);
+ 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ 	flush = 0;
+ 
+-out_unlock:
+-	rcu_read_unlock();
+ out:
+ 	skb_gro_flush_final(skb, pp, flush);
+ 
+@@ -559,13 +556,10 @@ static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
+ 	gh_len = geneve_hlen(gh);
+ 	type = gh->proto_type;
+ 
+-	rcu_read_lock();
+ 	ptype = gro_find_complete_by_type(type);
+ 	if (ptype)
+ 		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
+ 
+-	rcu_read_unlock();
+-
+ 	skb_set_inner_mac_header(skb, nhoff + gh_len);
+ 
+ 	return err;
+diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
+index d7f3b70d54775..f69d9b902da04 100644
+--- a/drivers/net/usb/ch9200.c
++++ b/drivers/net/usb/ch9200.c
+@@ -336,6 +336,7 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
+ {
+ 	int retval = 0;
+ 	unsigned char data[2];
++	u8 addr[ETH_ALEN];
+ 
+ 	retval = usbnet_get_endpoints(dev, intf);
+ 	if (retval)
+@@ -383,7 +384,8 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02,
+ 			       CONTROL_TIMEOUT_MS);
+ 
+-	retval = get_mac_address(dev, dev->net->dev_addr);
++	retval = get_mac_address(dev, addr);
++	eth_hw_addr_set(dev->net, addr);
+ 
+ 	return retval;
+ }
+diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
+index c4568a491dc4d..79a47e2fd4378 100644
+--- a/drivers/net/usb/cx82310_eth.c
++++ b/drivers/net/usb/cx82310_eth.c
+@@ -146,6 +146,7 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	u8 link[3];
+ 	int timeout = 50;
+ 	struct cx82310_priv *priv;
++	u8 addr[ETH_ALEN];
+ 
+ 	/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
+ 	if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
+@@ -202,12 +203,12 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
+ 		goto err;
+ 
+ 	/* get the MAC address */
+-	ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
+-			  dev->net->dev_addr, ETH_ALEN);
++	ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0, addr, ETH_ALEN);
+ 	if (ret) {
+ 		netdev_err(dev->net, "unable to read MAC address: %d\n", ret);
+ 		goto err;
+ 	}
++	eth_hw_addr_set(dev->net, addr);
+ 
+ 	/* start (does not seem to have any effect?) */
+ 	ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0);
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 06d9f19ca142a..4485388dcff2e 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -353,8 +353,8 @@ static int ipheth_close(struct net_device *net)
+ {
+ 	struct ipheth_device *dev = netdev_priv(net);
+ 
+-	cancel_delayed_work_sync(&dev->carrier_work);
+ 	netif_stop_queue(net);
++	cancel_delayed_work_sync(&dev->carrier_work);
+ 	return 0;
+ }
+ 
+@@ -443,7 +443,7 @@ static int ipheth_probe(struct usb_interface *intf,
+ 
+ 	netdev->netdev_ops = &ipheth_netdev_ops;
+ 	netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
+-	strcpy(netdev->name, "eth%d");
++	strscpy(netdev->name, "eth%d", sizeof(netdev->name));
+ 
+ 	dev = netdev_priv(netdev);
+ 	dev->udev = udev;
+diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
+index 144c686b43330..9b2bc1993ece2 100644
+--- a/drivers/net/usb/kaweth.c
++++ b/drivers/net/usb/kaweth.c
+@@ -1044,8 +1044,7 @@ static int kaweth_probe(
+ 		goto err_all_but_rxbuf;
+ 
+ 	memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr));
+-	memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr,
+-               sizeof(kaweth->configuration.hw_addr));
++	eth_hw_addr_set(netdev, (u8 *)&kaweth->configuration.hw_addr);
+ 
+ 	netdev->netdev_ops = &kaweth_netdev_ops;
+ 	netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
+diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
+index 7e40e2e2f3723..57281296ba2ca 100644
+--- a/drivers/net/usb/mcs7830.c
++++ b/drivers/net/usb/mcs7830.c
+@@ -480,17 +480,19 @@ static const struct net_device_ops mcs7830_netdev_ops = {
+ static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
+ {
+ 	struct net_device *net = dev->net;
++	u8 addr[ETH_ALEN];
+ 	int ret;
+ 	int retry;
+ 
+ 	/* Initial startup: Gather MAC address setting from EEPROM */
+ 	ret = -EINVAL;
+ 	for (retry = 0; retry < 5 && ret; retry++)
+-		ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
++		ret = mcs7830_hif_get_mac_address(dev, addr);
+ 	if (ret) {
+ 		dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
+ 		goto out;
+ 	}
++	eth_hw_addr_set(net, addr);
+ 
+ 	mcs7830_data_set_multicast(net);
+ 
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 47cc54a64b56d..0a1ab8c30a003 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1365,6 +1365,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x2692, 0x9025, 4)},    /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+ 	{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)},	/* u-blox LARA-L6 */
+ 	{QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
++	{QMI_FIXED_INTF(0x2dee, 0x4d22, 5)},    /* MeiG Smart SRM825L */
+ 
+ 	/* 4. Gobi 1000 devices */
+ 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
+diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
+index 0abd257b634c6..777f672f288cb 100644
+--- a/drivers/net/usb/sierra_net.c
++++ b/drivers/net/usb/sierra_net.c
+@@ -669,6 +669,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+ 		0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
+ 	static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
+ 		0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
++	u8 mod[2];
+ 
+ 	dev_dbg(&dev->udev->dev, "%s", __func__);
+ 
+@@ -698,8 +699,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	dev->net->netdev_ops = &sierra_net_device_ops;
+ 
+ 	/* change MAC addr to include, ifacenum, and to be unique */
+-	dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
+-	dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
++	mod[0] = atomic_inc_return(&iface_counter);
++	mod[1] = ifacenum;
++	dev_addr_mod(dev->net, ETH_ALEN - 2, mod, 2);
+ 
+ 	/* prepare shutdown message template */
+ 	memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index 8d2e3daf03cf2..1ec11a08820d4 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -326,6 +326,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
+ {
+ 	struct net_device *netdev;
+ 	struct mii_if_info *mii;
++	u8 addr[ETH_ALEN];
+ 	int ret;
+ 
+ 	ret = usbnet_get_endpoints(dev, intf);
+@@ -356,11 +357,12 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	 * EEPROM automatically to PAR. In case there is no EEPROM externally,
+ 	 * a default MAC address is stored in PAR for making chip work properly.
+ 	 */
+-	if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
++	if (sr_read(dev, SR_PAR, ETH_ALEN, addr) < 0) {
+ 		netdev_err(netdev, "Error reading MAC address\n");
+ 		ret = -ENODEV;
+ 		goto out;
+ 	}
++	eth_hw_addr_set(netdev, addr);
+ 
+ 	/* power up and reset phy */
+ 	sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
+diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
+index a5332e99102a5..351e0edcda2af 100644
+--- a/drivers/net/usb/sr9800.c
++++ b/drivers/net/usb/sr9800.c
+@@ -731,6 +731,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	struct sr_data *data = (struct sr_data *)&dev->data;
+ 	u16 led01_mux, led23_mux;
+ 	int ret, embd_phy;
++	u8 addr[ETH_ALEN];
+ 	u32 phyid;
+ 	u16 rx_ctl;
+ 
+@@ -756,12 +757,12 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
+ 	}
+ 
+ 	/* Get the MAC address */
+-	ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
+-			  dev->net->dev_addr);
++	ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, addr);
+ 	if (ret < 0) {
+ 		netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
+ 		return ret;
+ 	}
++	eth_hw_addr_set(dev->net, addr);
+ 	netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
+ 
+ 	/* Initialize MII structure */
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 481a41d879b53..669cd20cfe00a 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -64,9 +64,6 @@
+ 
+ /*-------------------------------------------------------------------------*/
+ 
+-// randomly generated ethernet address
+-static u8	node_id [ETH_ALEN];
+-
+ /* use ethtool to change the level for any given device */
+ static int msg_level = -1;
+ module_param (msg_level, int, 0);
+@@ -148,12 +145,13 @@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
+ 
+ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
+ {
++	u8		addr[ETH_ALEN];
+ 	int 		tmp = -1, ret;
+ 	unsigned char	buf [13];
+ 
+ 	ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
+ 	if (ret == 12)
+-		tmp = hex2bin(dev->net->dev_addr, buf, 6);
++		tmp = hex2bin(addr, buf, 6);
+ 	if (tmp < 0) {
+ 		dev_dbg(&dev->udev->dev,
+ 			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
+@@ -161,6 +159,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
+ 			ret = -EINVAL;
+ 		return ret;
+ 	}
++	eth_hw_addr_set(dev->net, addr);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
+@@ -1693,8 +1692,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 	dev->interrupt_count = 0;
+ 
+ 	dev->net = net;
+-	strcpy (net->name, "usb%d");
+-	memcpy (net->dev_addr, node_id, sizeof node_id);
++	strscpy(net->name, "usb%d", sizeof(net->name));
+ 
+ 	/* rx and tx sides can use different message sizes;
+ 	 * bind() should set rx_urb_size in that case.
+@@ -1720,13 +1718,13 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
+ 		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
+ 		     (net->dev_addr [0] & 0x02) == 0))
+-			strcpy (net->name, "eth%d");
++			strscpy(net->name, "eth%d", sizeof(net->name));
+ 		/* WLAN devices should always be named "wlan%d" */
+ 		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
+-			strcpy(net->name, "wlan%d");
++			strscpy(net->name, "wlan%d", sizeof(net->name));
+ 		/* WWAN devices should always be named "wwan%d" */
+ 		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
+-			strcpy(net->name, "wwan%d");
++			strscpy(net->name, "wwan%d", sizeof(net->name));
+ 
+ 		/* devices that cannot do ARP */
+ 		if ((dev->driver_info->flags & FLAG_NOARP) != 0)
+@@ -1768,9 +1766,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ 		goto out4;
+ 	}
+ 
+-	/* let userspace know we have a random address */
+-	if (ether_addr_equal(net->dev_addr, node_id))
+-		net->addr_assign_type = NET_ADDR_RANDOM;
++	/* this flags the device for user space */
++	if (!is_valid_ether_addr(net->dev_addr))
++		eth_hw_addr_random(net);
+ 
+ 	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
+ 		SET_NETDEV_DEVTYPE(net, &wlan_type);
+@@ -2180,7 +2178,6 @@ static int __init usbnet_init(void)
+ 	BUILD_BUG_ON(
+ 		sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));
+ 
+-	eth_random_addr(node_id);
+ 	return 0;
+ }
+ module_init(usbnet_init);
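
The usb driver hunks above all follow one recipe: read the MAC into a local buffer, then install it with eth_hw_addr_set() rather than writing net->dev_addr directly, preparing for dev_addr becoming const. A hedged sketch of the conversion; read_hw_mac() is a stand-in for each driver's own EEPROM or control-message fetch:

  #include <linux/etherdevice.h>	/* eth_hw_addr_set(), eth_hw_addr_random() */

  int read_hw_mac(u8 addr[ETH_ALEN]);	/* stand-in, driver-specific */

  static int demo_install_mac(struct net_device *net)
  {
          u8 addr[ETH_ALEN];
          int ret;

          ret = read_hw_mac(addr);
          if (ret)
                  return ret;

          if (!is_valid_ether_addr(addr)) {
                  eth_hw_addr_random(net);	/* also sets NET_ADDR_RANDOM */
                  return 0;
          }

          eth_hw_addr_set(net, addr);	/* never write net->dev_addr directly */
          return 0;
  }
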
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index f7ed99561c192..99dea89b26788 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1497,7 +1497,7 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+ 		return false;
+ }
+ 
+-static void virtnet_poll_cleantx(struct receive_queue *rq)
++static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
+ {
+ 	struct virtnet_info *vi = rq->vq->vdev->priv;
+ 	unsigned int index = vq2rxq(rq->vq);
+@@ -1508,7 +1508,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
+ 		return;
+ 
+ 	if (__netif_tx_trylock(txq)) {
+-		free_old_xmit_skbs(sq, true);
++		free_old_xmit_skbs(sq, !!budget);
+ 		__netif_tx_unlock(txq);
+ 	}
+ 
+@@ -1525,7 +1525,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ 	unsigned int received;
+ 	unsigned int xdp_xmit = 0;
+ 
+-	virtnet_poll_cleantx(rq);
++	virtnet_poll_cleantx(rq, budget);
+ 
+ 	received = virtnet_receive(rq, budget, &xdp_xmit);
+ 
+@@ -1598,7 +1598,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
+ 	txq = netdev_get_tx_queue(vi->dev, index);
+ 	__netif_tx_lock(txq, raw_smp_processor_id());
+ 	virtqueue_disable_cb(sq->vq);
+-	free_old_xmit_skbs(sq, true);
++	free_old_xmit_skbs(sq, !!budget);
+ 
+ 	opaque = virtqueue_enable_cb_prepare(sq->vq);
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+index fb76b4a69a059..ad3893d450583 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+@@ -1089,6 +1089,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
+ 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ 	ieee80211_hw_set(hw, SIGNAL_DBM);
+ 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
++	ieee80211_hw_set(hw, MFP_CAPABLE);
+ 
+ 	hw->extra_tx_headroom = brcms_c_get_header_len();
+ 	hw->queues = N_TX_QUEUES;
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+index 24d6ed3513ce5..c09a736f87e68 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+@@ -275,8 +275,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
+ 		.data = { NULL, },
+ 	};
+ 
+-	if (fwrt->ops && fwrt->ops->fw_running &&
+-	    !fwrt->ops->fw_running(fwrt->ops_ctx))
++	if (!iwl_trans_fw_running(fwrt->trans))
+ 		return -EIO;
+ 
+ 	if (count < header_size + 1 || count > 1024 * 4)
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+index cddcb4d9a264c..79ab8ef78f67a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+@@ -72,7 +72,6 @@
+ struct iwl_fw_runtime_ops {
+ 	int (*dump_start)(void *ctx);
+ 	void (*dump_end)(void *ctx);
+-	bool (*fw_running)(void *ctx);
+ 	int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
+ 	bool (*d3_debug_enable)(void *ctx);
+ };
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index 3548eb57f1f30..9b1a1455a7d51 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -577,11 +577,6 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
+ 	mutex_unlock(&mvm->mutex);
+ }
+ 
+-static bool iwl_mvm_fwrt_fw_running(void *ctx)
+-{
+-	return iwl_mvm_firmware_running(ctx);
+-}
+-
+ static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+ {
+ 	struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
+@@ -602,7 +597,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx)
+ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
+ 	.dump_start = iwl_mvm_fwrt_dump_start,
+ 	.dump_end = iwl_mvm_fwrt_dump_end,
+-	.fw_running = iwl_mvm_fwrt_fw_running,
+ 	.send_hcmd = iwl_mvm_fwrt_send_hcmd,
+ 	.d3_debug_enable = iwl_mvm_d3_debug_enable,
+ };
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
+index f4e3dce10d654..5b14fe08811e8 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -1310,6 +1310,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
+ 
+ 	for (i = 0; i < adapter->priv_num; i++) {
+ 		if (adapter->priv[i]) {
++			if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
++				continue;
++
+ 			if ((adapter->priv[i]->bss_num == bss_num) &&
+ 			    (adapter->priv[i]->bss_type == bss_type))
+ 				break;
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index e493fc709065a..5655f6d81cc09 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -1787,8 +1787,10 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
+ 	}
+ 
+ 	queue->nr_cmds = sq->size * 2;
+-	if (nvmet_tcp_alloc_cmds(queue))
++	if (nvmet_tcp_alloc_cmds(queue)) {
++		queue->nr_cmds = 0;
+ 		return NVME_SC_INTERNAL;
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 1505c745154e7..45a10c15186be 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -962,13 +962,13 @@ void nvmem_device_put(struct nvmem_device *nvmem)
+ EXPORT_SYMBOL_GPL(nvmem_device_put);
+ 
+ /**
+- * devm_nvmem_device_get() - Get nvmem cell of device form a given id
++ * devm_nvmem_device_get() - Get nvmem device of device form a given id
+  *
+  * @dev: Device that requests the nvmem device.
+  * @id: name id for the requested nvmem device.
+  *
+- * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
+- * on success.  The nvmem_cell will be freed by the automatically once the
++ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
++ * on success.  The nvmem_device will be freed automatically once the
+  * device is freed.
+  */
+ struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index 352e14b007e78..ad0cb49e233ac 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -288,7 +288,8 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ 	struct device_node *p;
+ 	const __be32 *addr;
+ 	u32 intsize;
+-	int i, res;
++	int i, res, addr_len;
++	__be32 addr_buf[3] = { 0 };
+ 
+ 	pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index);
+ 
+@@ -297,13 +298,19 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ 		return of_irq_parse_oldworld(device, index, out_irq);
+ 
+ 	/* Get the reg property (if any) */
+-	addr = of_get_property(device, "reg", NULL);
++	addr = of_get_property(device, "reg", &addr_len);
++
++	/* Prevent out-of-bounds read in case of longer interrupt parent address size */
++	if (addr_len > (3 * sizeof(__be32)))
++		addr_len = 3 * sizeof(__be32);
++	if (addr)
++		memcpy(addr_buf, addr, addr_len);
+ 
+ 	/* Try the new-style interrupts-extended first */
+ 	res = of_parse_phandle_with_args(device, "interrupts-extended",
+ 					"#interrupt-cells", index, out_irq);
+ 	if (!res)
+-		return of_irq_parse_raw(addr, out_irq);
++		return of_irq_parse_raw(addr_buf, out_irq);
+ 
+ 	/* Look for the interrupt parent. */
+ 	p = of_irq_find_parent(device);
+@@ -333,7 +340,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
+ 
+ 
+ 	/* Check if there are any interrupt-map translations to process */
+-	res = of_irq_parse_raw(addr, out_irq);
++	res = of_irq_parse_raw(addr_buf, out_irq);
+  out:
+ 	of_node_put(p);
+ 	return res;
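
The of_irq_parse_one() change bounds how much of the "reg" property is consumed: the raw property can be shorter than the three cells of_irq_parse_raw() may read, so it is copied into a zeroed 3-cell stack buffer first. Sketch of the bounded-copy idiom as a generic helper (name hypothetical):

  #include <linux/of.h>
  #include <linux/string.h>

  /* Copy at most 'max' cells of a property into a zero-filled buffer so a
   * consumer that reads 'max' cells can never run off the end. */
  static void demo_bounded_prop_copy(struct device_node *np, const char *name,
                                     __be32 *buf, int max)
  {
          const __be32 *prop;
          int len;

          memset(buf, 0, max * sizeof(*buf));
          prop = of_get_property(np, name, &len);
          if (!prop)
                  return;
          if (len > max * (int)sizeof(*buf))
                  len = max * sizeof(*buf);
          memcpy(buf, prop, len);
  }
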
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
+index d3c3ca3ef4bae..0b49bdf149a69 100644
+--- a/drivers/pci/controller/dwc/pci-keystone.c
++++ b/drivers/pci/controller/dwc/pci-keystone.c
+@@ -35,6 +35,11 @@
+ #define PCIE_DEVICEID_SHIFT	16
+ 
+ /* Application registers */
++#define PID				0x000
++#define RTL				GENMASK(15, 11)
++#define RTL_SHIFT			11
++#define AM6_PCI_PG1_RTL_VER		0x15
++
+ #define CMD_STATUS			0x004
+ #define LTSSM_EN_VAL		        BIT(0)
+ #define OB_XLAT_EN_VAL		        BIT(1)
+@@ -105,6 +110,8 @@
+ 
+ #define to_keystone_pcie(x)		dev_get_drvdata((x)->dev)
+ 
++#define PCI_DEVICE_ID_TI_AM654X		0xb00c
++
+ struct ks_pcie_of_data {
+ 	enum dw_pcie_device_mode mode;
+ 	const struct dw_pcie_host_ops *host_ops;
+@@ -537,7 +544,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
+ static void ks_pcie_quirk(struct pci_dev *dev)
+ {
+ 	struct pci_bus *bus = dev->bus;
++	struct keystone_pcie *ks_pcie;
++	struct device *bridge_dev;
+ 	struct pci_dev *bridge;
++	u32 val;
++
+ 	static const struct pci_device_id rc_pci_devids[] = {
+ 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+ 		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+@@ -549,6 +560,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
+ 		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ 		{ 0, },
+ 	};
++	static const struct pci_device_id am6_pci_devids[] = {
++		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
++		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
++		{ 0, },
++	};
+ 
+ 	if (pci_is_root_bus(bus))
+ 		bridge = dev;
+@@ -570,10 +586,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
+ 	 */
+ 	if (pci_match_id(rc_pci_devids, bridge)) {
+ 		if (pcie_get_readrq(dev) > 256) {
+-			dev_info(&dev->dev, "limiting MRRS to 256\n");
++			dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
+ 			pcie_set_readrq(dev, 256);
+ 		}
+ 	}
++
++	/*
++	 * Memory transactions fail with PCI controller in AM654 PG1.0
++	 * when MRRS is set to more than 128 bytes. Force the MRRS to
++	 * 128 bytes in all downstream devices.
++	 */
++	if (pci_match_id(am6_pci_devids, bridge)) {
++		bridge_dev = pci_get_host_bridge_device(dev);
++		if (!bridge_dev || !bridge_dev->parent)
++			return;
++
++		ks_pcie = dev_get_drvdata(bridge_dev->parent);
++		if (!ks_pcie)
++			return;
++
++		val = ks_pcie_app_readl(ks_pcie, PID);
++		val &= RTL;
++		val >>= RTL_SHIFT;
++		if (val != AM6_PCI_PG1_RTL_VER)
++			return;
++
++		if (pcie_get_readrq(dev) > 128) {
++			dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
++			pcie_set_readrq(dev, 128);
++		}
++	}
+ }
+ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
+ 
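
A side note on the revision check in ks_pcie_quirk(): the open-coded pair (val &= RTL; val >>= RTL_SHIFT) is equivalent to FIELD_GET(), which derives the shift from the GENMASK itself. Shown only as the idiomatic alternative, not as part of the fix:

  #include <linux/bitfield.h>	/* FIELD_GET() */
  #include <linux/bits.h>	/* GENMASK() */
  #include <linux/types.h>

  #define DEMO_RTL		GENMASK(15, 11)
  #define DEMO_AM6_PG1_RTL_VER	0x15

  static bool demo_is_am654_pg1(u32 pid)
  {
          return FIELD_GET(DEMO_RTL, pid) == DEMO_AM6_PG1_RTL_VER;
  }
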
+diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
+index f973fbca90cf7..ac772fb11aa73 100644
+--- a/drivers/pci/controller/dwc/pcie-al.c
++++ b/drivers/pci/controller/dwc/pcie-al.c
+@@ -250,18 +250,24 @@ static struct pci_ops al_child_pci_ops = {
+ 	.write = pci_generic_config_write,
+ };
+ 
+-static void al_pcie_config_prepare(struct al_pcie *pcie)
++static int al_pcie_config_prepare(struct al_pcie *pcie)
+ {
+ 	struct al_pcie_target_bus_cfg *target_bus_cfg;
+ 	struct pcie_port *pp = &pcie->pci->pp;
+ 	unsigned int ecam_bus_mask;
++	struct resource_entry *ft;
+ 	u32 cfg_control_offset;
++	struct resource *bus;
+ 	u8 subordinate_bus;
+ 	u8 secondary_bus;
+ 	u32 cfg_control;
+ 	u32 reg;
+-	struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ 
++	ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
++	if (!ft)
++		return -ENODEV;
++
++	bus = ft->res;
+ 	target_bus_cfg = &pcie->target_bus_cfg;
+ 
+ 	ecam_bus_mask = (pcie->ecam_size >> 20) - 1;
+@@ -295,6 +301,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
+ 	       FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
+ 
+ 	al_pcie_controller_writel(pcie, cfg_control_offset, reg);
++
++	return 0;
+ }
+ 
+ static int al_pcie_host_init(struct pcie_port *pp)
+@@ -313,7 +321,9 @@ static int al_pcie_host_init(struct pcie_port *pp)
+ 	if (rc)
+ 		return rc;
+ 
+-	al_pcie_config_prepare(pcie);
++	rc = al_pcie_config_prepare(pcie);
++	if (rc)
++		return rc;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
+index 04565162a4495..cf9c0e75f0be4 100644
+--- a/drivers/pci/hotplug/pnv_php.c
++++ b/drivers/pci/hotplug/pnv_php.c
+@@ -38,7 +38,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ 				bool disable_device)
+ {
+ 	struct pci_dev *pdev = php_slot->pdev;
+-	int irq = php_slot->irq;
+ 	u16 ctrl;
+ 
+ 	if (php_slot->irq > 0) {
+@@ -57,7 +56,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
+ 		php_slot->wq = NULL;
+ 	}
+ 
+-	if (disable_device || irq > 0) {
++	if (disable_device) {
+ 		if (pdev->msix_enabled)
+ 			pci_disable_msix(pdev);
+ 		else if (pdev->msi_enabled)
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 09d5fa637b984..800df0f1417d8 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5260,10 +5260,12 @@ static void pci_bus_lock(struct pci_bus *bus)
+ {
+ 	struct pci_dev *dev;
+ 
++	pci_dev_lock(bus->self);
+ 	list_for_each_entry(dev, &bus->devices, bus_list) {
+-		pci_dev_lock(dev);
+ 		if (dev->subordinate)
+ 			pci_bus_lock(dev->subordinate);
++		else
++			pci_dev_lock(dev);
+ 	}
+ }
+ 
+@@ -5275,8 +5277,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
+ 	list_for_each_entry(dev, &bus->devices, bus_list) {
+ 		if (dev->subordinate)
+ 			pci_bus_unlock(dev->subordinate);
+-		pci_dev_unlock(dev);
++		else
++			pci_dev_unlock(dev);
+ 	}
++	pci_dev_unlock(bus->self);
+ }
+ 
+ /* Return 1 on successful lock, 0 on contention */
+@@ -5284,15 +5288,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
+ {
+ 	struct pci_dev *dev;
+ 
++	if (!pci_dev_trylock(bus->self))
++		return 0;
++
+ 	list_for_each_entry(dev, &bus->devices, bus_list) {
+-		if (!pci_dev_trylock(dev))
+-			goto unlock;
+ 		if (dev->subordinate) {
+-			if (!pci_bus_trylock(dev->subordinate)) {
+-				pci_dev_unlock(dev);
++			if (!pci_bus_trylock(dev->subordinate))
+ 				goto unlock;
+-			}
+-		}
++		} else if (!pci_dev_trylock(dev))
++			goto unlock;
+ 	}
+ 	return 1;
+ 
+@@ -5300,8 +5304,10 @@ static int pci_bus_trylock(struct pci_bus *bus)
+ 	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
+ 		if (dev->subordinate)
+ 			pci_bus_unlock(dev->subordinate);
+-		pci_dev_unlock(dev);
++		else
++			pci_dev_unlock(dev);
+ 	}
++	pci_dev_unlock(bus->self);
+ 	return 0;
+ }
+ 
+@@ -5333,9 +5339,10 @@ static void pci_slot_lock(struct pci_slot *slot)
+ 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ 		if (!dev->slot || dev->slot != slot)
+ 			continue;
+-		pci_dev_lock(dev);
+ 		if (dev->subordinate)
+ 			pci_bus_lock(dev->subordinate);
++		else
++			pci_dev_lock(dev);
+ 	}
+ }
+ 
+@@ -5361,14 +5368,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
+ 	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ 		if (!dev->slot || dev->slot != slot)
+ 			continue;
+-		if (!pci_dev_trylock(dev))
+-			goto unlock;
+ 		if (dev->subordinate) {
+ 			if (!pci_bus_trylock(dev->subordinate)) {
+ 				pci_dev_unlock(dev);
+ 				goto unlock;
+ 			}
+-		}
++		} else if (!pci_dev_trylock(dev))
++			goto unlock;
+ 	}
+ 	return 1;
+ 
+@@ -5379,7 +5385,8 @@ static int pci_slot_trylock(struct pci_slot *slot)
+ 			continue;
+ 		if (dev->subordinate)
+ 			pci_bus_unlock(dev->subordinate);
+-		pci_dev_unlock(dev);
++		else
++			pci_dev_unlock(dev);
+ 	}
+ 	return 0;
+ }
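
The pci.c rework enforces one ordering rule across lock, trylock and unlock: the bridge owning the bus is locked exactly once, up front, and each child is then either recursed into (if it has a subordinate bus) or locked directly (if it is a leaf); previously a bridge could be locked both as a device on its parent bus and again via its own bus. A toy model of the recursion shape, in plain userspace C rather than kernel code:

  #include <stdio.h>

  struct node { const char *name; struct node *child, *next; };

  static void take(struct node *n) { printf("lock %s\n", n->name); }

  /* mirrors the fixed pci_bus_lock(): lock the bridge once, then visit
   * children, recursing into bridges and locking only leaves directly */
  static void lock_tree(struct node *bridge)
  {
          struct node *d;

          take(bridge);
          for (d = bridge->child; d; d = d->next) {
                  if (d->child)
                          lock_tree(d);
                  else
                          take(d);
          }
  }

  int main(void)
  {
          struct node nic  = { "nic", NULL, NULL };
          struct node sw   = { "switch", &nic, NULL };
          struct node gpu  = { "gpu", NULL, &sw };
          struct node root = { "root-bridge", &gpu, NULL };

          lock_tree(&root);	/* root-bridge, gpu, switch, nic */
          return 0;
  }

Unlocking walks the same tree and releases in the mirrored order, so the lock and unlock passes always pair up.
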
+diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
+index 84bfc0e85d6b9..f15b72c6e57ed 100644
+--- a/drivers/pcmcia/yenta_socket.c
++++ b/drivers/pcmcia/yenta_socket.c
+@@ -636,11 +636,11 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
+ 		start = PCIBIOS_MIN_CARDBUS_IO;
+ 		end = ~0U;
+ 	} else {
+-		unsigned long avail = root->end - root->start;
++		unsigned long avail = resource_size(root);
+ 		int i;
+ 		size = BRIDGE_MEM_MAX;
+-		if (size > avail/8) {
+-			size = (avail+1)/8;
++		if (size > (avail - 1) / 8) {
++			size = avail / 8;
+ 			/* round size down to next power of 2 */
+ 			i = 0;
+ 			while ((size /= 2) != 0)
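
The yenta change is behaviour-preserving: the old avail was
root->end - root->start, one less than the true window size, which the
(avail+1)/8 assignment silently compensated for; using resource_size()
(end - start + 1) and shifting the comparison to (avail - 1) / 8 keeps the
arithmetic identical while making the intent explicit. A small worked
example (values are illustrative):

  #include <stdio.h>

  /* mirrors the kernel's resource_size(): end - start + 1 */
  static unsigned long res_size(unsigned long start, unsigned long end)
  {
      return end - start + 1;
  }

  int main(void)
  {
      unsigned long start = 0x100, end = 0x1ff;   /* a 256-byte window */
      unsigned long avail = res_size(start, end); /* 256, not 255 */
      unsigned long size = 1024;                  /* request exceeds avail/8 */
      int i = 0;

      if (size > (avail - 1) / 8) {
          size = avail / 8;                       /* 256/8 = 32 */
          /* round size down to the next power of 2, as the driver does */
          while ((size /= 2) != 0)
              i++;
          size = 1UL << i;
      }
      printf("avail=%lu size=%lu\n", avail, size); /* avail=256 size=32 */
      return 0;
  }
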
+diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c
+index 3a1dbf1994413..98e77cb210b70 100644
+--- a/drivers/platform/x86/dell-smbios-base.c
++++ b/drivers/platform/x86/dell-smbios-base.c
+@@ -610,7 +610,10 @@ static int __init dell_smbios_init(void)
+ 	return 0;
+ 
+ fail_sysfs:
+-	free_group(platform_device);
++	if (!wmi)
++		exit_dell_smbios_wmi();
++	if (!smm)
++		exit_dell_smbios_smm();
+ 
+ fail_create_group:
+ 	platform_device_del(platform_device);
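
In the dell-smbios fix above, the fail_sysfs error path stops calling
free_group() and instead unregisters exactly the backends that came up: in
the driver, wmi and smm hold the return codes of the WMI/SMM backend init
calls, so a value of zero means that backend registered. A self-contained
sketch of the idiom; every name below is illustrative, not the driver's:

  #include <stdio.h>

  static int wmi, smm;                    /* 0 means the backend registered */

  static int init_backend_a(void) { return 0; }   /* pretend both register */
  static int init_backend_b(void) { return 0; }
  static void exit_backend_a(void) { puts("backend A unregistered"); }
  static void exit_backend_b(void) { puts("backend B unregistered"); }
  static int create_sysfs_files(void) { return -1; } /* force the error path */

  static int driver_init(void)
  {
      int err;

      wmi = init_backend_a();
      smm = init_backend_b();
      if (wmi && smm)                     /* need at least one backend */
          return -1;

      err = create_sysfs_files();
      if (err)
          goto fail_sysfs;
      return 0;

  fail_sysfs:
      /* unwind only what actually came up */
      if (!wmi)
          exit_backend_a();
      if (!smm)
          exit_backend_b();
      return err;
  }

  int main(void)
  {
      printf("driver_init() = %d\n", driver_init());
      return 0;
  }
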
+diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
+index 262c3590e64e3..fa0a7056dea40 100644
+--- a/drivers/staging/iio/frequency/ad9834.c
++++ b/drivers/staging/iio/frequency/ad9834.c
+@@ -115,7 +115,7 @@ static int ad9834_write_frequency(struct ad9834_state *st,
+ 
+ 	clk_freq = clk_get_rate(st->mclk);
+ 
+-	if (fout > (clk_freq / 2))
++	if (!clk_freq || fout > (clk_freq / 2))
+ 		return -EINVAL;
+ 
+ 	regval = ad9834_calc_freqreg(clk_freq, fout);
+diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
+index c31febe90d4ea..3343cac607379 100644
+--- a/drivers/uio/uio_hv_generic.c
++++ b/drivers/uio/uio_hv_generic.c
+@@ -104,10 +104,11 @@ static void hv_uio_channel_cb(void *context)
+ 
+ /*
+  * Callback from vmbus_event when channel is rescinded.
++ * It is meant for rescind of primary channels only.
+  */
+ static void hv_uio_rescind(struct vmbus_channel *channel)
+ {
+-	struct hv_device *hv_dev = channel->primary_channel->device_obj;
++	struct hv_device *hv_dev = channel->device_obj;
+ 	struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
+ 
+ 	/*
+@@ -118,6 +119,14 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
+ 
+ 	/* Wake up reader */
+ 	uio_event_notify(&pdata->info);
++
++	/*
++	 * With the rescind callback registered, the rescind path will not
++	 * unregister the device from vmbus when the primary channel is
++	 * rescinded. Without it, rescind handling is incomplete and the next
++	 * onoffer message never arrives. Unregister the device from vmbus here.
++	 */
++	vmbus_device_unregister(channel->device_obj);
+ }
+ 
+ /* Sysfs API to allow mmap of the ring buffers
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index ff6f41e7e0683..ea1680c4cc065 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -424,6 +424,7 @@ static void uas_data_cmplt(struct urb *urb)
+ 			uas_log_cmd_state(cmnd, "data cmplt err", status);
+ 		/* error: no data transferred */
+ 		scsi_set_resid(cmnd, sdb->length);
++		set_host_byte(cmnd, DID_ERROR);
+ 	} else {
+ 		scsi_set_resid(cmnd, sdb->length - urb->actual_length);
+ 	}
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 41e1a64da82e8..f75b1e2c05fec 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -365,7 +365,7 @@ ucsi_register_displayport(struct ucsi_connector *con,
+ 			  bool override, int offset,
+ 			  struct typec_altmode_desc *desc)
+ {
+-	return NULL;
++	return typec_port_register_altmode(con->port, desc);
+ }
+ 
+ static inline void
+diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
+index 5dd41e8215e0f..bb34d647cf138 100644
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -144,53 +144,62 @@ static int tweak_set_configuration_cmd(struct urb *urb)
+ 	if (err && err != -ENODEV)
+ 		dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
+ 			config, err);
+-	return 0;
++	return err;
+ }
+ 
+ static int tweak_reset_device_cmd(struct urb *urb)
+ {
+ 	struct stub_priv *priv = (struct stub_priv *) urb->context;
+ 	struct stub_device *sdev = priv->sdev;
++	int err;
+ 
+ 	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
+ 
+-	if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
++	err = usb_lock_device_for_reset(sdev->udev, NULL);
++	if (err < 0) {
+ 		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
+-		return 0;
++		return err;
+ 	}
+-	usb_reset_device(sdev->udev);
++	err = usb_reset_device(sdev->udev);
+ 	usb_unlock_device(sdev->udev);
+ 
+-	return 0;
++	return err;
+ }
+ 
+ /*
+  * clear_halt, set_interface, and set_configuration require special tricks.
++ * Returns 1 if request was tweaked, 0 otherwise.
+  */
+-static void tweak_special_requests(struct urb *urb)
++static int tweak_special_requests(struct urb *urb)
+ {
++	int err;
++
+ 	if (!urb || !urb->setup_packet)
+-		return;
++		return 0;
+ 
+ 	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
+-		return;
++		return 0;
+ 
+ 	if (is_clear_halt_cmd(urb))
+ 		/* tweak clear_halt */
+-		 tweak_clear_halt_cmd(urb);
++		err = tweak_clear_halt_cmd(urb);
+ 
+ 	else if (is_set_interface_cmd(urb))
+ 		/* tweak set_interface */
+-		tweak_set_interface_cmd(urb);
++		err = tweak_set_interface_cmd(urb);
+ 
+ 	else if (is_set_configuration_cmd(urb))
+ 		/* tweak set_configuration */
+-		tweak_set_configuration_cmd(urb);
++		err = tweak_set_configuration_cmd(urb);
+ 
+ 	else if (is_reset_device_cmd(urb))
+-		tweak_reset_device_cmd(urb);
+-	else
++		err = tweak_reset_device_cmd(urb);
++	else {
+ 		usbip_dbg_stub_rx("no need to tweak\n");
++		return 0;
++	}
++
++	return !err;
+ }
+ 
+ /*
+@@ -468,6 +477,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 	int support_sg = 1;
+ 	int np = 0;
+ 	int ret, i;
++	int is_tweaked;
+ 
+ 	if (pipe == -1)
+ 		return;
+@@ -580,8 +590,11 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 		priv->urbs[i]->pipe = pipe;
+ 		priv->urbs[i]->complete = stub_complete;
+ 
+-		/* no need to submit an intercepted request, but harmless? */
+-		tweak_special_requests(priv->urbs[i]);
++		/*
++		 * all URBs belong to a single PDU, so a global is_tweaked flag is
++		 * enough
++		 */
++		is_tweaked = tweak_special_requests(priv->urbs[i]);
+ 
+ 		masking_bogus_flags(priv->urbs[i]);
+ 	}
+@@ -594,22 +607,32 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 
+ 	/* urb is now ready to submit */
+ 	for (i = 0; i < priv->num_urbs; i++) {
+-		ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
++		if (!is_tweaked) {
++			ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
+ 
+-		if (ret == 0)
+-			usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
+-					pdu->base.seqnum);
+-		else {
+-			dev_err(&udev->dev, "submit_urb error, %d\n", ret);
+-			usbip_dump_header(pdu);
+-			usbip_dump_urb(priv->urbs[i]);
++			if (ret == 0)
++				usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
++						pdu->base.seqnum);
++			else {
++				dev_err(&udev->dev, "submit_urb error, %d\n", ret);
++				usbip_dump_header(pdu);
++				usbip_dump_urb(priv->urbs[i]);
+ 
++				/*
++				 * Pessimistic.
++				 * This connection will be discarded.
++				 */
++				usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
++				break;
++			}
++		} else {
+ 			/*
+-			 * Pessimistic.
+-			 * This connection will be discarded.
++			 * An identical URB was already submitted in
++			 * tweak_special_requests(). Skip submitting this URB to not
++			 * duplicate the request.
+ 			 */
+-			usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
+-			break;
++			priv->urbs[i]->status = 0;
++			stub_complete(priv->urbs[i]);
+ 		}
+ 	}
+ 
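
The net effect of the stub_rx.c changes: tweak_special_requests() now
reports whether it intercepted and synchronously executed the control
request, and if so the stub completes the URB locally instead of submitting
it, so the device never sees the request twice while the client still gets
a reply. A reduced sketch of that dispatch (plain C, illustrative names):

  #include <stdbool.h>
  #include <stdio.h>

  /* stand-in for tweak_special_requests(): true when the control request
   * was intercepted and executed synchronously, and that execution
   * succeeded */
  static bool tweak_special(int req)
  {
      return req == 1;    /* pretend request 1 is e.g. SET_CONFIGURATION */
  }

  static void submit_urb(int req)       { printf("urb %d submitted\n", req); }
  static void complete_locally(int req) { printf("urb %d completed locally\n", req); }

  int main(void)
  {
      int reqs[] = { 0, 1, 2 };

      for (int i = 0; i < 3; i++) {
          if (tweak_special(reqs[i]))
              complete_locally(reqs[i]);   /* don't send it twice */
          else
              submit_urb(reqs[i]);
      }
      return 0;
  }
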
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 3ba43a40032cd..afa1eccd5e2d4 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4806,7 +4806,15 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
+ 		/* We don't care about errors in readahead. */
+ 		if (ret < 0)
+ 			continue;
+-		BUG_ON(refs == 0);
++
++		/*
++		 * This could be racey, it's conceivable that we raced and end
++		 * up with a bogus refs count, if that's the case just skip, if
++		 * we are actually corrupt we will notice when we look up
++		 * everything again with our locks.
++		 */
++		if (refs == 0)
++			continue;
+ 
+ 		if (wc->stage == DROP_REFERENCE) {
+ 			if (refs == 1)
+@@ -4865,7 +4873,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ 	if (lookup_info &&
+ 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+ 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
+-		BUG_ON(!path->locks[level]);
++		ASSERT(path->locks[level]);
+ 		ret = btrfs_lookup_extent_info(trans, fs_info,
+ 					       eb->start, level, 1,
+ 					       &wc->refs[level],
+@@ -4873,7 +4881,11 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ 		BUG_ON(ret == -ENOMEM);
+ 		if (ret)
+ 			return ret;
+-		BUG_ON(wc->refs[level] == 0);
++		if (unlikely(wc->refs[level] == 0)) {
++			btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++				  eb->start);
++			return -EUCLEAN;
++		}
+ 	}
+ 
+ 	if (wc->stage == DROP_REFERENCE) {
+@@ -4889,7 +4901,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
+ 
+ 	/* wc->stage == UPDATE_BACKREF */
+ 	if (!(wc->flags[level] & flag)) {
+-		BUG_ON(!path->locks[level]);
++		ASSERT(path->locks[level]);
+ 		ret = btrfs_inc_ref(trans, root, eb, 1);
+ 		BUG_ON(ret); /* -ENOMEM */
+ 		ret = btrfs_dec_ref(trans, root, eb, 0);
+@@ -5006,8 +5018,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
+ 		goto out_unlock;
+ 
+ 	if (unlikely(wc->refs[level - 1] == 0)) {
+-		btrfs_err(fs_info, "Missing references.");
+-		ret = -EIO;
++		btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++			  bytenr);
++		ret = -EUCLEAN;
+ 		goto out_unlock;
+ 	}
+ 	*lookup_info = 0;
+@@ -5209,7 +5222,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+ 				path->locks[level] = 0;
+ 				return ret;
+ 			}
+-			BUG_ON(wc->refs[level] == 0);
++			if (unlikely(wc->refs[level] == 0)) {
++				btrfs_tree_unlock_rw(eb, path->locks[level]);
++				btrfs_err(fs_info, "bytenr %llu has 0 references, expect > 0",
++					  eb->start);
++				return -EUCLEAN;
++			}
+ 			if (wc->refs[level] == 1) {
+ 				btrfs_tree_unlock_rw(eb, path->locks[level]);
+ 				path->locks[level] = 0;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 4bf28f74605fd..cd3156a9a268d 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5527,7 +5527,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
+ 	struct inode *inode;
+ 	struct btrfs_root *root = BTRFS_I(dir)->root;
+ 	struct btrfs_root *sub_root = root;
+-	struct btrfs_key location;
++	struct btrfs_key location = { 0 };
+ 	u8 di_type = 0;
+ 	int ret = 0;
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index ab8ed187746ea..24c4d059cfabb 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -853,10 +853,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ 		goto fail;
+ 	}
+ 
+-	spin_lock(&fs_info->trans_lock);
+-	list_add(&pending_snapshot->list,
+-		 &trans->transaction->pending_snapshots);
+-	spin_unlock(&fs_info->trans_lock);
++	trans->pending_snapshot = pending_snapshot;
+ 
+ 	ret = btrfs_commit_transaction(trans);
+ 	if (ret)
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 8cefe11c57dbc..8878aa7cbdc57 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -2075,6 +2075,27 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
+ 	}
+ }
+ 
++/*
++ * Add the pending snapshot associated with the given transaction handle to
++ * the transaction's list of pending snapshots. This must be called after the
++ * transaction commit has started and while holding fs_info->trans_lock.
++ * This serves to guarantee a caller of btrfs_commit_transaction() that it can
++ * safely free the pending snapshot pointer in case btrfs_commit_transaction()
++ * returns an error.
++ */
++static void add_pending_snapshot(struct btrfs_trans_handle *trans)
++{
++	struct btrfs_transaction *cur_trans = trans->transaction;
++
++	if (!trans->pending_snapshot)
++		return;
++
++	lockdep_assert_held(&trans->fs_info->trans_lock);
++	ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
++
++	list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
++}
++
+ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+@@ -2161,6 +2182,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
+ 
+ 	spin_lock(&fs_info->trans_lock);
+ 	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
++		add_pending_snapshot(trans);
++
+ 		spin_unlock(&fs_info->trans_lock);
+ 		refcount_inc(&cur_trans->use_count);
+ 		ret = btrfs_end_transaction(trans);
+@@ -2243,6 +2266,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
+ 	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
+ 	 */
+ 	spin_lock(&fs_info->trans_lock);
++	add_pending_snapshot(trans);
+ 	cur_trans->state = TRANS_STATE_COMMIT_DOING;
+ 	spin_unlock(&fs_info->trans_lock);
+ 	wait_event(cur_trans->writer_wait,
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index f73654d93fa03..eb26eb068fe8d 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -122,6 +122,8 @@ struct btrfs_trans_handle {
+ 	struct btrfs_transaction *transaction;
+ 	struct btrfs_block_rsv *block_rsv;
+ 	struct btrfs_block_rsv *orig_rsv;
++	/* Set by a task that wants to create a snapshot. */
++	struct btrfs_pending_snapshot *pending_snapshot;
+ 	refcount_t use_count;
+ 	unsigned int type;
+ 	/*
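
The ownership rule these btrfs changes establish: the snapshot creator
parks the pending snapshot in its transaction handle, and the pointer is
linked onto the transaction's list only from inside the commit's locked
sections; if btrfs_commit_transaction() fails before that point, the caller
still owns the structure and can free it without a use-after-free. A
minimal model of the hand-off (illustrative names, not the btrfs API):

  #include <stdio.h>
  #include <stdlib.h>

  struct snap { int id; };
  struct handle { struct snap *pending; };

  static int early_failure = 1;   /* flip to 0 to let the commit take it */
  static struct snap *committed;  /* stands in for the transaction's list */

  static int commit(struct handle *h)
  {
      if (early_failure)
          return -1;              /* pending never linked: caller still owns it */
      committed = h->pending;     /* add_pending_snapshot(): ownership moves */
      h->pending = NULL;
      return 0;
  }

  int main(void)
  {
      struct handle h = { malloc(sizeof(struct snap)) };

      if (commit(&h) < 0)
          free(h.pending);        /* safe: the commit never saw the pointer */
      else
          free(committed);
      return 0;
  }
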
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index a94cc7b22d7ea..1a371eb4470eb 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -493,6 +493,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 			/* A hole? We can safely clear the dirty bit */
+ 			if (!buffer_mapped(bh))
+ 				clear_buffer_dirty(bh);
++			/*
++			 * Keeping a buffer we cannot write dirty? Make sure
++			 * to redirty the page. This happens e.g. when doing
++			 * writeout for a transaction commit.
++			 */
++			if (buffer_dirty(bh) && !PageDirty(page))
++				redirty_page_for_writepage(wbc, page);
+ 			if (io->io_bio)
+ 				ext4_io_submit(io);
+ 			continue;
+@@ -500,6 +507,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 		if (buffer_new(bh))
+ 			clear_buffer_new(bh);
+ 		set_buffer_async_write(bh);
++		clear_buffer_dirty(bh);
+ 		nr_to_submit++;
+ 	} while ((bh = bh->b_this_page) != head);
+ 
+@@ -542,7 +550,10 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
+ 			redirty_page_for_writepage(wbc, page);
+ 			do {
+-				clear_buffer_async_write(bh);
++				if (buffer_async_write(bh)) {
++					clear_buffer_async_write(bh);
++					set_buffer_dirty(bh);
++				}
+ 				bh = bh->b_this_page;
+ 			} while (bh != head);
+ 			goto unlock;
+@@ -555,7 +566,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 			continue;
+ 		io_submit_add_bh(io, inode, page, bounce_page, bh);
+ 		nr_submitted++;
+-		clear_buffer_dirty(bh);
+ 	} while ((bh = bh->b_this_page) != head);
+ 
+ unlock:
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 13d97547eaf6c..fd7263ed25b92 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1692,10 +1692,16 @@ __acquires(fi->lock)
+ 	fuse_writepage_finish(fm, wpa);
+ 	spin_unlock(&fi->lock);
+ 
+-	/* After fuse_writepage_finish() aux request list is private */
++	/* After rb_erase() aux request list is private */
+ 	for (aux = wpa->next; aux; aux = next) {
++		struct backing_dev_info *bdi = inode_to_bdi(aux->inode);
++
+ 		next = aux->next;
+ 		aux->next = NULL;
++
++		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
++		dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP);
++		wb_writeout_inc(&bdi->wb);
+ 		fuse_writepage_free(aux);
+ 	}
+ 
+diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
+index cdea18de94f7e..314e460ce679d 100644
+--- a/fs/fuse/xattr.c
++++ b/fs/fuse/xattr.c
+@@ -79,7 +79,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
+ 	}
+ 	ret = fuse_simple_request(fm, &args);
+ 	if (!ret && !size)
+-		ret = min_t(ssize_t, outarg.size, XATTR_SIZE_MAX);
++		ret = min_t(size_t, outarg.size, XATTR_SIZE_MAX);
+ 	if (ret == -ENOSYS) {
+ 		fm->fc->no_getxattr = 1;
+ 		ret = -EOPNOTSUPP;
+@@ -141,7 +141,7 @@ ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
+ 	}
+ 	ret = fuse_simple_request(fm, &args);
+ 	if (!ret && !size)
+-		ret = min_t(ssize_t, outarg.size, XATTR_LIST_MAX);
++		ret = min_t(size_t, outarg.size, XATTR_LIST_MAX);
+ 	if (ret > 0 && size)
+ 		ret = fuse_verify_xattr_list(list, ret);
+ 	if (ret == -ENOSYS) {
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 5579e67da17db..c33f78513f00f 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -759,8 +759,6 @@ static const struct svc_version *nlmsvc_version[] = {
+ #endif
+ };
+ 
+-static struct svc_stat		nlmsvc_stats;
+-
+ #define NLM_NRVERS	ARRAY_SIZE(nlmsvc_version)
+ static struct svc_program	nlmsvc_program = {
+ 	.pg_prog		= NLM_PROGRAM,		/* program number */
+@@ -768,7 +766,6 @@ static struct svc_program	nlmsvc_program = {
+ 	.pg_vers		= nlmsvc_version,	/* version table */
+ 	.pg_name		= "lockd",		/* service name */
+ 	.pg_class		= "nfsd",		/* share authentication with nfsd */
+-	.pg_stats		= &nlmsvc_stats,	/* stats table */
+ 	.pg_authenticate	= &lockd_authenticate,	/* export authentication */
+ 	.pg_init_request	= svc_generic_init_request,
+ 	.pg_rpcbind_set		= svc_generic_rpcbind_set,
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 8fe143cad4a2b..f00fff3633f60 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -407,15 +407,12 @@ static const struct svc_version *nfs4_callback_version[] = {
+ 	[4] = &nfs4_callback_version4,
+ };
+ 
+-static struct svc_stat nfs4_callback_stats;
+-
+ static struct svc_program nfs4_callback_program = {
+ 	.pg_prog = NFS4_CALLBACK,			/* RPC service number */
+ 	.pg_nvers = ARRAY_SIZE(nfs4_callback_version),	/* Number of entries */
+ 	.pg_vers = nfs4_callback_version,		/* version table */
+ 	.pg_name = "NFSv4 callback",			/* service name */
+ 	.pg_class = "nfs",				/* authentication class */
+-	.pg_stats = &nfs4_callback_stats,
+ 	.pg_authenticate = nfs_callback_authenticate,
+ 	.pg_init_request = svc_generic_init_request,
+ 	.pg_rpcbind_set	= svc_generic_rpcbind_set,
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 1ffce90760606..2d2238548a6e5 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -47,6 +47,7 @@
+ #include <linux/vfs.h>
+ #include <linux/inet.h>
+ #include <linux/in6.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <net/ipv6.h>
+ #include <linux/netdevice.h>
+@@ -219,6 +220,7 @@ static int __nfs_list_for_each_server(struct list_head *head,
+ 		ret = fn(server, data);
+ 		if (ret)
+ 			goto out;
++		cond_resched();
+ 		rcu_read_lock();
+ 	}
+ 	rcu_read_unlock();
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 7c863f2c21e0c..617a5b6ae6c38 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -339,12 +339,16 @@ static int export_stats_init(struct export_stats *stats)
+ 
+ static void export_stats_reset(struct export_stats *stats)
+ {
+-	nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);
++	if (stats)
++		nfsd_percpu_counters_reset(stats->counter,
++					   EXP_STATS_COUNTERS_NUM);
+ }
+ 
+ static void export_stats_destroy(struct export_stats *stats)
+ {
+-	nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);
++	if (stats)
++		nfsd_percpu_counters_destroy(stats->counter,
++					     EXP_STATS_COUNTERS_NUM);
+ }
+ 
+ static void svc_export_put(struct kref *ref)
+@@ -353,7 +357,8 @@ static void svc_export_put(struct kref *ref)
+ 	path_put(&exp->ex_path);
+ 	auth_domain_put(exp->ex_client);
+ 	nfsd4_fslocs_free(&exp->ex_fslocs);
+-	export_stats_destroy(&exp->ex_stats);
++	export_stats_destroy(exp->ex_stats);
++	kfree(exp->ex_stats);
+ 	kfree(exp->ex_uuid);
+ 	kfree_rcu(exp, ex_rcu);
+ }
+@@ -738,13 +743,15 @@ static int svc_export_show(struct seq_file *m,
+ 	seq_putc(m, '\t');
+ 	seq_escape(m, exp->ex_client->name, " \t\n\\");
+ 	if (export_stats) {
+-		seq_printf(m, "\t%lld\n", exp->ex_stats.start_time);
++		struct percpu_counter *counter = exp->ex_stats->counter;
++
++		seq_printf(m, "\t%lld\n", exp->ex_stats->start_time);
+ 		seq_printf(m, "\tfh_stale: %lld\n",
+-			   percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE]));
++			   percpu_counter_sum_positive(&counter[EXP_STATS_FH_STALE]));
+ 		seq_printf(m, "\tio_read: %lld\n",
+-			   percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ]));
++			   percpu_counter_sum_positive(&counter[EXP_STATS_IO_READ]));
+ 		seq_printf(m, "\tio_write: %lld\n",
+-			   percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE]));
++			   percpu_counter_sum_positive(&counter[EXP_STATS_IO_WRITE]));
+ 		seq_putc(m, '\n');
+ 		return 0;
+ 	}
+@@ -790,7 +797,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
+ 	new->ex_layout_types = 0;
+ 	new->ex_uuid = NULL;
+ 	new->cd = item->cd;
+-	export_stats_reset(&new->ex_stats);
++	export_stats_reset(new->ex_stats);
+ }
+ 
+ static void export_update(struct cache_head *cnew, struct cache_head *citem)
+@@ -826,7 +833,14 @@ static struct cache_head *svc_export_alloc(void)
+ 	if (!i)
+ 		return NULL;
+ 
+-	if (export_stats_init(&i->ex_stats)) {
++	i->ex_stats = kmalloc(sizeof(*(i->ex_stats)), GFP_KERNEL);
++	if (!i->ex_stats) {
++		kfree(i);
++		return NULL;
++	}
++
++	if (export_stats_init(i->ex_stats)) {
++		kfree(i->ex_stats);
+ 		kfree(i);
+ 		return NULL;
+ 	}
+diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
+index d03f7f6a8642d..f73e23bb24a1e 100644
+--- a/fs/nfsd/export.h
++++ b/fs/nfsd/export.h
+@@ -64,10 +64,10 @@ struct svc_export {
+ 	struct cache_head	h;
+ 	struct auth_domain *	ex_client;
+ 	int			ex_flags;
++	int			ex_fsid;
+ 	struct path		ex_path;
+ 	kuid_t			ex_anon_uid;
+ 	kgid_t			ex_anon_gid;
+-	int			ex_fsid;
+ 	unsigned char *		ex_uuid; /* 16 byte fsid */
+ 	struct nfsd4_fs_locations ex_fslocs;
+ 	uint32_t		ex_nflavors;
+@@ -76,7 +76,7 @@ struct svc_export {
+ 	struct nfsd4_deviceid_map *ex_devid_map;
+ 	struct cache_detail	*cd;
+ 	struct rcu_head		ex_rcu;
+-	struct export_stats	ex_stats;
++	struct export_stats	*ex_stats;
+ };
+ 
+ /* an "export key" (expkey) maps a filehandlefragement to an
+diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
+index 51a4b7885cae2..548422b24a7d7 100644
+--- a/fs/nfsd/netns.h
++++ b/fs/nfsd/netns.h
+@@ -10,8 +10,10 @@
+ 
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
++#include <linux/nfs4.h>
+ #include <linux/percpu_counter.h>
+ #include <linux/siphash.h>
++#include <linux/sunrpc/stats.h>
+ 
+ /* Hash tables for nfs4_clientid state */
+ #define CLIENT_HASH_BITS                 4
+@@ -25,10 +27,22 @@ struct nfsd4_client_tracking_ops;
+ 
+ enum {
+ 	/* cache misses due only to checksum comparison failures */
+-	NFSD_NET_PAYLOAD_MISSES,
++	NFSD_STATS_PAYLOAD_MISSES,
+ 	/* amount of memory (in bytes) currently consumed by the DRC */
+-	NFSD_NET_DRC_MEM_USAGE,
+-	NFSD_NET_COUNTERS_NUM
++	NFSD_STATS_DRC_MEM_USAGE,
++	NFSD_STATS_RC_HITS,		/* repcache hits */
++	NFSD_STATS_RC_MISSES,		/* repcache misses */
++	NFSD_STATS_RC_NOCACHE,		/* uncached reqs */
++	NFSD_STATS_FH_STALE,		/* FH stale error */
++	NFSD_STATS_IO_READ,		/* bytes returned to read requests */
++	NFSD_STATS_IO_WRITE,		/* bytes passed in write requests */
++#ifdef CONFIG_NFSD_V4
++	NFSD_STATS_FIRST_NFS4_OP,	/* count of individual nfsv4 operations */
++	NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
++#define NFSD_STATS_NFS4_OP(op)	(NFSD_STATS_FIRST_NFS4_OP + (op))
++	NFSD_STATS_WDELEG_GETATTR,	/* count of getattr conflict with wdeleg */
++#endif
++	NFSD_STATS_COUNTERS_NUM
+ };
+ 
+ /*
+@@ -168,7 +182,10 @@ struct nfsd_net {
+ 	atomic_t                 num_drc_entries;
+ 
+ 	/* Per-netns stats counters */
+-	struct percpu_counter    counter[NFSD_NET_COUNTERS_NUM];
++	struct percpu_counter    counter[NFSD_STATS_COUNTERS_NUM];
++
++	/* sunrpc svc stats */
++	struct svc_stat          nfsd_svcstats;
+ 
+ 	/* longest hash chain seen */
+ 	unsigned int             longest_chain;
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 2c0de247083a9..f10e70f372855 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -2435,10 +2435,10 @@ nfsd4_proc_null(struct svc_rqst *rqstp)
+ 	return rpc_success;
+ }
+ 
+-static inline void nfsd4_increment_op_stats(u32 opnum)
++static inline void nfsd4_increment_op_stats(struct nfsd_net *nn, u32 opnum)
+ {
+ 	if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
+-		percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]);
++		percpu_counter_inc(&nn->counter[NFSD_STATS_NFS4_OP(opnum)]);
+ }
+ 
+ static const struct nfsd4_operation nfsd4_ops[];
+@@ -2713,7 +2713,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
+ 					   status, nfsd4_op_name(op->opnum));
+ 
+ 		nfsd4_cstate_clear_replay(cstate);
+-		nfsd4_increment_op_stats(op->opnum);
++		nfsd4_increment_op_stats(nn, op->opnum);
+ 	}
+ 
+ 	fh_put(current_fh);
+diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
+index 2b5417e06d80d..448700939dfe9 100644
+--- a/fs/nfsd/nfscache.c
++++ b/fs/nfsd/nfscache.c
+@@ -85,8 +85,8 @@ nfsd_hashsize(unsigned int limit)
+ }
+ 
+ static struct svc_cacherep *
+-nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
+-			struct nfsd_net *nn)
++nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
++		    struct nfsd_net *nn)
+ {
+ 	struct svc_cacherep	*rp;
+ 
+@@ -110,21 +110,48 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
+ 	return rp;
+ }
+ 
+-static void
+-nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
+-				struct nfsd_net *nn)
++static void nfsd_cacherep_free(struct svc_cacherep *rp)
+ {
+-	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
+-		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
++	if (rp->c_type == RC_REPLBUFF)
+ 		kfree(rp->c_replvec.iov_base);
++	kmem_cache_free(drc_slab, rp);
++}
++
++static unsigned long
++nfsd_cacherep_dispose(struct list_head *dispose)
++{
++	struct svc_cacherep *rp;
++	unsigned long freed = 0;
++
++	while (!list_empty(dispose)) {
++		rp = list_first_entry(dispose, struct svc_cacherep, c_lru);
++		list_del(&rp->c_lru);
++		nfsd_cacherep_free(rp);
++		freed++;
+ 	}
++	return freed;
++}
++
++static void
++nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
++			    struct svc_cacherep *rp)
++{
++	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
++		nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
+ 	if (rp->c_state != RC_UNUSED) {
+ 		rb_erase(&rp->c_node, &b->rb_head);
+ 		list_del(&rp->c_lru);
+ 		atomic_dec(&nn->num_drc_entries);
+ 		nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
+ 	}
+-	kmem_cache_free(drc_slab, rp);
++}
++
++static void
++nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
++				struct nfsd_net *nn)
++{
++	nfsd_cacherep_unlink_locked(nn, b, rp);
++	nfsd_cacherep_free(rp);
+ }
+ 
+ static void
+@@ -132,8 +159,9 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
+ 			struct nfsd_net *nn)
+ {
+ 	spin_lock(&b->cache_lock);
+-	nfsd_reply_cache_free_locked(b, rp, nn);
++	nfsd_cacherep_unlink_locked(nn, b, rp);
+ 	spin_unlock(&b->cache_lock);
++	nfsd_cacherep_free(rp);
+ }
+ 
+ int nfsd_drc_slab_create(void)
+@@ -148,16 +176,6 @@ void nfsd_drc_slab_free(void)
+ 	kmem_cache_destroy(drc_slab);
+ }
+ 
+-static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
+-{
+-	return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
+-}
+-
+-static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
+-{
+-	nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
+-}
+-
+ int nfsd_reply_cache_init(struct nfsd_net *nn)
+ {
+ 	unsigned int hashsize;
+@@ -169,16 +187,12 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
+ 	hashsize = nfsd_hashsize(nn->max_drc_entries);
+ 	nn->maskbits = ilog2(hashsize);
+ 
+-	status = nfsd_reply_cache_stats_init(nn);
+-	if (status)
+-		goto out_nomem;
+-
+ 	nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
+ 	nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
+ 	nn->nfsd_reply_cache_shrinker.seeks = 1;
+ 	status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
+ 	if (status)
+-		goto out_stats_destroy;
++		return status;
+ 
+ 	nn->drc_hashtbl = kvzalloc(array_size(hashsize,
+ 				sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
+@@ -194,9 +208,6 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
+ 	return 0;
+ out_shrinker:
+ 	unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
+-out_stats_destroy:
+-	nfsd_reply_cache_stats_destroy(nn);
+-out_nomem:
+ 	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
+ 	return -ENOMEM;
+ }
+@@ -216,7 +227,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
+ 									rp, nn);
+ 		}
+ 	}
+-	nfsd_reply_cache_stats_destroy(nn);
+ 
+ 	kvfree(nn->drc_hashtbl);
+ 	nn->drc_hashtbl = NULL;
+@@ -243,12 +253,21 @@ nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
+ 	return &nn->drc_hashtbl[hash];
+ }
+ 
+-static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
+-			 unsigned int max)
++/*
++ * Remove no more than @max expired entries from bucket @b onto @dispose.
++ * If @max is zero, do not limit the number of removed entries.
++ */
++static void
++nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
++			 unsigned int max, struct list_head *dispose)
+ {
++	unsigned long expiry = jiffies - RC_EXPIRE;
+ 	struct svc_cacherep *rp, *tmp;
+-	long freed = 0;
++	unsigned int freed = 0;
++
++	lockdep_assert_held(&b->cache_lock);
+ 
++	/* The bucket LRU is ordered oldest-first. */
+ 	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
+ 		/*
+ 		 * Don't free entries attached to calls that are still
+@@ -256,60 +275,77 @@ static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
+ 		 */
+ 		if (rp->c_state == RC_INPROG)
+ 			continue;
++
+ 		if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
+-		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
++		    time_before(expiry, rp->c_timestamp))
+ 			break;
+-		nfsd_reply_cache_free_locked(b, rp, nn);
+-		if (max && freed++ > max)
++
++		nfsd_cacherep_unlink_locked(nn, b, rp);
++		list_add(&rp->c_lru, dispose);
++
++		if (max && ++freed > max)
+ 			break;
+ 	}
+-	return freed;
+ }
+ 
+-static long nfsd_prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
++/**
++ * nfsd_reply_cache_count - count_objects method for the DRC shrinker
++ * @shrink: our registered shrinker context
++ * @sc: garbage collection parameters
++ *
++ * Returns the total number of entries in the duplicate reply cache. To
++ * keep things simple and quick, this is not the number of expired entries
++ * in the cache (ie, the number that would be removed by a call to
++ * nfsd_reply_cache_scan).
++ */
++static unsigned long
++nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+ {
+-	return prune_bucket(b, nn, 3);
++	struct nfsd_net *nn = container_of(shrink,
++				struct nfsd_net, nfsd_reply_cache_shrinker);
++
++	return atomic_read(&nn->num_drc_entries);
+ }
+ 
+-/*
+- * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+- * Also prune the oldest ones when the total exceeds the max number of entries.
++/**
++ * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
++ * @shrink: our registered shrinker context
++ * @sc: garbage collection parameters
++ *
++ * Free expired entries on each bucket's LRU list until we've released
++ * nr_to_scan objects. Nothing will be released if the cache
++ * has not exceeded its max_drc_entries limit.
++ *
++ * Returns the number of entries released by this call.
+  */
+-static long
+-prune_cache_entries(struct nfsd_net *nn)
++static unsigned long
++nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
++	struct nfsd_net *nn = container_of(shrink,
++				struct nfsd_net, nfsd_reply_cache_shrinker);
++	unsigned long freed = 0;
++	LIST_HEAD(dispose);
+ 	unsigned int i;
+-	long freed = 0;
+ 
+ 	for (i = 0; i < nn->drc_hashsize; i++) {
+ 		struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
+ 
+ 		if (list_empty(&b->lru_head))
+ 			continue;
++
+ 		spin_lock(&b->cache_lock);
+-		freed += prune_bucket(b, nn, 0);
++		nfsd_prune_bucket_locked(nn, b, 0, &dispose);
+ 		spin_unlock(&b->cache_lock);
+-	}
+-	return freed;
+-}
+ 
+-static unsigned long
+-nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+-{
+-	struct nfsd_net *nn = container_of(shrink,
+-				struct nfsd_net, nfsd_reply_cache_shrinker);
++		freed += nfsd_cacherep_dispose(&dispose);
++		if (freed > sc->nr_to_scan)
++			break;
++	}
+ 
+-	return atomic_read(&nn->num_drc_entries);
++	trace_nfsd_drc_gc(nn, freed);
++	return freed;
+ }
+ 
+-static unsigned long
+-nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+-{
+-	struct nfsd_net *nn = container_of(shrink,
+-				struct nfsd_net, nfsd_reply_cache_shrinker);
+-
+-	return prune_cache_entries(nn);
+-}
+ /*
+  * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
+  */
+@@ -421,16 +457,18 @@ nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
+  */
+ int nfsd_cache_lookup(struct svc_rqst *rqstp)
+ {
+-	struct nfsd_net		*nn;
++	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ 	struct svc_cacherep	*rp, *found;
+ 	__wsum			csum;
+ 	struct nfsd_drc_bucket	*b;
+ 	int type = rqstp->rq_cachetype;
++	unsigned long freed;
++	LIST_HEAD(dispose);
+ 	int rtn = RC_DOIT;
+ 
+ 	rqstp->rq_cacherep = NULL;
+ 	if (type == RC_NOCACHE) {
+-		nfsd_stats_rc_nocache_inc();
++		nfsd_stats_rc_nocache_inc(nn);
+ 		goto out;
+ 	}
+ 
+@@ -440,8 +478,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
+ 	 * Since the common case is a cache miss followed by an insert,
+ 	 * preallocate an entry.
+ 	 */
+-	nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+-	rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
++	rp = nfsd_cacherep_alloc(rqstp, csum, nn);
+ 	if (!rp)
+ 		goto out;
+ 
+@@ -450,25 +487,23 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
+ 	found = nfsd_cache_insert(b, rp, nn);
+ 	if (found != rp)
+ 		goto found_entry;
+-
+-	nfsd_stats_rc_misses_inc();
+ 	rqstp->rq_cacherep = rp;
+ 	rp->c_state = RC_INPROG;
++	nfsd_prune_bucket_locked(nn, b, 3, &dispose);
++	spin_unlock(&b->cache_lock);
+ 
++	freed = nfsd_cacherep_dispose(&dispose);
++	trace_nfsd_drc_gc(nn, freed);
++
++	nfsd_stats_rc_misses_inc(nn);
+ 	atomic_inc(&nn->num_drc_entries);
+ 	nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
+-
+-	nfsd_prune_bucket(b, nn);
+-
+-out_unlock:
+-	spin_unlock(&b->cache_lock);
+-out:
+-	return rtn;
++	goto out;
+ 
+ found_entry:
+ 	/* We found a matching entry which is either in progress or done. */
+ 	nfsd_reply_cache_free_locked(NULL, rp, nn);
+-	nfsd_stats_rc_hits_inc();
++	nfsd_stats_rc_hits_inc(nn);
+ 	rtn = RC_DROPIT;
+ 	rp = found;
+ 
+@@ -501,7 +536,10 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
+ 
+ out_trace:
+ 	trace_nfsd_drc_found(nn, rqstp, rtn);
+-	goto out_unlock;
++out_unlock:
++	spin_unlock(&b->cache_lock);
++out:
++	return rtn;
+ }
+ 
+ /**
+@@ -613,15 +651,15 @@ int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
+ 		   atomic_read(&nn->num_drc_entries));
+ 	seq_printf(m, "hash buckets:          %u\n", 1 << nn->maskbits);
+ 	seq_printf(m, "mem usage:             %lld\n",
+-		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_DRC_MEM_USAGE]));
+ 	seq_printf(m, "cache hits:            %lld\n",
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]));
+ 	seq_printf(m, "cache misses:          %lld\n",
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]));
+ 	seq_printf(m, "not cached:            %lld\n",
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]));
+ 	seq_printf(m, "payload misses:        %lld\n",
+-		   percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]));
+ 	seq_printf(m, "longest chain len:     %u\n", nn->longest_chain);
+ 	seq_printf(m, "cachesize at longest:  %u\n", nn->longest_chain_cachesize);
+ 	return 0;
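
The nfscache.c restructuring is the classic two-phase eviction pattern:
nfsd_prune_bucket_locked() only unlinks expired entries onto a
caller-provided dispose list while the bucket spinlock is held, and
nfsd_cacherep_dispose() frees them after the lock is dropped, keeping the
critical section short. A self-contained userspace sketch of the pattern,
with a pthread mutex standing in for the spinlock and illustrative types:

  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct entry {
      struct entry *next;
      int expired;
  };

  static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
  static struct entry *bucket;        /* singly linked cache bucket */

  static unsigned long prune_bucket(void)
  {
      struct entry *dispose = NULL, **pp;
      unsigned long freed = 0;

      pthread_mutex_lock(&bucket_lock);
      /* phase 1: unlink expired entries under the lock; no freeing here */
      for (pp = &bucket; *pp; ) {
          struct entry *e = *pp;

          if (e->expired) {
              *pp = e->next;
              e->next = dispose;
              dispose = e;
          } else {
              pp = &e->next;
          }
      }
      pthread_mutex_unlock(&bucket_lock);

      /* phase 2: free outside the lock, so slow teardown work never
       * blocks other users of the bucket */
      while (dispose) {
          struct entry *e = dispose;

          dispose = e->next;
          free(e);
          freed++;
      }
      return freed;
  }

  int main(void)
  {
      for (int i = 0; i < 4; i++) {
          struct entry *e = malloc(sizeof(*e));

          e->expired = i & 1;
          e->next = bucket;
          bucket = e;
      }
      printf("freed %lu expired entries\n", prune_bucket());
      return 0;
  }
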
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index f77f00c931723..2feaa49fb9fe2 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1458,18 +1458,21 @@ static __net_init int nfsd_init_net(struct net *net)
+ 	retval = nfsd_idmap_init(net);
+ 	if (retval)
+ 		goto out_idmap_error;
++	retval = nfsd_stat_counters_init(nn);
++	if (retval)
++		goto out_repcache_error;
++	memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
++	nn->nfsd_svcstats.program = &nfsd_program;
+ 	nn->nfsd_versions = NULL;
+ 	nn->nfsd4_minorversions = NULL;
+ 	nfsd4_init_leases_net(nn);
+-	retval = nfsd_reply_cache_init(nn);
+-	if (retval)
+-		goto out_cache_error;
+ 	get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
+ 	seqlock_init(&nn->writeverf_lock);
++	nfsd_proc_stat_init(net);
+ 
+ 	return 0;
+ 
+-out_cache_error:
++out_repcache_error:
+ 	nfsd_idmap_shutdown(net);
+ out_idmap_error:
+ 	nfsd_export_shutdown(net);
+@@ -1481,10 +1484,11 @@ static __net_exit void nfsd_exit_net(struct net *net)
+ {
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
+-	nfsd_reply_cache_shutdown(nn);
++	nfsd_proc_stat_shutdown(net);
++	nfsd_stat_counters_destroy(nn);
+ 	nfsd_idmap_shutdown(net);
+ 	nfsd_export_shutdown(net);
+-	nfsd_netns_free_versions(net_generic(net, nfsd_net_id));
++	nfsd_netns_free_versions(nn);
+ }
+ 
+ static struct pernet_operations nfsd_net_ops = {
+@@ -1504,12 +1508,9 @@ static int __init init_nfsd(void)
+ 	retval = nfsd4_init_pnfs();
+ 	if (retval)
+ 		goto out_free_slabs;
+-	retval = nfsd_stat_init();	/* Statistics */
+-	if (retval)
+-		goto out_free_pnfs;
+ 	retval = nfsd_drc_slab_create();
+ 	if (retval)
+-		goto out_free_stat;
++		goto out_free_pnfs;
+ 	nfsd_lockd_init();	/* lockd->nfsd callbacks */
+ 	retval = create_proc_exports_entry();
+ 	if (retval)
+@@ -1539,8 +1540,6 @@ static int __init init_nfsd(void)
+ out_free_lockd:
+ 	nfsd_lockd_shutdown();
+ 	nfsd_drc_slab_free();
+-out_free_stat:
+-	nfsd_stat_shutdown();
+ out_free_pnfs:
+ 	nfsd4_exit_pnfs();
+ out_free_slabs:
+@@ -1557,7 +1556,6 @@ static void __exit exit_nfsd(void)
+ 	nfsd_drc_slab_free();
+ 	remove_proc_entry("fs/nfs/exports", NULL);
+ 	remove_proc_entry("fs/nfs", NULL);
+-	nfsd_stat_shutdown();
+ 	nfsd_lockd_shutdown();
+ 	nfsd4_free_slabs();
+ 	nfsd4_exit_pnfs();
+diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
+index 013bfa24ced21..996f3f62335b2 100644
+--- a/fs/nfsd/nfsd.h
++++ b/fs/nfsd/nfsd.h
+@@ -69,6 +69,7 @@ extern struct mutex		nfsd_mutex;
+ extern spinlock_t		nfsd_drc_lock;
+ extern unsigned long		nfsd_drc_max_mem;
+ extern unsigned long		nfsd_drc_mem_used;
++extern atomic_t			nfsd_th_cnt;		/* number of available threads */
+ 
+ extern const struct seq_operations nfs_exports_op;
+ 
+diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
+index ae3323e0708dd..44e9a9dd28688 100644
+--- a/fs/nfsd/nfsfh.c
++++ b/fs/nfsd/nfsfh.c
+@@ -326,6 +326,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
+ __be32
+ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+ {
++	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ 	struct svc_export *exp = NULL;
+ 	struct dentry	*dentry;
+ 	__be32		error;
+@@ -399,7 +400,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
+ 	}
+ out:
+ 	if (error == nfserr_stale)
+-		nfsd_stats_fh_stale_inc(exp);
++		nfsd_stats_fh_stale_inc(nn, exp);
+ 	return error;
+ }
+ 
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 3d4fd40c987bd..29eb9861684e3 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -34,6 +34,7 @@
+ 
+ #define NFSDDBG_FACILITY	NFSDDBG_SVC
+ 
++atomic_t			nfsd_th_cnt = ATOMIC_INIT(0);
+ extern struct svc_program	nfsd_program;
+ static int			nfsd(void *vrqstp);
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+@@ -89,7 +90,6 @@ unsigned long	nfsd_drc_max_mem;
+ unsigned long	nfsd_drc_mem_used;
+ 
+ #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+-static struct svc_stat	nfsd_acl_svcstats;
+ static const struct svc_version *nfsd_acl_version[] = {
+ # if defined(CONFIG_NFSD_V2_ACL)
+ 	[2] = &nfsd_acl_version2,
+@@ -108,15 +108,11 @@ static struct svc_program	nfsd_acl_program = {
+ 	.pg_vers		= nfsd_acl_version,
+ 	.pg_name		= "nfsacl",
+ 	.pg_class		= "nfsd",
+-	.pg_stats		= &nfsd_acl_svcstats,
+ 	.pg_authenticate	= &svc_set_client,
+ 	.pg_init_request	= nfsd_acl_init_request,
+ 	.pg_rpcbind_set		= nfsd_acl_rpcbind_set,
+ };
+ 
+-static struct svc_stat	nfsd_acl_svcstats = {
+-	.program	= &nfsd_acl_program,
+-};
+ #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
+ 
+ static const struct svc_version *nfsd_version[] = {
+@@ -141,7 +137,6 @@ struct svc_program		nfsd_program = {
+ 	.pg_vers		= nfsd_version,		/* version table */
+ 	.pg_name		= "nfsd",		/* program name */
+ 	.pg_class		= "nfsd",		/* authentication class */
+-	.pg_stats		= &nfsd_svcstats,	/* version table */
+ 	.pg_authenticate	= &svc_set_client,	/* export authentication */
+ 	.pg_init_request	= nfsd_init_request,
+ 	.pg_rpcbind_set		= nfsd_rpcbind_set,
+@@ -427,16 +422,23 @@ static int nfsd_startup_net(struct net *net, const struct cred *cred)
+ 	ret = nfsd_file_cache_start_net(net);
+ 	if (ret)
+ 		goto out_lockd;
+-	ret = nfs4_state_start_net(net);
++
++	ret = nfsd_reply_cache_init(nn);
+ 	if (ret)
+ 		goto out_filecache;
+ 
++	ret = nfs4_state_start_net(net);
++	if (ret)
++		goto out_reply_cache;
++
+ #ifdef CONFIG_NFSD_V4_2_INTER_SSC
+ 	nfsd4_ssc_init_umount_work(nn);
+ #endif
+ 	nn->nfsd_net_up = true;
+ 	return 0;
+ 
++out_reply_cache:
++	nfsd_reply_cache_shutdown(nn);
+ out_filecache:
+ 	nfsd_file_cache_shutdown_net(net);
+ out_lockd:
+@@ -454,6 +456,7 @@ static void nfsd_shutdown_net(struct net *net)
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
+ 	nfs4_state_shutdown_net(net);
++	nfsd_reply_cache_shutdown(nn);
+ 	nfsd_file_cache_shutdown_net(net);
+ 	if (nn->lockd_up) {
+ 		lockd_down(net);
+@@ -559,7 +562,6 @@ void nfsd_last_thread(struct net *net)
+ 		return;
+ 
+ 	nfsd_shutdown_net(net);
+-	pr_info("nfsd: last server has exited, flushing export cache\n");
+ 	nfsd_export_flush(net);
+ }
+ 
+@@ -662,7 +664,8 @@ int nfsd_create_serv(struct net *net)
+ 	if (nfsd_max_blksize == 0)
+ 		nfsd_max_blksize = nfsd_get_default_max_blksize();
+ 	nfsd_reset_versions(nn);
+-	serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, nfsd);
++	serv = svc_create_pooled(&nfsd_program, &nn->nfsd_svcstats,
++				 nfsd_max_blksize, nfsd);
+ 	if (serv == NULL)
+ 		return -ENOMEM;
+ 
+@@ -774,7 +777,6 @@ int
+ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ {
+ 	int	error;
+-	bool	nfsd_up_before;
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 	struct svc_serv *serv;
+ 
+@@ -794,8 +796,6 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ 	error = nfsd_create_serv(net);
+ 	if (error)
+ 		goto out;
+-
+-	nfsd_up_before = nn->nfsd_net_up;
+ 	serv = nn->nfsd_serv;
+ 
+ 	error = nfsd_startup_net(net, cred);
+@@ -803,17 +803,15 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+ 		goto out_put;
+ 	error = svc_set_num_threads(serv, NULL, nrservs);
+ 	if (error)
+-		goto out_shutdown;
++		goto out_put;
+ 	error = serv->sv_nrthreads;
+-	if (error == 0)
+-		nfsd_last_thread(net);
+-out_shutdown:
+-	if (error < 0 && !nfsd_up_before)
+-		nfsd_shutdown_net(net);
+ out_put:
+ 	/* Threads now hold service active */
+ 	if (xchg(&nn->keep_active, 0))
+ 		svc_put(serv);
++
++	if (serv->sv_nrthreads == 0)
++		nfsd_last_thread(net);
+ 	svc_put(serv);
+ out:
+ 	mutex_unlock(&nfsd_mutex);
+@@ -938,7 +936,7 @@ nfsd(void *vrqstp)
+ 
+ 	current->fs->umask = 0;
+ 
+-	atomic_inc(&nfsdstats.th_cnt);
++	atomic_inc(&nfsd_th_cnt);
+ 
+ 	set_freezable();
+ 
+@@ -962,7 +960,7 @@ nfsd(void *vrqstp)
+ 		validate_process_creds();
+ 	}
+ 
+-	atomic_dec(&nfsdstats.th_cnt);
++	atomic_dec(&nfsd_th_cnt);
+ 
+ out:
+ 	/* Release the thread */
+diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
+index 777e24e5da33b..7a58dba0045c3 100644
+--- a/fs/nfsd/stats.c
++++ b/fs/nfsd/stats.c
+@@ -27,25 +27,22 @@
+ 
+ #include "nfsd.h"
+ 
+-struct nfsd_stats	nfsdstats;
+-struct svc_stat		nfsd_svcstats = {
+-	.program	= &nfsd_program,
+-};
+-
+ static int nfsd_show(struct seq_file *seq, void *v)
+ {
++	struct net *net = PDE_DATA(file_inode(seq->file));
++	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 	int i;
+ 
+ 	seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n",
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]),
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]),
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]),
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_FH_STALE]),
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_READ]),
+-		   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE]));
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]),
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]),
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]),
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_FH_STALE]),
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_READ]),
++		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_WRITE]));
+ 
+ 	/* thread usage: */
+-	seq_printf(seq, "th %u 0", atomic_read(&nfsdstats.th_cnt));
++	seq_printf(seq, "th %u 0", atomic_read(&nfsd_th_cnt));
+ 
+ 	/* deprecated thread usage histogram stats */
+ 	for (i = 0; i < 10; i++)
+@@ -55,7 +52,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
+ 	seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n");
+ 
+ 	/* show my rpc info */
+-	svc_seq_show(seq, &nfsd_svcstats);
++	svc_seq_show(seq, &nn->nfsd_svcstats);
+ 
+ #ifdef CONFIG_NFSD_V4
+ 	/* Show count for individual nfsv4 operations */
+@@ -63,7 +60,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
+ 	seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1);
+ 	for (i = 0; i <= LAST_NFS4_OP; i++) {
+ 		seq_printf(seq, " %lld",
+-			   percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
++			   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_NFS4_OP(i)]));
+ 	}
+ 
+ 	seq_putc(seq, '\n');
+@@ -74,7 +71,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
+ 
+ DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
+ 
+-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num)
++int nfsd_percpu_counters_init(struct percpu_counter *counters, int num)
+ {
+ 	int i, err = 0;
+ 
+@@ -106,31 +103,24 @@ void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num)
+ 		percpu_counter_destroy(&counters[i]);
+ }
+ 
+-static int nfsd_stat_counters_init(void)
++int nfsd_stat_counters_init(struct nfsd_net *nn)
+ {
+-	return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
++	return nfsd_percpu_counters_init(nn->counter, NFSD_STATS_COUNTERS_NUM);
+ }
+ 
+-static void nfsd_stat_counters_destroy(void)
++void nfsd_stat_counters_destroy(struct nfsd_net *nn)
+ {
+-	nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
++	nfsd_percpu_counters_destroy(nn->counter, NFSD_STATS_COUNTERS_NUM);
+ }
+ 
+-int nfsd_stat_init(void)
++void nfsd_proc_stat_init(struct net *net)
+ {
+-	int err;
+-
+-	err = nfsd_stat_counters_init();
+-	if (err)
+-		return err;
++	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ 
+-	svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
+-
+-	return 0;
++	svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
+ }
+ 
+-void nfsd_stat_shutdown(void)
++void nfsd_proc_stat_shutdown(struct net *net)
+ {
+-	nfsd_stat_counters_destroy();
+-	svc_proc_unregister(&init_net, "nfsd");
++	svc_proc_unregister(net, "nfsd");
+ }
+diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
+index 9b43dc3d99913..14525e854cbac 100644
+--- a/fs/nfsd/stats.h
++++ b/fs/nfsd/stats.h
+@@ -10,87 +10,66 @@
+ #include <uapi/linux/nfsd/stats.h>
+ #include <linux/percpu_counter.h>
+ 
+-
+-enum {
+-	NFSD_STATS_RC_HITS,		/* repcache hits */
+-	NFSD_STATS_RC_MISSES,		/* repcache misses */
+-	NFSD_STATS_RC_NOCACHE,		/* uncached reqs */
+-	NFSD_STATS_FH_STALE,		/* FH stale error */
+-	NFSD_STATS_IO_READ,		/* bytes returned to read requests */
+-	NFSD_STATS_IO_WRITE,		/* bytes passed in write requests */
+-#ifdef CONFIG_NFSD_V4
+-	NFSD_STATS_FIRST_NFS4_OP,	/* count of individual nfsv4 operations */
+-	NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
+-#define NFSD_STATS_NFS4_OP(op)	(NFSD_STATS_FIRST_NFS4_OP + (op))
+-#endif
+-	NFSD_STATS_COUNTERS_NUM
+-};
+-
+-struct nfsd_stats {
+-	struct percpu_counter	counter[NFSD_STATS_COUNTERS_NUM];
+-
+-	atomic_t	th_cnt;		/* number of available threads */
+-};
+-
+-extern struct nfsd_stats	nfsdstats;
+-
+-extern struct svc_stat		nfsd_svcstats;
+-
+-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num);
+-void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num);
+-void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num);
+-int nfsd_stat_init(void);
+-void nfsd_stat_shutdown(void);
+-
+-static inline void nfsd_stats_rc_hits_inc(void)
++int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
++void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
++void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
++int nfsd_stat_counters_init(struct nfsd_net *nn);
++void nfsd_stat_counters_destroy(struct nfsd_net *nn);
++void nfsd_proc_stat_init(struct net *net);
++void nfsd_proc_stat_shutdown(struct net *net);
++
++static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn)
+ {
+-	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_HITS]);
++	percpu_counter_inc(&nn->counter[NFSD_STATS_RC_HITS]);
+ }
+ 
+-static inline void nfsd_stats_rc_misses_inc(void)
++static inline void nfsd_stats_rc_misses_inc(struct nfsd_net *nn)
+ {
+-	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_MISSES]);
++	percpu_counter_inc(&nn->counter[NFSD_STATS_RC_MISSES]);
+ }
+ 
+-static inline void nfsd_stats_rc_nocache_inc(void)
++static inline void nfsd_stats_rc_nocache_inc(struct nfsd_net *nn)
+ {
+-	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]);
++	percpu_counter_inc(&nn->counter[NFSD_STATS_RC_NOCACHE]);
+ }
+ 
+-static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp)
++static inline void nfsd_stats_fh_stale_inc(struct nfsd_net *nn,
++					   struct svc_export *exp)
+ {
+-	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
+-	if (exp)
+-		percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]);
++	percpu_counter_inc(&nn->counter[NFSD_STATS_FH_STALE]);
++	if (exp && exp->ex_stats)
++		percpu_counter_inc(&exp->ex_stats->counter[EXP_STATS_FH_STALE]);
+ }
+ 
+-static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount)
++static inline void nfsd_stats_io_read_add(struct nfsd_net *nn,
++					  struct svc_export *exp, s64 amount)
+ {
+-	percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount);
+-	if (exp)
+-		percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_READ], amount);
++	percpu_counter_add(&nn->counter[NFSD_STATS_IO_READ], amount);
++	if (exp && exp->ex_stats)
++		percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_READ], amount);
+ }
+ 
+-static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount)
++static inline void nfsd_stats_io_write_add(struct nfsd_net *nn,
++					   struct svc_export *exp, s64 amount)
+ {
+-	percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount);
+-	if (exp)
+-		percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_WRITE], amount);
++	percpu_counter_add(&nn->counter[NFSD_STATS_IO_WRITE], amount);
++	if (exp && exp->ex_stats)
++		percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_WRITE], amount);
+ }
+ 
+ static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn)
+ {
+-	percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]);
++	percpu_counter_inc(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]);
+ }
+ 
+ static inline void nfsd_stats_drc_mem_usage_add(struct nfsd_net *nn, s64 amount)
+ {
+-	percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
++	percpu_counter_add(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount);
+ }
+ 
+ static inline void nfsd_stats_drc_mem_usage_sub(struct nfsd_net *nn, s64 amount)
+ {
+-	percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
++	percpu_counter_sub(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount);
+ }
+ 
+ #endif /* _NFSD_STATS_H */
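
The common shape of the nfsd stats changes above: helpers that used to
update a file-scope nfsdstats now take the struct nfsd_net to charge, which
is what makes both the counters and the svc_stat per network namespace.
Reduced to plain C (the struct below is an illustrative stand-in, not the
kernel's):

  #include <stdio.h>

  /* per-namespace context; a stand-in for struct nfsd_net */
  struct nfsd_net_model {
      long rc_hits;
  };

  /* every stats helper now takes the namespace to charge explicitly */
  static inline void rc_hits_inc(struct nfsd_net_model *nn)
  {
      nn->rc_hits++;
  }

  int main(void)
  {
      struct nfsd_net_model netns_a = { 0 }, netns_b = { 0 };

      rc_hits_inc(&netns_a);
      rc_hits_inc(&netns_a);
      rc_hits_inc(&netns_b);

      /* prints a=2 b=1: the same traffic no longer aliases one global */
      printf("a=%ld b=%ld\n", netns_a.rc_hits, netns_b.rc_hits);
      return 0;
  }
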
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 445d00f00eab7..0e6c7ed9da1b4 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -1171,6 +1171,28 @@ TRACE_EVENT(nfsd_drc_mismatch,
+ 		__entry->ingress)
+ );
+ 
++TRACE_EVENT_CONDITION(nfsd_drc_gc,
++	TP_PROTO(
++		const struct nfsd_net *nn,
++		unsigned long freed
++	),
++	TP_ARGS(nn, freed),
++	TP_CONDITION(freed > 0),
++	TP_STRUCT__entry(
++		__field(unsigned long long, boot_time)
++		__field(unsigned long, freed)
++		__field(int, total)
++	),
++	TP_fast_assign(
++		__entry->boot_time = nn->boot_time;
++		__entry->freed = freed;
++		__entry->total = atomic_read(&nn->num_drc_entries);
++	),
++	TP_printk("boot_time=%16llx total=%d freed=%lu",
++		__entry->boot_time, __entry->total, __entry->freed
++	)
++);
++
+ TRACE_EVENT(nfsd_cb_args,
+ 	TP_PROTO(
+ 		const struct nfs4_client *clp,
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 0ea05ddff0d08..dab44f187d013 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1000,7 +1000,9 @@ static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 			       unsigned long *count, u32 *eof, ssize_t host_err)
+ {
+ 	if (host_err >= 0) {
+-		nfsd_stats_io_read_add(fhp->fh_export, host_err);
++		struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
++
++		nfsd_stats_io_read_add(nn, fhp->fh_export, host_err);
+ 		*eof = nfsd_eof_on_read(file, offset, host_err, *count);
+ 		*count = host_err;
+ 		fsnotify_access(file);
+@@ -1143,7 +1145,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
+ 		goto out_nfserr;
+ 	}
+ 	*cnt = host_err;
+-	nfsd_stats_io_write_add(exp, *cnt);
++	nfsd_stats_io_write_add(nn, exp, *cnt);
+ 	fsnotify_modify(file);
+ 	host_err = filemap_check_wb_err(file->f_mapping, since);
+ 	if (host_err < 0)
+diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
+index 188b8cc52e2b6..33c4a97519de8 100644
+--- a/fs/nilfs2/recovery.c
++++ b/fs/nilfs2/recovery.c
+@@ -708,6 +708,33 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
+ 	brelse(bh);
+ }
+ 
++/**
++ * nilfs_abort_roll_forward - clean up after a failed roll-forward recovery
++ * @nilfs: nilfs object
++ */
++static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
++{
++	struct nilfs_inode_info *ii, *n;
++	LIST_HEAD(head);
++
++	/* Abandon inodes that have read recovery data */
++	spin_lock(&nilfs->ns_inode_lock);
++	list_splice_init(&nilfs->ns_dirty_files, &head);
++	spin_unlock(&nilfs->ns_inode_lock);
++	if (list_empty(&head))
++		return;
++
++	set_nilfs_purging(nilfs);
++	list_for_each_entry_safe(ii, n, &head, i_dirty) {
++		spin_lock(&nilfs->ns_inode_lock);
++		list_del_init(&ii->i_dirty);
++		spin_unlock(&nilfs->ns_inode_lock);
++
++		iput(&ii->vfs_inode);
++	}
++	clear_nilfs_purging(nilfs);
++}
++
+ /**
+  * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
+  * @nilfs: nilfs object
+@@ -766,15 +793,19 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
+ 		if (unlikely(err)) {
+ 			nilfs_err(sb, "error %d writing segment for recovery",
+ 				  err);
+-			goto failed;
++			goto put_root;
+ 		}
+ 
+ 		nilfs_finish_roll_forward(nilfs, ri);
+ 	}
+ 
+- failed:
++put_root:
+ 	nilfs_put_root(root);
+ 	return err;
++
++failed:
++	nilfs_abort_roll_forward(nilfs);
++	goto put_root;
+ }
+ 
+ /**
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index d9f92df15a84f..2213011afab70 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1833,6 +1833,9 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
+ 	nilfs_abort_logs(&logs, ret ? : err);
+ 
+ 	list_splice_tail_init(&sci->sc_segbufs, &logs);
++	if (list_empty(&logs))
++		return; /* if the first segment buffer preparation failed */
++
+ 	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
+ 	nilfs_free_incomplete_logs(&logs, nilfs);
+ 
+@@ -2077,7 +2080,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
+ 
+ 		err = nilfs_segctor_begin_construction(sci, nilfs);
+ 		if (unlikely(err))
+-			goto out;
++			goto failed;
+ 
+ 		/* Update time stamp */
+ 		sci->sc_seg_ctime = ktime_get_real_seconds();
+@@ -2140,10 +2143,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
+ 	return err;
+ 
+  failed_to_write:
+-	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
+-		nilfs_redirty_inodes(&sci->sc_dirty_files);
+-
+  failed:
++	if (mode == SC_LSEG_SR && nilfs_sc_cstage_get(sci) >= NILFS_ST_IFILE)
++		nilfs_redirty_inodes(&sci->sc_dirty_files);
+ 	if (nilfs_doing_gc())
+ 		nilfs_redirty_inodes(&sci->sc_gc_inodes);
+ 	nilfs_segctor_abort_construction(sci, nilfs, err);
+diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
+index 57afd06db62de..64ea44be0a646 100644
+--- a/fs/nilfs2/sysfs.c
++++ b/fs/nilfs2/sysfs.c
+@@ -108,7 +108,7 @@ static ssize_t
+ nilfs_snapshot_inodes_count_show(struct nilfs_snapshot_attr *attr,
+ 				 struct nilfs_root *root, char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, "%llu\n",
++	return sysfs_emit(buf, "%llu\n",
+ 			(unsigned long long)atomic64_read(&root->inodes_count));
+ }
+ 
+@@ -116,7 +116,7 @@ static ssize_t
+ nilfs_snapshot_blocks_count_show(struct nilfs_snapshot_attr *attr,
+ 				 struct nilfs_root *root, char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, "%llu\n",
++	return sysfs_emit(buf, "%llu\n",
+ 			(unsigned long long)atomic64_read(&root->blocks_count));
+ }
+ 
+@@ -129,7 +129,7 @@ static ssize_t
+ nilfs_snapshot_README_show(struct nilfs_snapshot_attr *attr,
+ 			    struct nilfs_root *root, char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, snapshot_readme_str);
++	return sysfs_emit(buf, snapshot_readme_str);
+ }
+ 
+ NILFS_SNAPSHOT_RO_ATTR(inodes_count);
+@@ -230,7 +230,7 @@ static ssize_t
+ nilfs_mounted_snapshots_README_show(struct nilfs_mounted_snapshots_attr *attr,
+ 				    struct the_nilfs *nilfs, char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, mounted_snapshots_readme_str);
++	return sysfs_emit(buf, mounted_snapshots_readme_str);
+ }
+ 
+ NILFS_MOUNTED_SNAPSHOTS_RO_ATTR(README);
+@@ -268,7 +268,7 @@ nilfs_checkpoints_checkpoints_number_show(struct nilfs_checkpoints_attr *attr,
+ 
+ 	ncheckpoints = cpstat.cs_ncps;
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", ncheckpoints);
++	return sysfs_emit(buf, "%llu\n", ncheckpoints);
+ }
+ 
+ static ssize_t
+@@ -291,7 +291,7 @@ nilfs_checkpoints_snapshots_number_show(struct nilfs_checkpoints_attr *attr,
+ 
+ 	nsnapshots = cpstat.cs_nsss;
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", nsnapshots);
++	return sysfs_emit(buf, "%llu\n", nsnapshots);
+ }
+ 
+ static ssize_t
+@@ -305,7 +305,7 @@ nilfs_checkpoints_last_seg_checkpoint_show(struct nilfs_checkpoints_attr *attr,
+ 	last_cno = nilfs->ns_last_cno;
+ 	spin_unlock(&nilfs->ns_last_segment_lock);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", last_cno);
++	return sysfs_emit(buf, "%llu\n", last_cno);
+ }
+ 
+ static ssize_t
+@@ -319,7 +319,7 @@ nilfs_checkpoints_next_checkpoint_show(struct nilfs_checkpoints_attr *attr,
+ 	cno = nilfs->ns_cno;
+ 	up_read(&nilfs->ns_segctor_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", cno);
++	return sysfs_emit(buf, "%llu\n", cno);
+ }
+ 
+ static const char checkpoints_readme_str[] =
+@@ -335,7 +335,7 @@ static ssize_t
+ nilfs_checkpoints_README_show(struct nilfs_checkpoints_attr *attr,
+ 				struct the_nilfs *nilfs, char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, checkpoints_readme_str);
++	return sysfs_emit(buf, checkpoints_readme_str);
+ }
+ 
+ NILFS_CHECKPOINTS_RO_ATTR(checkpoints_number);
+@@ -366,7 +366,7 @@ nilfs_segments_segments_number_show(struct nilfs_segments_attr *attr,
+ 				     struct the_nilfs *nilfs,
+ 				     char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, "%lu\n", nilfs->ns_nsegments);
++	return sysfs_emit(buf, "%lu\n", nilfs->ns_nsegments);
+ }
+ 
+ static ssize_t
+@@ -374,7 +374,7 @@ nilfs_segments_blocks_per_segment_show(struct nilfs_segments_attr *attr,
+ 					struct the_nilfs *nilfs,
+ 					char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, "%lu\n", nilfs->ns_blocks_per_segment);
++	return sysfs_emit(buf, "%lu\n", nilfs->ns_blocks_per_segment);
+ }
+ 
+ static ssize_t
+@@ -388,7 +388,7 @@ nilfs_segments_clean_segments_show(struct nilfs_segments_attr *attr,
+ 	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
+ 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%lu\n", ncleansegs);
++	return sysfs_emit(buf, "%lu\n", ncleansegs);
+ }
+ 
+ static ssize_t
+@@ -408,7 +408,7 @@ nilfs_segments_dirty_segments_show(struct nilfs_segments_attr *attr,
+ 		return err;
+ 	}
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", sustat.ss_ndirtysegs);
++	return sysfs_emit(buf, "%llu\n", sustat.ss_ndirtysegs);
+ }
+ 
+ static const char segments_readme_str[] =
+@@ -424,7 +424,7 @@ nilfs_segments_README_show(struct nilfs_segments_attr *attr,
+ 			    struct the_nilfs *nilfs,
+ 			    char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, segments_readme_str);
++	return sysfs_emit(buf, segments_readme_str);
+ }
+ 
+ NILFS_SEGMENTS_RO_ATTR(segments_number);
+@@ -461,7 +461,7 @@ nilfs_segctor_last_pseg_block_show(struct nilfs_segctor_attr *attr,
+ 	last_pseg = nilfs->ns_last_pseg;
+ 	spin_unlock(&nilfs->ns_last_segment_lock);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n",
++	return sysfs_emit(buf, "%llu\n",
+ 			(unsigned long long)last_pseg);
+ }
+ 
+@@ -476,7 +476,7 @@ nilfs_segctor_last_seg_sequence_show(struct nilfs_segctor_attr *attr,
+ 	last_seq = nilfs->ns_last_seq;
+ 	spin_unlock(&nilfs->ns_last_segment_lock);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", last_seq);
++	return sysfs_emit(buf, "%llu\n", last_seq);
+ }
+ 
+ static ssize_t
+@@ -490,7 +490,7 @@ nilfs_segctor_last_seg_checkpoint_show(struct nilfs_segctor_attr *attr,
+ 	last_cno = nilfs->ns_last_cno;
+ 	spin_unlock(&nilfs->ns_last_segment_lock);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", last_cno);
++	return sysfs_emit(buf, "%llu\n", last_cno);
+ }
+ 
+ static ssize_t
+@@ -504,7 +504,7 @@ nilfs_segctor_current_seg_sequence_show(struct nilfs_segctor_attr *attr,
+ 	seg_seq = nilfs->ns_seg_seq;
+ 	up_read(&nilfs->ns_segctor_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", seg_seq);
++	return sysfs_emit(buf, "%llu\n", seg_seq);
+ }
+ 
+ static ssize_t
+@@ -518,7 +518,7 @@ nilfs_segctor_current_last_full_seg_show(struct nilfs_segctor_attr *attr,
+ 	segnum = nilfs->ns_segnum;
+ 	up_read(&nilfs->ns_segctor_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", segnum);
++	return sysfs_emit(buf, "%llu\n", segnum);
+ }
+ 
+ static ssize_t
+@@ -532,7 +532,7 @@ nilfs_segctor_next_full_seg_show(struct nilfs_segctor_attr *attr,
+ 	nextnum = nilfs->ns_nextnum;
+ 	up_read(&nilfs->ns_segctor_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", nextnum);
++	return sysfs_emit(buf, "%llu\n", nextnum);
+ }
+ 
+ static ssize_t
+@@ -546,7 +546,7 @@ nilfs_segctor_next_pseg_offset_show(struct nilfs_segctor_attr *attr,
+ 	pseg_offset = nilfs->ns_pseg_offset;
+ 	up_read(&nilfs->ns_segctor_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%lu\n", pseg_offset);
++	return sysfs_emit(buf, "%lu\n", pseg_offset);
+ }
+ 
+ static ssize_t
+@@ -560,7 +560,7 @@ nilfs_segctor_next_checkpoint_show(struct nilfs_segctor_attr *attr,
+ 	cno = nilfs->ns_cno;
+ 	up_read(&nilfs->ns_segctor_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", cno);
++	return sysfs_emit(buf, "%llu\n", cno);
+ }
+ 
+ static ssize_t
+@@ -588,7 +588,7 @@ nilfs_segctor_last_seg_write_time_secs_show(struct nilfs_segctor_attr *attr,
+ 	ctime = nilfs->ns_ctime;
+ 	up_read(&nilfs->ns_segctor_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", ctime);
++	return sysfs_emit(buf, "%llu\n", ctime);
+ }
+ 
+ static ssize_t
+@@ -616,7 +616,7 @@ nilfs_segctor_last_nongc_write_time_secs_show(struct nilfs_segctor_attr *attr,
+ 	nongc_ctime = nilfs->ns_nongc_ctime;
+ 	up_read(&nilfs->ns_segctor_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", nongc_ctime);
++	return sysfs_emit(buf, "%llu\n", nongc_ctime);
+ }
+ 
+ static ssize_t
+@@ -630,7 +630,7 @@ nilfs_segctor_dirty_data_blocks_count_show(struct nilfs_segctor_attr *attr,
+ 	ndirtyblks = atomic_read(&nilfs->ns_ndirtyblks);
+ 	up_read(&nilfs->ns_segctor_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%u\n", ndirtyblks);
++	return sysfs_emit(buf, "%u\n", ndirtyblks);
+ }
+ 
+ static const char segctor_readme_str[] =
+@@ -667,7 +667,7 @@ static ssize_t
+ nilfs_segctor_README_show(struct nilfs_segctor_attr *attr,
+ 			  struct the_nilfs *nilfs, char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, segctor_readme_str);
++	return sysfs_emit(buf, segctor_readme_str);
+ }
+ 
+ NILFS_SEGCTOR_RO_ATTR(last_pseg_block);
+@@ -736,7 +736,7 @@ nilfs_superblock_sb_write_time_secs_show(struct nilfs_superblock_attr *attr,
+ 	sbwtime = nilfs->ns_sbwtime;
+ 	up_read(&nilfs->ns_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", sbwtime);
++	return sysfs_emit(buf, "%llu\n", sbwtime);
+ }
+ 
+ static ssize_t
+@@ -750,7 +750,7 @@ nilfs_superblock_sb_write_count_show(struct nilfs_superblock_attr *attr,
+ 	sbwcount = nilfs->ns_sbwcount;
+ 	up_read(&nilfs->ns_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%u\n", sbwcount);
++	return sysfs_emit(buf, "%u\n", sbwcount);
+ }
+ 
+ static ssize_t
+@@ -764,7 +764,7 @@ nilfs_superblock_sb_update_frequency_show(struct nilfs_superblock_attr *attr,
+ 	sb_update_freq = nilfs->ns_sb_update_freq;
+ 	up_read(&nilfs->ns_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%u\n", sb_update_freq);
++	return sysfs_emit(buf, "%u\n", sb_update_freq);
+ }
+ 
+ static ssize_t
+@@ -812,7 +812,7 @@ static ssize_t
+ nilfs_superblock_README_show(struct nilfs_superblock_attr *attr,
+ 				struct the_nilfs *nilfs, char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, sb_readme_str);
++	return sysfs_emit(buf, sb_readme_str);
+ }
+ 
+ NILFS_SUPERBLOCK_RO_ATTR(sb_write_time);
+@@ -843,11 +843,17 @@ ssize_t nilfs_dev_revision_show(struct nilfs_dev_attr *attr,
+ 				struct the_nilfs *nilfs,
+ 				char *buf)
+ {
+-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
+-	u32 major = le32_to_cpu(sbp[0]->s_rev_level);
+-	u16 minor = le16_to_cpu(sbp[0]->s_minor_rev_level);
++	struct nilfs_super_block *raw_sb;
++	u32 major;
++	u16 minor;
+ 
+-	return snprintf(buf, PAGE_SIZE, "%d.%d\n", major, minor);
++	down_read(&nilfs->ns_sem);
++	raw_sb = nilfs->ns_sbp[0];
++	major = le32_to_cpu(raw_sb->s_rev_level);
++	minor = le16_to_cpu(raw_sb->s_minor_rev_level);
++	up_read(&nilfs->ns_sem);
++
++	return sysfs_emit(buf, "%d.%d\n", major, minor);
+ }
+ 
+ static
+@@ -855,7 +861,7 @@ ssize_t nilfs_dev_blocksize_show(struct nilfs_dev_attr *attr,
+ 				 struct the_nilfs *nilfs,
+ 				 char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, "%u\n", nilfs->ns_blocksize);
++	return sysfs_emit(buf, "%u\n", nilfs->ns_blocksize);
+ }
+ 
+ static
+@@ -863,10 +869,15 @@ ssize_t nilfs_dev_device_size_show(struct nilfs_dev_attr *attr,
+ 				    struct the_nilfs *nilfs,
+ 				    char *buf)
+ {
+-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
+-	u64 dev_size = le64_to_cpu(sbp[0]->s_dev_size);
++	struct nilfs_super_block *raw_sb;
++	u64 dev_size;
++
++	down_read(&nilfs->ns_sem);
++	raw_sb = nilfs->ns_sbp[0];
++	dev_size = le64_to_cpu(raw_sb->s_dev_size);
++	up_read(&nilfs->ns_sem);
+ 
+-	return snprintf(buf, PAGE_SIZE, "%llu\n", dev_size);
++	return sysfs_emit(buf, "%llu\n", dev_size);
+ }
+ 
+ static
+@@ -877,7 +888,7 @@ ssize_t nilfs_dev_free_blocks_show(struct nilfs_dev_attr *attr,
+ 	sector_t free_blocks = 0;
+ 
+ 	nilfs_count_free_blocks(nilfs, &free_blocks);
+-	return snprintf(buf, PAGE_SIZE, "%llu\n",
++	return sysfs_emit(buf, "%llu\n",
+ 			(unsigned long long)free_blocks);
+ }
+ 
+@@ -886,9 +897,15 @@ ssize_t nilfs_dev_uuid_show(struct nilfs_dev_attr *attr,
+ 			    struct the_nilfs *nilfs,
+ 			    char *buf)
+ {
+-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
++	struct nilfs_super_block *raw_sb;
++	ssize_t len;
+ 
+-	return snprintf(buf, PAGE_SIZE, "%pUb\n", sbp[0]->s_uuid);
++	down_read(&nilfs->ns_sem);
++	raw_sb = nilfs->ns_sbp[0];
++	len = sysfs_emit(buf, "%pUb\n", raw_sb->s_uuid);
++	up_read(&nilfs->ns_sem);
++
++	return len;
+ }
+ 
+ static
+@@ -896,10 +913,16 @@ ssize_t nilfs_dev_volume_name_show(struct nilfs_dev_attr *attr,
+ 				    struct the_nilfs *nilfs,
+ 				    char *buf)
+ {
+-	struct nilfs_super_block **sbp = nilfs->ns_sbp;
++	struct nilfs_super_block *raw_sb;
++	ssize_t len;
++
++	down_read(&nilfs->ns_sem);
++	raw_sb = nilfs->ns_sbp[0];
++	len = scnprintf(buf, sizeof(raw_sb->s_volume_name), "%s\n",
++			raw_sb->s_volume_name);
++	up_read(&nilfs->ns_sem);
+ 
+-	return scnprintf(buf, sizeof(sbp[0]->s_volume_name), "%s\n",
+-			 sbp[0]->s_volume_name);
++	return len;
+ }
+ 
+ static const char dev_readme_str[] =
+@@ -916,7 +939,7 @@ static ssize_t nilfs_dev_README_show(struct nilfs_dev_attr *attr,
+ 				     struct the_nilfs *nilfs,
+ 				     char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, dev_readme_str);
++	return sysfs_emit(buf, dev_readme_str);
+ }
+ 
+ NILFS_DEV_RO_ATTR(revision);
+@@ -1060,7 +1083,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
+ static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
+ 					    struct attribute *attr, char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, "%d.%d\n",
++	return sysfs_emit(buf, "%d.%d\n",
+ 			NILFS_CURRENT_REV, NILFS_MINOR_REV);
+ }
+ 
+@@ -1073,7 +1096,7 @@ static ssize_t nilfs_feature_README_show(struct kobject *kobj,
+ 					 struct attribute *attr,
+ 					 char *buf)
+ {
+-	return snprintf(buf, PAGE_SIZE, features_readme_str);
++	return sysfs_emit(buf, features_readme_str);
+ }
+ 
+ NILFS_FEATURE_RO_ATTR(revision);
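
The snprintf() -> sysfs_emit() conversions above are more than a style cleanup: sysfs_emit() checks that the buffer really is a page-sized sysfs buffer and never reports more bytes than fit in it, whereas snprintf() returns the length the output would have had, which a ->show() handler then hands back to the VFS as the number of valid bytes. A minimal userspace sketch of that pitfall, using only standard C (nothing kernel-specific is assumed):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];

	/* snprintf() reports the length the output *would* have had:
	 * on truncation the return value exceeds the buffer size, the
	 * exact class of bug sysfs_emit() refuses to allow. */
	int n = snprintf(buf, sizeof(buf), "%s", "0123456789");

	printf("returned %d, stored \"%s\" (%zu bytes)\n",
	       n, buf, strlen(buf));	/* returned 10, stored "0123456" */
	return 0;
}
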
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 7974e91ffe134..b5d8f238fce42 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -103,17 +103,13 @@ void fsnotify_sb_delete(struct super_block *sb)
+  * parent cares.  Thus when an event happens on a child it can quickly tell
+  * if there is a need to find a parent and send the event to the parent.
+  */
+-void __fsnotify_update_child_dentry_flags(struct inode *inode)
++void fsnotify_set_children_dentry_flags(struct inode *inode)
+ {
+ 	struct dentry *alias;
+-	int watched;
+ 
+ 	if (!S_ISDIR(inode->i_mode))
+ 		return;
+ 
+-	/* determine if the children should tell inode about their events */
+-	watched = fsnotify_inode_watches_children(inode);
+-
+ 	spin_lock(&inode->i_lock);
+ 	/* run all of the dentries associated with this inode.  Since this is a
+ 	 * directory, there damn well better only be one item on this list */
+@@ -129,10 +125,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
+ 				continue;
+ 
+ 			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
+-			if (watched)
+-				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+-			else
+-				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
++			child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+ 			spin_unlock(&child->d_lock);
+ 		}
+ 		spin_unlock(&alias->d_lock);
+@@ -140,6 +133,24 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
+ 	spin_unlock(&inode->i_lock);
+ }
+ 
++/*
++ * Lazily clear a false-positive PARENT_WATCHED flag on a child whose parent
++ * has stopped watching children.
++ */
++static void fsnotify_clear_child_dentry_flag(struct inode *pinode,
++					     struct dentry *dentry)
++{
++	spin_lock(&dentry->d_lock);
++	/*
++	 * d_lock is a sufficient barrier to prevent observing a non-watched
++	 * parent state from before the fsnotify_set_children_dentry_flags()
++	 * or fsnotify_update_flags() call that had set PARENT_WATCHED.
++	 */
++	if (!fsnotify_inode_watches_children(pinode))
++		dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
++	spin_unlock(&dentry->d_lock);
++}
++
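
The helper above only clears a stale PARENT_WATCHED flag after rechecking the parent's state under the child's d_lock, the same lock writers hold when setting the flag, so a clear can never race past a concurrent set. A hedged pthread sketch of this recheck-under-the-writer's-lock pattern (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: a cached "parent watches me" flag is cleared
 * lazily, and only after rechecking the source of truth under the
 * same lock that writers hold when they set the flag. */
struct child {
	pthread_mutex_t lock;		/* stands in for d_lock */
	bool parent_watched;		/* cached flag */
};

static bool parent_watches_children;	/* source of truth */

static void clear_stale_flag(struct child *c)
{
	pthread_mutex_lock(&c->lock);
	if (!parent_watches_children)	/* recheck under the lock */
		c->parent_watched = false;
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct child c = { PTHREAD_MUTEX_INITIALIZER, true };

	parent_watches_children = false;	/* parent stopped watching */
	clear_stale_flag(&c);
	printf("flag=%d\n", c.parent_watched);	/* flag=0 */
	return 0;
}
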
+ /* Are inode/sb/mount interested in parent and name info with this event? */
+ static bool fsnotify_event_needs_parent(struct inode *inode, struct mount *mnt,
+ 					__u32 mask)
+@@ -208,7 +219,7 @@ int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data,
+ 	p_inode = parent->d_inode;
+ 	p_mask = fsnotify_inode_watches_children(p_inode);
+ 	if (unlikely(parent_watched && !p_mask))
+-		__fsnotify_update_child_dentry_flags(p_inode);
++		fsnotify_clear_child_dentry_flag(p_inode, dentry);
+ 
+ 	/*
+ 	 * Include parent/name in notification either if some notification
+diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
+index fde74eb333cc9..2b4267de86e6b 100644
+--- a/fs/notify/fsnotify.h
++++ b/fs/notify/fsnotify.h
+@@ -74,7 +74,7 @@ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
+  * update the dentry->d_flags of all of inode's children to indicate if inode cares
+  * about events that happen to its children.
+  */
+-extern void __fsnotify_update_child_dentry_flags(struct inode *inode);
++extern void fsnotify_set_children_dentry_flags(struct inode *inode);
+ 
+ extern struct kmem_cache *fsnotify_mark_connector_cachep;
+ 
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index c74ef947447d6..4be6e883d492f 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -176,6 +176,24 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+ 	return fsnotify_update_iref(conn, want_iref);
+ }
+ 
++static bool fsnotify_conn_watches_children(
++					struct fsnotify_mark_connector *conn)
++{
++	if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
++		return false;
++
++	return fsnotify_inode_watches_children(fsnotify_conn_inode(conn));
++}
++
++static void fsnotify_conn_set_children_dentry_flags(
++					struct fsnotify_mark_connector *conn)
++{
++	if (conn->type != FSNOTIFY_OBJ_TYPE_INODE)
++		return;
++
++	fsnotify_set_children_dentry_flags(fsnotify_conn_inode(conn));
++}
++
+ /*
+  * Calculate mask of events for a list of marks. The caller must make sure
+  * connector and connector->obj cannot disappear under us.  Callers achieve
+@@ -184,15 +202,23 @@ static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+  */
+ void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
+ {
++	bool update_children;
++
+ 	if (!conn)
+ 		return;
+ 
+ 	spin_lock(&conn->lock);
++	update_children = !fsnotify_conn_watches_children(conn);
+ 	__fsnotify_recalc_mask(conn);
++	update_children &= fsnotify_conn_watches_children(conn);
+ 	spin_unlock(&conn->lock);
+-	if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
+-		__fsnotify_update_child_dentry_flags(
+-					fsnotify_conn_inode(conn));
++	/*
++	 * Set children's PARENT_WATCHED flags only if parent started watching.
++	 * When parent stops watching, we clear false positive PARENT_WATCHED
++	 * flags lazily in __fsnotify_parent().
++	 */
++	if (update_children)
++		fsnotify_conn_set_children_dentry_flags(conn);
+ }
+ 
+ /* Free all connectors queued for freeing once SRCU period ends */
+diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
+index 24463145b3513..f31649080a881 100644
+--- a/fs/squashfs/inode.c
++++ b/fs/squashfs/inode.c
+@@ -276,8 +276,13 @@ int squashfs_read_inode(struct inode *inode, long long ino)
+ 		if (err < 0)
+ 			goto failed_read;
+ 
+-		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ 		inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
++		if (inode->i_size > PAGE_SIZE) {
++			ERROR("Corrupted symlink\n");
++			return -EINVAL;
++		}
++
++		set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
+ 		inode->i_op = &squashfs_symlink_inode_ops;
+ 		inode_nohighmem(inode);
+ 		inode->i_data.a_ops = &squashfs_symlink_aops;
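
The reordering above makes squashfs validate the untrusted on-disk symlink size against PAGE_SIZE before the inode is published, so later code may size buffers from i_size safely. A minimal sketch of that bound-before-trust idea, with DEMO_PAGE_SIZE standing in for the kernel's PAGE_SIZE:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

/* Sketch of the check the squashfs fix adds: an on-disk length is
 * attacker-controlled, so bound it before anything sizes buffers by
 * it. Reject outright rather than silently truncating. */
static int read_symlink_size(uint32_t disk_size, uint32_t *out)
{
	if (disk_size > DEMO_PAGE_SIZE)
		return -1;	/* corrupted image */
	*out = disk_size;
	return 0;
}

int main(void)
{
	uint32_t size;

	printf("ok=%d\n", read_symlink_size(100, &size));	/* ok=0 */
	printf("ok=%d\n", read_symlink_size(1u << 20, &size));	/* ok=-1 */
	return 0;
}
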
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 1939678f0b622..ae75df43d51cb 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -86,6 +86,13 @@ enum {
+ #define UDF_MAX_LVID_NESTING 1000
+ 
+ enum { UDF_MAX_LINKS = 0xffff };
++/*
++ * We limit filesize to 4TB. This is arbitrary as the on-disk format supports
++ * more but because the file space is described by a linked list of extents,
++ * each of which can have at most 1GB, the creation and handling of extents
++ * gets unusably slow beyond a certain point...
++ */
++#define UDF_MAX_FILESIZE (1ULL << 42)
+ 
+ /* These are the "meat" - everything else is stuffing */
+ static int udf_fill_super(struct super_block *, void *, int);
+@@ -1076,12 +1083,19 @@ static int udf_fill_partdesc_info(struct super_block *sb,
+ 	struct udf_part_map *map;
+ 	struct udf_sb_info *sbi = UDF_SB(sb);
+ 	struct partitionHeaderDesc *phd;
++	u32 sum;
+ 	int err;
+ 
+ 	map = &sbi->s_partmaps[p_index];
+ 
+ 	map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
+ 	map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
++	if (check_add_overflow(map->s_partition_root, map->s_partition_len,
++			       &sum)) {
++		udf_err(sb, "Partition %d has invalid location %u + %u\n",
++			p_index, map->s_partition_root, map->s_partition_len);
++		return -EFSCORRUPTED;
++	}
+ 
+ 	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
+ 		map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
+@@ -1137,6 +1151,14 @@ static int udf_fill_partdesc_info(struct super_block *sb,
+ 		bitmap->s_extPosition = le32_to_cpu(
+ 				phd->unallocSpaceBitmap.extPosition);
+ 		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
++		/* Check whether math over bitmap won't overflow. */
++		if (check_add_overflow(map->s_partition_len,
++				       sizeof(struct spaceBitmapDesc) << 3,
++				       &sum)) {
++			udf_err(sb, "Partition %d is too long (%u)\n", p_index,
++				map->s_partition_len);
++			return -EFSCORRUPTED;
++		}
+ 		udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
+ 			  p_index, bitmap->s_extPosition);
+ 	}
+@@ -2301,7 +2323,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
+ 		ret = -ENOMEM;
+ 		goto error_out;
+ 	}
+-	sb->s_maxbytes = MAX_LFS_FILESIZE;
++	sb->s_maxbytes = UDF_MAX_FILESIZE;
+ 	sb->s_max_links = UDF_MAX_LINKS;
+ 	return 0;
+ 
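
Both new udf checks use check_add_overflow(), which fails cleanly instead of letting a u32 sum wrap around and slip past later range comparisons. A hedged userspace sketch built on the same compiler builtin the kernel macro wraps (available in GCC 5+ and Clang):

#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of the overflow check the patch adds; the kernel's
 * check_add_overflow() macro wraps this same compiler builtin. */
static int partition_end_valid(uint32_t root, uint32_t len)
{
	uint32_t sum;

	if (__builtin_add_overflow(root, len, &sum))
		return 0;	/* root + len wraps past UINT32_MAX */
	return 1;
}

int main(void)
{
	printf("%d\n", partition_end_valid(10, 20));		/* 1 */
	printf("%d\n", partition_end_valid(UINT32_MAX, 1));	/* 0 */
	return 0;
}
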
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index c9fafca1c30c5..6c6323a01d430 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -764,107 +764,54 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
+  * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
+  * per-socket cgroup information except for memcg association.
+  *
+- * On legacy hierarchies, net_prio and net_cls controllers directly set
+- * attributes on each sock which can then be tested by the network layer.
+- * On the default hierarchy, each sock is associated with the cgroup it was
+- * created in and the networking layer can match the cgroup directly.
+- *
+- * To avoid carrying all three cgroup related fields separately in sock,
+- * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
+- * On boot, sock_cgroup_data records the cgroup that the sock was created
+- * in so that cgroup2 matches can be made; however, once either net_prio or
+- * net_cls starts being used, the area is overriden to carry prioidx and/or
+- * classid.  The two modes are distinguished by whether the lowest bit is
+- * set.  Clear bit indicates cgroup pointer while set bit prioidx and
+- * classid.
+- *
+- * While userland may start using net_prio or net_cls at any time, once
+- * either is used, cgroup2 matching no longer works.  There is no reason to
+- * mix the two and this is in line with how legacy and v2 compatibility is
+- * handled.  On mode switch, cgroup references which are already being
+- * pointed to by socks may be leaked.  While this can be remedied by adding
+- * synchronization around sock_cgroup_data, given that the number of leaked
+- * cgroups is bound and highly unlikely to be high, this seems to be the
+- * better trade-off.
++ * On legacy hierarchies, net_prio and net_cls controllers directly
++ * set attributes on each sock which can then be tested by the network
++ * layer. On the default hierarchy, each sock is associated with the
++ * cgroup it was created in and the networking layer can match the
++ * cgroup directly.
+  */
+ struct sock_cgroup_data {
+-	union {
+-#ifdef __LITTLE_ENDIAN
+-		struct {
+-			u8	is_data : 1;
+-			u8	no_refcnt : 1;
+-			u8	unused : 6;
+-			u8	padding;
+-			u16	prioidx;
+-			u32	classid;
+-		} __packed;
+-#else
+-		struct {
+-			u32	classid;
+-			u16	prioidx;
+-			u8	padding;
+-			u8	unused : 6;
+-			u8	no_refcnt : 1;
+-			u8	is_data : 1;
+-		} __packed;
++	struct cgroup	*cgroup; /* v2 */
++#ifdef CONFIG_CGROUP_NET_CLASSID
++	u32		classid; /* v1 */
++#endif
++#ifdef CONFIG_CGROUP_NET_PRIO
++	u16		prioidx; /* v1 */
+ #endif
+-		u64		val;
+-	};
+ };
+ 
+-/*
+- * There's a theoretical window where the following accessors race with
+- * updaters and return part of the previous pointer as the prioidx or
+- * classid.  Such races are short-lived and the result isn't critical.
+- */
+ static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
+ {
+-	/* fallback to 1 which is always the ID of the root cgroup */
+-	return (skcd->is_data & 1) ? skcd->prioidx : 1;
++#ifdef CONFIG_CGROUP_NET_PRIO
++	return READ_ONCE(skcd->prioidx);
++#else
++	return 1;
++#endif
+ }
+ 
+ static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
+ {
+-	/* fallback to 0 which is the unconfigured default classid */
+-	return (skcd->is_data & 1) ? skcd->classid : 0;
++#ifdef CONFIG_CGROUP_NET_CLASSID
++	return READ_ONCE(skcd->classid);
++#else
++	return 0;
++#endif
+ }
+ 
+-/*
+- * If invoked concurrently, the updaters may clobber each other.  The
+- * caller is responsible for synchronization.
+- */
+ static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
+ 					   u16 prioidx)
+ {
+-	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
+-
+-	if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
+-		return;
+-
+-	if (!(skcd_buf.is_data & 1)) {
+-		skcd_buf.val = 0;
+-		skcd_buf.is_data = 1;
+-	}
+-
+-	skcd_buf.prioidx = prioidx;
+-	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
++#ifdef CONFIG_CGROUP_NET_PRIO
++	WRITE_ONCE(skcd->prioidx, prioidx);
++#endif
+ }
+ 
+ static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
+ 					   u32 classid)
+ {
+-	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
+-
+-	if (sock_cgroup_classid(&skcd_buf) == classid)
+-		return;
+-
+-	if (!(skcd_buf.is_data & 1)) {
+-		skcd_buf.val = 0;
+-		skcd_buf.is_data = 1;
+-	}
+-
+-	skcd_buf.classid = classid;
+-	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
++#ifdef CONFIG_CGROUP_NET_CLASSID
++	WRITE_ONCE(skcd->classid, classid);
++#endif
+ }
+ 
+ #else	/* CONFIG_SOCK_CGROUP_DATA */
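
With the tagged-union encoding gone, sock_cgroup_data is a plain struct whose v1 fields exist only when the matching controller is configured, and the lockless accessors rely on READ_ONCE()/WRITE_ONCE() to stay tear-free. A rough userspace analogue using C11 relaxed atomics (the struct and helper names below are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Analogue of the reworked sock_cgroup_data: one plain field per
 * datum, read and written with relaxed atomics instead of the old
 * packed-union, pointer-tagging scheme. */
struct sock_cgroup_data_demo {
	void *cgroup;			/* v2 association */
	_Atomic uint32_t classid;	/* v1: net_cls */
	_Atomic uint16_t prioidx;	/* v1: net_prio */
};

static uint16_t demo_prioidx(struct sock_cgroup_data_demo *d)
{
	return atomic_load_explicit(&d->prioidx, memory_order_relaxed);
}

static void demo_set_prioidx(struct sock_cgroup_data_demo *d, uint16_t idx)
{
	atomic_store_explicit(&d->prioidx, idx, memory_order_relaxed);
}

int main(void)
{
	struct sock_cgroup_data_demo d = { .classid = 0, .prioidx = 1 };

	demo_set_prioidx(&d, 7);
	printf("prioidx=%u\n", demo_prioidx(&d));	/* prioidx=7 */
	return 0;
}
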
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index c9c430712d471..15c27a2c98e26 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -816,33 +816,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
+  */
+ #ifdef CONFIG_SOCK_CGROUP_DATA
+ 
+-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
+-extern spinlock_t cgroup_sk_update_lock;
+-#endif
+-
+-void cgroup_sk_alloc_disable(void);
+ void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+ void cgroup_sk_clone(struct sock_cgroup_data *skcd);
+ void cgroup_sk_free(struct sock_cgroup_data *skcd);
+ 
+ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
+ {
+-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
+-	unsigned long v;
+-
+-	/*
+-	 * @skcd->val is 64bit but the following is safe on 32bit too as we
+-	 * just need the lower ulong to be written and read atomically.
+-	 */
+-	v = READ_ONCE(skcd->val);
+-
+-	if (v & 3)
+-		return &cgrp_dfl_root.cgrp;
+-
+-	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
+-#else
+-	return (struct cgroup *)(unsigned long)skcd->val;
+-#endif
++	return skcd->cgroup;
+ }
+ 
+ #else	/* CONFIG_CGROUP_DATA */
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index d7d96c806bff2..096b79e4373f4 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -563,12 +563,14 @@ static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
+ 
+ static inline int fsnotify_inode_watches_children(struct inode *inode)
+ {
++	__u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask);
++
+ 	/* FS_EVENT_ON_CHILD is set if the inode may care */
+-	if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
++	if (!(parent_mask & FS_EVENT_ON_CHILD))
+ 		return 0;
+ 	/* this inode might care about child events, does it care about the
+ 	 * specific set of events that can happen on a child? */
+-	return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
++	return parent_mask & FS_EVENTS_POSS_ON_CHILD;
+ }
+ 
+ /*
+@@ -582,7 +584,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
+ 	/*
+ 	 * Serialisation of setting PARENT_WATCHED on the dentries is provided
+ 	 * by d_lock. If inotify_inode_watched changes after we have taken
+-	 * d_lock, the following __fsnotify_update_child_dentry_flags call will
++	 * d_lock, the following fsnotify_set_children_dentry_flags call will
+ 	 * find our entry, so it will spin until we complete here, and update
+ 	 * us with the new state.
+ 	 */
+diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
+index bfe7c1f1ac6d1..f0231dbc47771 100644
+--- a/include/linux/hwspinlock.h
++++ b/include/linux/hwspinlock.h
+@@ -68,6 +68,7 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
+ int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
+ void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
+ int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
++int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
+ int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
+ struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
+ struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+@@ -127,6 +128,11 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+ {
+ }
+ 
++static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
++{
++	return 0;
++}
++
+ static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+ {
+ 	return 0;
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index a670ae129f4b9..6cfb530b3d43f 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -991,7 +991,7 @@ static inline int of_i2c_get_board_info(struct device *dev,
+ struct acpi_resource;
+ struct acpi_resource_i2c_serialbus;
+ 
+-#if IS_ENABLED(CONFIG_ACPI)
++#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
+ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ 			       struct acpi_resource_i2c_serialbus **i2c);
+ u32 i2c_acpi_find_bus_speed(struct device *dev);
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index 00303c636a89d..dea002ad99fc6 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -410,7 +410,6 @@ struct svc_program {
+ 	const struct svc_version **pg_vers;	/* version array */
+ 	char *			pg_name;	/* service name */
+ 	char *			pg_class;	/* class name: services sharing authentication */
+-	struct svc_stat *	pg_stats;	/* rpc statistics */
+ 	int			(*pg_authenticate)(struct svc_rqst *);
+ 	__be32			(*pg_init_request)(struct svc_rqst *,
+ 						   const struct svc_program *,
+@@ -484,7 +483,9 @@ void		   svc_rqst_replace_page(struct svc_rqst *rqstp,
+ 					 struct page *page);
+ void		   svc_rqst_free(struct svc_rqst *);
+ void		   svc_exit_thread(struct svc_rqst *);
+-struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
++struct svc_serv *  svc_create_pooled(struct svc_program *prog,
++				     struct svc_stat *stats,
++				     unsigned int bufsize,
+ 				     int (*threadfn)(void *data));
+ int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+ int		   svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 9128c0db11f88..fe62943a35ddc 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -174,7 +174,6 @@ struct blocked_key {
+ struct smp_csrk {
+ 	bdaddr_t bdaddr;
+ 	u8 bdaddr_type;
+-	u8 link_type;
+ 	u8 type;
+ 	u8 val[16];
+ };
+@@ -184,7 +183,6 @@ struct smp_ltk {
+ 	struct rcu_head rcu;
+ 	bdaddr_t bdaddr;
+ 	u8 bdaddr_type;
+-	u8 link_type;
+ 	u8 authenticated;
+ 	u8 type;
+ 	u8 enc_size;
+@@ -199,7 +197,6 @@ struct smp_irk {
+ 	bdaddr_t rpa;
+ 	bdaddr_t bdaddr;
+ 	u8 addr_type;
+-	u8 link_type;
+ 	u8 val[16];
+ };
+ 
+@@ -207,8 +204,6 @@ struct link_key {
+ 	struct list_head list;
+ 	struct rcu_head rcu;
+ 	bdaddr_t bdaddr;
+-	u8 bdaddr_type;
+-	u8 link_type;
+ 	u8 type;
+ 	u8 val[HCI_LINK_KEY_SIZE];
+ 	u8 pin_len;
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 11400eba61242..643d8e178f7b9 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1773,9 +1773,9 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
+ 		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
+ 		rcu_assign_pointer(dcgrp->subsys[ssid], css);
+ 		ss->root = dst_root;
+-		css->cgroup = dcgrp;
+ 
+ 		spin_lock_irq(&css_set_lock);
++		css->cgroup = dcgrp;
+ 		WARN_ON(!list_empty(&dcgrp->e_csets[ss->id]));
+ 		list_for_each_entry_safe(cset, cset_pos, &scgrp->e_csets[ss->id],
+ 					 e_cset_node[ss->id]) {
+@@ -6557,74 +6557,51 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
+  */
+ #ifdef CONFIG_SOCK_CGROUP_DATA
+ 
+-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
+-
+-DEFINE_SPINLOCK(cgroup_sk_update_lock);
+-static bool cgroup_sk_alloc_disabled __read_mostly;
+-
+-void cgroup_sk_alloc_disable(void)
+-{
+-	if (cgroup_sk_alloc_disabled)
+-		return;
+-	pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
+-	cgroup_sk_alloc_disabled = true;
+-}
+-
+-#else
+-
+-#define cgroup_sk_alloc_disabled	false
+-
+-#endif
+-
+ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
+ {
+-	if (cgroup_sk_alloc_disabled) {
+-		skcd->no_refcnt = 1;
+-		return;
+-	}
+-
+-	/* Don't associate the sock with unrelated interrupted task's cgroup. */
+-	if (in_interrupt())
+-		return;
++	struct cgroup *cgroup;
+ 
+ 	rcu_read_lock();
++	/* Don't associate the sock with unrelated interrupted task's cgroup. */
++	if (in_interrupt()) {
++		cgroup = &cgrp_dfl_root.cgrp;
++		cgroup_get(cgroup);
++		goto out;
++	}
+ 
+ 	while (true) {
+ 		struct css_set *cset;
+ 
+ 		cset = task_css_set(current);
+ 		if (likely(cgroup_tryget(cset->dfl_cgrp))) {
+-			skcd->val = (unsigned long)cset->dfl_cgrp;
+-			cgroup_bpf_get(cset->dfl_cgrp);
++			cgroup = cset->dfl_cgrp;
+ 			break;
+ 		}
+ 		cpu_relax();
+ 	}
+-
++out:
++	skcd->cgroup = cgroup;
++	cgroup_bpf_get(cgroup);
+ 	rcu_read_unlock();
+ }
+ 
+ void cgroup_sk_clone(struct sock_cgroup_data *skcd)
+ {
+-	if (skcd->val) {
+-		if (skcd->no_refcnt)
+-			return;
+-		/*
+-		 * We might be cloning a socket which is left in an empty
+-		 * cgroup and the cgroup might have already been rmdir'd.
+-		 * Don't use cgroup_get_live().
+-		 */
+-		cgroup_get(sock_cgroup_ptr(skcd));
+-		cgroup_bpf_get(sock_cgroup_ptr(skcd));
+-	}
++	struct cgroup *cgrp = sock_cgroup_ptr(skcd);
++
++	/*
++	 * We might be cloning a socket which is left in an empty
++	 * cgroup and the cgroup might have already been rmdir'd.
++	 * Don't use cgroup_get_live().
++	 */
++	cgroup_get(cgrp);
++	cgroup_bpf_get(cgrp);
+ }
+ 
+ void cgroup_sk_free(struct sock_cgroup_data *skcd)
+ {
+ 	struct cgroup *cgrp = sock_cgroup_ptr(skcd);
+ 
+-	if (skcd->no_refcnt)
+-		return;
+ 	cgroup_bpf_put(cgrp);
+ 	cgroup_put(cgrp);
+ }
+diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
+index 0263983089097..654b039dfc335 100644
+--- a/kernel/dma/debug.c
++++ b/kernel/dma/debug.c
+@@ -447,8 +447,11 @@ void debug_dma_dump_mappings(struct device *dev)
+  * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
+  * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+  * entries into the tree.
++ *
++ * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end
++ * up right back in the DMA debugging code, leading to a deadlock.
+  */
+-static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
++static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
+ static DEFINE_SPINLOCK(radix_lock);
+ #define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+ #define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index b60325cc8604d..55033d6c05777 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1366,8 +1366,9 @@ static void put_ctx(struct perf_event_context *ctx)
+  *	  perf_event_context::mutex
+  *	    perf_event::child_mutex;
+  *	      perf_event_context::lock
+- *	    perf_event::mmap_mutex
+  *	    mmap_lock
++ *	      perf_event::mmap_mutex
++ *	        perf_buffer::aux_mutex
+  *	      perf_addr_filters_head::lock
+  *
+  *    cpu_hotplug_lock
+@@ -6091,12 +6092,11 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ 		event->pmu->event_unmapped(event, vma->vm_mm);
+ 
+ 	/*
+-	 * rb->aux_mmap_count will always drop before rb->mmap_count and
+-	 * event->mmap_count, so it is ok to use event->mmap_mutex to
+-	 * serialize with perf_mmap here.
++	 * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
++	 * to avoid complications.
+ 	 */
+ 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
+-	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
++	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
+ 		/*
+ 		 * Stop all AUX events that are writing to this buffer,
+ 		 * so that we can free its AUX pages and corresponding PMU
+@@ -6113,7 +6113,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
+ 		rb_free_aux(rb);
+ 		WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
+ 
+-		mutex_unlock(&event->mmap_mutex);
++		mutex_unlock(&rb->aux_mutex);
+ 	}
+ 
+ 	if (atomic_dec_and_test(&rb->mmap_count))
+@@ -6201,6 +6201,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 	struct perf_event *event = file->private_data;
+ 	unsigned long user_locked, user_lock_limit;
+ 	struct user_struct *user = current_user();
++	struct mutex *aux_mutex = NULL;
+ 	struct perf_buffer *rb = NULL;
+ 	unsigned long locked, lock_limit;
+ 	unsigned long vma_size;
+@@ -6249,6 +6250,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 		if (!rb)
+ 			goto aux_unlock;
+ 
++		aux_mutex = &rb->aux_mutex;
++		mutex_lock(aux_mutex);
++
+ 		aux_offset = READ_ONCE(rb->user_page->aux_offset);
+ 		aux_size = READ_ONCE(rb->user_page->aux_size);
+ 
+@@ -6399,6 +6403,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ 		atomic_dec(&rb->mmap_count);
+ 	}
+ aux_unlock:
++	if (aux_mutex)
++		mutex_unlock(aux_mutex);
+ 	mutex_unlock(&event->mmap_mutex);
+ 
+ 	/*
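
The revised lock-order comment earlier in this hunk series pins mmap_lock above perf_event::mmap_mutex above the new perf_buffer::aux_mutex; freedom from ABBA deadlock comes from every path taking the pair in that single order. A hedged pthread sketch of the rule (compile with -lpthread; names are illustrative):

#include <pthread.h>
#include <stdio.h>

/* Minimal sketch of the lock-ordering rule the patch documents:
 * every path takes mmap_mutex before aux_mutex, never the reverse,
 * so two threads can never deadlock on the pair. */
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t aux_mutex  = PTHREAD_MUTEX_INITIALIZER;

static void touch_aux_buffer(const char *who)
{
	pthread_mutex_lock(&mmap_mutex);	/* outer lock first */
	pthread_mutex_lock(&aux_mutex);		/* then the sub-buffer lock */
	printf("%s: holding both locks in the documented order\n", who);
	pthread_mutex_unlock(&aux_mutex);
	pthread_mutex_unlock(&mmap_mutex);
}

static void *worker(void *arg)
{
	touch_aux_buffer(arg);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, "worker");
	touch_aux_buffer("main");
	pthread_join(t, NULL);
	return 0;
}
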
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 8e63cc2bd4f7d..6f4a7bb2b2286 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -40,6 +40,7 @@ struct perf_buffer {
+ 	struct user_struct		*mmap_user;
+ 
+ 	/* AUX area */
++	struct mutex			aux_mutex;
+ 	long				aux_head;
+ 	unsigned int			aux_nest;
+ 	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index ca27946fdaaf2..ffca72b8c4c6d 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -332,6 +332,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
+ 	 */
+ 	if (!rb->nr_pages)
+ 		rb->paused = 1;
++
++	mutex_init(&rb->aux_mutex);
+ }
+ 
+ void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index 826a2355da1ed..e91d6aac9855c 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -1485,7 +1485,7 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
+ 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
+ 	struct xol_area *area;
+ 
+-	area = kmalloc(sizeof(*area), GFP_KERNEL);
++	area = kzalloc(sizeof(*area), GFP_KERNEL);
+ 	if (unlikely(!area))
+ 		goto out;
+ 
+@@ -1495,7 +1495,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr)
+ 		goto free_area;
+ 
+ 	area->xol_mapping.name = "[uprobes]";
+-	area->xol_mapping.fault = NULL;
+ 	area->xol_mapping.pages = area->pages;
+ 	area->pages[0] = alloc_page(GFP_HIGHUSER);
+ 	if (!area->pages[0])
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index f00dd928fc711..c6a2dafd4a3b4 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1202,6 +1202,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ }
+ 
+ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
++				     struct rt_mutex *lock,
+ 				     struct rt_mutex_waiter *w)
+ {
+ 	/*
+@@ -1211,6 +1212,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ 	if (res != -EDEADLOCK || detect_deadlock)
+ 		return;
+ 
++	raw_spin_unlock_irq(&lock->wait_lock);
+ 	/*
+ 	 * Yell lowdly and stop the task right here.
+ 	 * Yell loudly and stop the task right here.
+@@ -1266,7 +1268,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ 	if (unlikely(ret)) {
+ 		__set_current_state(TASK_RUNNING);
+ 		remove_waiter(lock, &waiter);
+-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
++		rt_mutex_handle_deadlock(ret, chwalk, lock, &waiter);
+ 	}
+ 
+ 	/*
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 105fdc2bb004c..bede3a4f108e3 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -1240,7 +1240,7 @@ static void show_rcu_tasks_trace_gp_kthread(void)
+ {
+ 	char buf[64];
+ 
+-	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
++	snprintf(buf, sizeof(buf), "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
+ 		data_race(n_heavy_reader_ofl_updates),
+ 		data_race(n_heavy_reader_updates),
+ 		data_race(n_heavy_reader_attempts));
+diff --git a/kernel/smp.c b/kernel/smp.c
+index b0684b4c111e9..c6b3ad79c72bd 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -1009,6 +1009,7 @@ int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
+ 
+ 	queue_work_on(cpu, system_wq, &sscs.work);
+ 	wait_for_completion(&sscs.done);
++	destroy_work_on_stack(&sscs.work);
+ 
+ 	return sscs.ret;
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 22e1e57118698..b16291f4c5731 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3742,6 +3742,8 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+ 			break;
+ 		entries++;
+ 		ring_buffer_iter_advance(buf_iter);
++		/* This could be a big loop */
++		cond_resched();
+ 	}
+ 
+ 	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
+diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
+index f25eb111c0516..34d3ac52de894 100644
+--- a/lib/generic-radix-tree.c
++++ b/lib/generic-radix-tree.c
+@@ -131,6 +131,8 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
+ 		if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
+ 			v = new_root;
+ 			new_node = NULL;
++		} else {
++			new_node->children[0] = NULL;
+ 		}
+ 	}
+ 
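
The one-line genradix fix above addresses a publish-or-recycle race: when the cmpxchg loses, the prepared node was never made visible, so the child pointer stashed in it must be cleared before the node is reused. A hedged C11 sketch of the same pattern (illustrative only; the real code keeps the losing node around for reuse rather than freeing it):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *children[1]; };

/* If the CAS loses, the prepared node was never published, so any
 * pointers stashed in it must be cleared before it is recycled. */
static struct node *install_root(_Atomic(struct node *) *root,
				 struct node *expected, struct node *new_node)
{
	struct node *r = expected;

	if (atomic_compare_exchange_strong(root, &r, new_node))
		return new_node;	/* published */

	new_node->children[0] = NULL;	/* lost the race: undo the link */
	free(new_node);
	return r;			/* current winner */
}

int main(void)
{
	_Atomic(struct node *) root = NULL;
	struct node *n = calloc(1, sizeof(*n));

	printf("root=%p\n", (void *)install_root(&root, NULL, n));
	free(atomic_load(&root));
	return 0;
}
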
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 874f91715296b..8de7c72ae0258 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5160,11 +5160,28 @@ static struct cftype mem_cgroup_legacy_files[] = {
+  */
+ 
+ static DEFINE_IDR(mem_cgroup_idr);
++static DEFINE_SPINLOCK(memcg_idr_lock);
++
++static int mem_cgroup_alloc_id(void)
++{
++	int ret;
++
++	idr_preload(GFP_KERNEL);
++	spin_lock(&memcg_idr_lock);
++	ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
++			GFP_NOWAIT);
++	spin_unlock(&memcg_idr_lock);
++	idr_preload_end();
++	return ret;
++}
+ 
+ static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
+ {
+ 	if (memcg->id.id > 0) {
++		spin_lock(&memcg_idr_lock);
+ 		idr_remove(&mem_cgroup_idr, memcg->id.id);
++		spin_unlock(&memcg_idr_lock);
++
+ 		memcg->id.id = 0;
+ 	}
+ }
+@@ -5294,9 +5311,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+ 	if (!memcg)
+ 		return ERR_PTR(error);
+ 
+-	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
+-				 1, MEM_CGROUP_ID_MAX,
+-				 GFP_KERNEL);
++	memcg->id.id = mem_cgroup_alloc_id();
+ 	if (memcg->id.id < 0) {
+ 		error = memcg->id.id;
+ 		goto fail;
+@@ -5342,7 +5357,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
+ 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
+ 	memcg->deferred_split_queue.split_queue_len = 0;
+ #endif
++	spin_lock(&memcg_idr_lock);
+ 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
++	spin_unlock(&memcg_idr_lock);
+ 	return memcg;
+ fail:
+ 	mem_cgroup_id_remove(memcg);
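
mem_cgroup_alloc_id() above follows the standard idr_preload() shape: the sleepable GFP_KERNEL work happens before the spinlock is taken, and only the non-sleeping GFP_NOWAIT step runs under it. A hedged userspace analogue of that allocate-outside, commit-inside discipline (names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch of the idr_preload()/GFP_NOWAIT shape used above: do the
 * sleepable allocation before taking the lock, then perform only
 * non-sleeping work while it is held. */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *table[16];

static int alloc_id(void)
{
	void *slot = malloc(1);		/* may "sleep": done unlocked */
	int id = -1;

	if (!slot)
		return -1;

	pthread_mutex_lock(&table_lock);
	for (int i = 1; i < 16; i++) {	/* no allocation under the lock */
		if (!table[i]) {
			table[i] = slot;
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);

	if (id < 0)
		free(slot);
	return id;
}

int main(void)
{
	printf("id=%d\n", alloc_id());	/* id=1 */
	return 0;
}
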
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 43aea97c57620..c96ff4a1d4a0b 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -482,10 +482,9 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
+ 
+ 	type = vhdr->h_vlan_encapsulated_proto;
+ 
+-	rcu_read_lock();
+ 	ptype = gro_find_receive_by_type(type);
+ 	if (!ptype)
+-		goto out_unlock;
++		goto out;
+ 
+ 	flush = 0;
+ 
+@@ -504,8 +503,6 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
+ 	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
+ 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ 
+-out_unlock:
+-	rcu_read_unlock();
+ out:
+ 	skb_gro_flush_final(skb, pp, flush);
+ 
+@@ -519,12 +516,10 @@ static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
+ 	struct packet_offload *ptype;
+ 	int err = -ENOENT;
+ 
+-	rcu_read_lock();
+ 	ptype = gro_find_complete_by_type(type);
+ 	if (ptype)
+ 		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
+ 
+-	rcu_read_unlock();
+ 	return err;
+ }
+ 
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 0078e33e12ba9..51b16c2a279f4 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2370,16 +2370,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
+ 	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
+ 		   key_count);
+ 
+-	for (i = 0; i < key_count; i++) {
+-		struct mgmt_link_key_info *key = &cp->keys[i];
+-
+-		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
+-		if (key->type > 0x08)
+-			return mgmt_cmd_status(sk, hdev->id,
+-					       MGMT_OP_LOAD_LINK_KEYS,
+-					       MGMT_STATUS_INVALID_PARAMS);
+-	}
+-
+ 	hci_dev_lock(hdev);
+ 
+ 	hci_link_keys_clear(hdev);
+@@ -2404,6 +2394,19 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
+ 			continue;
+ 		}
+ 
++		if (key->addr.type != BDADDR_BREDR) {
++			bt_dev_warn(hdev,
++				    "Invalid link address type %u for %pMR",
++				    key->addr.type, &key->addr.bdaddr);
++			continue;
++		}
++
++		if (key->type > 0x08) {
++			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
++				    key->type, &key->addr.bdaddr);
++			continue;
++		}
++
+ 		/* Always ignore debug keys and require a new pairing if
+ 		 * the user wants to use them.
+ 		 */
+@@ -5919,7 +5922,6 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+ 
+ 	for (i = 0; i < irk_count; i++) {
+ 		struct mgmt_irk_info *irk = &cp->irks[i];
+-		u8 addr_type = le_addr_type(irk->addr.type);
+ 
+ 		if (hci_is_blocked_key(hdev,
+ 				       HCI_BLOCKED_KEY_TYPE_IRK,
+@@ -5929,12 +5931,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+ 			continue;
+ 		}
+ 
+-		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
+-		if (irk->addr.type == BDADDR_BREDR)
+-			addr_type = BDADDR_BREDR;
+-
+ 		hci_add_irk(hdev, &irk->addr.bdaddr,
+-			    addr_type, irk->val,
++			    le_addr_type(irk->addr.type), irk->val,
+ 			    BDADDR_ANY);
+ 	}
+ 
+@@ -5999,15 +5997,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ 
+ 	bt_dev_dbg(hdev, "key_count %u", key_count);
+ 
+-	for (i = 0; i < key_count; i++) {
+-		struct mgmt_ltk_info *key = &cp->keys[i];
+-
+-		if (!ltk_is_valid(key))
+-			return mgmt_cmd_status(sk, hdev->id,
+-					       MGMT_OP_LOAD_LONG_TERM_KEYS,
+-					       MGMT_STATUS_INVALID_PARAMS);
+-	}
+-
+ 	hci_dev_lock(hdev);
+ 
+ 	hci_smp_ltks_clear(hdev);
+@@ -6015,7 +6004,6 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ 	for (i = 0; i < key_count; i++) {
+ 		struct mgmt_ltk_info *key = &cp->keys[i];
+ 		u8 type, authenticated;
+-		u8 addr_type = le_addr_type(key->addr.type);
+ 
+ 		if (hci_is_blocked_key(hdev,
+ 				       HCI_BLOCKED_KEY_TYPE_LTK,
+@@ -6025,6 +6013,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ 			continue;
+ 		}
+ 
++		if (!ltk_is_valid(key)) {
++			bt_dev_warn(hdev, "Invalid LTK for %pMR",
++				    &key->addr.bdaddr);
++			continue;
++		}
++
+ 		switch (key->type) {
+ 		case MGMT_LTK_UNAUTHENTICATED:
+ 			authenticated = 0x00;
+@@ -6050,12 +6044,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
+ 			continue;
+ 		}
+ 
+-		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
+-		if (key->addr.type == BDADDR_BREDR)
+-			addr_type = BDADDR_BREDR;
+-
+ 		hci_add_ltk(hdev, &key->addr.bdaddr,
+-			    addr_type, type, authenticated,
++			    le_addr_type(key->addr.type), type, authenticated,
+ 			    key->val, key->enc_size, key->ediv, key->rand);
+ 	}
+ 
+@@ -8058,7 +8048,7 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+ 
+ 	ev.store_hint = persistent;
+ 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+-	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
++	ev.key.addr.type = BDADDR_BREDR;
+ 	ev.key.type = key->type;
+ 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
+ 	ev.key.pin_len = key->pin_len;
+@@ -8109,7 +8099,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
+ 		ev.store_hint = persistent;
+ 
+ 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
+-	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
++	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
+ 	ev.key.type = mgmt_ltk_type(key);
+ 	ev.key.enc_size = key->enc_size;
+ 	ev.key.ediv = key->ediv;
+@@ -8138,7 +8128,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
+ 
+ 	bacpy(&ev.rpa, &irk->rpa);
+ 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
+-	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
++	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
+ 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
+ 
+ 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
+@@ -8167,7 +8157,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
+ 		ev.store_hint = persistent;
+ 
+ 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
+-	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
++	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
+ 	ev.key.type = csrk->type;
+ 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
+ 
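
The mgmt changes above also shift policy: rather than rejecting an entire LOAD_LINK_KEYS/LOAD_LONG_TERM_KEYS command when one entry is malformed, each bad key is now warned about and skipped so the valid ones still load. A small sketch of that validate-and-skip loop (the 0x08 bound mirrors the link-key check above; everything else is illustrative):

#include <stdio.h>

struct key { unsigned int type; };

/* Instead of aborting the whole load when one entry is bad, warn
 * about each invalid key and keep going, as the patch now does. */
static unsigned int load_keys(const struct key *keys, unsigned int count)
{
	unsigned int loaded = 0;

	for (unsigned int i = 0; i < count; i++) {
		if (keys[i].type > 0x08) {	/* invalid: skip, don't abort */
			fprintf(stderr, "skipping invalid key %u\n", i);
			continue;
		}
		loaded++;
	}
	return loaded;
}

int main(void)
{
	struct key keys[] = { { 0x04 }, { 0xff }, { 0x00 } };

	printf("loaded %u of 3\n", load_keys(keys, 3));	/* loaded 2 of 3 */
	return 0;
}
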
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 20cae8f768762..8f9566f37498e 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -1060,7 +1060,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ 	}
+ 
+ 	if (smp->remote_irk) {
+-		smp->remote_irk->link_type = hcon->type;
+ 		mgmt_new_irk(hdev, smp->remote_irk, persistent);
+ 
+ 		/* Now that user space can be considered to know the
+@@ -1075,28 +1074,24 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ 	}
+ 
+ 	if (smp->csrk) {
+-		smp->csrk->link_type = hcon->type;
+ 		smp->csrk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->csrk->bdaddr, &hcon->dst);
+ 		mgmt_new_csrk(hdev, smp->csrk, persistent);
+ 	}
+ 
+ 	if (smp->responder_csrk) {
+-		smp->responder_csrk->link_type = hcon->type;
+ 		smp->responder_csrk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->responder_csrk->bdaddr, &hcon->dst);
+ 		mgmt_new_csrk(hdev, smp->responder_csrk, persistent);
+ 	}
+ 
+ 	if (smp->ltk) {
+-		smp->ltk->link_type = hcon->type;
+ 		smp->ltk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->ltk->bdaddr, &hcon->dst);
+ 		mgmt_new_ltk(hdev, smp->ltk, persistent);
+ 	}
+ 
+ 	if (smp->responder_ltk) {
+-		smp->responder_ltk->link_type = hcon->type;
+ 		smp->responder_ltk->bdaddr_type = hcon->dst_type;
+ 		bacpy(&smp->responder_ltk->bdaddr, &hcon->dst);
+ 		mgmt_new_ltk(hdev, smp->responder_ltk, persistent);
+@@ -1116,8 +1111,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ 		key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst,
+ 				       smp->link_key, type, 0, &persistent);
+ 		if (key) {
+-			key->link_type = hcon->type;
+-			key->bdaddr_type = hcon->dst_type;
+ 			mgmt_new_link_key(hdev, key, persistent);
+ 
+ 			/* Don't keep debug keys around if the relevant
+diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
+index 8a6470a217024..8751571a3cb02 100644
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -1238,12 +1238,10 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
+ 			modified = true;
+ 		}
+ 
+-		if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
++		if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
+ 			/* Refresh entry */
+ 			fdb->used = jiffies;
+-		} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
+-			/* Take over SW learned entry */
+-			set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
++		} else {
+ 			modified = true;
+ 		}
+ 
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 2388c619f29ca..b2b1bd6727871 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1423,6 +1423,10 @@ static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
+ 
+ 		/* remove device reference, if this is our bound device */
+ 		if (bo->bound && bo->ifindex == dev->ifindex) {
++#if IS_ENABLED(CONFIG_PROC_FS)
++			if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
++				remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
++#endif
+ 			bo->bound   = 0;
+ 			bo->ifindex = 0;
+ 			notify_enodev = 1;
+diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
+index 41b24cd31562a..b6de5ee22391c 100644
+--- a/net/core/netclassid_cgroup.c
++++ b/net/core/netclassid_cgroup.c
+@@ -72,11 +72,8 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n)
+ 	struct update_classid_context *ctx = (void *)v;
+ 	struct socket *sock = sock_from_file(file, &err);
+ 
+-	if (sock) {
+-		spin_lock(&cgroup_sk_update_lock);
++	if (sock)
+ 		sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
+-		spin_unlock(&cgroup_sk_update_lock);
+-	}
+ 	if (--ctx->batch == 0) {
+ 		ctx->batch = UPDATE_CLASSID_BATCH;
+ 		return n + 1;
+@@ -122,8 +119,6 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
+ 	struct css_task_iter it;
+ 	struct task_struct *p;
+ 
+-	cgroup_sk_alloc_disable();
+-
+ 	cs->classid = (u32)value;
+ 
+ 	css_task_iter_start(css, 0, &it);
+diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
+index 9bd4cab7d510f..d4c71e382a13f 100644
+--- a/net/core/netprio_cgroup.c
++++ b/net/core/netprio_cgroup.c
+@@ -207,8 +207,6 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
+ 	if (!dev)
+ 		return -ENODEV;
+ 
+-	cgroup_sk_alloc_disable();
+-
+ 	rtnl_lock();
+ 
+ 	ret = netprio_set_prio(of_css(of), dev, prio);
+@@ -222,12 +220,10 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
+ {
+ 	int err;
+ 	struct socket *sock = sock_from_file(file, &err);
+-	if (sock) {
+-		spin_lock(&cgroup_sk_update_lock);
++
++	if (sock)
+ 		sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
+ 					(unsigned long)v);
+-		spin_unlock(&cgroup_sk_update_lock);
+-	}
+ 	return 0;
+ }
+ 
+@@ -236,8 +232,6 @@ static void net_prio_attach(struct cgroup_taskset *tset)
+ 	struct task_struct *p;
+ 	struct cgroup_subsys_state *css;
+ 
+-	cgroup_sk_alloc_disable();
+-
+ 	cgroup_taskset_for_each(p, css, tset) {
+ 		void *v = (void *)(unsigned long)css->id;
+ 
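
The two cgroup hunks above (netclassid and netprio) delete the spin_lock(&cgroup_sk_update_lock) pairs and the cgroup_sk_alloc_disable() calls. That is only safe if the per-socket cgroup data can be written and read locklessly; presumably sock_cgroup_set_classid() and sock_cgroup_set_prioidx() now publish a plain word that readers fetch without locking. A sketch of that idiom, assuming a simple u32 field (field and function names hypothetical):

    /* Writers publish with WRITE_ONCE(); readers use READ_ONCE()
     * and tolerate seeing either the old or the new value, so no
     * lock is needed.
     */
    static inline void set_classid(struct sock_cgroup_data *skcd, u32 classid)
    {
        WRITE_ONCE(skcd->classid, classid);
    }

    static inline u32 get_classid(const struct sock_cgroup_data *skcd)
    {
        return READ_ONCE(skcd->classid);
    }
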
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index 61cb40368723c..081390c32707d 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -430,19 +430,16 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
+ 
+ 	type = eh->h_proto;
+ 
+-	rcu_read_lock();
+ 	ptype = gro_find_receive_by_type(type);
+ 	if (ptype == NULL) {
+ 		flush = 1;
+-		goto out_unlock;
++		goto out;
+ 	}
+ 
+ 	skb_gro_pull(skb, sizeof(*eh));
+ 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
+ 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ 
+-out_unlock:
+-	rcu_read_unlock();
+ out:
+ 	skb_gro_flush_final(skb, pp, flush);
+ 
+@@ -460,13 +457,11 @@ int eth_gro_complete(struct sk_buff *skb, int nhoff)
+ 	if (skb->encapsulation)
+ 		skb_set_inner_mac_header(skb, nhoff);
+ 
+-	rcu_read_lock();
+ 	ptype = gro_find_complete_by_type(type);
+ 	if (ptype != NULL)
+ 		err = ptype->callbacks.gro_complete(skb, nhoff +
+ 						    sizeof(struct ethhdr));
+ 
+-	rcu_read_unlock();
+ 	return err;
+ }
+ EXPORT_SYMBOL(eth_gro_complete);
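
This eth_gro hunk is the first of a series (af_inet, fou, gre_offload, udp_offload and ip6_offload below follow the same shape) that removes rcu_read_lock()/rcu_read_unlock() pairs from gro_receive/gro_complete handlers and retargets the out_unlock labels to out. The removals rely on the handlers already being invoked inside the caller's RCU read-side section; the dispatch in net/core/dev.c looks roughly like this (paraphrased from memory, not part of this patch):

    /* dev_gro_receive(), paraphrased: the per-type callback runs
     * under rcu_read_lock(), so callees may rcu_dereference()
     * without nesting another read-side section.
     */
    rcu_read_lock();
    list_for_each_entry_rcu(ptype, head, list) {
        if (ptype->type != type || !ptype->callbacks.gro_receive)
            continue;
        pp = ptype->callbacks.gro_receive(gro_head, skb);
        break;
    }
    rcu_read_unlock();
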
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index ce42626663de6..58dfca09093c2 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1471,19 +1471,18 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
+ 
+ 	proto = iph->protocol;
+ 
+-	rcu_read_lock();
+ 	ops = rcu_dereference(inet_offloads[proto]);
+ 	if (!ops || !ops->callbacks.gro_receive)
+-		goto out_unlock;
++		goto out;
+ 
+ 	if (*(u8 *)iph != 0x45)
+-		goto out_unlock;
++		goto out;
+ 
+ 	if (ip_is_fragment(iph))
+-		goto out_unlock;
++		goto out;
+ 
+ 	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
+-		goto out_unlock;
++		goto out;
+ 
+ 	id = ntohl(*(__be32 *)&iph->id);
+ 	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
+@@ -1560,9 +1559,6 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
+ 	pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
+ 				       ops->callbacks.gro_receive, head, skb);
+ 
+-out_unlock:
+-	rcu_read_unlock();
+-
+ out:
+ 	skb_gro_flush_final(skb, pp, flush);
+ 
+@@ -1638,10 +1634,9 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
+ 	csum_replace2(&iph->check, iph->tot_len, newlen);
+ 	iph->tot_len = newlen;
+ 
+-	rcu_read_lock();
+ 	ops = rcu_dereference(inet_offloads[proto]);
+ 	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
+-		goto out_unlock;
++		goto out;
+ 
+ 	/* Only need to add sizeof(*iph) to get to the next hdr below
+ 	 * because any hdr with option will have been flushed in
+@@ -1651,9 +1646,7 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
+ 			      tcp4_gro_complete, udp4_gro_complete,
+ 			      skb, nhoff + sizeof(*iph));
+ 
+-out_unlock:
+-	rcu_read_unlock();
+-
++out:
+ 	return err;
+ }
+ EXPORT_SYMBOL(inet_gro_complete);
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index e5f69b0bf3df5..135da756dd5ab 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -48,7 +48,7 @@ struct fou_net {
+ 
+ static inline struct fou *fou_from_sock(struct sock *sk)
+ {
+-	return sk->sk_user_data;
++	return rcu_dereference_sk_user_data(sk);
+ }
+ 
+ static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
+@@ -230,10 +230,16 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
+ 				       struct list_head *head,
+ 				       struct sk_buff *skb)
+ {
+-	u8 proto = fou_from_sock(sk)->protocol;
+-	const struct net_offload **offloads;
++	const struct net_offload __rcu **offloads;
++	struct fou *fou = fou_from_sock(sk);
+ 	const struct net_offload *ops;
+ 	struct sk_buff *pp = NULL;
++	u8 proto;
++
++	if (!fou)
++		goto out;
++
++	proto = fou->protocol;
+ 
+ 	/* We can clear the encap_mark for FOU as we are essentially doing
+ 	 * one of two possible things.  We are either adding an L4 tunnel
+@@ -246,41 +252,45 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
+ 	/* Flag this frame as already having an outer encap header */
+ 	NAPI_GRO_CB(skb)->is_fou = 1;
+ 
+-	rcu_read_lock();
+ 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ 	ops = rcu_dereference(offloads[proto]);
+ 	if (!ops || !ops->callbacks.gro_receive)
+-		goto out_unlock;
++		goto out;
+ 
+ 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ 
+-out_unlock:
+-	rcu_read_unlock();
+-
++out:
+ 	return pp;
+ }
+ 
+ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
+ 			    int nhoff)
+ {
++	const struct net_offload __rcu **offloads;
++	struct fou *fou = fou_from_sock(sk);
+ 	const struct net_offload *ops;
+-	u8 proto = fou_from_sock(sk)->protocol;
+-	int err = -ENOSYS;
+-	const struct net_offload **offloads;
++	u8 proto;
++	int err;
++
++	if (!fou) {
++		err = -ENOENT;
++		goto out;
++	}
++
++	proto = fou->protocol;
+ 
+-	rcu_read_lock();
+ 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ 	ops = rcu_dereference(offloads[proto]);
+-	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
+-		goto out_unlock;
++	if (WARN_ON(!ops || !ops->callbacks.gro_complete)) {
++		err = -ENOSYS;
++		goto out;
++	}
+ 
+ 	err = ops->callbacks.gro_complete(skb, nhoff);
+ 
+ 	skb_set_inner_mac_header(skb, nhoff);
+ 
+-out_unlock:
+-	rcu_read_unlock();
+-
++out:
+ 	return err;
+ }
+ 
+@@ -311,7 +321,7 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+ 				       struct list_head *head,
+ 				       struct sk_buff *skb)
+ {
+-	const struct net_offload **offloads;
++	const struct net_offload __rcu **offloads;
+ 	const struct net_offload *ops;
+ 	struct sk_buff *pp = NULL;
+ 	struct sk_buff *p;
+@@ -324,6 +334,9 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+ 	struct gro_remcsum grc;
+ 	u8 proto;
+ 
++	if (!fou)
++		goto out;
++
+ 	skb_gro_remcsum_init(&grc);
+ 
+ 	off = skb_gro_offset(skb);
+@@ -438,17 +451,14 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+ 	/* Flag this frame as already having an outer encap header */
+ 	NAPI_GRO_CB(skb)->is_fou = 1;
+ 
+-	rcu_read_lock();
+ 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ 	ops = rcu_dereference(offloads[proto]);
+ 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
+-		goto out_unlock;
++		goto out;
+ 
+ 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ 	flush = 0;
+ 
+-out_unlock:
+-	rcu_read_unlock();
+ out:
+ 	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
+ 
+@@ -457,8 +467,8 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+ 
+ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
+ {
+-	const struct net_offload **offloads;
+ 	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
++	const struct net_offload __rcu **offloads;
+ 	const struct net_offload *ops;
+ 	unsigned int guehlen = 0;
+ 	u8 proto;
+@@ -485,18 +495,16 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
+ 		return err;
+ 	}
+ 
+-	rcu_read_lock();
+ 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ 	ops = rcu_dereference(offloads[proto]);
+ 	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
+-		goto out_unlock;
++		goto out;
+ 
+ 	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
+ 
+ 	skb_set_inner_mac_header(skb, nhoff + guehlen);
+ 
+-out_unlock:
+-	rcu_read_unlock();
++out:
+ 	return err;
+ }
+ 
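
The fou hunks share one idea: sk_user_data is cleared while the encap socket is being torn down, and GRO can still be running against that socket on another CPU, so fou_from_sock() becomes rcu_dereference_sk_user_data() and every callback now tolerates a NULL result instead of dereferencing fou->protocol unconditionally. The guarded pattern, in sketch form:

    /* Fetch the RCU-protected tunnel state once; bail out cleanly
     * if the socket is mid-teardown.
     */
    struct fou *fou = rcu_dereference_sk_user_data(sk);

    if (!fou)
        return -ENOENT;        /* tunnel already gone */
    proto = fou->protocol;     /* fou stays valid for this RCU section */
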
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index e0a2465758872..b4da692b97342 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -158,10 +158,9 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
+ 
+ 	type = greh->protocol;
+ 
+-	rcu_read_lock();
+ 	ptype = gro_find_receive_by_type(type);
+ 	if (!ptype)
+-		goto out_unlock;
++		goto out;
+ 
+ 	grehlen = GRE_HEADER_SECTION;
+ 
+@@ -175,13 +174,13 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
+ 	if (skb_gro_header_hard(skb, hlen)) {
+ 		greh = skb_gro_header_slow(skb, hlen, off);
+ 		if (unlikely(!greh))
+-			goto out_unlock;
++			goto out;
+ 	}
+ 
+ 	/* Don't bother verifying checksum if we're going to flush anyway. */
+ 	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
+ 		if (skb_gro_checksum_simple_validate(skb))
+-			goto out_unlock;
++			goto out;
+ 
+ 		skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
+ 					     null_compute_pseudo);
+@@ -225,8 +224,6 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
+ 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ 	flush = 0;
+ 
+-out_unlock:
+-	rcu_read_unlock();
+ out:
+ 	skb_gro_flush_final(skb, pp, flush);
+ 
+@@ -251,13 +248,10 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
+ 	if (greh->flags & GRE_CSUM)
+ 		grehlen += GRE_HEADER_SECTION;
+ 
+-	rcu_read_lock();
+ 	ptype = gro_find_complete_by_type(type);
+ 	if (ptype)
+ 		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
+ 
+-	rcu_read_unlock();
+-
+ 	skb_set_inner_mac_header(skb, nhoff + grehlen);
+ 
+ 	return err;
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 56deddeac1b0e..0fb5d758264fe 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -653,6 +653,7 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+ 		if (err)
+ 			goto unlock;
+ 	}
++	sock_set_flag(sk, SOCK_RCU_FREE);
+ 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+ 		sk->sk_family == AF_INET6)
+ 		__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
+@@ -660,7 +661,6 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+ 		__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
+ 	inet_hash2(hashinfo, sk);
+ 	ilb->count++;
+-	sock_set_flag(sk, SOCK_RCU_FREE);
+ 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ unlock:
+ 	spin_unlock(&ilb->lock);
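
The __inet_hash() hunk is purely an ordering fix: SOCK_RCU_FREE is set before the socket is linked into the RCU-traversed listener list, so a concurrent lookup can never find the socket with the flag still clear. The general rule, as a sketch:

    /* Initialize everything RCU readers depend on *before*
     * publishing the object on an RCU-visible list.
     */
    sock_set_flag(sk, SOCK_RCU_FREE);               /* reader-visible state first */
    __sk_nulls_add_node_rcu(sk, &ilb->nulls_head);  /* then publish */
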
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index f909e440bb226..ade27d63655c2 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -511,7 +511,7 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ 		err = sk_stream_error(sk, msg->msg_flags, err);
+ 	release_sock(sk);
+ 	sk_psock_put(sk, psock);
+-	return copied ? copied : err;
++	return copied > 0 ? copied : err;
+ }
+ 
+ static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index a0b569d0085bc..6e36eb1ba2763 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -149,8 +149,8 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+ 				       netdev_features_t features,
+ 				       bool is_ipv6)
+ {
++	const struct net_offload __rcu **offloads;
+ 	__be16 protocol = skb->protocol;
+-	const struct net_offload **offloads;
+ 	const struct net_offload *ops;
+ 	struct sk_buff *segs = ERR_PTR(-EINVAL);
+ 	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
+@@ -606,13 +606,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
+ 					     inet_gro_compute_pseudo);
+ skip:
+ 	NAPI_GRO_CB(skb)->is_ipv6 = 0;
+-	rcu_read_lock();
+ 
+ 	if (static_branch_unlikely(&udp_encap_needed_key))
+ 		sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);
+ 
+ 	pp = udp_gro_receive(head, skb, uh, sk);
+-	rcu_read_unlock();
+ 	return pp;
+ 
+ flush:
+@@ -647,7 +645,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
+ 
+ 	uh->len = newlen;
+ 
+-	rcu_read_lock();
+ 	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
+ 				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
+ 	if (sk && udp_sk(sk)->gro_complete) {
+@@ -663,7 +660,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
+ 	} else {
+ 		err = udp_gro_complete_segment(skb);
+ 	}
+-	rcu_read_unlock();
+ 
+ 	if (skb->remcsum_offload)
+ 		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
+diff --git a/net/ipv6/ila/ila.h b/net/ipv6/ila/ila.h
+index ad5f6f6ba3330..85b92917849bf 100644
+--- a/net/ipv6/ila/ila.h
++++ b/net/ipv6/ila/ila.h
+@@ -108,6 +108,7 @@ int ila_lwt_init(void);
+ void ila_lwt_fini(void);
+ 
+ int ila_xlat_init_net(struct net *net);
++void ila_xlat_pre_exit_net(struct net *net);
+ void ila_xlat_exit_net(struct net *net);
+ 
+ int ila_xlat_nl_cmd_add_mapping(struct sk_buff *skb, struct genl_info *info);
+diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c
+index 36c58aa257e88..a5b0365c5e48e 100644
+--- a/net/ipv6/ila/ila_main.c
++++ b/net/ipv6/ila/ila_main.c
+@@ -71,6 +71,11 @@ static __net_init int ila_init_net(struct net *net)
+ 	return err;
+ }
+ 
++static __net_exit void ila_pre_exit_net(struct net *net)
++{
++	ila_xlat_pre_exit_net(net);
++}
++
+ static __net_exit void ila_exit_net(struct net *net)
+ {
+ 	ila_xlat_exit_net(net);
+@@ -78,6 +83,7 @@ static __net_exit void ila_exit_net(struct net *net)
+ 
+ static struct pernet_operations ila_net_ops = {
+ 	.init = ila_init_net,
++	.pre_exit = ila_pre_exit_net,
+ 	.exit = ila_exit_net,
+ 	.id   = &ila_net_id,
+ 	.size = sizeof(struct ila_net),
+diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
+index 163668531a57f..1f7b674b7c58b 100644
+--- a/net/ipv6/ila/ila_xlat.c
++++ b/net/ipv6/ila/ila_xlat.c
+@@ -616,6 +616,15 @@ int ila_xlat_init_net(struct net *net)
+ 	return 0;
+ }
+ 
++void ila_xlat_pre_exit_net(struct net *net)
++{
++	struct ila_net *ilan = net_generic(net, ila_net_id);
++
++	if (ilan->xlat.hooks_registered)
++		nf_unregister_net_hooks(net, ila_nf_hook_ops,
++					ARRAY_SIZE(ila_nf_hook_ops));
++}
++
+ void ila_xlat_exit_net(struct net *net)
+ {
+ 	struct ila_net *ilan = net_generic(net, ila_net_id);
+@@ -623,10 +632,6 @@ void ila_xlat_exit_net(struct net *net)
+ 	rhashtable_free_and_destroy(&ilan->xlat.rhash_table, ila_free_cb, NULL);
+ 
+ 	free_bucket_spinlocks(ilan->xlat.locks);
+-
+-	if (ilan->xlat.hooks_registered)
+-		nf_unregister_net_hooks(net, ila_nf_hook_ops,
+-					ARRAY_SIZE(ila_nf_hook_ops));
+ }
+ 
+ static int ila_xlat_addr(struct sk_buff *skb, bool sir2ila)
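
Splitting the hook teardown out of ila_xlat_exit_net() into a .pre_exit method matters because of the order cleanup_net() imposes: all pre_exit methods run first, then an RCU grace period, then the exit methods. The netfilter hooks are therefore quiescent before the rhashtable they look up is freed. The shape of the registration, with hypothetical names:

    /* cleanup_net() order: pre_exit -> synchronize_rcu() -> exit,
     * so hooks removed in pre_exit cannot race with exit freeing
     * their state.
     */
    static struct pernet_operations example_net_ops = {
        .init     = example_init_net,
        .pre_exit = example_pre_exit_net,  /* unregister datapath hooks */
        .exit     = example_exit_net,      /* free lookup structures */
        .id       = &example_net_id,
        .size     = sizeof(struct example_net),
    };
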
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 15c8eef1ef443..673f02ea62aae 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -209,7 +209,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
+ 
+ 	flush += ntohs(iph->payload_len) != skb_gro_len(skb);
+ 
+-	rcu_read_lock();
+ 	proto = iph->nexthdr;
+ 	ops = rcu_dereference(inet6_offloads[proto]);
+ 	if (!ops || !ops->callbacks.gro_receive) {
+@@ -222,7 +221,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
+ 
+ 		ops = rcu_dereference(inet6_offloads[proto]);
+ 		if (!ops || !ops->callbacks.gro_receive)
+-			goto out_unlock;
++			goto out;
+ 
+ 		iph = ipv6_hdr(skb);
+ 	}
+@@ -280,9 +279,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
+ 	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
+ 					 ops->callbacks.gro_receive, head, skb);
+ 
+-out_unlock:
+-	rcu_read_unlock();
+-
+ out:
+ 	skb_gro_flush_final(skb, pp, flush);
+ 
+@@ -332,18 +328,14 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
+ 
+ 	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
+ 
+-	rcu_read_lock();
+-
+ 	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
+ 	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
+-		goto out_unlock;
++		goto out;
+ 
+ 	err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
+ 			       udp6_gro_complete, skb, nhoff);
+ 
+-out_unlock:
+-	rcu_read_unlock();
+-
++out:
+ 	return err;
+ }
+ 
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index 7752e1e921f8f..1107782c083d5 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -144,13 +144,11 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
+ 
+ skip:
+ 	NAPI_GRO_CB(skb)->is_ipv6 = 1;
+-	rcu_read_lock();
+ 
+ 	if (static_branch_unlikely(&udpv6_encap_needed_key))
+ 		sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest);
+ 
+ 	pp = udp_gro_receive(head, skb, uh, sk);
+-	rcu_read_unlock();
+ 	return pp;
+ 
+ flush:
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index f7a91266d5a9c..9b11396552dfc 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -96,7 +96,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			mp_opt->data_len = get_unaligned_be16(ptr);
+ 			ptr += 2;
+ 		}
+-		pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d",
++		pr_debug("MP_CAPABLE version=%x, flags=%x, optlen=%d sndr=%llu, rcvr=%llu len=%d\n",
+ 			 version, flags, opsize, mp_opt->sndr_key,
+ 			 mp_opt->rcvr_key, mp_opt->data_len);
+ 		break;
+@@ -110,7 +110,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			ptr += 4;
+ 			mp_opt->nonce = get_unaligned_be32(ptr);
+ 			ptr += 4;
+-			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u",
++			pr_debug("MP_JOIN bkup=%u, id=%u, token=%u, nonce=%u\n",
+ 				 mp_opt->backup, mp_opt->join_id,
+ 				 mp_opt->token, mp_opt->nonce);
+ 		} else if (opsize == TCPOLEN_MPTCP_MPJ_SYNACK) {
+@@ -120,20 +120,20 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			ptr += 8;
+ 			mp_opt->nonce = get_unaligned_be32(ptr);
+ 			ptr += 4;
+-			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u",
++			pr_debug("MP_JOIN bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+ 				 mp_opt->backup, mp_opt->join_id,
+ 				 mp_opt->thmac, mp_opt->nonce);
+ 		} else if (opsize == TCPOLEN_MPTCP_MPJ_ACK) {
+ 			ptr += 2;
+ 			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
+-			pr_debug("MP_JOIN hmac");
++			pr_debug("MP_JOIN hmac\n");
+ 		} else {
+ 			mp_opt->mp_join = 0;
+ 		}
+ 		break;
+ 
+ 	case MPTCPOPT_DSS:
+-		pr_debug("DSS");
++		pr_debug("DSS\n");
+ 		ptr++;
+ 
+ 		/* we must clear 'mpc_map' to be able to detect MP_CAPABLE
+@@ -148,7 +148,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 		mp_opt->ack64 = (flags & MPTCP_DSS_ACK64) != 0;
+ 		mp_opt->use_ack = (flags & MPTCP_DSS_HAS_ACK);
+ 
+-		pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d",
++		pr_debug("data_fin=%d dsn64=%d use_map=%d ack64=%d use_ack=%d\n",
+ 			 mp_opt->data_fin, mp_opt->dsn64,
+ 			 mp_opt->use_map, mp_opt->ack64,
+ 			 mp_opt->use_ack);
+@@ -189,7 +189,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 				ptr += 4;
+ 			}
+ 
+-			pr_debug("data_ack=%llu", mp_opt->data_ack);
++			pr_debug("data_ack=%llu\n", mp_opt->data_ack);
+ 		}
+ 
+ 		if (mp_opt->use_map) {
+@@ -207,7 +207,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 			mp_opt->data_len = get_unaligned_be16(ptr);
+ 			ptr += 2;
+ 
+-			pr_debug("data_seq=%llu subflow_seq=%u data_len=%u",
++			pr_debug("data_seq=%llu subflow_seq=%u data_len=%u\n",
+ 				 mp_opt->data_seq, mp_opt->subflow_seq,
+ 				 mp_opt->data_len);
+ 		}
+@@ -242,7 +242,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 
+ 		mp_opt->add_addr = 1;
+ 		mp_opt->addr_id = *ptr++;
+-		pr_debug("ADD_ADDR: id=%d, echo=%d", mp_opt->addr_id, mp_opt->echo);
++		pr_debug("ADD_ADDR: id=%d, echo=%d\n", mp_opt->addr_id, mp_opt->echo);
+ 		if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
+ 			memcpy((u8 *)&mp_opt->addr.s_addr, (u8 *)ptr, 4);
+ 			ptr += 4;
+@@ -277,7 +277,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
+ 
+ 		mp_opt->rm_addr = 1;
+ 		mp_opt->rm_id = *ptr++;
+-		pr_debug("RM_ADDR: id=%d", mp_opt->rm_id);
++		pr_debug("RM_ADDR: id=%d\n", mp_opt->rm_id);
+ 		break;
+ 
+ 	default:
+@@ -344,7 +344,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
+ 		*size = TCPOLEN_MPTCP_MPC_SYN;
+ 		return true;
+ 	} else if (subflow->request_join) {
+-		pr_debug("remote_token=%u, nonce=%u", subflow->remote_token,
++		pr_debug("remote_token=%u, nonce=%u\n", subflow->remote_token,
+ 			 subflow->local_nonce);
+ 		opts->suboptions = OPTION_MPTCP_MPJ_SYN;
+ 		opts->join_id = subflow->local_id;
+@@ -436,7 +436,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
+ 		else
+ 			*size = TCPOLEN_MPTCP_MPC_ACK;
+ 
+-		pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d",
++		pr_debug("subflow=%p, local_key=%llu, remote_key=%llu map_len=%d\n",
+ 			 subflow, subflow->local_key, subflow->remote_key,
+ 			 data_len);
+ 
+@@ -445,7 +445,7 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
+ 		opts->suboptions = OPTION_MPTCP_MPJ_ACK;
+ 		memcpy(opts->hmac, subflow->hmac, MPTCPOPT_HMAC_LEN);
+ 		*size = TCPOLEN_MPTCP_MPJ_ACK;
+-		pr_debug("subflow=%p", subflow);
++		pr_debug("subflow=%p\n", subflow);
+ 
+ 		schedule_3rdack_retransmission(sk);
+ 		return true;
+@@ -619,7 +619,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk,
+ 		}
+ 	}
+ #endif
+-	pr_debug("addr_id=%d, ahmac=%llu, echo=%d", opts->addr_id, opts->ahmac, echo);
++	pr_debug("addr_id=%d, ahmac=%llu, echo=%d\n", opts->addr_id, opts->ahmac, echo);
+ 
+ 	return true;
+ }
+@@ -644,7 +644,7 @@ static bool mptcp_established_options_rm_addr(struct sock *sk,
+ 	opts->suboptions |= OPTION_MPTCP_RM_ADDR;
+ 	opts->rm_id = rm_id;
+ 
+-	pr_debug("rm_id=%d", opts->rm_id);
++	pr_debug("rm_id=%d\n", opts->rm_id);
+ 
+ 	return true;
+ }
+@@ -703,7 +703,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ 		opts->suboptions = OPTION_MPTCP_MPC_SYNACK;
+ 		opts->sndr_key = subflow_req->local_key;
+ 		*size = TCPOLEN_MPTCP_MPC_SYNACK;
+-		pr_debug("subflow_req=%p, local_key=%llu",
++		pr_debug("subflow_req=%p, local_key=%llu\n",
+ 			 subflow_req, subflow_req->local_key);
+ 		return true;
+ 	} else if (subflow_req->mp_join) {
+@@ -712,7 +712,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ 		opts->join_id = subflow_req->local_id;
+ 		opts->thmac = subflow_req->thmac;
+ 		opts->nonce = subflow_req->local_nonce;
+-		pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u",
++		pr_debug("req=%p, bkup=%u, id=%u, thmac=%llu, nonce=%u\n",
+ 			 subflow_req, opts->backup, opts->join_id,
+ 			 opts->thmac, opts->nonce);
+ 		*size = TCPOLEN_MPTCP_MPJ_SYNACK;
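
The mptcp hunks in options.c above and in pm.c, pm_netlink.c, protocol.c, protocol.h and subflow.c below are largely mechanical: every pr_debug() format string gains a trailing "\n". pr_debug() does not append one itself, and a message left unterminated sits open in the printk buffer where output from another context can be glued onto it, so the rule of thumb is simply:

    pr_debug("msk=%p token=%u\n", msk, token);  /* always terminate the line */
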
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 1f310abbf1ede..a8c26f4179004 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -16,7 +16,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ 			   const struct mptcp_addr_info *addr,
+ 			   bool echo)
+ {
+-	pr_debug("msk=%p, local_id=%d", msk, addr->id);
++	pr_debug("msk=%p, local_id=%d\n", msk, addr->id);
+ 
+ 	msk->pm.local = *addr;
+ 	WRITE_ONCE(msk->pm.add_addr_echo, echo);
+@@ -26,7 +26,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+ 
+ int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
+ {
+-	pr_debug("msk=%p, local_id=%d", msk, local_id);
++	pr_debug("msk=%p, local_id=%d\n", msk, local_id);
+ 
+ 	msk->pm.rm_id = local_id;
+ 	WRITE_ONCE(msk->pm.rm_addr_signal, true);
+@@ -35,7 +35,7 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
+ 
+ int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
+ {
+-	pr_debug("msk=%p, local_id=%d", msk, local_id);
++	pr_debug("msk=%p, local_id=%d\n", msk, local_id);
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 	mptcp_pm_nl_rm_subflow_received(msk, local_id);
+@@ -49,7 +49,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);
++	pr_debug("msk=%p, token=%u side=%d\n", msk, msk->token, server_side);
+ 
+ 	WRITE_ONCE(pm->server_side, server_side);
+ }
+@@ -59,7 +59,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 	int ret = 0;
+ 
+-	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
++	pr_debug("msk=%p subflows=%d max=%d allow=%d\n", msk, pm->subflows,
+ 		 pm->subflows_max, READ_ONCE(pm->accept_subflow));
+ 
+ 	/* try to avoid acquiring the lock below */
+@@ -83,7 +83,7 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
+ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
+ 				   enum mptcp_pm_status new_status)
+ {
+-	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
++	pr_debug("msk=%p status=%x new=%lx\n", msk, msk->pm.status,
+ 		 BIT(new_status));
+ 	if (msk->pm.status & BIT(new_status))
+ 		return false;
+@@ -98,7 +98,7 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk)
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	/* try to avoid acquiring the lock below */
+ 	if (!READ_ONCE(pm->work_pending))
+@@ -114,7 +114,7 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk)
+ 
+ void mptcp_pm_connection_closed(struct mptcp_sock *msk)
+ {
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ }
+ 
+ void mptcp_pm_subflow_established(struct mptcp_sock *msk,
+@@ -122,7 +122,7 @@ void mptcp_pm_subflow_established(struct mptcp_sock *msk,
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	if (!READ_ONCE(pm->work_pending))
+ 		return;
+@@ -137,7 +137,7 @@ void mptcp_pm_subflow_established(struct mptcp_sock *msk,
+ 
+ void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
+ {
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ }
+ 
+ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
+@@ -145,7 +145,7 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
++	pr_debug("msk=%p remote_id=%d accept=%d\n", msk, addr->id,
+ 		 READ_ONCE(pm->accept_addr));
+ 
+ 	spin_lock_bh(&pm->lock);
+@@ -162,7 +162,7 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
+ {
+ 	struct mptcp_pm_data *pm = &msk->pm;
+ 
+-	pr_debug("msk=%p remote_id=%d", msk, rm_id);
++	pr_debug("msk=%p remote_id=%d\n", msk, rm_id);
+ 
+ 	spin_lock_bh(&pm->lock);
+ 	mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index ca57d856d5df5..f115c92c45d4a 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -127,11 +127,13 @@ static bool lookup_subflow_by_saddr(const struct list_head *list,
+ 	return false;
+ }
+ 
+-static struct mptcp_pm_addr_entry *
++static bool
+ select_local_address(const struct pm_nl_pernet *pernet,
+-		     struct mptcp_sock *msk)
++		     struct mptcp_sock *msk,
++		     struct mptcp_pm_addr_entry *new_entry)
+ {
+-	struct mptcp_pm_addr_entry *entry, *ret = NULL;
++	struct mptcp_pm_addr_entry *entry;
++	bool found = false;
+ 
+ 	rcu_read_lock();
+ 	spin_lock_bh(&msk->join_list_lock);
+@@ -145,19 +147,23 @@ select_local_address(const struct pm_nl_pernet *pernet,
+ 		if (entry->addr.family == ((struct sock *)msk)->sk_family &&
+ 		    !lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
+ 		    !lookup_subflow_by_saddr(&msk->join_list, &entry->addr)) {
+-			ret = entry;
++			*new_entry = *entry;
++			found = true;
+ 			break;
+ 		}
+ 	}
+ 	spin_unlock_bh(&msk->join_list_lock);
+ 	rcu_read_unlock();
+-	return ret;
++
++	return found;
+ }
+ 
+-static struct mptcp_pm_addr_entry *
+-select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos)
++static bool
++select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos,
++		      struct mptcp_pm_addr_entry *new_entry)
+ {
+-	struct mptcp_pm_addr_entry *entry, *ret = NULL;
++	struct mptcp_pm_addr_entry *entry;
++	bool found = false;
+ 	int i = 0;
+ 
+ 	rcu_read_lock();
+@@ -170,12 +176,14 @@ select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos)
+ 		if (!(entry->addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
+ 			continue;
+ 		if (i++ == pos) {
+-			ret = entry;
++			*new_entry = *entry;
++			found = true;
+ 			break;
+ 		}
+ 	}
+ 	rcu_read_unlock();
+-	return ret;
++
++	return found;
+ }
+ 
+ static void check_work_pending(struct mptcp_sock *msk)
+@@ -206,7 +214,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ 	struct mptcp_sock *msk = entry->sock;
+ 	struct sock *sk = (struct sock *)msk;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	if (!msk)
+ 		return;
+@@ -225,7 +233,7 @@ static void mptcp_pm_add_timer(struct timer_list *timer)
+ 	spin_lock_bh(&msk->pm.lock);
+ 
+ 	if (!mptcp_pm_should_add_signal(msk)) {
+-		pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
++		pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id);
+ 		mptcp_pm_announce_addr(msk, &entry->addr, false);
+ 		entry->retrans_times++;
+ 	}
+@@ -289,7 +297,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
+ 	struct sock *sk = (struct sock *)msk;
+ 	LIST_HEAD(free_list);
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 	list_splice_init(&msk->pm.anno_list, &free_list);
+@@ -305,7 +313,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ {
+ 	struct mptcp_addr_info remote = { 0 };
+ 	struct sock *sk = (struct sock *)msk;
+-	struct mptcp_pm_addr_entry *local;
++	struct mptcp_pm_addr_entry local;
+ 	struct pm_nl_pernet *pernet;
+ 
+ 	pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
+@@ -317,13 +325,11 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ 
+ 	/* check first for announce */
+ 	if (msk->pm.add_addr_signaled < msk->pm.add_addr_signal_max) {
+-		local = select_signal_address(pernet,
+-					      msk->pm.add_addr_signaled);
+-
+-		if (local) {
+-			if (mptcp_pm_alloc_anno_list(msk, local)) {
++		if (select_signal_address(pernet, msk->pm.add_addr_signaled,
++					  &local)) {
++			if (mptcp_pm_alloc_anno_list(msk, &local)) {
+ 				msk->pm.add_addr_signaled++;
+-				mptcp_pm_announce_addr(msk, &local->addr, false);
++				mptcp_pm_announce_addr(msk, &local.addr, false);
+ 			}
+ 		} else {
+ 			/* pick failed, avoid further attempts later */
+@@ -338,13 +344,12 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
+ 	    msk->pm.subflows < msk->pm.subflows_max) {
+ 		remote_address((struct sock_common *)sk, &remote);
+ 
+-		local = select_local_address(pernet, msk);
+-		if (local) {
++		if (select_local_address(pernet, msk, &local)) {
+ 			msk->pm.local_addr_used++;
+ 			msk->pm.subflows++;
+ 			check_work_pending(msk);
+ 			spin_unlock_bh(&msk->pm.lock);
+-			__mptcp_subflow_connect(sk, &local->addr, &remote);
++			__mptcp_subflow_connect(sk, &local.addr, &remote);
+ 			spin_lock_bh(&msk->pm.lock);
+ 			return;
+ 		}
+@@ -372,7 +377,7 @@ void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
+ 	struct mptcp_addr_info local;
+ 	int err;
+ 
+-	pr_debug("accepted %d:%d remote family %d",
++	pr_debug("accepted %d:%d remote family %d\n",
+ 		 msk->pm.add_addr_accepted, msk->pm.add_addr_accept_max,
+ 		 msk->pm.remote.family);
+ 	msk->pm.subflows++;
+@@ -405,7 +410,7 @@ void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
+ 	struct mptcp_subflow_context *subflow, *tmp;
+ 	struct sock *sk = (struct sock *)msk;
+ 
+-	pr_debug("address rm_id %d", msk->pm.rm_id);
++	pr_debug("address rm_id %d\n", msk->pm.rm_id);
+ 
+ 	if (!msk->pm.rm_id)
+ 		return;
+@@ -441,7 +446,7 @@ void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id)
+ 	struct mptcp_subflow_context *subflow, *tmp;
+ 	struct sock *sk = (struct sock *)msk;
+ 
+-	pr_debug("subflow rm_id %d", rm_id);
++	pr_debug("subflow rm_id %d\n", rm_id);
+ 
+ 	if (!rm_id)
+ 		return;
+@@ -791,7 +796,7 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ 	struct mptcp_sock *msk;
+ 	long s_slot = 0, s_num = 0;
+ 
+-	pr_debug("remove_id=%d", addr->id);
++	pr_debug("remove_id=%d\n", addr->id);
+ 
+ 	while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+ 		struct sock *sk = (struct sock *)msk;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 0ef6a99b62b0d..590e2c9bb67e2 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -128,7 +128,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ 	    !skb_try_coalesce(to, from, &fragstolen, &delta))
+ 		return false;
+ 
+-	pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
++	pr_debug("coalesced seq %llx into %llx new len %d new end seq %llx\n",
+ 		 MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
+ 		 to->len, MPTCP_SKB_CB(from)->end_seq);
+ 	MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
+@@ -164,7 +164,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
+ 	space = tcp_space(sk);
+ 	max_seq = space > 0 ? space + msk->ack_seq : msk->ack_seq;
+ 
+-	pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
++	pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq,
+ 		 RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ 	if (after64(seq, max_seq)) {
+ 		/* out of window */
+@@ -469,7 +469,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
+ 	u32 old_copied_seq;
+ 	bool done = false;
+ 
+-	pr_debug("msk=%p ssk=%p", msk, ssk);
++	pr_debug("msk=%p ssk=%p\n", msk, ssk);
+ 	tp = tcp_sk(ssk);
+ 	old_copied_seq = tp->copied_seq;
+ 	do {
+@@ -552,7 +552,7 @@ static bool mptcp_ofo_queue(struct mptcp_sock *msk)
+ 	u64 end_seq;
+ 
+ 	p = rb_first(&msk->out_of_order_queue);
+-	pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
++	pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ 	while (p) {
+ 		skb = rb_to_skb(p);
+ 		if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
+@@ -574,7 +574,7 @@ static bool mptcp_ofo_queue(struct mptcp_sock *msk)
+ 			int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+ 
+ 			/* skip overlapping data, if any */
+-			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
++			pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n",
+ 				 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
+ 				 delta);
+ 			MPTCP_SKB_CB(skb)->offset += delta;
+@@ -956,12 +956,12 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 		psize = min_t(size_t, pfrag->size - offset, avail_size);
+ 
+ 		/* Copy to page */
+-		pr_debug("left=%zu", msg_data_left(msg));
++		pr_debug("left=%zu\n", msg_data_left(msg));
+ 		psize = copy_page_from_iter(pfrag->page, offset,
+ 					    min_t(size_t, msg_data_left(msg),
+ 						  psize),
+ 					    &msg->msg_iter);
+-		pr_debug("left=%zu", msg_data_left(msg));
++		pr_debug("left=%zu\n", msg_data_left(msg));
+ 		if (!psize)
+ 			return -EINVAL;
+ 
+@@ -1031,7 +1031,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 	mpext->use_map = 1;
+ 	mpext->dsn64 = 1;
+ 
+-	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
++	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n",
+ 		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
+ 		 mpext->dsn64);
+ 
+@@ -1147,7 +1147,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
+ 		}
+ 	}
+ 
+-	pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld",
++	pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld\n",
+ 		 msk, nr_active, send_info[0].ssk, send_info[0].ratio,
+ 		 send_info[1].ssk, send_info[1].ratio);
+ 
+@@ -1240,7 +1240,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	    sndbuf > READ_ONCE(sk->sk_sndbuf))
+ 		WRITE_ONCE(sk->sk_sndbuf, sndbuf);
+ 
+-	pr_debug("conn_list->subflow=%p", ssk);
++	pr_debug("conn_list->subflow=%p\n", ssk);
+ 
+ 	lock_sock(ssk);
+ 	tx_ok = msg_data_left(msg);
+@@ -1577,7 +1577,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 			}
+ 		}
+ 
+-		pr_debug("block timeout %ld", timeo);
++		pr_debug("block timeout %ld\n", timeo);
+ 		mptcp_wait_data(sk, &timeo);
+ 	}
+ 
+@@ -1595,7 +1595,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 		set_bit(MPTCP_DATA_READY, &msk->flags);
+ 	}
+ out_err:
+-	pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
++	pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d\n",
+ 		 msk, test_bit(MPTCP_DATA_READY, &msk->flags),
+ 		 skb_queue_empty(&sk->sk_receive_queue), copied);
+ 	mptcp_rcv_space_adjust(msk, copied);
+@@ -1712,7 +1712,7 @@ static void pm_work(struct mptcp_sock *msk)
+ 
+ 	spin_lock_bh(&msk->pm.lock);
+ 
+-	pr_debug("msk=%p status=%x", msk, pm->status);
++	pr_debug("msk=%p status=%x\n", msk, pm->status);
+ 	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
+ 		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
+ 		mptcp_pm_nl_add_addr_received(msk);
+@@ -1913,11 +1913,11 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+ 		break;
+ 	default:
+ 		if (__mptcp_check_fallback(mptcp_sk(sk))) {
+-			pr_debug("Fallback");
++			pr_debug("Fallback\n");
+ 			ssk->sk_shutdown |= how;
+ 			tcp_shutdown(ssk, how);
+ 		} else {
+-			pr_debug("Sending DATA_FIN on subflow %p", ssk);
++			pr_debug("Sending DATA_FIN on subflow %p\n", ssk);
+ 			mptcp_set_timeout(sk, ssk);
+ 			tcp_send_ack(ssk);
+ 		}
+@@ -1973,7 +1973,7 @@ static void mptcp_close(struct sock *sk, long timeout)
+ 	if (__mptcp_check_fallback(msk)) {
+ 		goto update_state;
+ 	} else if (mptcp_close_state(sk)) {
+-		pr_debug("Sending DATA_FIN sk=%p", sk);
++		pr_debug("Sending DATA_FIN sk=%p\n", sk);
+ 		WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
+ 		WRITE_ONCE(msk->snd_data_fin_enable, 1);
+ 
+@@ -2181,12 +2181,12 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
+ 		return NULL;
+ 	}
+ 
+-	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
++	pr_debug("msk=%p, listener=%p\n", msk, mptcp_subflow_ctx(listener->sk));
+ 	newsk = inet_csk_accept(listener->sk, flags, err, kern);
+ 	if (!newsk)
+ 		return NULL;
+ 
+-	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
++	pr_debug("msk=%p, subflow is mptcp=%d\n", msk, sk_is_mptcp(newsk));
+ 	if (sk_is_mptcp(newsk)) {
+ 		struct mptcp_subflow_context *subflow;
+ 		struct sock *new_mptcp_sock;
+@@ -2351,7 +2351,7 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	struct sock *ssk;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	if (mptcp_unsupported(level, optname))
+ 		return -ENOPROTOOPT;
+@@ -2383,7 +2383,7 @@ static int mptcp_getsockopt(struct sock *sk, int level, int optname,
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+ 	struct sock *ssk;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	/* @@ the meaning of setsockopt() when the socket is connected and
+ 	 * there are multiple subflows is not yet defined. It is up to the
+@@ -2454,7 +2454,7 @@ static int mptcp_get_port(struct sock *sk, unsigned short snum)
+ 	struct socket *ssock;
+ 
+ 	ssock = __mptcp_nmpc_socket(msk);
+-	pr_debug("msk=%p, subflow=%p", msk, ssock);
++	pr_debug("msk=%p, subflow=%p\n", msk, ssock);
+ 	if (WARN_ON_ONCE(!ssock))
+ 		return -EINVAL;
+ 
+@@ -2472,7 +2472,7 @@ void mptcp_finish_connect(struct sock *ssk)
+ 	sk = subflow->conn;
+ 	msk = mptcp_sk(sk);
+ 
+-	pr_debug("msk=%p, token=%u", sk, subflow->token);
++	pr_debug("msk=%p, token=%u\n", sk, subflow->token);
+ 
+ 	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
+ 	ack_seq++;
+@@ -2511,7 +2511,7 @@ bool mptcp_finish_join(struct sock *sk)
+ 	struct socket *parent_sock;
+ 	bool ret;
+ 
+-	pr_debug("msk=%p, subflow=%p", msk, subflow);
++	pr_debug("msk=%p, subflow=%p\n", msk, subflow);
+ 
+ 	/* mptcp socket already closing? */
+ 	if (!mptcp_is_fully_established(parent))
+@@ -2673,7 +2673,7 @@ static int mptcp_listen(struct socket *sock, int backlog)
+ 	struct socket *ssock;
+ 	int err;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	lock_sock(sock->sk);
+ 	ssock = __mptcp_nmpc_socket(msk);
+@@ -2703,7 +2703,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ 	struct socket *ssock;
+ 	int err;
+ 
+-	pr_debug("msk=%p", msk);
++	pr_debug("msk=%p\n", msk);
+ 
+ 	lock_sock(sock->sk);
+ 	if (sock->sk->sk_state != TCP_LISTEN)
+@@ -2762,7 +2762,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
+ 	sock_poll_wait(file, sock, wait);
+ 
+ 	state = inet_sk_state_load(sk);
+-	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
++	pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags);
+ 	if (state == TCP_LISTEN)
+ 		return mptcp_check_readable(msk);
+ 
+@@ -2783,7 +2783,7 @@ static int mptcp_shutdown(struct socket *sock, int how)
+ 	struct mptcp_subflow_context *subflow;
+ 	int ret = 0;
+ 
+-	pr_debug("sk=%p, how=%d", msk, how);
++	pr_debug("sk=%p, how=%d\n", msk, how);
+ 
+ 	lock_sock(sock->sk);
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 4348bccb982f9..b8351b671c2fa 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -523,7 +523,7 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
+ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
+ {
+ 	if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) {
+-		pr_debug("TCP fallback already done (msk=%p)", msk);
++		pr_debug("TCP fallback already done (msk=%p)\n", msk);
+ 		return;
+ 	}
+ 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+@@ -537,7 +537,7 @@ static inline void mptcp_do_fallback(struct sock *sk)
+ 	__mptcp_do_fallback(msk);
+ }
+ 
+-#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
++#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
+ 
+ static inline bool subflow_simultaneous_connect(struct sock *sk)
+ {
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index ba86cb06d6d8c..8a0ef50c307ce 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -34,7 +34,7 @@ static void subflow_req_destructor(struct request_sock *req)
+ {
+ 	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+ 
+-	pr_debug("subflow_req=%p", subflow_req);
++	pr_debug("subflow_req=%p\n", subflow_req);
+ 
+ 	if (subflow_req->msk)
+ 		sock_put((struct sock *)subflow_req->msk);
+@@ -121,7 +121,7 @@ static void subflow_init_req(struct request_sock *req,
+ 	struct mptcp_options_received mp_opt;
+ 	int ret;
+ 
+-	pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
++	pr_debug("subflow_req=%p, listener=%p\n", subflow_req, listener);
+ 
+ 	ret = __subflow_init_req(req, sk_listener);
+ 	if (ret)
+@@ -183,7 +183,7 @@ static void subflow_init_req(struct request_sock *req,
+ 				subflow_init_req_cookie_join_save(subflow_req, skb);
+ 		}
+ 
+-		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
++		pr_debug("token=%u, remote_nonce=%u msk=%p\n", subflow_req->token,
+ 			 subflow_req->remote_nonce, subflow_req->msk);
+ 	}
+ }
+@@ -306,7 +306,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 	subflow->rel_write_seq = 1;
+ 	subflow->conn_finished = 1;
+ 	subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
+-	pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
++	pr_debug("subflow=%p synack seq=%x\n", subflow, subflow->ssn_offset);
+ 
+ 	mptcp_get_options(skb, &mp_opt);
+ 	if (subflow->request_mptcp) {
+@@ -321,7 +321,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 		subflow->mp_capable = 1;
+ 		subflow->can_ack = 1;
+ 		subflow->remote_key = mp_opt.sndr_key;
+-		pr_debug("subflow=%p, remote_key=%llu", subflow,
++		pr_debug("subflow=%p, remote_key=%llu\n", subflow,
+ 			 subflow->remote_key);
+ 		mptcp_finish_connect(sk);
+ 	} else if (subflow->request_join) {
+@@ -332,7 +332,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
+ 
+ 		subflow->thmac = mp_opt.thmac;
+ 		subflow->remote_nonce = mp_opt.nonce;
+-		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
++		pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u\n", subflow,
+ 			 subflow->thmac, subflow->remote_nonce);
+ 
+ 		if (!subflow_thmac_valid(subflow)) {
+@@ -371,7 +371,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ 
+-	pr_debug("subflow=%p", subflow);
++	pr_debug("subflow=%p\n", subflow);
+ 
+ 	/* Never answer to SYNs sent to broadcast or multicast */
+ 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -401,7 +401,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ 
+-	pr_debug("subflow=%p", subflow);
++	pr_debug("subflow=%p\n", subflow);
+ 
+ 	if (skb->protocol == htons(ETH_P_IP))
+ 		return subflow_v4_conn_request(sk, skb);
+@@ -543,7 +543,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 	struct sock *new_msk = NULL;
+ 	struct sock *child;
+ 
+-	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
++	pr_debug("listener=%p, req=%p, conn=%p\n", listener, req, listener->conn);
+ 
+ 	/* After child creation we must look for 'mp_capable' even when options
+ 	 * are not parsed
+@@ -692,7 +692,7 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
+ 
+ static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
+ {
+-	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
++	pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d\n",
+ 		 ssn, subflow->map_subflow_seq, subflow->map_data_len);
+ }
+ 
+@@ -768,7 +768,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 		goto validate_seq;
+ 	}
+ 
+-	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d",
++	pr_debug("seq=%llu is64=%d ssn=%u data_len=%u data_fin=%d\n",
+ 		 mpext->data_seq, mpext->dsn64, mpext->subflow_seq,
+ 		 mpext->data_len, mpext->data_fin);
+ 
+@@ -782,7 +782,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 		if (data_len == 1) {
+ 			bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
+ 								 mpext->dsn64);
+-			pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
++			pr_debug("DATA_FIN with no payload seq=%llu\n", mpext->data_seq);
+ 			if (subflow->map_valid) {
+ 				/* A DATA_FIN might arrive in a DSS
+ 				 * option before the previous mapping
+@@ -807,7 +807,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 				data_fin_seq &= GENMASK_ULL(31, 0);
+ 
+ 			mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
+-			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
++			pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d\n",
+ 				 data_fin_seq, mpext->dsn64);
+ 		}
+ 
+@@ -818,7 +818,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 	if (!mpext->dsn64) {
+ 		map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
+ 				     mpext->data_seq);
+-		pr_debug("expanded seq=%llu", subflow->map_seq);
++		pr_debug("expanded seq=%llu\n", subflow->map_seq);
+ 	} else {
+ 		map_seq = mpext->data_seq;
+ 	}
+@@ -850,7 +850,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 	subflow->map_data_len = data_len;
+ 	subflow->map_valid = 1;
+ 	subflow->mpc_map = mpext->mpc_map;
+-	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u",
++	pr_debug("new map seq=%llu subflow_seq=%u data_len=%u\n",
+ 		 subflow->map_seq, subflow->map_subflow_seq,
+ 		 subflow->map_data_len);
+ 
+@@ -880,7 +880,7 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ 	avail_len = skb->len - offset;
+ 	incr = limit >= avail_len ? avail_len + fin : limit;
+ 
+-	pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
++	pr_debug("discarding=%d len=%d offset=%d seq=%d\n", incr, skb->len,
+ 		 offset, subflow->map_subflow_seq);
+ 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
+ 	tcp_sk(ssk)->copied_seq += incr;
+@@ -901,7 +901,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 	struct mptcp_sock *msk;
+ 	struct sk_buff *skb;
+ 
+-	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
++	pr_debug("msk=%p ssk=%p data_avail=%d skb=%p\n", subflow->conn, ssk,
+ 		 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
+ 	if (!skb_peek(&ssk->sk_receive_queue))
+ 		subflow->data_avail = 0;
+@@ -914,7 +914,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 		u64 old_ack;
+ 
+ 		status = get_mapping_status(ssk, msk);
+-		pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
++		pr_debug("msk=%p ssk=%p status=%d\n", msk, ssk, status);
+ 		if (status == MAPPING_INVALID) {
+ 			ssk->sk_err = EBADMSG;
+ 			goto fatal;
+@@ -953,7 +953,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
+ 
+ 		old_ack = READ_ONCE(msk->ack_seq);
+ 		ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
+-		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
++		pr_debug("msk ack_seq=%llx subflow ack_seq=%llx\n", old_ack,
+ 			 ack_seq);
+ 		if (ack_seq == old_ack) {
+ 			subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+@@ -991,7 +991,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
+ 		subflow->map_valid = 0;
+ 		subflow->data_avail = 0;
+ 
+-		pr_debug("Done with mapping: seq=%u data_len=%u",
++		pr_debug("Done with mapping: seq=%u data_len=%u\n",
+ 			 subflow->map_subflow_seq,
+ 			 subflow->map_data_len);
+ 	}
+@@ -1079,7 +1079,7 @@ void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
+ 
+ 	target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
+ 
+-	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
++	pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d\n",
+ 		 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
+ 
+ 	if (likely(icsk->icsk_af_ops == target))
+@@ -1162,7 +1162,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
+ 		goto failed;
+ 
+ 	mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
+-	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
++	pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d\n", msk,
+ 		 remote_token, local_id, remote_id);
+ 	subflow->remote_token = remote_token;
+ 	subflow->local_id = local_id;
+@@ -1233,7 +1233,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
+ 	SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
+ 
+ 	subflow = mptcp_subflow_ctx(sf->sk);
+-	pr_debug("subflow=%p", subflow);
++	pr_debug("subflow=%p\n", subflow);
+ 
+ 	*new_sock = sf;
+ 	sock_hold(sk);
+@@ -1255,7 +1255,7 @@ static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
+ 	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+ 	INIT_LIST_HEAD(&ctx->node);
+ 
+-	pr_debug("subflow=%p", ctx);
++	pr_debug("subflow=%p\n", ctx);
+ 
+ 	ctx->tcp_sock = sk;
+ 
+@@ -1332,7 +1332,7 @@ static int subflow_ulp_init(struct sock *sk)
+ 		goto out;
+ 	}
+ 
+-	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);
++	pr_debug("subflow=%p, family=%d\n", ctx, sk->sk_family);
+ 
+ 	tp->is_mptcp = 1;
+ 	ctx->icsk_af_ops = icsk->icsk_af_ops;
+diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
+index 82f36beb2e766..0ce12a33ffda4 100644
+--- a/net/netfilter/nf_conncount.c
++++ b/net/netfilter/nf_conncount.c
+@@ -310,7 +310,6 @@ insert_tree(struct net *net,
+ 	struct nf_conncount_rb *rbconn;
+ 	struct nf_conncount_tuple *conn;
+ 	unsigned int count = 0, gc_count = 0;
+-	u8 keylen = data->keylen;
+ 	bool do_gc = true;
+ 
+ 	spin_lock_bh(&nf_conncount_locks[hash]);
+@@ -322,7 +321,7 @@ insert_tree(struct net *net,
+ 		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
+ 
+ 		parent = *rbnode;
+-		diff = key_diff(key, rbconn->key, keylen);
++		diff = key_diff(key, rbconn->key, data->keylen);
+ 		if (diff < 0) {
+ 			rbnode = &((*rbnode)->rb_left);
+ 		} else if (diff > 0) {
+@@ -367,7 +366,7 @@ insert_tree(struct net *net,
+ 
+ 	conn->tuple = *tuple;
+ 	conn->zone = *zone;
+-	memcpy(rbconn->key, key, sizeof(u32) * keylen);
++	memcpy(rbconn->key, key, sizeof(u32) * data->keylen);
+ 
+ 	nf_conncount_list_init(&rbconn->list);
+ 	list_add(&conn->node, &rbconn->list.head);
+@@ -392,7 +391,6 @@ count_tree(struct net *net,
+ 	struct rb_node *parent;
+ 	struct nf_conncount_rb *rbconn;
+ 	unsigned int hash;
+-	u8 keylen = data->keylen;
+ 
+ 	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
+ 	root = &data->root[hash];
+@@ -403,7 +401,7 @@ count_tree(struct net *net,
+ 
+ 		rbconn = rb_entry(parent, struct nf_conncount_rb, node);
+ 
+-		diff = key_diff(key, rbconn->key, keylen);
++		diff = key_diff(key, rbconn->key, data->keylen);
+ 		if (diff < 0) {
+ 			parent = rcu_dereference_raw(parent->rb_left);
+ 		} else if (diff > 0) {
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 5dc7a3c310c9d..4ddb43a6644ab 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -785,12 +785,15 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
+ 		 * queue, accept the collision, update the host tags.
+ 		 */
+ 		q->way_collisions++;
+-		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
+-			q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
+-			q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+-		}
+ 		allocate_src = cake_dsrc(flow_mode);
+ 		allocate_dst = cake_ddst(flow_mode);
++
++		if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
++			if (allocate_src)
++				q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
++			if (allocate_dst)
++				q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
++		}
+ found:
+ 		/* reserve queue for future packets in same flow */
+ 		reduced_hash = outer_hash + k;
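
The sch_cake hunk computes allocate_src/allocate_dst before touching the per-host bulk counters and decrements a counter only for a direction the current flow mode actually tracks; decrementing both unconditionally, as the old code did, could underflow a counter that was never incremented for this flow. The guard, in sketch form:

    /* Per-host refcounts exist only for directions selected by the
     * flow mode, so only those may be decremented.
     */
    if (allocate_src)
        srchost->srchost_bulk_flow_count--;
    if (allocate_dst)
        dsthost->dsthost_bulk_flow_count--;
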
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index e0e16b0fdb179..93ed7bac9ee60 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -733,11 +733,10 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
+ 
+ 				err = qdisc_enqueue(skb, q->qdisc, &to_free);
+ 				kfree_skb_list(to_free);
+-				if (err != NET_XMIT_SUCCESS &&
+-				    net_xmit_drop_count(err)) {
+-					qdisc_qstats_drop(sch);
+-					qdisc_tree_reduce_backlog(sch, 1,
+-								  pkt_len);
++				if (err != NET_XMIT_SUCCESS) {
++					if (net_xmit_drop_count(err))
++						qdisc_qstats_drop(sch);
++					qdisc_tree_reduce_backlog(sch, 1, pkt_len);
+ 				}
+ 				goto tfifo_dequeue;
+ 			}
+diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
+index c964b48eaabae..a004c3ef35c0f 100644
+--- a/net/sunrpc/stats.c
++++ b/net/sunrpc/stats.c
+@@ -309,7 +309,7 @@ EXPORT_SYMBOL_GPL(rpc_proc_unregister);
+ struct proc_dir_entry *
+ svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
+ {
+-	return do_register(net, statp->program->pg_name, statp, proc_ops);
++	return do_register(net, statp->program->pg_name, net, proc_ops);
+ }
+ EXPORT_SYMBOL_GPL(svc_proc_register);
+ 
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index f8815ae776e68..4212fb1c3d887 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -445,8 +445,8 @@ __svc_init_bc(struct svc_serv *serv)
+  * Create an RPC service
+  */
+ static struct svc_serv *
+-__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
+-	     int (*threadfn)(void *data))
++__svc_create(struct svc_program *prog, struct svc_stat *stats,
++	     unsigned int bufsize, int npools, int (*threadfn)(void *data))
+ {
+ 	struct svc_serv	*serv;
+ 	unsigned int vers;
+@@ -458,7 +458,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
+ 	serv->sv_name      = prog->pg_name;
+ 	serv->sv_program   = prog;
+ 	kref_init(&serv->sv_refcnt);
+-	serv->sv_stats     = prog->pg_stats;
++	serv->sv_stats     = stats;
+ 	if (bufsize > RPCSVC_MAXPAYLOAD)
+ 		bufsize = RPCSVC_MAXPAYLOAD;
+ 	serv->sv_max_payload = bufsize? bufsize : 4096;
+@@ -520,26 +520,28 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
+ struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize,
+ 			    int (*threadfn)(void *data))
+ {
+-	return __svc_create(prog, bufsize, 1, threadfn);
++	return __svc_create(prog, NULL, bufsize, 1, threadfn);
+ }
+ EXPORT_SYMBOL_GPL(svc_create);
+ 
+ /**
+  * svc_create_pooled - Create an RPC service with pooled threads
+  * @prog: the RPC program the new service will handle
++ * @stats: per-service statistics, or NULL to disable stats accounting
+  * @bufsize: maximum message size for @prog
+  * @threadfn: a function to service RPC requests for @prog
+  *
+  * Returns an instantiated struct svc_serv object or NULL.
+  */
+ struct svc_serv *svc_create_pooled(struct svc_program *prog,
++				   struct svc_stat *stats,
+ 				   unsigned int bufsize,
+ 				   int (*threadfn)(void *data))
+ {
+ 	struct svc_serv *serv;
+ 	unsigned int npools = svc_pool_map_get();
+ 
+-	serv = __svc_create(prog, bufsize, npools, threadfn);
++	serv = __svc_create(prog, stats, bufsize, npools, threadfn);
+ 	if (!serv)
+ 		goto out_err;
+ 	return serv;
+@@ -1355,7 +1357,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 		goto err_bad_proc;
+ 
+ 	/* Syntactic check complete */
+-	serv->sv_stats->rpccnt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpccnt++;
+ 	trace_svc_process(rqstp, progp->pg_name);
+ 
+ 	/* Build the reply header. */
+@@ -1421,7 +1424,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 	goto close_xprt;
+ 
+ err_bad_rpc:
+-	serv->sv_stats->rpcbadfmt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadfmt++;
+ 	svc_putnl(resv, 1);	/* REJECT */
+ 	svc_putnl(resv, 0);	/* RPC_MISMATCH */
+ 	svc_putnl(resv, 2);	/* Only RPCv2 supported */
+@@ -1434,7 +1438,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ err_bad_auth:
+ 	dprintk("svc: authentication failed (%d)\n",
+ 		be32_to_cpu(rqstp->rq_auth_stat));
+-	serv->sv_stats->rpcbadauth++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadauth++;
+ 	/* Restore write pointer to location of accept status: */
+ 	xdr_ressize_check(rqstp, reply_statp);
+ 	svc_putnl(resv, 1);	/* REJECT */
+@@ -1444,7 +1449,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 
+ err_bad_prog:
+ 	dprintk("svc: unknown program %d\n", prog);
+-	serv->sv_stats->rpcbadfmt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadfmt++;
+ 	svc_putnl(resv, RPC_PROG_UNAVAIL);
+ 	goto sendit;
+ 
+@@ -1452,7 +1458,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
+ 		       rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
+ 
+-	serv->sv_stats->rpcbadfmt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadfmt++;
+ 	svc_putnl(resv, RPC_PROG_MISMATCH);
+ 	svc_putnl(resv, process.mismatch.lovers);
+ 	svc_putnl(resv, process.mismatch.hivers);
+@@ -1461,7 +1468,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ err_bad_proc:
+ 	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
+ 
+-	serv->sv_stats->rpcbadfmt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadfmt++;
+ 	svc_putnl(resv, RPC_PROC_UNAVAIL);
+ 	goto sendit;
+ 
+@@ -1470,7 +1478,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
+ 
+ 	rpc_stat = rpc_garbage_args;
+ err_bad:
+-	serv->sv_stats->rpcbadfmt++;
++	if (serv->sv_stats)
++		serv->sv_stats->rpcbadfmt++;
+ 	svc_putnl(resv, ntohl(rpc_stat));
+ 	goto sendit;
+ }
+@@ -1505,7 +1514,8 @@ svc_process(struct svc_rqst *rqstp)
+ 	if (dir != 0) {
+ 		/* direction != CALL */
+ 		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
+-		serv->sv_stats->rpcbadfmt++;
++		if (serv->sv_stats)
++			serv->sv_stats->rpcbadfmt++;
+ 		goto out_drop;
+ 	}
+ 
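
The sunrpc/svc.c changes stop wiring sv_stats from the program and instead take an explicit, optional stats pointer through __svc_create()/svc_create_pooled(); every counter bump is then guarded by a NULL check. A small userspace sketch of this optional-stats pattern (the struct names are hypothetical stand-ins for svc_serv/svc_stat):

  #include <stddef.h>
  #include <stdio.h>

  struct stats { unsigned long rpccnt, rpcbadfmt; };
  struct service { const char *name; struct stats *st; /* may be NULL */ };

  static void count_request(struct service *sv, int bad_fmt)
  {
          if (!sv->st)
                  return;                 /* stats were not requested */
          if (bad_fmt)
                  sv->st->rpcbadfmt++;
          else
                  sv->st->rpccnt++;
  }

  int main(void)
  {
          struct stats st = { 0, 0 };
          struct service nfsd = { "nfsd", &st };
          struct service bc = { "bc", NULL };  /* like svc_create(prog, NULL, ...) */

          count_request(&nfsd, 0);
          count_request(&bc, 1);               /* safe: guarded by NULL check */
          printf("%s: rpccnt=%lu rpcbadfmt=%lu\n",
                 nfsd.name, st.rpccnt, st.rpcbadfmt);
          return 0;
  }
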
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+index 80a0c0e875909..7c50eddb8d3ca 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+@@ -460,6 +460,8 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
+ 		offset += info->wi_seg_off;
+ 
+ 		write_len = min(remaining, length - info->wi_seg_off);
++		if (!write_len)
++			goto out_overflow;
+ 		ctxt = svc_rdma_get_rw_ctxt(rdma,
+ 					    (write_len >> PAGE_SHIFT) + 2);
+ 		if (!ctxt)
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 0666f981618a2..e0cd6d7350533 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2314,6 +2314,13 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ 	case -EALREADY:
+ 		xprt_unlock_connect(xprt, transport);
+ 		return;
++	case -EPERM:
++		/* Happens, for instance, if a BPF program is preventing
++		 * the connect. Remap the error so upper layers can better
++		 * deal with it.
++		 */
++		status = -ECONNREFUSED;
++		fallthrough;
+ 	case -EINVAL:
+ 		/* Happens, for instance, if the user specified a link
+ 		 * local IPv6 address without a scope-id.
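
The xprtsock hunk remaps -EPERM (seen, per the new comment, when a BPF program vetoes the connect) to -ECONNREFUSED and then falls through into the existing error handling. A tiny illustration of the remap-and-fallthrough idiom; this is a sketch, not the transport's real state machine:

  #include <errno.h>
  #include <stdio.h>

  static int classify_connect_error(int status)
  {
          switch (status) {
          case -EPERM:
                  /* Remap so upper layers see an ordinary refusal. */
                  status = -ECONNREFUSED;
                  /* fall through */
          case -ECONNREFUSED:
          case -EINVAL:
                  return status;          /* callers retry or report */
          default:
                  return status;
          }
  }

  int main(void)
  {
          printf("-EPERM maps to %d (-ECONNREFUSED is %d)\n",
                 classify_connect_error(-EPERM), -ECONNREFUSED);
          return 0;
  }
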
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index e2ff610d27760..b7e9c1238516f 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -603,9 +603,6 @@ static void init_peercred(struct sock *sk)
+ 
+ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ {
+-	const struct cred *old_cred;
+-	struct pid *old_pid;
+-
+ 	if (sk < peersk) {
+ 		spin_lock(&sk->sk_peer_lock);
+ 		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+@@ -613,16 +610,12 @@ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ 		spin_lock(&peersk->sk_peer_lock);
+ 		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+ 	}
+-	old_pid = sk->sk_peer_pid;
+-	old_cred = sk->sk_peer_cred;
++
+ 	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
+ 	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+ 
+ 	spin_unlock(&sk->sk_peer_lock);
+ 	spin_unlock(&peersk->sk_peer_lock);
+-
+-	put_pid(old_pid);
+-	put_cred(old_cred);
+ }
+ 
+ static int unix_listen(struct socket *sock, int backlog)
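
The copy_peercred() hunk drops the old_pid/old_cred bookkeeping, apparently because the destination socket on this path is freshly created and has no previous peer credentials to put, while keeping the address-ordered acquisition of the two sk_peer_lock spinlocks. That ordering rule is the part worth illustrating; a userspace sketch with pthreads, plain mutexes standing in for the spinlocks:

  #include <pthread.h>
  #include <stdio.h>

  struct sock_like { pthread_mutex_t lock; int peer; };

  /* Always take the two peer locks lowest-address-first, as the kept
   * part of copy_peercred() does, so concurrent pairings of the same
   * two sockets cannot deadlock. */
  static void lock_pair(struct sock_like *a, struct sock_like *b)
  {
          if (a < b) {
                  pthread_mutex_lock(&a->lock);
                  pthread_mutex_lock(&b->lock);
          } else {
                  pthread_mutex_lock(&b->lock);
                  pthread_mutex_lock(&a->lock);
          }
  }

  int main(void)
  {
          struct sock_like s1 = { PTHREAD_MUTEX_INITIALIZER, 0 };
          struct sock_like s2 = { PTHREAD_MUTEX_INITIALIZER, 0 };

          lock_pair(&s1, &s2);
          s1.peer = 42;           /* fresh socket: nothing old to put */
          pthread_mutex_unlock(&s1.lock);
          pthread_mutex_unlock(&s2.lock);
          printf("peered\n");
          return 0;
  }
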
+diff --git a/net/wireless/scan.c b/net/wireless/scan.c
+index 76a27b6d45d28..e8a9ce0392957 100644
+--- a/net/wireless/scan.c
++++ b/net/wireless/scan.c
+@@ -1510,7 +1510,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
+ }
+ EXPORT_SYMBOL(cfg80211_get_bss);
+ 
+-static void rb_insert_bss(struct cfg80211_registered_device *rdev,
++static bool rb_insert_bss(struct cfg80211_registered_device *rdev,
+ 			  struct cfg80211_internal_bss *bss)
+ {
+ 	struct rb_node **p = &rdev->bss_tree.rb_node;
+@@ -1526,7 +1526,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
+ 
+ 		if (WARN_ON(!cmp)) {
+ 			/* will sort of leak this BSS */
+-			return;
++			return false;
+ 		}
+ 
+ 		if (cmp < 0)
+@@ -1537,6 +1537,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev,
+ 
+ 	rb_link_node(&bss->rbn, parent, p);
+ 	rb_insert_color(&bss->rbn, &rdev->bss_tree);
++	return true;
+ }
+ 
+ static struct cfg80211_internal_bss *
+@@ -1563,6 +1564,34 @@ rb_find_bss(struct cfg80211_registered_device *rdev,
+ 	return NULL;
+ }
+ 
++static void cfg80211_insert_bss(struct cfg80211_registered_device *rdev,
++				struct cfg80211_internal_bss *bss)
++{
++	lockdep_assert_held(&rdev->bss_lock);
++
++	if (!rb_insert_bss(rdev, bss))
++		return;
++	list_add_tail(&bss->list, &rdev->bss_list);
++	rdev->bss_entries++;
++}
++
++static void cfg80211_rehash_bss(struct cfg80211_registered_device *rdev,
++                                struct cfg80211_internal_bss *bss)
++{
++	lockdep_assert_held(&rdev->bss_lock);
++
++	rb_erase(&bss->rbn, &rdev->bss_tree);
++	if (!rb_insert_bss(rdev, bss)) {
++		list_del(&bss->list);
++		if (!list_empty(&bss->hidden_list))
++			list_del_init(&bss->hidden_list);
++		if (!list_empty(&bss->pub.nontrans_list))
++			list_del_init(&bss->pub.nontrans_list);
++		rdev->bss_entries--;
++	}
++	rdev->bss_generation++;
++}
++
+ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
+ 				   struct cfg80211_internal_bss *new)
+ {
+@@ -1838,9 +1867,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
+ 			bss_ref_get(rdev, pbss);
+ 		}
+ 
+-		list_add_tail(&new->list, &rdev->bss_list);
+-		rdev->bss_entries++;
+-		rb_insert_bss(rdev, new);
++		cfg80211_insert_bss(rdev, new);
+ 		found = new;
+ 	}
+ 
+@@ -2702,10 +2729,7 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
+ 		if (!WARN_ON(!__cfg80211_unlink_bss(rdev, new)))
+ 			rdev->bss_generation++;
+ 	}
+-
+-	rb_erase(&cbss->rbn, &rdev->bss_tree);
+-	rb_insert_bss(rdev, cbss);
+-	rdev->bss_generation++;
++	cfg80211_rehash_bss(rdev, cbss);
+ 
+ 	list_for_each_entry_safe(nontrans_bss, tmp,
+ 				 &cbss->pub.nontrans_list,
+@@ -2713,9 +2737,7 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
+ 		bss = container_of(nontrans_bss,
+ 				   struct cfg80211_internal_bss, pub);
+ 		bss->pub.channel = chan;
+-		rb_erase(&bss->rbn, &rdev->bss_tree);
+-		rb_insert_bss(rdev, bss);
+-		rdev->bss_generation++;
++		cfg80211_rehash_bss(rdev, bss);
+ 	}
+ 
+ done:
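
The scan.c changes make rb_insert_bss() report failure and add two helpers: cfg80211_insert_bss(), which links the BSS into bss_list only after the tree insertion succeeded, and cfg80211_rehash_bss(), which erases and re-inserts under a changed key and unlinks the entry from all lists when the re-insert collides. A toy stand-in for that erase/re-insert/cleanup contract, using a sorted array instead of the real rb-tree (all names hypothetical):

  #include <stdbool.h>
  #include <stdio.h>
  #include <string.h>

  struct index { int key[8]; int n; };

  static bool index_insert(struct index *ix, int k)
  {
          int i = 0;

          while (i < ix->n && ix->key[i] < k)
                  i++;
          if (i < ix->n && ix->key[i] == k)
                  return false;           /* collision: caller cleans up */
          memmove(&ix->key[i + 1], &ix->key[i], (ix->n - i) * sizeof(int));
          ix->key[i] = k;
          ix->n++;
          return true;
  }

  static void index_erase(struct index *ix, int k)
  {
          int i = 0;

          while (i < ix->n && ix->key[i] != k)
                  i++;
          if (i == ix->n)
                  return;
          memmove(&ix->key[i], &ix->key[i + 1], (ix->n - 1 - i) * sizeof(int));
          ix->n--;
  }

  /* Mirror of the rehash contract: erase, re-insert under the new key,
   * and report failure so the caller can fully unlink the entry. */
  static bool rehash(struct index *ix, int old_key, int new_key)
  {
          index_erase(ix, old_key);
          return index_insert(ix, new_key);
  }

  int main(void)
  {
          struct index ix = { { 1, 2, 3 }, 3 };

          printf("rehash 3->2: %s\n", rehash(&ix, 3, 2) ? "ok" : "unlinked");
          return 0;
  }
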
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index 49d97b331abca..06eac22665656 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -1679,6 +1679,10 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
+ 		struct aa_profile *p;
+ 		p = aa_deref_parent(profile);
+ 		dent = prof_dir(p);
++		if (!dent) {
++			error = -ENOENT;
++			goto fail2;
++		}
+ 		/* adding to parent that previously didn't have children */
+ 		dent = aafs_create_dir("profiles", dent);
+ 		if (IS_ERR(dent))
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 8c790563b33ac..92bc6c9d793d6 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -3642,12 +3642,18 @@ static int smack_unix_stream_connect(struct sock *sock,
+ 		}
+ 	}
+ 
+-	/*
+-	 * Cross reference the peer labels for SO_PEERSEC.
+-	 */
+ 	if (rc == 0) {
++		/*
++		 * Cross reference the peer labels for SO_PEERSEC.
++		 */
+ 		nsp->smk_packet = ssp->smk_out;
+ 		ssp->smk_packet = osp->smk_out;
++
++		/*
++		 * new/child/established socket must inherit listening socket labels
++		 */
++		nsp->smk_out = osp->smk_out;
++		nsp->smk_in  = osp->smk_in;
+ 	}
+ 
+ 	return rc;
+@@ -4228,7 +4234,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ 	rcu_read_unlock();
+ 
+ 	if (hskp == NULL)
+-		rc = netlbl_req_setattr(req, &skp->smk_netlabel);
++		rc = netlbl_req_setattr(req, &ssp->smk_out->smk_netlabel);
+ 	else
+ 		netlbl_req_delattr(req);
+ 
+diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
+index aad5c4bf4d344..0ebf4d9078522 100644
+--- a/sound/hda/hdmi_chmap.c
++++ b/sound/hda/hdmi_chmap.c
+@@ -753,6 +753,20 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
+ 	return 0;
+ }
+ 
++/* a simple sanity check for input values to chmap kcontrol */
++static int chmap_value_check(struct hdac_chmap *hchmap,
++			     const struct snd_ctl_elem_value *ucontrol)
++{
++	int i;
++
++	for (i = 0; i < hchmap->channels_max; i++) {
++		if (ucontrol->value.integer.value[i] < 0 ||
++		    ucontrol->value.integer.value[i] > SNDRV_CHMAP_LAST)
++			return -EINVAL;
++	}
++	return 0;
++}
++
+ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ 			      struct snd_ctl_elem_value *ucontrol)
+ {
+@@ -764,6 +778,10 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ 	unsigned char chmap[8], per_pin_chmap[8];
+ 	int i, err, ca, prepared = 0;
+ 
++	err = chmap_value_check(hchmap, ucontrol);
++	if (err < 0)
++		return err;
++
+ 	/* No monitor is connected in dyn_pcm_assign.
+ 	 * It's invalid to setup the chmap
+ 	 */
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 35113fa84a0fd..733dc9953a38b 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -5067,6 +5067,69 @@ void snd_hda_gen_stream_pm(struct hda_codec *codec, hda_nid_t nid, bool on)
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_gen_stream_pm);
+ 
++/* forcibly mute the speaker output without caching; return true if updated */
++static bool force_mute_output_path(struct hda_codec *codec, hda_nid_t nid)
++{
++	if (!nid)
++		return false;
++	if (!nid_has_mute(codec, nid, HDA_OUTPUT))
++		return false; /* no mute, skip */
++	if (snd_hda_codec_amp_read(codec, nid, 0, HDA_OUTPUT, 0) &
++	    snd_hda_codec_amp_read(codec, nid, 1, HDA_OUTPUT, 0) &
++	    HDA_AMP_MUTE)
++		return false; /* both channels already muted, skip */
++
++	/* direct amp update without caching */
++	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
++			    AC_AMP_SET_OUTPUT | AC_AMP_SET_LEFT |
++			    AC_AMP_SET_RIGHT | HDA_AMP_MUTE);
++	return true;
++}
++
++/**
++ * snd_hda_gen_shutup_speakers - Forcibly mute the speaker outputs
++ * @codec: the HDA codec
++ *
++ * Forcibly mute the speaker outputs, to be called at suspend or shutdown.
++ *
++ * The mute state done by this function isn't cached, hence the original state
++ * will be restored at resume.
++ *
++ * Return true if the mute state has been changed.
++ */
++bool snd_hda_gen_shutup_speakers(struct hda_codec *codec)
++{
++	struct hda_gen_spec *spec = codec->spec;
++	const int *paths;
++	const struct nid_path *path;
++	int i, p, num_paths;
++	bool updated = false;
++
++	/* if already powered off, do nothing */
++	if (!snd_hdac_is_power_on(&codec->core))
++		return false;
++
++	if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT) {
++		paths = spec->out_paths;
++		num_paths = spec->autocfg.line_outs;
++	} else {
++		paths = spec->speaker_paths;
++		num_paths = spec->autocfg.speaker_outs;
++	}
++
++	for (i = 0; i < num_paths; i++) {
++		path = snd_hda_get_path_from_idx(codec, paths[i]);
++		if (!path)
++			continue;
++		for (p = 0; p < path->depth; p++)
++			if (force_mute_output_path(codec, path->path[p]))
++				updated = true;
++	}
++
++	return updated;
++}
++EXPORT_SYMBOL_GPL(snd_hda_gen_shutup_speakers);
++
+ /**
+  * snd_hda_gen_parse_auto_config - Parse the given BIOS configuration and
+  * set up the hda_gen_spec
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index 578faa9adcdcd..fc00f8bc0d78d 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -364,5 +364,6 @@ int snd_hda_gen_add_mute_led_cdev(struct hda_codec *codec,
+ int snd_hda_gen_add_micmute_led_cdev(struct hda_codec *codec,
+ 				     int (*callback)(struct led_classdev *,
+ 						     enum led_brightness));
++bool snd_hda_gen_shutup_speakers(struct hda_codec *codec);
+ 
+ #endif /* __SOUND_HDA_GENERIC_H */
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 5b37f5f14bc91..d908a39af9f5e 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -181,6 +181,8 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
+ {
+ 	struct conexant_spec *spec = codec->spec;
+ 
++	snd_hda_gen_shutup_speakers(codec);
++
+ 	/* Turn the problematic codec into D3 to avoid spurious noises
+ 	   from the internal speaker during (and after) reboot */
+ 	cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
+@@ -236,6 +238,7 @@ enum {
+ 	CXT_FIXUP_HEADSET_MIC,
+ 	CXT_FIXUP_HP_MIC_NO_PRESENCE,
+ 	CXT_PINCFG_SWS_JS201D,
++	CXT_PINCFG_TOP_SPEAKER,
+ };
+ 
+ /* for hda_fixup_thinkpad_acpi() */
+@@ -903,6 +906,13 @@ static const struct hda_fixup cxt_fixups[] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = cxt_pincfg_sws_js201d,
+ 	},
++	[CXT_PINCFG_TOP_SPEAKER] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x1d, 0x82170111 },
++			{ }
++		},
++	},
+ };
+ 
+ static const struct snd_pci_quirk cxt5045_fixups[] = {
+@@ -999,6 +1009,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
+ 	SND_PCI_QUIRK(0x1c06, 0x2012, "Lemote A1205", CXT_PINCFG_LEMOTE_A1205),
++	SND_PCI_QUIRK(0x2782, 0x12c3, "Sirius Gen1", CXT_PINCFG_TOP_SPEAKER),
++	SND_PCI_QUIRK(0x2782, 0x12c5, "Sirius Gen2", CXT_PINCFG_TOP_SPEAKER),
+ 	{}
+ };
+ 
+@@ -1018,6 +1030,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
+ 	{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
+ 	{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
+ 	{ .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" },
++	{ .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" },
+ 	{}
+ };
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 04fd52bba0573..c104a33b3e8fa 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6968,6 +6968,7 @@ enum {
+ 	ALC236_FIXUP_HP_GPIO_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
++	ALC236_FIXUP_LENOVO_INV_DMIC,
+ 	ALC298_FIXUP_SAMSUNG_AMP,
+ 	ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+ 	ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+@@ -8361,6 +8362,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc236_fixup_hp_mute_led_micmute_vref,
+ 	},
++	[ALC236_FIXUP_LENOVO_INV_DMIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_inv_dmic,
++		.chained = true,
++		.chain_id = ALC283_FIXUP_INT_MIC,
++	},
+ 	[ALC298_FIXUP_SAMSUNG_AMP] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc298_fixup_samsung_amp,
+@@ -9105,6 +9112,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x87f6, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+ 	SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
++	SND_PCI_QUIRK(0x103c, 0x87fd, "HP Laptop 14-dq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x87fe, "HP Laptop 15s-fq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ 	SND_PCI_QUIRK(0x103c, 0x8805, "HP ProBook 650 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+@@ -9355,6 +9363,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
++	SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+@@ -9596,6 +9605,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
+ 	{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+ 	{.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
++	{.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 754c1f16ee83f..acb46e1f9c0ae 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -4014,6 +4014,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
+ 
+ 	case SND_SOC_DAPM_POST_PMD:
+ 		kfree(substream->runtime);
++		substream->runtime = NULL;
+ 		break;
+ 
+ 	default:
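
The soc-dapm one-liner is the classic NULL-after-free pattern: clearing substream->runtime after kfree() makes a repeated POST_PMD event harmless. Distilled into a userspace miniature:

  #include <stdio.h>
  #include <stdlib.h>

  struct stream { void *runtime; };

  /* Clearing the pointer makes teardown idempotent: freeing NULL is
   * defined to be a no-op. */
  static void teardown(struct stream *s)
  {
          free(s->runtime);
          s->runtime = NULL;
  }

  int main(void)
  {
          struct stream s = { malloc(64) };

          teardown(&s);
          teardown(&s);   /* safe after the fix; a double free before it */
          printf("runtime=%p\n", s.runtime);
          return 0;
  }
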
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 23a5f9a52da0f..aa57f796e9dd3 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -998,6 +998,8 @@ static int soc_tplg_denum_create_values(struct soc_enum *se,
+ 		se->dobj.control.dvalues[i] = le32_to_cpu(ec->values[i]);
+ 	}
+ 
++	se->items = le32_to_cpu(ec->items);
++	se->values = (const unsigned int *)se->dobj.control.dvalues;
+ 	return 0;
+ }
+ 
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 015ed8253f739..33cdcfe106344 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -9005,7 +9005,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
+ struct bpf_map *
+ bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
+ {
+-	if (prev == NULL)
++	if (prev == NULL && obj != NULL)
+ 		return obj->maps;
+ 
+ 	return __bpf_map__iter(prev, obj, 1);
+@@ -9014,7 +9014,7 @@ bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
+ struct bpf_map *
+ bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
+ {
+-	if (next == NULL) {
++	if (next == NULL && obj != NULL) {
+ 		if (!obj->nr_maps)
+ 			return NULL;
+ 		return obj->maps + obj->nr_maps - 1;
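
Both libbpf hunks add the same guard: the forward and reverse map iterators now return NULL for a NULL object instead of dereferencing it, while keeping the convention that a NULL cursor means "start at the first (or last) element". A hypothetical miniature of that iterator contract:

  #include <stddef.h>
  #include <stdio.h>

  struct object { int items[4]; size_t n; };

  static int *item_next(int *prev, struct object *obj)
  {
          if (prev == NULL && obj != NULL)        /* start of iteration */
                  return obj->n ? obj->items : NULL;
          if (obj == NULL || prev == NULL)        /* the added guard */
                  return NULL;
          if ((size_t)(prev - obj->items) + 1 < obj->n)
                  return prev + 1;
          return NULL;
  }

  int main(void)
  {
          struct object o = { { 10, 20, 30 }, 3 };
          int *it;

          for (it = item_next(NULL, &o); it; it = item_next(it, &o))
                  printf("%d\n", *it);
          item_next(NULL, NULL);  /* previously a NULL deref, now NULL */
          return 0;
  }
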
+diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+index 909da9cdda97f..aa4be40f7d49f 100644
+--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
++++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+@@ -29,9 +29,11 @@ static int check_vgem(int fd)
+ 	version.name = name;
+ 
+ 	ret = ioctl(fd, DRM_IOCTL_VERSION, &version);
+-	if (ret)
++	if (ret || version.name_len != 4)
+ 		return 0;
+ 
++	name[4] = '\0';
++
+ 	return !strcmp(name, "vgem");
+ }
+ 
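
The selftest fix only trusts the reported DRM driver name when name_len matches the expected length, and NUL-terminates the fixed buffer before strcmp(), since the buffer is not guaranteed to be terminated. The same check distilled, with canned data in place of the DRM_IOCTL_VERSION call:

  #include <stdio.h>
  #include <string.h>

  static int is_vgem(const char *reported, size_t name_len)
  {
          char name[5];

          if (name_len != 4)
                  return 0;               /* wrong length cannot be "vgem" */
          memcpy(name, reported, 4);
          name[4] = '\0';                 /* terminate the fixed buffer */
          return strcmp(name, "vgem") == 0;
  }

  int main(void)
  {
          printf("%d %d\n", is_vgem("vgem", 4), is_vgem("vgemX", 5));
          return 0;
  }
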

