From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Sun, 17 Jan 2021 16:19:33 +0000 (UTC)
Message-ID: <1610900360.5f74453df8e87c968a5d45bdfd6284eea40a29af.mpagano@gentoo>

commit:     5f74453df8e87c968a5d45bdfd6284eea40a29af
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 17 16:19:20 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 17 16:19:20 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5f74453d

Linux patch 5.4.90

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1089_linux-5.4.90.patch | 1787 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1791 insertions(+)

diff --git a/0000_README b/0000_README
index 3db22cb..5d28def 100644
--- a/0000_README
+++ b/0000_README
@@ -399,6 +399,10 @@ Patch:  1088_linux-5.4.89.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.4.89
 
+Patch:  1089_linux-5.4.90.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.4.90
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1089_linux-5.4.90.patch b/1089_linux-5.4.90.patch
new file mode 100644
index 0000000..bc5ebac
--- /dev/null
+++ b/1089_linux-5.4.90.patch
@@ -0,0 +1,1787 @@
+diff --git a/Makefile b/Makefile
+index 95848875110ef..5c9d680b7ce51 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 89
++SUBLEVEL = 90
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
+index 3acb4192918df..f85a0fd6aca5c 100644
+--- a/arch/arm/mach-omap2/omap_device.c
++++ b/arch/arm/mach-omap2/omap_device.c
+@@ -234,10 +234,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
+ 		break;
+ 	case BUS_NOTIFY_BIND_DRIVER:
+ 		od = to_omap_device(pdev);
+-		if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+-		    pm_runtime_status_suspended(dev)) {
++		if (od) {
+ 			od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+-			pm_runtime_set_active(dev);
++			if (od->_state == OMAP_DEVICE_STATE_ENABLED &&
++			    pm_runtime_status_suspended(dev)) {
++				pm_runtime_set_active(dev);
++			}
+ 		}
+ 		break;
+ 	case BUS_NOTIFY_ADD_DEVICE:
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 6478635ff2142..98a177dd1f89f 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -625,6 +625,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+ {
+ 	u64 pmcr, val;
+ 
++	/* No PMU available, PMCR_EL0 may UNDEF... */
++	if (!kvm_arm_support_pmu_v3())
++		return;
++
+ 	pmcr = read_sysreg(pmcr_el0);
+ 	/*
+ 	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 390edb7638265..bde3e0f85425f 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -869,9 +869,10 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
+  * Xen doesn't set %esp to be precisely what the normal SYSENTER
+  * entry point expects, so fix it up before using the normal path.
+  */
+-ENTRY(xen_sysenter_target)
++SYM_CODE_START(xen_sysenter_target)
+ 	addl	$5*4, %esp			/* remove xen-provided frame */
+ 	jmp	.Lsysenter_past_esp
++SYM_CODE_END(xen_sysenter_target)
+ #endif
+ 
+ /*
+diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
+index e95e95960156b..5b076cb79f5fb 100644
+--- a/arch/x86/kernel/acpi/wakeup_32.S
++++ b/arch/x86/kernel/acpi/wakeup_32.S
+@@ -9,8 +9,7 @@
+ 	.code32
+ 	ALIGN
+ 
+-ENTRY(wakeup_pmode_return)
+-wakeup_pmode_return:
++SYM_CODE_START(wakeup_pmode_return)
+ 	movw	$__KERNEL_DS, %ax
+ 	movw	%ax, %ss
+ 	movw	%ax, %fs
+@@ -39,6 +38,7 @@ wakeup_pmode_return:
+ 	# jump to place where we left off
+ 	movl	saved_eip, %eax
+ 	jmp	*%eax
++SYM_CODE_END(wakeup_pmode_return)
+ 
+ bogus_magic:
+ 	jmp	bogus_magic
+@@ -72,7 +72,7 @@ restore_registers:
+ 	popfl
+ 	ret
+ 
+-ENTRY(do_suspend_lowlevel)
++SYM_CODE_START(do_suspend_lowlevel)
+ 	call	save_processor_state
+ 	call	save_registers
+ 	pushl	$3
+@@ -87,6 +87,7 @@ ret_point:
+ 	call	restore_registers
+ 	call	restore_processor_state
+ 	ret
++SYM_CODE_END(do_suspend_lowlevel)
+ 
+ .data
+ ALIGN
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 830ccc396e26d..28f786289fce4 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -525,85 +525,70 @@ static void rdtgroup_remove(struct rdtgroup *rdtgrp)
+ 	kfree(rdtgrp);
+ }
+ 
+-struct task_move_callback {
+-	struct callback_head	work;
+-	struct rdtgroup		*rdtgrp;
+-};
+-
+-static void move_myself(struct callback_head *head)
++static void _update_task_closid_rmid(void *task)
+ {
+-	struct task_move_callback *callback;
+-	struct rdtgroup *rdtgrp;
+-
+-	callback = container_of(head, struct task_move_callback, work);
+-	rdtgrp = callback->rdtgrp;
+-
+ 	/*
+-	 * If resource group was deleted before this task work callback
+-	 * was invoked, then assign the task to root group and free the
+-	 * resource group.
++	 * If the task is still current on this CPU, update PQR_ASSOC MSR.
++	 * Otherwise, the MSR is updated when the task is scheduled in.
+ 	 */
+-	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+-	    (rdtgrp->flags & RDT_DELETED)) {
+-		current->closid = 0;
+-		current->rmid = 0;
+-		rdtgroup_remove(rdtgrp);
+-	}
+-
+-	preempt_disable();
+-	/* update PQR_ASSOC MSR to make resource group go into effect */
+-	resctrl_sched_in();
+-	preempt_enable();
++	if (task == current)
++		resctrl_sched_in();
++}
+ 
+-	kfree(callback);
++static void update_task_closid_rmid(struct task_struct *t)
++{
++	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
++		smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
++	else
++		_update_task_closid_rmid(t);
+ }
+ 
+ static int __rdtgroup_move_task(struct task_struct *tsk,
+ 				struct rdtgroup *rdtgrp)
+ {
+-	struct task_move_callback *callback;
+-	int ret;
+-
+-	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+-	if (!callback)
+-		return -ENOMEM;
+-	callback->work.func = move_myself;
+-	callback->rdtgrp = rdtgrp;
++	/* If the task is already in rdtgrp, no need to move the task. */
++	if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
++	     tsk->rmid == rdtgrp->mon.rmid) ||
++	    (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
++	     tsk->closid == rdtgrp->mon.parent->closid))
++		return 0;
+ 
+ 	/*
+-	 * Take a refcount, so rdtgrp cannot be freed before the
+-	 * callback has been invoked.
++	 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
++	 * updated by them.
++	 *
++	 * For ctrl_mon groups, move both closid and rmid.
++	 * For monitor groups, can move the tasks only from
++	 * their parent CTRL group.
+ 	 */
+-	atomic_inc(&rdtgrp->waitcount);
+-	ret = task_work_add(tsk, &callback->work, true);
+-	if (ret) {
+-		/*
+-		 * Task is exiting. Drop the refcount and free the callback.
+-		 * No need to check the refcount as the group cannot be
+-		 * deleted before the write function unlocks rdtgroup_mutex.
+-		 */
+-		atomic_dec(&rdtgrp->waitcount);
+-		kfree(callback);
+-		rdt_last_cmd_puts("Task exited\n");
+-	} else {
+-		/*
+-		 * For ctrl_mon groups move both closid and rmid.
+-		 * For monitor groups, can move the tasks only from
+-		 * their parent CTRL group.
+-		 */
+-		if (rdtgrp->type == RDTCTRL_GROUP) {
+-			tsk->closid = rdtgrp->closid;
++
++	if (rdtgrp->type == RDTCTRL_GROUP) {
++		tsk->closid = rdtgrp->closid;
++		tsk->rmid = rdtgrp->mon.rmid;
++	} else if (rdtgrp->type == RDTMON_GROUP) {
++		if (rdtgrp->mon.parent->closid == tsk->closid) {
+ 			tsk->rmid = rdtgrp->mon.rmid;
+-		} else if (rdtgrp->type == RDTMON_GROUP) {
+-			if (rdtgrp->mon.parent->closid == tsk->closid) {
+-				tsk->rmid = rdtgrp->mon.rmid;
+-			} else {
+-				rdt_last_cmd_puts("Can't move task to different control group\n");
+-				ret = -EINVAL;
+-			}
++		} else {
++			rdt_last_cmd_puts("Can't move task to different control group\n");
++			return -EINVAL;
+ 		}
+ 	}
+-	return ret;
++
++	/*
++	 * Ensure the task's closid and rmid are written before determining if
++	 * the task is current that will decide if it will be interrupted.
++	 */
++	barrier();
++
++	/*
++	 * By now, the task's closid and rmid are set. If the task is current
++	 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
++	 * group go into effect. If the task is not current, the MSR will be
++	 * updated when the task is scheduled in.
++	 */
++	update_task_closid_rmid(tsk);
++
++	return 0;
+ }
+ 
+ /**
+diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
+index 073aab525d800..2cc0303522c99 100644
+--- a/arch/x86/kernel/ftrace_32.S
++++ b/arch/x86/kernel/ftrace_32.S
+@@ -89,7 +89,7 @@ WEAK(ftrace_stub)
+ 	ret
+ END(ftrace_caller)
+ 
+-ENTRY(ftrace_regs_caller)
++SYM_CODE_START(ftrace_regs_caller)
+ 	/*
+ 	 * We're here from an mcount/fentry CALL, and the stack frame looks like:
+ 	 *
+@@ -163,6 +163,7 @@ GLOBAL(ftrace_regs_call)
+ 	popl	%eax
+ 
+ 	jmp	.Lftrace_ret
++SYM_CODE_END(ftrace_regs_caller)
+ 
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ENTRY(ftrace_graph_caller)
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 2e6a0676c1f43..11a5d5ade52ce 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+  * can.
+  */
+ __HEAD
+-ENTRY(startup_32)
++SYM_CODE_START(startup_32)
+ 	movl pa(initial_stack),%ecx
+ 	
+ 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
+@@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4
+ #else
+ 	jmp .Ldefault_entry
+ #endif /* CONFIG_PARAVIRT */
++SYM_CODE_END(startup_32)
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+ /*
+diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
+index 6fe383002125f..a19ed3d231853 100644
+--- a/arch/x86/power/hibernate_asm_32.S
++++ b/arch/x86/power/hibernate_asm_32.S
+@@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend)
+ 	ret
+ ENDPROC(swsusp_arch_suspend)
+ 
+-ENTRY(restore_image)
++SYM_CODE_START(restore_image)
+ 	/* prepare to jump to the image kernel */
+ 	movl	restore_jump_address, %ebx
+ 	movl	restore_cr3, %ebp
+@@ -45,9 +45,10 @@ ENTRY(restore_image)
+ 	/* jump to relocated restore code */
+ 	movl	relocated_restore_code, %eax
+ 	jmpl	*%eax
++SYM_CODE_END(restore_image)
+ 
+ /* code below has been relocated to a safe page */
+-ENTRY(core_restore_code)
++SYM_CODE_START(core_restore_code)
+ 	movl	temp_pgt, %eax
+ 	movl	%eax, %cr3
+ 
+@@ -77,6 +78,7 @@ copy_loop:
+ 
+ done:
+ 	jmpl	*%ebx
++SYM_CODE_END(core_restore_code)
+ 
+ 	/* code below belongs to the image kernel */
+ 	.align PAGE_SIZE
+diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
+index 1868b158480d4..3a0ef0d577344 100644
+--- a/arch/x86/realmode/rm/trampoline_32.S
++++ b/arch/x86/realmode/rm/trampoline_32.S
+@@ -29,7 +29,7 @@
+ 	.code16
+ 
+ 	.balign	PAGE_SIZE
+-ENTRY(trampoline_start)
++SYM_CODE_START(trampoline_start)
+ 	wbinvd			# Needed for NUMA-Q should be harmless for others
+ 
+ 	LJMPW_RM(1f)
+@@ -54,11 +54,13 @@ ENTRY(trampoline_start)
+ 	lmsw	%dx			# into protected mode
+ 
+ 	ljmpl	$__BOOT_CS, $pa_startup_32
++SYM_CODE_END(trampoline_start)
+ 
+ 	.section ".text32","ax"
+ 	.code32
+-ENTRY(startup_32)			# note: also used from wakeup_asm.S
++SYM_CODE_START(startup_32)			# note: also used from wakeup_asm.S
+ 	jmp	*%eax
++SYM_CODE_END(startup_32)
+ 
+ 	.bss
+ 	.balign 8
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index cd177772fe4d5..2712e91553063 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -56,7 +56,7 @@
+ 	_ASM_EXTABLE(1b,2b)
+ .endm
+ 
+-ENTRY(xen_iret)
++SYM_CODE_START(xen_iret)
+ 	/* test eflags for special cases */
+ 	testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
+ 	jnz hyper_iret
+@@ -122,6 +122,7 @@ xen_iret_end_crit:
+ hyper_iret:
+ 	/* put this out of line since its very rarely used */
+ 	jmp hypercall_page + __HYPERVISOR_iret * 32
++SYM_CODE_END(xen_iret)
+ 
+ 	.globl xen_iret_start_crit, xen_iret_end_crit
+ 
+@@ -152,7 +153,7 @@ hyper_iret:
+  * The only caveat is that if the outer eax hasn't been restored yet (i.e.
+  * it's still on stack), we need to restore its value here.
+  */
+-ENTRY(xen_iret_crit_fixup)
++SYM_CODE_START(xen_iret_crit_fixup)
+ 	/*
+ 	 * Paranoia: Make sure we're really coming from kernel space.
+ 	 * One could imagine a case where userspace jumps into the
+@@ -179,4 +180,4 @@ ENTRY(xen_iret_crit_fixup)
+ 
+ 2:
+ 	ret
+-END(xen_iret_crit_fixup)
++SYM_CODE_END(xen_iret_crit_fixup)
+diff --git a/block/genhd.c b/block/genhd.c
+index 26b31fcae217f..604f0a2cbc9a0 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -222,14 +222,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
+ 		part = rcu_dereference(ptbl->part[piter->idx]);
+ 		if (!part)
+ 			continue;
++		get_device(part_to_dev(part));
++		piter->part = part;
+ 		if (!part_nr_sects_read(part) &&
+ 		    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
+ 		    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
+-		      piter->idx == 0))
++		      piter->idx == 0)) {
++			put_device(part_to_dev(part));
++			piter->part = NULL;
+ 			continue;
++		}
+ 
+-		get_device(part_to_dev(part));
+-		piter->part = part;
+ 		piter->idx += inc;
+ 		break;
+ 	}
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index f58baff2be0af..398991381e9af 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -583,8 +583,12 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
+ 		devname = dev_name(map->dev);
+ 
+ 	if (name) {
+-		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
++		if (!map->debugfs_name) {
++			map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ 					      devname, name);
++			if (!map->debugfs_name)
++				return;
++		}
+ 		name = map->debugfs_name;
+ 	} else {
+ 		name = devname;
+@@ -592,9 +596,10 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
+ 
+ 	if (!strcmp(name, "dummy")) {
+ 		kfree(map->debugfs_name);
+-
+ 		map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
+ 						dummy_index);
++		if (!map->debugfs_name)
++				return;
+ 		name = map->debugfs_name;
+ 		dummy_index++;
+ 	}
+diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
+index 1bb8ec5753527..0fc27ac14f29c 100644
+--- a/drivers/block/Kconfig
++++ b/drivers/block/Kconfig
+@@ -461,6 +461,7 @@ config BLK_DEV_RBD
+ config BLK_DEV_RSXX
+ 	tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
+ 	depends on PCI
++	select CRC32
+ 	help
+ 	  Device driver for IBM's high speed PCIe SSD
+ 	  storage device: Flash Adapter 900GB Full Height.
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index 2db2f1739e092..1b2ec3be59eb7 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
+ 
+ /* Take a frequency, and issue the fid/vid transition command */
+ static int transition_frequency_fidvid(struct powernow_k8_data *data,
+-		unsigned int index)
++		unsigned int index,
++		struct cpufreq_policy *policy)
+ {
+-	struct cpufreq_policy *policy;
+ 	u32 fid = 0;
+ 	u32 vid = 0;
+ 	int res;
+@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
+ 	freqs.old = find_khz_freq_from_fid(data->currfid);
+ 	freqs.new = find_khz_freq_from_fid(fid);
+ 
+-	policy = cpufreq_cpu_get(smp_processor_id());
+-	cpufreq_cpu_put(policy);
+-
+ 	cpufreq_freq_transition_begin(policy, &freqs);
+ 	res = transition_fid_vid(data, fid, vid);
+ 	cpufreq_freq_transition_end(policy, &freqs, res);
+@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
+ 
+ 	powernow_k8_acpi_pst_values(data, newstate);
+ 
+-	ret = transition_frequency_fidvid(data, newstate);
++	ret = transition_frequency_fidvid(data, newstate, pol);
+ 
+ 	if (ret) {
+ 		pr_err("transition frequency failed\n");
+diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
+index f81a5e35d8fd1..eddc6d1bdb2d1 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -577,7 +577,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
+ 
+ 	while (!skb_queue_empty(&listen_ctx->synq)) {
+ 		struct chtls_sock *csk =
+-			container_of((struct synq *)__skb_dequeue
++			container_of((struct synq *)skb_peek
+ 				(&listen_ctx->synq), struct chtls_sock, synq);
+ 		struct sock *child = csk->sk;
+ 
+@@ -1021,6 +1021,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+ 				    const struct cpl_pass_accept_req *req,
+ 				    struct chtls_dev *cdev)
+ {
++	struct adapter *adap = pci_get_drvdata(cdev->pdev);
+ 	struct inet_sock *newinet;
+ 	const struct iphdr *iph;
+ 	struct tls_context *ctx;
+@@ -1030,9 +1031,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+ 	struct neighbour *n;
+ 	struct tcp_sock *tp;
+ 	struct sock *newsk;
++	bool found = false;
+ 	u16 port_id;
+ 	int rxq_idx;
+-	int step;
++	int step, i;
+ 
+ 	iph = (const struct iphdr *)network_hdr;
+ 	newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
+@@ -1044,7 +1046,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+ 		goto free_sk;
+ 
+ 	n = dst_neigh_lookup(dst, &iph->saddr);
+-	if (!n)
++	if (!n || !n->dev)
+ 		goto free_sk;
+ 
+ 	ndev = n->dev;
+@@ -1053,6 +1055,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+ 	if (is_vlan_dev(ndev))
+ 		ndev = vlan_dev_real_dev(ndev);
+ 
++	for_each_port(adap, i)
++		if (cdev->ports[i] == ndev)
++			found = true;
++
++	if (!found)
++		goto free_dst;
++
+ 	port_id = cxgb4_port_idx(ndev);
+ 
+ 	csk = chtls_sock_create(cdev);
+@@ -1108,6 +1117,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
+ free_csk:
+ 	chtls_sock_release(&csk->kref);
+ free_dst:
++	neigh_release(n);
+ 	dst_release(dst);
+ free_sk:
+ 	inet_csk_prepare_forced_close(newsk);
+@@ -1443,6 +1453,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
+ 			sk_wake_async(sk, 0, POLL_OUT);
+ 
+ 		data = lookup_stid(cdev->tids, stid);
++		if (!data) {
++			/* listening server close */
++			kfree_skb(skb);
++			goto unlock;
++		}
+ 		lsk = ((struct listen_ctx *)data)->lsk;
+ 
+ 		bh_lock_sock(lsk);
+@@ -1828,39 +1843,6 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+ 	kfree_skb(skb);
+ }
+ 
+-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
+-			   struct chtls_dev *cdev, int status, int queue)
+-{
+-	struct cpl_abort_req_rss *req = cplhdr(skb);
+-	struct sk_buff *reply_skb;
+-	struct chtls_sock *csk;
+-
+-	csk = rcu_dereference_sk_user_data(sk);
+-
+-	reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
+-			      GFP_KERNEL);
+-
+-	if (!reply_skb) {
+-		req->status = (queue << 1);
+-		send_defer_abort_rpl(cdev, skb);
+-		return;
+-	}
+-
+-	set_abort_rpl_wr(reply_skb, GET_TID(req), status);
+-	kfree_skb(skb);
+-
+-	set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
+-	if (csk_conn_inline(csk)) {
+-		struct l2t_entry *e = csk->l2t_entry;
+-
+-		if (e && sk->sk_state != TCP_SYN_RECV) {
+-			cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
+-			return;
+-		}
+-	}
+-	cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
+-}
+-
+ /*
+  * Add an skb to the deferred skb queue for processing from process context.
+  */
+@@ -1923,9 +1905,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
+ 	queue = csk->txq_idx;
+ 
+ 	skb->sk	= NULL;
++	chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
++			     CPL_ABORT_NO_RST, queue);
+ 	do_abort_syn_rcv(child, lsk);
+-	send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+-		       CPL_ABORT_NO_RST, queue);
+ }
+ 
+ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
+@@ -1955,8 +1937,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
+ 	if (!sock_owned_by_user(psk)) {
+ 		int queue = csk->txq_idx;
+ 
++		chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
+ 		do_abort_syn_rcv(sk, psk);
+-		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
+ 	} else {
+ 		skb->sk = sk;
+ 		BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
+@@ -1974,9 +1956,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
+ 	int queue = csk->txq_idx;
+ 
+ 	if (is_neg_adv(req->status)) {
+-		if (sk->sk_state == TCP_SYN_RECV)
+-			chtls_set_tcb_tflag(sk, 0, 0);
+-
+ 		kfree_skb(skb);
+ 		return;
+ 	}
+@@ -2002,12 +1981,11 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
+ 
+ 		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
+ 			return;
+-
+-		chtls_release_resources(sk);
+-		chtls_conn_done(sk);
+ 	}
+ 
+ 	chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
++	chtls_release_resources(sk);
++	chtls_conn_done(sk);
+ }
+ 
+ static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
+diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
+index 7f9a86c3c58ff..31577316f80bc 100644
+--- a/drivers/dma/dw-edma/dw-edma-core.c
++++ b/drivers/dma/dw-edma/dw-edma-core.c
+@@ -85,12 +85,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
+ 
+ 	if (desc->chunk) {
+ 		/* Create and add new element into the linked list */
+-		desc->chunks_alloc++;
+-		list_add_tail(&chunk->list, &desc->chunk->list);
+ 		if (!dw_edma_alloc_burst(chunk)) {
+ 			kfree(chunk);
+ 			return NULL;
+ 		}
++		desc->chunks_alloc++;
++		list_add_tail(&chunk->list, &desc->chunk->list);
+ 	} else {
+ 		/* List head */
+ 		chunk->burst = NULL;
+diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
+index 4c58da7421432..04d89eec11e74 100644
+--- a/drivers/dma/mediatek/mtk-hsdma.c
++++ b/drivers/dma/mediatek/mtk-hsdma.c
+@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ err_free:
++	mtk_hsdma_hw_deinit(hsdma);
+ 	of_dma_controller_free(pdev->dev.of_node);
+ err_unregister:
+ 	dma_async_device_unregister(dd);
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index a6abfe702c5a3..1b5f3e9f43d70 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -2431,7 +2431,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+ 		has_dre = false;
+ 
+ 	if (!has_dre)
+-		xdev->common.copy_align = fls(width - 1);
++		xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
+ 
+ 	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
+ 	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
+@@ -2543,7 +2543,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
+ 				    struct device_node *node)
+ {
+-	int ret, i, nr_channels = 1;
++	int ret, i;
++	u32 nr_channels = 1;
+ 
+ 	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
+ 	if ((ret < 0) && xdev->mcdma)
+@@ -2742,7 +2743,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Register the DMA engine with the core */
+-	dma_async_device_register(&xdev->common);
++	err = dma_async_device_register(&xdev->common);
++	if (err) {
++		dev_err(xdev->dev, "failed to register the dma device\n");
++		goto error;
++	}
+ 
+ 	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
+ 					 xdev);
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index cd71e71339446..9e852b4bbf92b 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -1270,6 +1270,37 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom,
+ 					       group);
+ }
+ 
++static void wacom_devm_kfifo_release(struct device *dev, void *res)
++{
++	struct kfifo_rec_ptr_2 *devres = res;
++
++	kfifo_free(devres);
++}
++
++static int wacom_devm_kfifo_alloc(struct wacom *wacom)
++{
++	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
++	struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
++	int error;
++
++	pen_fifo = devres_alloc(wacom_devm_kfifo_release,
++			      sizeof(struct kfifo_rec_ptr_2),
++			      GFP_KERNEL);
++
++	if (!pen_fifo)
++		return -ENOMEM;
++
++	error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
++	if (error) {
++		devres_free(pen_fifo);
++		return error;
++	}
++
++	devres_add(&wacom->hdev->dev, pen_fifo);
++
++	return 0;
++}
++
+ enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
+ {
+ 	struct wacom *wacom = led->wacom;
+@@ -2724,7 +2755,7 @@ static int wacom_probe(struct hid_device *hdev,
+ 	if (features->check_for_hid_type && features->hid_type != hdev->type)
+ 		return -ENODEV;
+ 
+-	error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
++	error = wacom_devm_kfifo_alloc(wacom);
+ 	if (error)
+ 		return error;
+ 
+@@ -2786,8 +2817,6 @@ static void wacom_remove(struct hid_device *hdev)
+ 
+ 	if (wacom->wacom_wac.features.type != REMOTE)
+ 		wacom_release_resources(wacom);
+-
+-	kfifo_free(&wacom_wac->pen_fifo);
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index c40eef4e7a985..2b6a4c1f188f4 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1424,7 +1424,7 @@ static int i801_add_mux(struct i801_priv *priv)
+ 
+ 	/* Register GPIO descriptor lookup table */
+ 	lookup = devm_kzalloc(dev,
+-			      struct_size(lookup, table, mux_config->n_gpios),
++			      struct_size(lookup, table, mux_config->n_gpios + 1),
+ 			      GFP_KERNEL);
+ 	if (!lookup)
+ 		return -ENOMEM;
+diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
+index b432e7580458d..b2dc802864641 100644
+--- a/drivers/i2c/busses/i2c-sprd.c
++++ b/drivers/i2c/busses/i2c-sprd.c
+@@ -72,6 +72,8 @@
+ 
+ /* timeout (ms) for pm runtime autosuspend */
+ #define SPRD_I2C_PM_TIMEOUT	1000
++/* timeout (ms) for transfer message */
++#define I2C_XFER_TIMEOUT	1000
+ 
+ /* SPRD i2c data structure */
+ struct sprd_i2c {
+@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
+ 			       struct i2c_msg *msg, bool is_last_msg)
+ {
+ 	struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
++	unsigned long time_left;
+ 
+ 	i2c_dev->msg = msg;
+ 	i2c_dev->buf = msg->buf;
+@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
+ 
+ 	sprd_i2c_opt_start(i2c_dev);
+ 
+-	wait_for_completion(&i2c_dev->complete);
++	time_left = wait_for_completion_timeout(&i2c_dev->complete,
++				msecs_to_jiffies(I2C_XFER_TIMEOUT));
++	if (!time_left)
++		return -ETIMEDOUT;
+ 
+ 	return i2c_dev->err;
+ }
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+index b0f3da1976e4f..d1f2109012ed5 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+@@ -664,13 +664,29 @@ static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
+ static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
+ {
+ 	struct st_lsm6dsx_hw *hw = private;
+-	int count;
++	int fifo_len = 0, len;
+ 
+-	mutex_lock(&hw->fifo_lock);
+-	count = hw->settings->fifo_ops.read_fifo(hw);
+-	mutex_unlock(&hw->fifo_lock);
++	/*
++	 * If we are using edge IRQs, new samples can arrive while
++	 * processing current interrupt since there are no hw
++	 * guarantees the irq line stays "low" long enough to properly
++	 * detect the new interrupt. In this case the new sample will
++	 * be missed.
++	 * Polling FIFO status register allow us to read new
++	 * samples even if the interrupt arrives while processing
++	 * previous data and the timeslot where the line is "low" is
++	 * too short to be properly detected.
++	 */
++	do {
++		mutex_lock(&hw->fifo_lock);
++		len = hw->settings->fifo_ops.read_fifo(hw);
++		mutex_unlock(&hw->fifo_lock);
++
++		if (len > 0)
++			fifo_len += len;
++	} while (len > 0);
+ 
+-	return count ? IRQ_HANDLED : IRQ_NONE;
++	return fifo_len ? IRQ_HANDLED : IRQ_NONE;
+ }
+ 
+ static int st_lsm6dsx_buffer_preenable(struct iio_dev *iio_dev)
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index f697f3a1d46bc..5dcc81b1df623 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -1400,6 +1400,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
+ 		irq_data = irq_domain_get_irq_data(domain, virq + i);
+ 		irq_cfg = irqd_cfg(irq_data);
+ 		if (!irq_data || !irq_cfg) {
++			if (!i)
++				kfree(data);
+ 			ret = -EINVAL;
+ 			goto out_free_data;
+ 		}
+diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
+index 8f39f9ba5c80e..4c2ce210c1237 100644
+--- a/drivers/lightnvm/Kconfig
++++ b/drivers/lightnvm/Kconfig
+@@ -19,6 +19,7 @@ if NVM
+ 
+ config NVM_PBLK
+ 	tristate "Physical Block Device Open-Channel SSD target"
++	select CRC32
+ 	help
+ 	  Allows an open-channel SSD to be exposed as a block device to the
+ 	  host. The target assumes the device exposes raw flash and must be
+diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
+index 17c166cc8482d..e4d944770ccaf 100644
+--- a/drivers/net/can/Kconfig
++++ b/drivers/net/can/Kconfig
+@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
+ config CAN_KVASER_PCIEFD
+ 	depends on PCI
+ 	tristate "Kvaser PCIe FD cards"
++	select CRC32
+ 	  help
+ 	  This is a driver for the Kvaser PCI Express CAN FD family.
+ 
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index d2bb9a87eff9a..8a842545e3f69 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1868,8 +1868,6 @@ void m_can_class_unregister(struct m_can_classdev *m_can_dev)
+ {
+ 	unregister_candev(m_can_dev->net);
+ 
+-	m_can_clk_stop(m_can_dev);
+-
+ 	free_candev(m_can_dev->net);
+ }
+ EXPORT_SYMBOL_GPL(m_can_class_unregister);
+diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
+index 681bb861de05e..1f8710b35c6d7 100644
+--- a/drivers/net/can/m_can/tcan4x5x.c
++++ b/drivers/net/can/m_can/tcan4x5x.c
+@@ -126,30 +126,6 @@ struct tcan4x5x_priv {
+ 	int reg_offset;
+ };
+ 
+-static struct can_bittiming_const tcan4x5x_bittiming_const = {
+-	.name = DEVICE_NAME,
+-	.tseg1_min = 2,
+-	.tseg1_max = 31,
+-	.tseg2_min = 2,
+-	.tseg2_max = 16,
+-	.sjw_max = 16,
+-	.brp_min = 1,
+-	.brp_max = 32,
+-	.brp_inc = 1,
+-};
+-
+-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
+-	.name = DEVICE_NAME,
+-	.tseg1_min = 1,
+-	.tseg1_max = 32,
+-	.tseg2_min = 1,
+-	.tseg2_max = 16,
+-	.sjw_max = 16,
+-	.brp_min = 1,
+-	.brp_max = 32,
+-	.brp_inc = 1,
+-};
+-
+ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
+ {
+ 	int wake_state = 0;
+@@ -449,8 +425,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
+ 	mcan_class->dev = &spi->dev;
+ 	mcan_class->ops = &tcan4x5x_ops;
+ 	mcan_class->is_peripheral = true;
+-	mcan_class->bit_timing = &tcan4x5x_bittiming_const;
+-	mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
+ 	mcan_class->net->irq = spi->irq;
+ 
+ 	spi_set_drvdata(spi, priv);
+diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
+index 0d9b3fa7bd94e..ee1e67df1e7b4 100644
+--- a/drivers/net/dsa/lantiq_gswip.c
++++ b/drivers/net/dsa/lantiq_gswip.c
+@@ -1419,11 +1419,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
+ 	phylink_set(mask, Pause);
+ 	phylink_set(mask, Asym_Pause);
+ 
+-	/* With the exclusion of MII and Reverse MII, we support Gigabit,
+-	 * including Half duplex
++	/* With the exclusion of MII, Reverse MII and Reduced MII, we
++	 * support Gigabit, including Half duplex
+ 	 */
+ 	if (state->interface != PHY_INTERFACE_MODE_MII &&
+-	    state->interface != PHY_INTERFACE_MODE_REVMII) {
++	    state->interface != PHY_INTERFACE_MODE_REVMII &&
++	    state->interface != PHY_INTERFACE_MODE_RMII) {
+ 		phylink_set(mask, 1000baseT_Full);
+ 		phylink_set(mask, 1000baseT_Half);
+ 	}
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+index f8a87f8ca9833..148e53812d89c 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+@@ -123,7 +123,7 @@ struct hclgevf_mbx_arq_ring {
+ #define hclge_mbx_ring_ptr_move_crq(crq) \
+ 	(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+ #define hclge_mbx_tail_ptr_move_arq(arq) \
+-	(arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
++		(arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
+ #define hclge_mbx_head_ptr_move_arq(arq) \
+-		(arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
++		(arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
+ #endif
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 6c3d13110993f..6887b7fda6e07 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -746,7 +746,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
+ 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ 
+-		if (hdev->hw.mac.phydev) {
++		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
++		    hdev->hw.mac.phydev->drv->set_loopback) {
+ 			count += 1;
+ 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
+ 		}
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 8827ab4b4932e..6988bbf2576f5 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -4545,7 +4545,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
+ 	struct mvpp2 *priv = port->priv;
+ 	struct mvpp2_txq_pcpu *txq_pcpu;
+ 	unsigned int thread;
+-	int queue, err;
++	int queue, err, val;
+ 
+ 	/* Checks for hardware constraints */
+ 	if (port->first_rxq + port->nrxqs >
+@@ -4559,6 +4559,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
+ 	mvpp2_egress_disable(port);
+ 	mvpp2_port_disable(port);
+ 
++	if (mvpp2_is_xlg(port->phy_interface)) {
++		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
++		val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
++		val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
++		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
++	} else {
++		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
++		val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
++		val |= MVPP2_GMAC_FORCE_LINK_DOWN;
++		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
++	}
++
+ 	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
+ 
+ 	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index 6d55e3d0b7ea2..54e9f6dc24ea0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -725,8 +725,10 @@ static int cgx_lmac_init(struct cgx *cgx)
+ 		if (!lmac)
+ 			return -ENOMEM;
+ 		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
+-		if (!lmac->name)
+-			return -ENOMEM;
++		if (!lmac->name) {
++			err = -ENOMEM;
++			goto err_lmac_free;
++		}
+ 		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+ 		lmac->lmac_id = i;
+ 		lmac->cgx = cgx;
+@@ -737,7 +739,7 @@ static int cgx_lmac_init(struct cgx *cgx)
+ 						 CGX_LMAC_FWI + i * 9),
+ 				   cgx_fwi_event_handler, 0, lmac->name, lmac);
+ 		if (err)
+-			return err;
++			goto err_irq;
+ 
+ 		/* Enable interrupt */
+ 		cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
+@@ -748,6 +750,12 @@ static int cgx_lmac_init(struct cgx *cgx)
+ 	}
+ 
+ 	return cgx_lmac_verify_fwi_version(cgx);
++
++err_irq:
++	kfree(lmac->name);
++err_lmac_free:
++	kfree(lmac);
++	return err;
+ }
+ 
+ static int cgx_lmac_exit(struct cgx *cgx)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 8cd529556b214..01089c2283d7f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -976,6 +976,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
+ 	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
+ }
+ 
++static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
++				const unsigned long link_modes, u8 autoneg)
++{
++	/* Extended link-mode has no speed limitations. */
++	if (ext)
++		return 0;
++
++	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
++	    autoneg != AUTONEG_ENABLE) {
++		netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
++			   __func__);
++		return -EINVAL;
++	}
++	return 0;
++}
++
+ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
+ {
+ 	u32 i, ptys_modes = 0;
+@@ -1068,13 +1084,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ 	link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
+ 		mlx5e_port_speed2linkmodes(mdev, speed, !ext);
+ 
+-	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+-	    autoneg != AUTONEG_ENABLE) {
+-		netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
+-			   __func__);
+-		err = -EINVAL;
++	err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
++	if (err)
+ 		goto out;
+-	}
+ 
+ 	link_modes = link_modes & eproto.cap;
+ 	if (!link_modes) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+index 713dc210f710c..c4ac7a9968d16 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+@@ -927,6 +927,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
+ 	in = kvzalloc(inlen, GFP_KERNEL);
+ 	if (!in) {
+ 		kfree(ft->g);
++		ft->g = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -1067,6 +1068,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
+ 	in = kvzalloc(inlen, GFP_KERNEL);
+ 	if (!in) {
+ 		kfree(ft->g);
++		ft->g = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -1346,6 +1348,7 @@ err_destroy_groups:
+ 	ft->g[ft->num_groups] = NULL;
+ 	mlx5e_destroy_groups(ft);
+ 	kvfree(in);
++	kfree(ft->g);
+ 
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+index 0fc7de4aa572f..8e0dddc6383f0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+@@ -116,7 +116,7 @@ free:
+ static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
+ {
+ 	mlx5_core_roce_gid_set(dev, 0, 0, 0,
+-			       NULL, NULL, false, 0, 0);
++			       NULL, NULL, false, 0, 1);
+ }
+ 
+ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
+diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
+index 0937fc2a928ed..23c9394cd5d22 100644
+--- a/drivers/net/ethernet/natsemi/macsonic.c
++++ b/drivers/net/ethernet/natsemi/macsonic.c
+@@ -540,10 +540,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
+ 
+ 	err = register_netdev(dev);
+ 	if (err)
+-		goto out;
++		goto undo_probe;
+ 
+ 	return 0;
+ 
++undo_probe:
++	dma_free_coherent(lp->device,
++			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
++			  lp->descriptors, lp->descriptors_laddr);
+ out:
+ 	free_netdev(dev);
+ 
+@@ -618,12 +622,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
+ 
+ 	err = register_netdev(ndev);
+ 	if (err)
+-		goto out;
++		goto undo_probe;
+ 
+ 	nubus_set_drvdata(board, ndev);
+ 
+ 	return 0;
+ 
++undo_probe:
++	dma_free_coherent(lp->device,
++			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
++			  lp->descriptors, lp->descriptors_laddr);
+ out:
+ 	free_netdev(ndev);
+ 	return err;
+diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
+index e1b886e87a762..44171d7bb434c 100644
+--- a/drivers/net/ethernet/natsemi/xtsonic.c
++++ b/drivers/net/ethernet/natsemi/xtsonic.c
+@@ -265,11 +265,14 @@ int xtsonic_probe(struct platform_device *pdev)
+ 	sonic_msg_init(dev);
+ 
+ 	if ((err = register_netdev(dev)))
+-		goto out1;
++		goto undo_probe1;
+ 
+ 	return 0;
+ 
+-out1:
++undo_probe1:
++	dma_free_coherent(lp->device,
++			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
++			  lp->descriptors, lp->descriptors_laddr);
+ 	release_region(dev->base_addr, SONIC_MEM_SIZE);
+ out:
+ 	free_netdev(dev);
+diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
+index 55a29ec766807..58eac2471d53a 100644
+--- a/drivers/net/ethernet/qlogic/Kconfig
++++ b/drivers/net/ethernet/qlogic/Kconfig
+@@ -78,6 +78,7 @@ config QED
+ 	depends on PCI
+ 	select ZLIB_INFLATE
+ 	select CRC8
++	select CRC32
+ 	select NET_DEVLINK
+ 	---help---
+ 	  This enables the support for ...
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+index e9e0867ec139d..c4c9cbdeb601e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+@@ -64,6 +64,7 @@ struct emac_variant {
+  * @variant:	reference to the current board variant
+  * @regmap:	regmap for using the syscon
+  * @internal_phy_powered: Does the internal PHY is enabled
++ * @use_internal_phy: Is the internal PHY selected for use
+  * @mux_handle:	Internal pointer used by mdio-mux lib
+  */
+ struct sunxi_priv_data {
+@@ -74,6 +75,7 @@ struct sunxi_priv_data {
+ 	const struct emac_variant *variant;
+ 	struct regmap_field *regmap_field;
+ 	bool internal_phy_powered;
++	bool use_internal_phy;
+ 	void *mux_handle;
+ };
+ 
+@@ -523,8 +525,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
+ 	.dma_interrupt = sun8i_dwmac_dma_interrupt,
+ };
+ 
++static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
++
+ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+ {
++	struct net_device *ndev = platform_get_drvdata(pdev);
+ 	struct sunxi_priv_data *gmac = priv;
+ 	int ret;
+ 
+@@ -538,13 +543,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+ 
+ 	ret = clk_prepare_enable(gmac->tx_clk);
+ 	if (ret) {
+-		if (gmac->regulator)
+-			regulator_disable(gmac->regulator);
+ 		dev_err(&pdev->dev, "Could not enable AHB clock\n");
+-		return ret;
++		goto err_disable_regulator;
++	}
++
++	if (gmac->use_internal_phy) {
++		ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
++		if (ret)
++			goto err_disable_clk;
+ 	}
+ 
+ 	return 0;
++
++err_disable_clk:
++	clk_disable_unprepare(gmac->tx_clk);
++err_disable_regulator:
++	if (gmac->regulator)
++		regulator_disable(gmac->regulator);
++
++	return ret;
+ }
+ 
+ static void sun8i_dwmac_core_init(struct mac_device_info *hw,
+@@ -815,7 +832,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
+ 	struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ 	u32 reg, val;
+ 	int ret = 0;
+-	bool need_power_ephy = false;
+ 
+ 	if (current_child ^ desired_child) {
+ 		regmap_field_read(gmac->regmap_field, &reg);
+@@ -823,13 +839,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
+ 		case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
+ 			dev_info(priv->device, "Switch mux to internal PHY");
+ 			val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
+-
+-			need_power_ephy = true;
++			gmac->use_internal_phy = true;
+ 			break;
+ 		case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
+ 			dev_info(priv->device, "Switch mux to external PHY");
+ 			val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
+-			need_power_ephy = false;
++			gmac->use_internal_phy = false;
+ 			break;
+ 		default:
+ 			dev_err(priv->device, "Invalid child ID %x\n",
+@@ -837,7 +852,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
+ 			return -EINVAL;
+ 		}
+ 		regmap_field_write(gmac->regmap_field, val);
+-		if (need_power_ephy) {
++		if (gmac->use_internal_phy) {
+ 			ret = sun8i_dwmac_power_internal_phy(priv);
+ 			if (ret)
+ 				return ret;
+@@ -988,17 +1003,12 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
+ 	struct sunxi_priv_data *gmac = priv;
+ 
+ 	if (gmac->variant->soc_has_internal_phy) {
+-		/* sun8i_dwmac_exit could be called with mdiomux uninit */
+-		if (gmac->mux_handle)
+-			mdio_mux_uninit(gmac->mux_handle);
+ 		if (gmac->internal_phy_powered)
+ 			sun8i_dwmac_unpower_internal_phy(gmac);
+ 	}
+ 
+ 	sun8i_dwmac_unset_syscon(gmac);
+ 
+-	reset_control_put(gmac->rst_ephy);
+-
+ 	clk_disable_unprepare(gmac->tx_clk);
+ 
+ 	if (gmac->regulator)
+@@ -1227,12 +1237,32 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
+ 
+ 	return ret;
+ dwmac_mux:
++	reset_control_put(gmac->rst_ephy);
++	clk_put(gmac->ephy_clk);
+ 	sun8i_dwmac_unset_syscon(gmac);
+ dwmac_exit:
+ 	stmmac_pltfr_remove(pdev);
+ return ret;
+ }
+ 
++static int sun8i_dwmac_remove(struct platform_device *pdev)
++{
++	struct net_device *ndev = platform_get_drvdata(pdev);
++	struct stmmac_priv *priv = netdev_priv(ndev);
++	struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
++
++	if (gmac->variant->soc_has_internal_phy) {
++		mdio_mux_uninit(gmac->mux_handle);
++		sun8i_dwmac_unpower_internal_phy(gmac);
++		reset_control_put(gmac->rst_ephy);
++		clk_put(gmac->ephy_clk);
++	}
++
++	stmmac_pltfr_remove(pdev);
++
++	return 0;
++}
++
+ static const struct of_device_id sun8i_dwmac_match[] = {
+ 	{ .compatible = "allwinner,sun8i-h3-emac",
+ 		.data = &emac_variant_h3 },
+@@ -1252,7 +1282,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
+ 
+ static struct platform_driver sun8i_dwmac_driver = {
+ 	.probe  = sun8i_dwmac_probe,
+-	.remove = stmmac_pltfr_remove,
++	.remove = sun8i_dwmac_remove,
+ 	.driver = {
+ 		.name           = "dwmac-sun8i",
+ 		.pm		= &stmmac_pltfr_pm_ops,
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index d407489cec904..cbe7f35eac982 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1126,7 +1126,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ 	 * accordingly. Otherwise, we should check here.
+ 	 */
+ 	if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+-		delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
++		delayed_ndp_size = ctx->max_ndp_size +
++			max_t(u32,
++			      ctx->tx_ndp_modulus,
++			      ctx->tx_modulus + ctx->tx_remainder) - 1;
+ 	else
+ 		delayed_ndp_size = 0;
+ 
+@@ -1307,7 +1310,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ 	if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
+ 	    skb_out->len > ctx->min_tx_pkt) {
+ 		padding_count = ctx->tx_curr_size - skb_out->len;
+-		skb_put_zero(skb_out, padding_count);
++		if (!WARN_ON(padding_count > ctx->tx_curr_size))
++			skb_put_zero(skb_out, padding_count);
+ 	} else if (skb_out->len < ctx->tx_curr_size &&
+ 		   (skb_out->len % dev->maxpacket) == 0) {
+ 		skb_put_u8(skb_out, 0);	/* force short packet */
+diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
+index 058d77d2e693d..0d6e1829e0ac9 100644
+--- a/drivers/net/wan/Kconfig
++++ b/drivers/net/wan/Kconfig
+@@ -282,6 +282,7 @@ config SLIC_DS26522
+ 	tristate "Slic Maxim ds26522 card support"
+ 	depends on SPI
+ 	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
++	select BITREVERSE
+ 	help
+ 	  This module initializes and configures the slic maxim card
+ 	  in T1 or E1 mode.
+diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
+index 0d1a8dab30ed4..32e1c036f3ac9 100644
+--- a/drivers/net/wireless/ath/wil6210/Kconfig
++++ b/drivers/net/wireless/ath/wil6210/Kconfig
+@@ -2,6 +2,7 @@
+ config WIL6210
+ 	tristate "Wilocity 60g WiFi card wil6210 support"
+ 	select WANT_DEV_COREDUMP
++	select CRC32
+ 	depends on CFG80211
+ 	depends on PCI
+ 	default n
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index f11e4bfbc91be..a47f87b8373df 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
+ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
+ 	.regulator_type = VRM,
+ 	.ops = &rpmh_regulator_vrm_ops,
+-	.voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
++	.voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
+ 	.n_voltages = 5,
+ 	.pmic_mode_map = pmic_mode_map_pmic5_smps,
+ 	.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index a1c23e998f977..8dee16aca421f 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -2114,7 +2114,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
+ 						    struct net_device *dev,
+ 						    netdev_features_t features)
+ {
+-	if (qeth_get_ip_version(skb) != 4)
++	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
+ 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+ 	return qeth_features_check(skb, dev, features);
+ }
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index ed20ad2950885..77ddf23b65d65 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -494,9 +494,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
+ 
+ 	/* align packet size with data registers access */
+ 	if (spi->cur_bpw > 8)
+-		fthlv -= (fthlv % 2); /* multiple of 2 */
++		fthlv += (fthlv % 2) ? 1 : 0;
+ 	else
+-		fthlv -= (fthlv % 4); /* multiple of 4 */
++		fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
+ 
+ 	if (!fthlv)
+ 		fthlv = 1;
+diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
+index 58c7d66060f7e..dd12777b9a788 100644
+--- a/drivers/staging/exfat/exfat_super.c
++++ b/drivers/staging/exfat/exfat_super.c
+@@ -59,7 +59,7 @@ static void exfat_write_super(struct super_block *sb);
+ /* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
+ static void exfat_time_fat2unix(struct timespec64 *ts, struct date_time_t *tp)
+ {
+-	ts->tv_sec = mktime64(tp->Year + 1980, tp->Month + 1, tp->Day,
++	ts->tv_sec = mktime64(tp->Year + 1980, tp->Month, tp->Day,
+ 			      tp->Hour, tp->Minute, tp->Second);
+ 
+ 	ts->tv_nsec = tp->MilliSecond * NSEC_PER_MSEC;
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 3b31e83a92155..bc6ba41686fa3 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -2303,6 +2303,24 @@ out_unlock:
+ 	return ret;
+ }
+ 
++static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu,
++					   struct vfio_info_cap *caps)
++{
++	struct vfio_iommu_type1_info_dma_avail cap_dma_avail;
++	int ret;
++
++	mutex_lock(&iommu->lock);
++	cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL;
++	cap_dma_avail.header.version = 1;
++
++	cap_dma_avail.avail = iommu->dma_avail;
++
++	ret = vfio_info_add_capability(caps, &cap_dma_avail.header,
++				       sizeof(cap_dma_avail));
++	mutex_unlock(&iommu->lock);
++	return ret;
++}
++
+ static long vfio_iommu_type1_ioctl(void *iommu_data,
+ 				   unsigned int cmd, unsigned long arg)
+ {
+@@ -2349,6 +2367,10 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
+ 		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
+ 
+ 		ret = vfio_iommu_iova_build_caps(iommu, &caps);
++
++		if (!ret)
++			ret = vfio_iommu_dma_avail_build_caps(iommu, &caps);
++
+ 		if (ret)
+ 			return ret;
+ 
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index e5e2425875953..130f16cc0b86d 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -520,7 +520,10 @@
+  */
+ #define TEXT_TEXT							\
+ 		ALIGN_FUNCTION();					\
+-		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely)	\
++		*(.text.hot .text.hot.*)				\
++		*(TEXT_MAIN .text.fixup)				\
++		*(.text.unlikely .text.unlikely.*)			\
++		*(.text.unknown .text.unknown.*)			\
+ 		*(.text..refcount)					\
+ 		*(.ref.text)						\
+ 	MEM_KEEP(init.text*)						\
+diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
+index 9e843a147ead0..cabc93118f9c8 100644
+--- a/include/uapi/linux/vfio.h
++++ b/include/uapi/linux/vfio.h
+@@ -748,6 +748,21 @@ struct vfio_iommu_type1_info_cap_iova_range {
+ 	struct	vfio_iova_range iova_ranges[];
+ };
+ 
++/*
++ * The DMA available capability allows to report the current number of
++ * simultaneously outstanding DMA mappings that are allowed.
++ *
++ * The structure below defines version 1 of this capability.
++ *
++ * avail: specifies the current number of outstanding DMA mappings allowed.
++ */
++#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3
++
++struct vfio_iommu_type1_info_dma_avail {
++	struct	vfio_info_cap_header header;
++	__u32	avail;
++};
++
+ #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
+ 
+ /**
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index d4bcfd8f95bf6..3f47abf9ef4a6 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -280,7 +280,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
+ 	return 0;
+ 
+ out_free_newdev:
+-	if (new_dev->reg_state == NETREG_UNINITIALIZED)
++	if (new_dev->reg_state == NETREG_UNINITIALIZED ||
++	    new_dev->reg_state == NETREG_UNREGISTERED)
+ 		free_netdev(new_dev);
+ 	return err;
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a0486dcf5425b..49d923c227a21 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2017,6 +2017,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
+ 		skb->csum = csum_block_sub(skb->csum,
+ 					   skb_checksum(skb, len, delta, 0),
+ 					   len);
++	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
++		int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
++		int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
++
++		if (offset + sizeof(__sum16) > hdlen)
++			return -EINVAL;
+ 	}
+ 	return __pskb_trim(skb, len);
+ }
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 079dcf9f0c56d..7a394479dd56c 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -303,7 +303,7 @@ static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *
+ 	if (skb_is_gso(skb))
+ 		return ip_finish_output_gso(net, sk, skb, mtu);
+ 
+-	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
++	if (skb->len > mtu || IPCB(skb)->frag_max_size)
+ 		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
+ 
+ 	return ip_finish_output2(net, sk, skb);
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index f61c5a0b502a8..ca525cf681a4e 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -765,8 +765,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 		goto tx_error;
+ 	}
+ 
+-	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
+-			    0, 0, false)) {
++	df = tnl_params->frag_off;
++	if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
++		df |= (inner_iph->frag_off & htons(IP_DF));
++
++	if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
+ 		ip_rt_put(rt);
+ 		goto tx_error;
+ 	}
+@@ -794,10 +797,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 			ttl = ip4_dst_hoplimit(&rt->dst);
+ 	}
+ 
+-	df = tnl_params->frag_off;
+-	if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
+-		df |= (inner_iph->frag_off&htons(IP_DF));
+-
+ 	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ 			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+ 	if (max_headroom > dev->needed_headroom)
+diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
+index ea32b113089d3..c2b7d43d92b0e 100644
+--- a/net/ipv4/nexthop.c
++++ b/net/ipv4/nexthop.c
+@@ -1157,8 +1157,10 @@ static struct nexthop *nexthop_create_group(struct net *net,
+ 	return nh;
+ 
+ out_no_nh:
+-	for (; i >= 0; --i)
++	for (i--; i >= 0; --i) {
++		list_del(&nhg->nh_entries[i].nh_list);
+ 		nexthop_put(nhg->nh_entries[i].nh);
++	}
+ 
+ 	kfree(nhg->spare);
+ 	kfree(nhg);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 0646fce31b67a..906ac5e6d96cd 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -973,6 +973,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
+ {
+ 	struct fib6_table *table = rt->fib6_table;
+ 
++	/* Flush all cached dst in exception table */
++	rt6_flush_exceptions(rt);
+ 	fib6_drop_pcpu_from(rt, table);
+ 
+ 	if (rt->nh && !list_empty(&rt->nh_list))
+@@ -1839,9 +1841,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
+ 	net->ipv6.rt6_stats->fib_rt_entries--;
+ 	net->ipv6.rt6_stats->fib_discarded_routes++;
+ 
+-	/* Flush all cached dst in exception table */
+-	rt6_flush_exceptions(rt);
+-
+ 	/* Reset round-robin state, if necessary */
+ 	if (rcu_access_pointer(fn->rr_ptr) == rt)
+ 		fn->rr_ptr = NULL;
+diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
+index bb311ccc6c487..c6787a1daa481 100644
+--- a/tools/bpf/bpftool/net.c
++++ b/tools/bpf/bpftool/net.c
+@@ -9,7 +9,6 @@
+ #include <unistd.h>
+ #include <libbpf.h>
+ #include <net/if.h>
+-#include <linux/if.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/tc_act/tc_bpf.h>
+ #include <sys/socket.h>
+diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
+index 71a62e7e35b1c..3429767cadcdd 100755
+--- a/tools/testing/selftests/net/pmtu.sh
++++ b/tools/testing/selftests/net/pmtu.sh
+@@ -119,7 +119,15 @@
+ # - list_flush_ipv6_exception
+ #	Using the same topology as in pmtu_ipv6, create exceptions, and check
+ #	they are shown when listing exception caches, gone after flushing them
+-
++#
++# - pmtu_ipv4_route_change
++#	Use the same topology as in pmtu_ipv4, but issue a route replacement
++#	command and delete the corresponding device afterward. This tests for
++#	proper cleanup of the PMTU exceptions by the route replacement path.
++#	Device unregistration should complete successfully
++#
++# - pmtu_ipv6_route_change
++#	Same as above but with IPv6
+ 
+ # Kselftest framework requirement - SKIP code is 4.
+ ksft_skip=4
+@@ -161,7 +169,9 @@ tests="
+ 	cleanup_ipv4_exception		ipv4: cleanup of cached exceptions	1
+ 	cleanup_ipv6_exception		ipv6: cleanup of cached exceptions	1
+ 	list_flush_ipv4_exception	ipv4: list and flush cached exceptions	1
+-	list_flush_ipv6_exception	ipv6: list and flush cached exceptions	1"
++	list_flush_ipv6_exception	ipv6: list and flush cached exceptions	1
++	pmtu_ipv4_route_change		ipv4: PMTU exception w/route replace	1
++	pmtu_ipv6_route_change		ipv6: PMTU exception w/route replace	1"
+ 
+ NS_A="ns-A"
+ NS_B="ns-B"
+@@ -1316,6 +1326,63 @@ test_list_flush_ipv6_exception() {
+ 	return ${fail}
+ }
+ 
++test_pmtu_ipvX_route_change() {
++	family=${1}
++
++	setup namespaces routing || return 2
++	trace "${ns_a}"  veth_A-R1    "${ns_r1}" veth_R1-A \
++	      "${ns_r1}" veth_R1-B    "${ns_b}"  veth_B-R1 \
++	      "${ns_a}"  veth_A-R2    "${ns_r2}" veth_R2-A \
++	      "${ns_r2}" veth_R2-B    "${ns_b}"  veth_B-R2
++
++	if [ ${family} -eq 4 ]; then
++		ping=ping
++		dst1="${prefix4}.${b_r1}.1"
++		dst2="${prefix4}.${b_r2}.1"
++		gw="${prefix4}.${a_r1}.2"
++	else
++		ping=${ping6}
++		dst1="${prefix6}:${b_r1}::1"
++		dst2="${prefix6}:${b_r2}::1"
++		gw="${prefix6}:${a_r1}::2"
++	fi
++
++	# Set up initial MTU values
++	mtu "${ns_a}"  veth_A-R1 2000
++	mtu "${ns_r1}" veth_R1-A 2000
++	mtu "${ns_r1}" veth_R1-B 1400
++	mtu "${ns_b}"  veth_B-R1 1400
++
++	mtu "${ns_a}"  veth_A-R2 2000
++	mtu "${ns_r2}" veth_R2-A 2000
++	mtu "${ns_r2}" veth_R2-B 1500
++	mtu "${ns_b}"  veth_B-R2 1500
++
++	# Create route exceptions
++	run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1}
++	run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2}
++
++	# Check that exceptions have been created with the correct PMTU
++	pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
++	check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1
++	pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
++	check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
++
++	# Replace the route from A to R1
++	run_cmd ${ns_a} ip route change default via ${gw}
++
++	# Delete the device in A
++	run_cmd ${ns_a} ip link del "veth_A-R1"
++}
++
++test_pmtu_ipv4_route_change() {
++	test_pmtu_ipvX_route_change 4
++}
++
++test_pmtu_ipv6_route_change() {
++	test_pmtu_ipvX_route_change 6
++}
++
+ usage() {
+ 	echo
+ 	echo "$0 [OPTIONS] [TEST]..."

