public inbox for gentoo-commits@lists.gentoo.org
From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sun, 10 Mar 2019 14:15:16 +0000 (UTC)
Message-ID: <1552227297.1d0ba6c74d9dabecc5ab3378f68b3860a5ac86f6.mpagano@gentoo>

commit:     1d0ba6c74d9dabecc5ab3378f68b3860a5ac86f6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Mar 10 14:14:57 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Mar 10 14:14:57 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1d0ba6c7

proj/linux-patches: Linux patch 4.19.28

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1027_linux-4.19.28.patch | 2598 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2602 insertions(+)

diff --git a/0000_README b/0000_README
index 2190cb3..292278e 100644
--- a/0000_README
+++ b/0000_README
@@ -151,6 +151,10 @@ Patch:  1026_linux-4.19.27.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.19.27
 
+Patch:  1027_linux-4.19.28.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.19.28
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1027_linux-4.19.28.patch b/1027_linux-4.19.28.patch
new file mode 100644
index 0000000..c2e1d84
--- /dev/null
+++ b/1027_linux-4.19.28.patch
@@ -0,0 +1,2598 @@
+diff --git a/Makefile b/Makefile
+index 70ed9a53558a5..c6ac023ba33a5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 27
++SUBLEVEL = 28
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index ba150c755fcce..85b6c60f285d2 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
+ void __init init_IRQ(void)
+ {
+ 	int i;
++	unsigned int order = get_order(IRQ_STACK_SIZE);
+ 
+ 	for (i = 0; i < NR_IRQS; i++)
+ 		irq_set_noprobe(i);
+@@ -62,8 +63,7 @@ void __init init_IRQ(void)
+ 	arch_init_irq();
+ 
+ 	for_each_possible_cpu(i) {
+-		int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
+-		void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
++		void *s = (void *)__get_free_pages(GFP_KERNEL, order);
+ 
+ 		irq_stack[i] = s;
+ 		pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
+diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
+index 9e21573714910..f8debf7aeb4c1 100644
+--- a/arch/x86/boot/compressed/pgtable_64.c
++++ b/arch/x86/boot/compressed/pgtable_64.c
+@@ -1,5 +1,7 @@
++#include <linux/efi.h>
+ #include <asm/e820/types.h>
+ #include <asm/processor.h>
++#include <asm/efi.h>
+ #include "pgtable.h"
+ #include "../string.h"
+ 
+@@ -37,9 +39,10 @@ int cmdline_find_option_bool(const char *option);
+ 
+ static unsigned long find_trampoline_placement(void)
+ {
+-	unsigned long bios_start, ebda_start;
++	unsigned long bios_start = 0, ebda_start = 0;
+ 	unsigned long trampoline_start;
+ 	struct boot_e820_entry *entry;
++	char *signature;
+ 	int i;
+ 
+ 	/*
+@@ -47,8 +50,18 @@ static unsigned long find_trampoline_placement(void)
+ 	 * This code is based on reserve_bios_regions().
+ 	 */
+ 
+-	ebda_start = *(unsigned short *)0x40e << 4;
+-	bios_start = *(unsigned short *)0x413 << 10;
++	/*
++	 * EFI systems may not provide legacy ROM. The memory may not be mapped
++	 * at all.
++	 *
++	 * Only look for values in the legacy ROM for non-EFI system.
++	 */
++	signature = (char *)&boot_params->efi_info.efi_loader_signature;
++	if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
++	    strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
++		ebda_start = *(unsigned short *)0x40e << 4;
++		bios_start = *(unsigned short *)0x413 << 10;
++	}
+ 
+ 	if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
+ 		bios_start = BIOS_START_MAX;
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index eeea634bee0a7..6a25278e00929 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -818,11 +818,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+ {
+ 	set_cpu_cap(c, X86_FEATURE_ZEN);
+-	/*
+-	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
+-	 * all up to and including B1.
+-	 */
+-	if (c->x86_model <= 1 && c->x86_stepping <= 1)
++
++	/* Fix erratum 1076: CPB feature bit not being set in CPUID. */
++	if (!cpu_has(c, X86_FEATURE_CPB))
+ 		set_cpu_cap(c, X86_FEATURE_CPB);
+ }
+ 
+diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
+index 4bb68133a72af..5a0e0bd68b769 100644
+--- a/arch/xtensa/kernel/process.c
++++ b/arch/xtensa/kernel/process.c
+@@ -320,8 +320,8 @@ unsigned long get_wchan(struct task_struct *p)
+ 
+ 		/* Stack layout: sp-4: ra, sp-3: sp' */
+ 
+-		pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
+-		sp = *(unsigned long *)sp - 3;
++		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
++		sp = SPILL_SLOT(sp, 1);
+ 	} while (count++ < 16);
+ 	return 0;
+ }
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index 7f9ea8e4c1b22..1342f8e6025cc 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -544,10 +544,9 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
+ 					    hdev->bus);
+ 
+ 	if (!btrtl_dev->ic_info) {
+-		rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
++		rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
+ 			    lmp_subver, hci_rev, hci_ver);
+-		ret = -EINVAL;
+-		goto err_free;
++		return btrtl_dev;
+ 	}
+ 
+ 	if (btrtl_dev->ic_info->has_rom_version) {
+@@ -602,6 +601,11 @@ int btrtl_download_firmware(struct hci_dev *hdev,
+ 	 * standard btusb. Once that firmware is uploaded, the subver changes
+ 	 * to a different value.
+ 	 */
++	if (!btrtl_dev->ic_info) {
++		rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
++		return 0;
++	}
++
+ 	switch (btrtl_dev->ic_info->lmp_subver) {
+ 	case RTL_ROM_LMP_8723A:
+ 	case RTL_ROM_LMP_3499:
+diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
+index c0a5b1f3a9863..4ccc39e00ced3 100644
+--- a/drivers/char/applicom.c
++++ b/drivers/char/applicom.c
+@@ -32,6 +32,7 @@
+ #include <linux/wait.h>
+ #include <linux/init.h>
+ #include <linux/fs.h>
++#include <linux/nospec.h>
+ 
+ #include <asm/io.h>
+ #include <linux/uaccess.h>
+@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
+ 	TicCard = st_loc.tic_des_from_pc;	/* tic number to send            */
+ 	IndexCard = NumCard - 1;
+ 
+-	if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
++	if (IndexCard >= MAX_BOARD)
++		return -EINVAL;
++	IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
++
++	if (!apbs[IndexCard].RamIO)
+ 		return -EINVAL;
+ 
+ #ifdef DEBUG
+@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	unsigned char IndexCard;
+ 	void __iomem *pmem;
+ 	int ret = 0;
++	static int warncount = 10;
+ 	volatile unsigned char byte_reset_it;
+ 	struct st_ram_io *adgl;
+ 	void __user *argp = (void __user *)arg;
+@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	mutex_lock(&ac_mutex);	
+ 	IndexCard = adgl->num_card-1;
+ 	 
+-	if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
+-		static int warncount = 10;
+-		if (warncount) {
+-			printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
+-			warncount--;
+-		}
+-		kfree(adgl);
+-		mutex_unlock(&ac_mutex);
+-		return -EINVAL;
+-	}
++	if (cmd != 6 && IndexCard >= MAX_BOARD)
++		goto err;
++	IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
++
++	if (cmd != 6 && !apbs[IndexCard].RamIO)
++		goto err;
+ 
+ 	switch (cmd) {
+ 		
+@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	kfree(adgl);
+ 	mutex_unlock(&ac_mutex);
+ 	return 0;
++
++err:
++	if (warncount) {
++		pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
++			(int)IndexCard + 1);
++		warncount--;
++	}
++	kfree(adgl);
++	mutex_unlock(&ac_mutex);
++	return -EINVAL;
++
+ }
+ 
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index b100260b6ed2e..505c9a55d5551 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -545,13 +545,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
+  *                          SYSFS INTERFACE                          *
+  *********************************************************************/
+ static ssize_t show_boost(struct kobject *kobj,
+-				 struct attribute *attr, char *buf)
++			  struct kobj_attribute *attr, char *buf)
+ {
+ 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+ }
+ 
+-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+-				  const char *buf, size_t count)
++static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
++			   const char *buf, size_t count)
+ {
+ 	int ret, enable;
+ 
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index b6a1aadaff9f3..a005711f909ea 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -833,7 +833,7 @@ static void intel_pstate_update_policies(void)
+ /************************** sysfs begin ************************/
+ #define show_one(file_name, object)					\
+ 	static ssize_t show_##file_name					\
+-	(struct kobject *kobj, struct attribute *attr, char *buf)	\
++	(struct kobject *kobj, struct kobj_attribute *attr, char *buf)	\
+ 	{								\
+ 		return sprintf(buf, "%u\n", global.object);		\
+ 	}
+@@ -842,7 +842,7 @@ static ssize_t intel_pstate_show_status(char *buf);
+ static int intel_pstate_update_status(const char *buf, size_t size);
+ 
+ static ssize_t show_status(struct kobject *kobj,
+-			   struct attribute *attr, char *buf)
++			   struct kobj_attribute *attr, char *buf)
+ {
+ 	ssize_t ret;
+ 
+@@ -853,7 +853,7 @@ static ssize_t show_status(struct kobject *kobj,
+ 	return ret;
+ }
+ 
+-static ssize_t store_status(struct kobject *a, struct attribute *b,
++static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+ 			    const char *buf, size_t count)
+ {
+ 	char *p = memchr(buf, '\n', count);
+@@ -867,7 +867,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
+ }
+ 
+ static ssize_t show_turbo_pct(struct kobject *kobj,
+-				struct attribute *attr, char *buf)
++				struct kobj_attribute *attr, char *buf)
+ {
+ 	struct cpudata *cpu;
+ 	int total, no_turbo, turbo_pct;
+@@ -893,7 +893,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
+ }
+ 
+ static ssize_t show_num_pstates(struct kobject *kobj,
+-				struct attribute *attr, char *buf)
++				struct kobj_attribute *attr, char *buf)
+ {
+ 	struct cpudata *cpu;
+ 	int total;
+@@ -914,7 +914,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
+ }
+ 
+ static ssize_t show_no_turbo(struct kobject *kobj,
+-			     struct attribute *attr, char *buf)
++			     struct kobj_attribute *attr, char *buf)
+ {
+ 	ssize_t ret;
+ 
+@@ -936,7 +936,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
+ 	return ret;
+ }
+ 
+-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
++static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
+ 			      const char *buf, size_t count)
+ {
+ 	unsigned int input;
+@@ -983,7 +983,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+ 	return count;
+ }
+ 
+-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
+ 				  const char *buf, size_t count)
+ {
+ 	unsigned int input;
+@@ -1013,7 +1013,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+ 	return count;
+ }
+ 
+-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
++static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
+ 				  const char *buf, size_t count)
+ {
+ 	unsigned int input;
+@@ -1045,12 +1045,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+ }
+ 
+ static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
+-				struct attribute *attr, char *buf)
++				struct kobj_attribute *attr, char *buf)
+ {
+ 	return sprintf(buf, "%u\n", hwp_boost);
+ }
+ 
+-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
++static ssize_t store_hwp_dynamic_boost(struct kobject *a,
++				       struct kobj_attribute *b,
+ 				       const char *buf, size_t count)
+ {
+ 	unsigned int input;
+diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
+index 2c22836d3ffd5..4596fde16dfe6 100644
+--- a/drivers/gnss/sirf.c
++++ b/drivers/gnss/sirf.c
+@@ -310,30 +310,26 @@ static int sirf_probe(struct serdev_device *serdev)
+ 			ret = -ENODEV;
+ 			goto err_put_device;
+ 		}
++
++		ret = regulator_enable(data->vcc);
++		if (ret)
++			goto err_put_device;
++
++		/* Wait for chip to boot into hibernate mode. */
++		msleep(SIRF_BOOT_DELAY);
+ 	}
+ 
+ 	if (data->wakeup) {
+ 		ret = gpiod_to_irq(data->wakeup);
+ 		if (ret < 0)
+-			goto err_put_device;
+-
++			goto err_disable_vcc;
+ 		data->irq = ret;
+ 
+-		ret = devm_request_threaded_irq(dev, data->irq, NULL,
+-				sirf_wakeup_handler,
++		ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
+ 				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ 				"wakeup", data);
+ 		if (ret)
+-			goto err_put_device;
+-	}
+-
+-	if (data->on_off) {
+-		ret = regulator_enable(data->vcc);
+-		if (ret)
+-			goto err_put_device;
+-
+-		/* Wait for chip to boot into hibernate mode */
+-		msleep(SIRF_BOOT_DELAY);
++			goto err_disable_vcc;
+ 	}
+ 
+ 	if (IS_ENABLED(CONFIG_PM)) {
+@@ -342,7 +338,7 @@ static int sirf_probe(struct serdev_device *serdev)
+ 	} else {
+ 		ret = sirf_runtime_resume(dev);
+ 		if (ret < 0)
+-			goto err_disable_vcc;
++			goto err_free_irq;
+ 	}
+ 
+ 	ret = gnss_register_device(gdev);
+@@ -356,6 +352,9 @@ err_disable_rpm:
+ 		pm_runtime_disable(dev);
+ 	else
+ 		sirf_runtime_suspend(dev);
++err_free_irq:
++	if (data->wakeup)
++		free_irq(data->irq, data);
+ err_disable_vcc:
+ 	if (data->on_off)
+ 		regulator_disable(data->vcc);
+@@ -376,6 +375,9 @@ static void sirf_remove(struct serdev_device *serdev)
+ 	else
+ 		sirf_runtime_suspend(&serdev->dev);
+ 
++	if (data->wakeup)
++		free_irq(data->irq, data);
++
+ 	if (data->on_off)
+ 		regulator_disable(data->vcc);
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 9f697a5b8e3df..c078c791f481a 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -884,7 +884,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
+ 	default:
+ 		return U64_MAX;
+ 	}
+-	value = (((u64)high) << 16) | low;
++	value = (((u64)high) << 32) | low;
+ 	return value;
+ }
+ 
+@@ -3070,7 +3070,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
+ 	.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ 	.port_link_state = mv88e6352_port_link_state,
+ 	.port_get_cmode = mv88e6185_port_get_cmode,
+-	.stats_snapshot = mv88e6320_g1_stats_snapshot,
++	.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ 	.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ 	.stats_get_sset_count = mv88e6095_stats_get_sset_count,
+ 	.stats_get_strings = mv88e6095_stats_get_strings,
+@@ -4188,7 +4188,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.name = "Marvell 88E6190",
+ 		.num_databases = 4096,
+ 		.num_ports = 11,	/* 10 + Z80 */
+-		.num_internal_phys = 11,
++		.num_internal_phys = 9,
+ 		.num_gpio = 16,
+ 		.max_vid = 8191,
+ 		.port_base_addr = 0x0,
+@@ -4211,7 +4211,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.name = "Marvell 88E6190X",
+ 		.num_databases = 4096,
+ 		.num_ports = 11,	/* 10 + Z80 */
+-		.num_internal_phys = 11,
++		.num_internal_phys = 9,
+ 		.num_gpio = 16,
+ 		.max_vid = 8191,
+ 		.port_base_addr = 0x0,
+@@ -4234,7 +4234,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.name = "Marvell 88E6191",
+ 		.num_databases = 4096,
+ 		.num_ports = 11,	/* 10 + Z80 */
+-		.num_internal_phys = 11,
++		.num_internal_phys = 9,
+ 		.max_vid = 8191,
+ 		.port_base_addr = 0x0,
+ 		.phy_base_addr = 0x0,
+@@ -4281,7 +4281,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.name = "Marvell 88E6290",
+ 		.num_databases = 4096,
+ 		.num_ports = 11,	/* 10 + Z80 */
+-		.num_internal_phys = 11,
++		.num_internal_phys = 9,
+ 		.num_gpio = 16,
+ 		.max_vid = 8191,
+ 		.port_base_addr = 0x0,
+@@ -4443,7 +4443,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.name = "Marvell 88E6390",
+ 		.num_databases = 4096,
+ 		.num_ports = 11,	/* 10 + Z80 */
+-		.num_internal_phys = 11,
++		.num_internal_phys = 9,
+ 		.num_gpio = 16,
+ 		.max_vid = 8191,
+ 		.port_base_addr = 0x0,
+@@ -4466,7 +4466,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ 		.name = "Marvell 88E6390X",
+ 		.num_databases = 4096,
+ 		.num_ports = 11,	/* 10 + Z80 */
+-		.num_internal_phys = 11,
++		.num_internal_phys = 9,
+ 		.num_gpio = 16,
+ 		.max_vid = 8191,
+ 		.port_base_addr = 0x0,
+@@ -4561,6 +4561,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
+ 	return 0;
+ }
+ 
++static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
++{
++	int i;
++
++	for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
++		chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
++}
++
+ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
+ 							int port)
+ {
+@@ -4597,6 +4605,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
+ 	if (err)
+ 		goto free;
+ 
++	mv88e6xxx_ports_cmode_init(chip);
++
+ 	mutex_lock(&chip->reg_lock);
+ 	err = mv88e6xxx_switch_reset(chip);
+ 	mutex_unlock(&chip->reg_lock);
+diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
+index 92945841c8e88..7fffce734f0a5 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.c
++++ b/drivers/net/dsa/mv88e6xxx/port.c
+@@ -190,7 +190,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
+ 		/* normal duplex detection */
+ 		break;
+ 	default:
+-		return -EINVAL;
++		return -EOPNOTSUPP;
+ 	}
+ 
+ 	err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
+@@ -374,6 +374,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ 		cmode = 0;
+ 	}
+ 
++	/* cmode doesn't change, nothing to do for us */
++	if (cmode == chip->ports[port].cmode)
++		return 0;
++
+ 	lane = mv88e6390x_serdes_get_lane(chip, port);
+ 	if (lane < 0)
+ 		return lane;
+@@ -384,7 +388,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ 			return err;
+ 	}
+ 
+-	err = mv88e6390_serdes_power(chip, port, false);
++	err = mv88e6390x_serdes_power(chip, port, false);
+ 	if (err)
+ 		return err;
+ 
+@@ -400,7 +404,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ 		if (err)
+ 			return err;
+ 
+-		err = mv88e6390_serdes_power(chip, port, true);
++		err = mv88e6390x_serdes_power(chip, port, true);
+ 		if (err)
+ 			return err;
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
+index b31910023bb64..95b59f5eb3931 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.h
++++ b/drivers/net/dsa/mv88e6xxx/port.h
+@@ -52,6 +52,7 @@
+ #define MV88E6185_PORT_STS_CMODE_1000BASE_X	0x0005
+ #define MV88E6185_PORT_STS_CMODE_PHY		0x0006
+ #define MV88E6185_PORT_STS_CMODE_DISABLED	0x0007
++#define MV88E6XXX_PORT_STS_CMODE_INVALID	0xff
+ 
+ /* Offset 0x01: MAC (or PCS or Physical) Control Register */
+ #define MV88E6XXX_PORT_MAC_CTL				0x01
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 034f57500f00e..1fdaf86bbe8fb 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -463,6 +463,12 @@ normal_tx:
+ 	}
+ 
+ 	length >>= 9;
++	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
++		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
++				     skb->len);
++		i = 0;
++		goto tx_dma_error;
++	}
+ 	flags |= bnxt_lhint_arr[length];
+ 	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ 
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index ae2f35039343b..1485f66cf7b0c 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -46,6 +46,7 @@
+ #include <linux/mii.h>
+ #include <linux/of_device.h>
+ #include <linux/of_net.h>
++#include <linux/dmi.h>
+ 
+ #include <asm/irq.h>
+ 
+@@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128;
+ module_param(copybreak, int, 0);
+ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+ 
+-static int disable_msi = 0;
++static int disable_msi = -1;
+ module_param(disable_msi, int, 0);
+ MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+ 
+@@ -4931,6 +4932,24 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
+ 	return buf;
+ }
+ 
++static const struct dmi_system_id msi_blacklist[] = {
++	{
++		.ident = "Dell Inspiron 1545",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
++		},
++	},
++	{
++		.ident = "Gateway P-79",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
++		},
++	},
++	{}
++};
++
+ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+ 	struct net_device *dev, *dev1;
+@@ -5042,6 +5061,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_out_free_pci;
+ 	}
+ 
++	if (disable_msi == -1)
++		disable_msi = !!dmi_check_system(msi_blacklist);
++
+ 	if (!disable_msi && pci_enable_msi(pdev) == 0) {
+ 		err = sky2_test_msi(hw);
+ 		if (err) {
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
+index 42f5bfa33694c..de5a6abda7e3f 100644
+--- a/drivers/net/ethernet/microchip/lan743x_main.c
++++ b/drivers/net/ethernet/microchip/lan743x_main.c
+@@ -1403,7 +1403,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
+ }
+ 
+ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
+-				     unsigned int frame_length)
++				     unsigned int frame_length,
++				     int nr_frags)
+ {
+ 	/* called only from within lan743x_tx_xmit_frame.
+ 	 * assuming tx->ring_lock has already been acquired.
+@@ -1413,6 +1414,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
+ 
+ 	/* wrap up previous descriptor */
+ 	tx->frame_data0 |= TX_DESC_DATA0_EXT_;
++	if (nr_frags <= 0) {
++		tx->frame_data0 |= TX_DESC_DATA0_LS_;
++		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++	}
+ 	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ 	tx_descriptor->data0 = tx->frame_data0;
+ 
+@@ -1517,8 +1522,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
+ 	u32 tx_tail_flags = 0;
+ 
+ 	/* wrap up previous descriptor */
+-	tx->frame_data0 |= TX_DESC_DATA0_LS_;
+-	tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++	if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
++	    TX_DESC_DATA0_DTYPE_DATA_) {
++		tx->frame_data0 |= TX_DESC_DATA0_LS_;
++		tx->frame_data0 |= TX_DESC_DATA0_IOC_;
++	}
+ 
+ 	tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
+ 	buffer_info = &tx->buffer_info[tx->frame_tail];
+@@ -1603,7 +1611,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
+ 	}
+ 
+ 	if (gso)
+-		lan743x_tx_frame_add_lso(tx, frame_length);
++		lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
+ 
+ 	if (nr_frags <= 0)
+ 		goto finish;
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 01711e6e9a394..e1427b56a073a 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -636,15 +636,20 @@ out:
+ static int geneve_open(struct net_device *dev)
+ {
+ 	struct geneve_dev *geneve = netdev_priv(dev);
+-	bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
+ 	bool metadata = geneve->collect_md;
++	bool ipv4, ipv6;
+ 	int ret = 0;
+ 
++	ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
++	ipv4 = !ipv6 || metadata;
+ #if IS_ENABLED(CONFIG_IPV6)
+-	if (ipv6 || metadata)
++	if (ipv6) {
+ 		ret = geneve_sock_add(geneve, true);
++		if (ret < 0 && ret != -EAFNOSUPPORT)
++			ipv4 = false;
++	}
+ #endif
+-	if (!ret && (!ipv6 || metadata))
++	if (ipv4)
+ 		ret = geneve_sock_add(geneve, false);
+ 	if (ret < 0)
+ 		geneve_sock_release(geneve);
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index c9e2a986ccb72..c8320405c8f1d 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -743,6 +743,14 @@ void netvsc_linkstatus_callback(struct net_device *net,
+ 	schedule_delayed_work(&ndev_ctx->dwork, 0);
+ }
+ 
++static void netvsc_comp_ipcsum(struct sk_buff *skb)
++{
++	struct iphdr *iph = (struct iphdr *)skb->data;
++
++	iph->check = 0;
++	iph->check = ip_fast_csum(iph, iph->ihl);
++}
++
+ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+ 					     struct napi_struct *napi,
+ 					     const struct ndis_tcp_ip_checksum_info *csum_info,
+@@ -766,9 +774,17 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+ 	/* skb is already created with CHECKSUM_NONE */
+ 	skb_checksum_none_assert(skb);
+ 
+-	/*
+-	 * In Linux, the IP checksum is always checked.
+-	 * Do L4 checksum offload if enabled and present.
++	/* Incoming packets may have IP header checksum verified by the host.
++	 * They may not have IP header checksum computed after coalescing.
++	 * We compute it here if the flags are set, because on Linux, the IP
++	 * checksum is always checked.
++	 */
++	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
++	    csum_info->receive.ip_checksum_succeeded &&
++	    skb->protocol == htons(ETH_P_IP))
++		netvsc_comp_ipcsum(skb);
++
++	/* Do L4 checksum offload if enabled and present.
+ 	 */
+ 	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ 		if (csum_info->receive.tcp_checksum_succeeded ||
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 3db06b40580d3..05a6ae32ff652 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -339,6 +339,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
+ 	return genphy_config_aneg(phydev);
+ }
+ 
++static int ksz8061_config_init(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
++	if (ret)
++		return ret;
++
++	return kszphy_config_init(phydev);
++}
++
+ static int ksz9021_load_values_from_of(struct phy_device *phydev,
+ 				       const struct device_node *of_node,
+ 				       u16 reg,
+@@ -934,7 +945,7 @@ static struct phy_driver ksphy_driver[] = {
+ 	.phy_id_mask	= MICREL_PHY_ID_MASK,
+ 	.features	= PHY_BASIC_FEATURES,
+ 	.flags		= PHY_HAS_INTERRUPT,
+-	.config_init	= kszphy_config_init,
++	.config_init	= ksz8061_config_init,
+ 	.ack_interrupt	= kszphy_ack_interrupt,
+ 	.config_intr	= kszphy_config_intr,
+ 	.suspend	= genphy_suspend,
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 2787e8b1d668a..f6e70f2dfd12b 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -348,6 +348,10 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
+ 	linkmode_zero(state->lp_advertising);
+ 	state->interface = pl->link_config.interface;
+ 	state->an_enabled = pl->link_config.an_enabled;
++	state->speed = SPEED_UNKNOWN;
++	state->duplex = DUPLEX_UNKNOWN;
++	state->pause = MLO_PAUSE_NONE;
++	state->an_complete = 0;
+ 	state->link = 1;
+ 
+ 	return pl->ops->mac_link_state(ndev, state);
+diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
+index a5ef97010eb34..5541e1c19936c 100644
+--- a/drivers/net/team/team_mode_loadbalance.c
++++ b/drivers/net/team/team_mode_loadbalance.c
+@@ -325,6 +325,20 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
+ 	return 0;
+ }
+ 
++static void lb_bpf_func_free(struct team *team)
++{
++	struct lb_priv *lb_priv = get_lb_priv(team);
++	struct bpf_prog *fp;
++
++	if (!lb_priv->ex->orig_fprog)
++		return;
++
++	__fprog_destroy(lb_priv->ex->orig_fprog);
++	fp = rcu_dereference_protected(lb_priv->fp,
++				       lockdep_is_held(&team->lock));
++	bpf_prog_destroy(fp);
++}
++
+ static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
+ {
+ 	struct lb_priv *lb_priv = get_lb_priv(team);
+@@ -639,6 +653,7 @@ static void lb_exit(struct team *team)
+ 
+ 	team_options_unregister(team, lb_options,
+ 				ARRAY_SIZE(lb_options));
++	lb_bpf_func_free(team);
+ 	cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
+ 	free_percpu(lb_priv->pcpu_stats);
+ 	kfree(lb_priv->ex);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 65844f28db30e..f3293355c7849 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2122,9 +2122,9 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
+ 	}
+ 
+ 	add_wait_queue(&tfile->wq.wait, &wait);
+-	current->state = TASK_INTERRUPTIBLE;
+ 
+ 	while (1) {
++		set_current_state(TASK_INTERRUPTIBLE);
+ 		ptr = ptr_ring_consume(&tfile->tx_ring);
+ 		if (ptr)
+ 			break;
+@@ -2140,7 +2140,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
+ 		schedule();
+ 	}
+ 
+-	current->state = TASK_RUNNING;
++	__set_current_state(TASK_RUNNING);
+ 	remove_wait_queue(&tfile->wq.wait, &wait);
+ 
+ out:
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 735ad838e2ba8..6e381354f658e 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -976,6 +976,13 @@ static const struct usb_device_id products[] = {
+ 					      0xff),
+ 		.driver_info	    = (unsigned long)&qmi_wwan_info_quirk_dtr,
+ 	},
++	{	/* Quectel EG12/EM12 */
++		USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
++					      USB_CLASS_VENDOR_SPEC,
++					      USB_SUBCLASS_VENDOR_SPEC,
++					      0xff),
++		.driver_info	    = (unsigned long)&qmi_wwan_info_quirk_dtr,
++	},
+ 
+ 	/* 3. Combined interface devices matching on interface number */
+ 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
+@@ -1343,17 +1350,20 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
+ 	return false;
+ }
+ 
+-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
++static bool quectel_diag_detected(struct usb_interface *intf)
+ {
+ 	struct usb_device *dev = interface_to_usbdev(intf);
+ 	struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
++	u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
++	u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
+ 
+-	if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
+-	    le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
+-	    intf_desc.bNumEndpoints == 2)
+-		return true;
++	if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
++		return false;
+ 
+-	return false;
++	if (id_product == 0x0306 || id_product == 0x0512)
++		return true;
++	else
++		return false;
+ }
+ 
+ static int qmi_wwan_probe(struct usb_interface *intf,
+@@ -1390,13 +1400,13 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ 		return -ENODEV;
+ 	}
+ 
+-	/* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
++	/* Several Quectel modems supports dynamic interface configuration, so
+ 	 * we need to match on class/subclass/protocol. These values are
+ 	 * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+ 	 * different. Ignore the current interface if the number of endpoints
+ 	 * the number for the diag interface (two).
+ 	 */
+-	if (quectel_ep06_diag_detected(intf))
++	if (quectel_diag_detected(intf))
+ 		return -ENODEV;
+ 
+ 	return usbnet_probe(intf, id);
+diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
+index 0ccb021f1e786..10d580c3dea3f 100644
+--- a/drivers/net/xen-netback/hash.c
++++ b/drivers/net/xen-netback/hash.c
+@@ -454,6 +454,8 @@ void xenvif_init_hash(struct xenvif *vif)
+ 	if (xenvif_hash_cache_size == 0)
+ 		return;
+ 
++	BUG_ON(vif->hash.cache.count);
++
+ 	spin_lock_init(&vif->hash.cache.lock);
+ 	INIT_LIST_HEAD(&vif->hash.cache.list);
+ }
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index f6ae23fc3f6b0..82add0ac4a5f6 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -153,6 +153,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+ {
+ 	struct xenvif *vif = netdev_priv(dev);
+ 	unsigned int size = vif->hash.size;
++	unsigned int num_queues;
++
++	/* If queues are not set up internally - always return 0
++	 * as the packet going to be dropped anyway */
++	num_queues = READ_ONCE(vif->num_queues);
++	if (num_queues < 1)
++		return 0;
+ 
+ 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+ 		return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 3621e05a7494c..d5081ffdc8f03 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
+ 		skb_frag_size_set(&frags[i], len);
+ 	}
+ 
+-	/* Copied all the bits from the frag list -- free it. */
+-	skb_frag_list_init(skb);
+-	xenvif_skb_zerocopy_prepare(queue, nskb);
+-	kfree_skb(nskb);
+-
+ 	/* Release all the original (foreign) frags. */
+ 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ 		skb_frag_unref(skb, f);
+@@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
+ 		xenvif_fill_frags(queue, skb);
+ 
+ 		if (unlikely(skb_has_frag_list(skb))) {
++			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
++			xenvif_skb_zerocopy_prepare(queue, nskb);
+ 			if (xenvif_handle_frag_list(queue, skb)) {
+ 				if (net_ratelimit())
+ 					netdev_err(queue->vif->dev,
+@@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
+ 				kfree_skb(skb);
+ 				continue;
+ 			}
++			/* Copied all the bits from the frag list -- free it. */
++			skb_frag_list_init(skb);
++			kfree_skb(nskb);
+ 		}
+ 
+ 		skb->dev      = queue->vif->dev;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index b5f638286037a..18e4289baf993 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -757,6 +757,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
+ 		set_host_byte(cmd, DID_OK);
+ 		return BLK_STS_TARGET;
+ 	case DID_NEXUS_FAILURE:
++		set_host_byte(cmd, DID_OK);
+ 		return BLK_STS_NEXUS;
+ 	case DID_ALLOC_FAILURE:
+ 		set_host_byte(cmd, DID_OK);
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index a880b5c6c6c32..be815330ed958 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -75,6 +75,9 @@ struct ashmem_range {
+ /* LRU list of unpinned pages, protected by ashmem_mutex */
+ static LIST_HEAD(ashmem_lru_list);
+ 
++static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
++static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
++
+ /*
+  * long lru_count - The count of pages on our LRU list.
+  *
+@@ -168,19 +171,15 @@ static inline void lru_del(struct ashmem_range *range)
+  * @end:	   The ending page (inclusive)
+  *
+  * This function is protected by ashmem_mutex.
+- *
+- * Return: 0 if successful, or -ENOMEM if there is an error
+  */
+-static int range_alloc(struct ashmem_area *asma,
+-		       struct ashmem_range *prev_range, unsigned int purged,
+-		       size_t start, size_t end)
++static void range_alloc(struct ashmem_area *asma,
++			struct ashmem_range *prev_range, unsigned int purged,
++			size_t start, size_t end,
++			struct ashmem_range **new_range)
+ {
+-	struct ashmem_range *range;
+-
+-	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+-	if (!range)
+-		return -ENOMEM;
++	struct ashmem_range *range = *new_range;
+ 
++	*new_range = NULL;
+ 	range->asma = asma;
+ 	range->pgstart = start;
+ 	range->pgend = end;
+@@ -190,8 +189,6 @@ static int range_alloc(struct ashmem_area *asma,
+ 
+ 	if (range_on_lru(range))
+ 		lru_add(range);
+-
+-	return 0;
+ }
+ 
+ /**
+@@ -438,7 +435,6 @@ out:
+ static unsigned long
+ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ {
+-	struct ashmem_range *range, *next;
+ 	unsigned long freed = 0;
+ 
+ 	/* We might recurse into filesystem code, so bail out if necessary */
+@@ -448,21 +444,33 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ 	if (!mutex_trylock(&ashmem_mutex))
+ 		return -1;
+ 
+-	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
++	while (!list_empty(&ashmem_lru_list)) {
++		struct ashmem_range *range =
++			list_first_entry(&ashmem_lru_list, typeof(*range), lru);
+ 		loff_t start = range->pgstart * PAGE_SIZE;
+ 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
++		struct file *f = range->asma->file;
+ 
+-		range->asma->file->f_op->fallocate(range->asma->file,
+-				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+-				start, end - start);
++		get_file(f);
++		atomic_inc(&ashmem_shrink_inflight);
+ 		range->purged = ASHMEM_WAS_PURGED;
+ 		lru_del(range);
+ 
+ 		freed += range_size(range);
++		mutex_unlock(&ashmem_mutex);
++		f->f_op->fallocate(f,
++				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
++				   start, end - start);
++		fput(f);
++		if (atomic_dec_and_test(&ashmem_shrink_inflight))
++			wake_up_all(&ashmem_shrink_wait);
++		if (!mutex_trylock(&ashmem_mutex))
++			goto out;
+ 		if (--sc->nr_to_scan <= 0)
+ 			break;
+ 	}
+ 	mutex_unlock(&ashmem_mutex);
++out:
+ 	return freed;
+ }
+ 
+@@ -582,7 +590,8 @@ static int get_name(struct ashmem_area *asma, void __user *name)
+  *
+  * Caller must hold ashmem_mutex.
+  */
+-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
++static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
++		      struct ashmem_range **new_range)
+ {
+ 	struct ashmem_range *range, *next;
+ 	int ret = ASHMEM_NOT_PURGED;
+@@ -635,7 +644,7 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+ 			 * second half and adjust the first chunk's endpoint.
+ 			 */
+ 			range_alloc(asma, range, range->purged,
+-				    pgend + 1, range->pgend);
++				    pgend + 1, range->pgend, new_range);
+ 			range_shrink(range, range->pgstart, pgstart - 1);
+ 			break;
+ 		}
+@@ -649,7 +658,8 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+  *
+  * Caller must hold ashmem_mutex.
+  */
+-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
++static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
++			struct ashmem_range **new_range)
+ {
+ 	struct ashmem_range *range, *next;
+ 	unsigned int purged = ASHMEM_NOT_PURGED;
+@@ -675,7 +685,8 @@ restart:
+ 		}
+ 	}
+ 
+-	return range_alloc(asma, range, purged, pgstart, pgend);
++	range_alloc(asma, range, purged, pgstart, pgend, new_range);
++	return 0;
+ }
+ 
+ /*
+@@ -708,11 +719,19 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+ 	struct ashmem_pin pin;
+ 	size_t pgstart, pgend;
+ 	int ret = -EINVAL;
++	struct ashmem_range *range = NULL;
+ 
+ 	if (copy_from_user(&pin, p, sizeof(pin)))
+ 		return -EFAULT;
+ 
++	if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
++		range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
++		if (!range)
++			return -ENOMEM;
++	}
++
+ 	mutex_lock(&ashmem_mutex);
++	wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
+ 
+ 	if (!asma->file)
+ 		goto out_unlock;
+@@ -735,10 +754,10 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+ 
+ 	switch (cmd) {
+ 	case ASHMEM_PIN:
+-		ret = ashmem_pin(asma, pgstart, pgend);
++		ret = ashmem_pin(asma, pgstart, pgend, &range);
+ 		break;
+ 	case ASHMEM_UNPIN:
+-		ret = ashmem_unpin(asma, pgstart, pgend);
++		ret = ashmem_unpin(asma, pgstart, pgend, &range);
+ 		break;
+ 	case ASHMEM_GET_PIN_STATUS:
+ 		ret = ashmem_get_pin_status(asma, pgstart, pgend);
+@@ -747,6 +766,8 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+ 
+ out_unlock:
+ 	mutex_unlock(&ashmem_mutex);
++	if (range)
++		kmem_cache_free(ashmem_range_cachep, range);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
+index 701eb9f3b0f13..0a1b1ff1be042 100644
+--- a/drivers/staging/android/ion/ion_system_heap.c
++++ b/drivers/staging/android/ion/ion_system_heap.c
+@@ -247,10 +247,10 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
+ static int ion_system_heap_create_pools(struct ion_page_pool **pools)
+ {
+ 	int i;
+-	gfp_t gfp_flags = low_order_gfp_flags;
+ 
+ 	for (i = 0; i < NUM_ORDERS; i++) {
+ 		struct ion_page_pool *pool;
++		gfp_t gfp_flags = low_order_gfp_flags;
+ 
+ 		if (orders[i] > 4)
+ 			gfp_flags = high_order_gfp_flags;
+diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
+index e521ed9d08870..35bd4d2efe166 100644
+--- a/drivers/staging/comedi/drivers/ni_660x.c
++++ b/drivers/staging/comedi/drivers/ni_660x.c
+@@ -602,6 +602,7 @@ static int ni_660x_set_pfi_routing(struct comedi_device *dev,
+ 	case NI_660X_PFI_OUTPUT_DIO:
+ 		if (chan > 31)
+ 			return -EINVAL;
++		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
+index 1279241449f4b..f44662dd795c4 100644
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -57,15 +57,30 @@ enum z_erofs_vle_work_role {
+ 	Z_EROFS_VLE_WORK_SECONDARY,
+ 	Z_EROFS_VLE_WORK_PRIMARY,
+ 	/*
+-	 * The current work has at least been linked with the following
+-	 * processed chained works, which means if the processing page
+-	 * is the tail partial page of the work, the current work can
+-	 * safely use the whole page, as illustrated below:
+-	 * +--------------+-------------------------------------------+
+-	 * |  tail page   |      head page (of the previous work)     |
+-	 * +--------------+-------------------------------------------+
+-	 *   /\  which belongs to the current work
+-	 * [  (*) this page can be used for the current work itself.  ]
++	 * The current work was the tail of an exist chain, and the previous
++	 * processed chained works are all decided to be hooked up to it.
++	 * A new chain should be created for the remaining unprocessed works,
++	 * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
++	 * the next work cannot reuse the whole page in the following scenario:
++	 *  ________________________________________________________________
++	 * |      tail (partial) page     |       head (partial) page       |
++	 * |  (belongs to the next work)  |  (belongs to the current work)  |
++	 * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
++	 */
++	Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
++	/*
++	 * The current work has been linked with the processed chained works,
++	 * and could be also linked with the potential remaining works, which
++	 * means if the processing page is the tail partial page of the work,
++	 * the current work can safely use the whole page (since the next work
++	 * is under control) for in-place decompression, as illustrated below:
++	 *  ________________________________________________________________
++	 * |  tail (partial) page  |          head (partial) page           |
++	 * | (of the current work) |         (of the previous work)         |
++	 * |  PRIMARY_FOLLOWED or  |                                        |
++	 * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
++	 *
++	 * [  (*) the above page can be used for the current work itself.  ]
+ 	 */
+ 	Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
+ 	Z_EROFS_VLE_WORK_MAX
+@@ -234,10 +249,10 @@ static int z_erofs_vle_work_add_page(
+ 	return ret ? 0 : -EAGAIN;
+ }
+ 
+-static inline bool try_to_claim_workgroup(
+-	struct z_erofs_vle_workgroup *grp,
+-	z_erofs_vle_owned_workgrp_t *owned_head,
+-	bool *hosted)
++static enum z_erofs_vle_work_role
++try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
++		       z_erofs_vle_owned_workgrp_t *owned_head,
++		       bool *hosted)
+ {
+ 	DBG_BUGON(*hosted == true);
+ 
+@@ -251,6 +266,9 @@ retry:
+ 
+ 		*owned_head = grp;
+ 		*hosted = true;
++		/* lucky, I am the followee :) */
++		return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
++
+ 	} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
+ 		/*
+ 		 * type 2, link to the end of a existing open chain,
+@@ -260,12 +278,11 @@ retry:
+ 		if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
+ 			Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
+ 			goto retry;
+-
+ 		*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
+-	} else
+-		return false;	/* :( better luck next time */
++		return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
++	}
+ 
+-	return true;	/* lucky, I am the followee :) */
++	return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
+ }
+ 
+ static struct z_erofs_vle_work *
+@@ -337,12 +354,8 @@ z_erofs_vle_work_lookup(struct super_block *sb,
+ 	*hosted = false;
+ 	if (!primary)
+ 		*role = Z_EROFS_VLE_WORK_SECONDARY;
+-	/* claim the workgroup if possible */
+-	else if (try_to_claim_workgroup(grp, owned_head, hosted))
+-		*role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+-	else
+-		*role = Z_EROFS_VLE_WORK_PRIMARY;
+-
++	else	/* claim the workgroup if possible */
++		*role = try_to_claim_workgroup(grp, owned_head, hosted);
+ 	return work;
+ }
+ 
+@@ -419,6 +432,9 @@ static inline void __update_workgrp_llen(struct z_erofs_vle_workgroup *grp,
+ 	}
+ }
+ 
++#define builder_is_hooked(builder) \
++	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
++
+ #define builder_is_followed(builder) \
+ 	((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
+ 
+@@ -583,7 +599,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
+ 	struct z_erofs_vle_work_builder *const builder = &fe->builder;
+ 	const loff_t offset = page_offset(page);
+ 
+-	bool tight = builder_is_followed(builder);
++	bool tight = builder_is_hooked(builder);
+ 	struct z_erofs_vle_work *work = builder->work;
+ 
+ #ifdef EROFS_FS_HAS_MANAGED_CACHE
+@@ -606,8 +622,12 @@ repeat:
+ 
+ 	/* lucky, within the range of the current map_blocks */
+ 	if (offset + cur >= map->m_la &&
+-		offset + cur < map->m_la + map->m_llen)
++		offset + cur < map->m_la + map->m_llen) {
++		/* didn't get a valid unzip work previously (very rare) */
++		if (!builder->work)
++			goto restart_now;
+ 		goto hitted;
++	}
+ 
+ 	/* go ahead the next map_blocks */
+ 	debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
+@@ -621,6 +641,7 @@ repeat:
+ 	if (unlikely(err))
+ 		goto err_out;
+ 
++restart_now:
+ 	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
+ 		goto hitted;
+ 
+@@ -646,7 +667,7 @@ repeat:
+ 		builder->role = Z_EROFS_VLE_WORK_PRIMARY;
+ #endif
+ 
+-	tight &= builder_is_followed(builder);
++	tight &= builder_is_hooked(builder);
+ 	work = builder->work;
+ hitted:
+ 	cur = end - min_t(unsigned, offset + end - map->m_la, end);
+@@ -661,6 +682,9 @@ hitted:
+ 			(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
+ 				Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
+ 
++	if (cur)
++		tight &= builder_is_followed(builder);
++
+ retry:
+ 	err = z_erofs_vle_work_add_page(builder, page, page_type);
+ 	/* should allocate an additional staging page for pagevec */
+@@ -901,11 +925,10 @@ repeat:
+ 	if (llen > grp->llen)
+ 		llen = grp->llen;
+ 
+-	err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
+-		clusterpages, pages, llen, work->pageofs,
+-		z_erofs_onlinepage_endio);
++	err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
++					    pages, llen, work->pageofs);
+ 	if (err != -ENOTSUPP)
+-		goto out_percpu;
++		goto out;
+ 
+ 	if (sparsemem_pages >= nr_pages)
+ 		goto skip_allocpage;
+@@ -926,8 +949,25 @@ skip_allocpage:
+ 	erofs_vunmap(vout, nr_pages);
+ 
+ out:
++	/* must handle all compressed pages before endding pages */
++	for (i = 0; i < clusterpages; ++i) {
++		page = compressed_pages[i];
++
++#ifdef EROFS_FS_HAS_MANAGED_CACHE
++		if (page->mapping == mngda)
++			continue;
++#endif
++		/* recycle all individual staging pages */
++		(void)z_erofs_gather_if_stagingpage(page_pool, page);
++
++		WRITE_ONCE(compressed_pages[i], NULL);
++	}
++
+ 	for (i = 0; i < nr_pages; ++i) {
+ 		page = pages[i];
++		if (!page)
++			continue;
++
+ 		DBG_BUGON(page->mapping == NULL);
+ 
+ 		/* recycle all individual staging pages */
+@@ -940,20 +980,6 @@ out:
+ 		z_erofs_onlinepage_endio(page);
+ 	}
+ 
+-out_percpu:
+-	for (i = 0; i < clusterpages; ++i) {
+-		page = compressed_pages[i];
+-
+-#ifdef EROFS_FS_HAS_MANAGED_CACHE
+-		if (page->mapping == mngda)
+-			continue;
+-#endif
+-		/* recycle all individual staging pages */
+-		(void)z_erofs_gather_if_stagingpage(page_pool, page);
+-
+-		WRITE_ONCE(compressed_pages[i], NULL);
+-	}
+-
+ 	if (pages == z_pagemap_global)
+ 		mutex_unlock(&z_pagemap_global_lock);
+ 	else if (unlikely(pages != pages_onstack))
+diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
+index 3316bc36965d4..684ff06fc7bf8 100644
+--- a/drivers/staging/erofs/unzip_vle.h
++++ b/drivers/staging/erofs/unzip_vle.h
+@@ -218,8 +218,7 @@ extern int z_erofs_vle_plain_copy(struct page **compressed_pages,
+ 
+ extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+ 	unsigned clusterpages, struct page **pages,
+-	unsigned outlen, unsigned short pageofs,
+-	void (*endio)(struct page *));
++	unsigned int outlen, unsigned short pageofs);
+ 
+ extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
+ 	unsigned clusterpages, void *vaddr, unsigned llen,
+diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
+index 9cb35cd33365a..055420e8af2cf 100644
+--- a/drivers/staging/erofs/unzip_vle_lz4.c
++++ b/drivers/staging/erofs/unzip_vle_lz4.c
+@@ -105,8 +105,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+ 				  unsigned clusterpages,
+ 				  struct page **pages,
+ 				  unsigned outlen,
+-				  unsigned short pageofs,
+-				  void (*endio)(struct page *))
++				  unsigned short pageofs)
+ {
+ 	void *vin, *vout;
+ 	unsigned nr_pages, i, j;
+@@ -128,31 +127,30 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+ 	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
+ 		clusterpages * PAGE_SIZE, outlen);
+ 
+-	if (ret >= 0) {
+-		outlen = ret;
+-		ret = 0;
+-	}
++	if (ret < 0)
++		goto out;
++	ret = 0;
+ 
+ 	for (i = 0; i < nr_pages; ++i) {
+ 		j = min((unsigned)PAGE_SIZE - pageofs, outlen);
+ 
+ 		if (pages[i] != NULL) {
+-			if (ret < 0)
+-				SetPageError(pages[i]);
+-			else if (clusterpages == 1 && pages[i] == compressed_pages[0])
++			if (clusterpages == 1 &&
++			    pages[i] == compressed_pages[0]) {
+ 				memcpy(vin + pageofs, vout + pageofs, j);
+-			else {
++			} else {
+ 				void *dst = kmap_atomic(pages[i]);
+ 
+ 				memcpy(dst + pageofs, vout + pageofs, j);
+ 				kunmap_atomic(dst);
+ 			}
+-			endio(pages[i]);
+ 		}
+ 		vout += PAGE_SIZE;
+ 		outlen -= j;
+ 		pageofs = 0;
+ 	}
++
++out:
+ 	preempt_enable();
+ 
+ 	if (clusterpages == 1)
+diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
+index 3b8d237decbf1..649caae2b6033 100644
+--- a/drivers/staging/wilc1000/linux_wlan.c
++++ b/drivers/staging/wilc1000/linux_wlan.c
+@@ -1090,8 +1090,8 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
+ 		vif->wilc = *wilc;
+ 		vif->ndev = ndev;
+ 		wl->vif[i] = vif;
+-		wl->vif_num = i;
+-		vif->idx = wl->vif_num;
++		wl->vif_num = i + 1;
++		vif->idx = i;
+ 
+ 		ndev->netdev_ops = &wilc_netdev_ops;
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 09bf6b4b741b8..1493d0fdf5ad1 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -187,6 +187,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ 	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+ 		xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index c0777a374a88f..4c66edf533fe9 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ 	{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ 	{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
++	{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
+ 	{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ 	{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
+ 	{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
+@@ -1353,8 +1354,13 @@ static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+ 	if (priv->partnum == CP210X_PARTNUM_CP2105)
+ 		req_type = REQTYPE_INTERFACE_TO_HOST;
+ 
++	result = usb_autopm_get_interface(serial->interface);
++	if (result)
++		return result;
++
+ 	result = cp210x_read_vendor_block(serial, req_type,
+ 					  CP210X_READ_LATCH, &buf, sizeof(buf));
++	usb_autopm_put_interface(serial->interface);
+ 	if (result < 0)
+ 		return result;
+ 
+@@ -1375,6 +1381,10 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+ 
+ 	buf.mask = BIT(gpio);
+ 
++	result = usb_autopm_get_interface(serial->interface);
++	if (result)
++		goto out;
++
+ 	if (priv->partnum == CP210X_PARTNUM_CP2105) {
+ 		result = cp210x_write_vendor_block(serial,
+ 						   REQTYPE_HOST_TO_INTERFACE,
+@@ -1392,6 +1402,8 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+ 					 NULL, 0, USB_CTRL_SET_TIMEOUT);
+ 	}
+ 
++	usb_autopm_put_interface(serial->interface);
++out:
+ 	if (result < 0) {
+ 		dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n",
+ 				result);
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index b5cef322826f1..1d8077e880a01 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+ 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+ 	{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
++	/* EZPrototypes devices */
++	{ USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 975d02666c5a0..b863bedb55a13 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1308,6 +1308,12 @@
+ #define IONICS_VID			0x1c0c
+ #define IONICS_PLUGCOMPUTER_PID		0x0102
+ 
++/*
++ * EZPrototypes (PID reseller)
++ */
++#define EZPROTOTYPES_VID		0x1c40
++#define HJELMSLUND_USB485_ISO_PID	0x0477
++
+ /*
+  * Dresden Elektronik Sensor Terminal Board
+  */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index fb544340888b0..faf833e8f5573 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1148,6 +1148,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+ 	  .driver_info = NCTRL(0) | RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff),	/* Telit ME910 (ECM) */
++	  .driver_info = NCTRL(0) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+diff --git a/fs/aio.c b/fs/aio.c
+index 44551d96eaa4b..45d5ef8dd0a87 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1661,6 +1661,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ 	struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
+ 	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
+ 	__poll_t mask = key_to_poll(key);
++	unsigned long flags;
+ 
+ 	req->woken = true;
+ 
+@@ -1669,10 +1670,15 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ 		if (!(mask & req->events))
+ 			return 0;
+ 
+-		/* try to complete the iocb inline if we can: */
+-		if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
++		/*
++		 * Try to complete the iocb inline if we can. Use
++		 * irqsave/irqrestore because not all filesystems (e.g. fuse)
++		 * call this function with IRQs disabled and because IRQs
++		 * have to be disabled before ctx_lock is obtained.
++		 */
++		if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+ 			list_del(&iocb->ki_list);
+-			spin_unlock(&iocb->ki_ctx->ctx_lock);
++			spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+ 
+ 			list_del_init(&req->wait.entry);
+ 			aio_poll_complete(iocb, mask);
+diff --git a/fs/exec.c b/fs/exec.c
+index 1ebf6e5a521d9..433b1257694ab 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -929,7 +929,7 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
+ 		bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
+ 		if (bytes < 0) {
+ 			ret = bytes;
+-			goto out;
++			goto out_free;
+ 		}
+ 
+ 		if (bytes == 0)
+diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
+index 882a9b9e34bc2..72f59e8321e79 100644
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -254,20 +254,12 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
+ static struct freq_attr _name =			\
+ __ATTR(_name, 0200, NULL, store_##_name)
+ 
+-struct global_attr {
+-	struct attribute attr;
+-	ssize_t (*show)(struct kobject *kobj,
+-			struct attribute *attr, char *buf);
+-	ssize_t (*store)(struct kobject *a, struct attribute *b,
+-			 const char *c, size_t count);
+-};
+-
+ #define define_one_global_ro(_name)		\
+-static struct global_attr _name =		\
++static struct kobj_attribute _name =		\
+ __ATTR(_name, 0444, show_##_name, NULL)
+ 
+ #define define_one_global_rw(_name)		\
+-static struct global_attr _name =		\
++static struct kobj_attribute _name =		\
+ __ATTR(_name, 0644, show_##_name, store_##_name)
+ 
+ 
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index ec9d6bc658559..fabee6db0abb7 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -276,7 +276,7 @@ int  bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+ int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
+ int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
+ 
+-void bt_accept_enqueue(struct sock *parent, struct sock *sk);
++void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
+ void bt_accept_unlink(struct sock *sk);
+ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
+ 
+diff --git a/include/net/icmp.h b/include/net/icmp.h
+index 3ef2743a8eecc..8665bf24e3b7a 100644
+--- a/include/net/icmp.h
++++ b/include/net/icmp.h
+@@ -22,6 +22,7 @@
+ 
+ #include <net/inet_sock.h>
+ #include <net/snmp.h>
++#include <net/ip.h>
+ 
+ struct icmp_err {
+   int		errno;
+@@ -39,7 +40,13 @@ struct net_proto_family;
+ struct sk_buff;
+ struct net;
+ 
+-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
++void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
++		 const struct ip_options *opt);
++static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++{
++	__icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
++}
++
+ int icmp_rcv(struct sk_buff *skb);
+ void icmp_err(struct sk_buff *skb, u32 info);
+ int icmp_init(void);
+diff --git a/include/net/ip.h b/include/net/ip.h
+index e44b1a44f67ad..71d31e4d4391f 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -638,6 +638,8 @@ static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
+ }
+ 
+ void ip_options_fragment(struct sk_buff *skb);
++int __ip_options_compile(struct net *net, struct ip_options *opt,
++			 struct sk_buff *skb, __be32 *info);
+ int ip_options_compile(struct net *net, struct ip_options *opt,
+ 		       struct sk_buff *skb);
+ int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+@@ -687,7 +689,7 @@ extern int sysctl_icmp_msgs_burst;
+ int ip_misc_proc_init(void);
+ #endif
+ 
+-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
++int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
+ 				struct netlink_ext_ack *extack);
+ 
+ #endif	/* _IP_H */
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index a6d00093f35e7..c44da48de7dfd 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -47,7 +47,10 @@ struct qdisc_size_table {
+ struct qdisc_skb_head {
+ 	struct sk_buff	*head;
+ 	struct sk_buff	*tail;
+-	__u32		qlen;
++	union {
++		u32		qlen;
++		atomic_t	atomic_qlen;
++	};
+ 	spinlock_t	lock;
+ };
+ 
+@@ -384,27 +387,19 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+ 	BUILD_BUG_ON(sizeof(qcb->data) < sz);
+ }
+ 
+-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+-{
+-	return this_cpu_ptr(q->cpu_qstats)->qlen;
+-}
+-
+ static inline int qdisc_qlen(const struct Qdisc *q)
+ {
+ 	return q->q.qlen;
+ }
+ 
+-static inline int qdisc_qlen_sum(const struct Qdisc *q)
++static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
+ {
+-	__u32 qlen = q->qstats.qlen;
+-	int i;
++	u32 qlen = q->qstats.qlen;
+ 
+-	if (q->flags & TCQ_F_NOLOCK) {
+-		for_each_possible_cpu(i)
+-			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+-	} else {
++	if (q->flags & TCQ_F_NOLOCK)
++		qlen += atomic_read(&q->q.atomic_qlen);
++	else
+ 		qlen += q->q.qlen;
+-	}
+ 
+ 	return qlen;
+ }
+@@ -776,14 +771,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
+ 	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+ }
+ 
+-static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
++static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
+ {
+-	this_cpu_inc(sch->cpu_qstats->qlen);
++	atomic_inc(&sch->q.atomic_qlen);
+ }
+ 
+-static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
++static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
+ {
+-	this_cpu_dec(sch->cpu_qstats->qlen);
++	atomic_dec(&sch->q.atomic_qlen);
+ }
+ 
+ static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 4d81be2d07390..bcb42aaf1b3ae 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -6035,7 +6035,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
+ 			u32 off_reg;
+ 
+ 			aux = &env->insn_aux_data[i + delta];
+-			if (!aux->alu_state)
++			if (!aux->alu_state ||
++			    aux->alu_state == BPF_ALU_NON_POINTER)
+ 				continue;
+ 
+ 			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 5574e862de8d5..5a1c64a26e819 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1301,7 +1301,7 @@ static int parse_pred(const char *str, void *data,
+ 		/* go past the last quote */
+ 		i++;
+ 
+-	} else if (isdigit(str[i])) {
++	} else if (isdigit(str[i]) || str[i] == '-') {
+ 
+ 		/* Make sure the field is not a string */
+ 		if (is_string_field(field)) {
+@@ -1314,6 +1314,9 @@ static int parse_pred(const char *str, void *data,
+ 			goto err_free;
+ 		}
+ 
++		if (str[i] == '-')
++			i++;
++
+ 		/* We allow 0xDEADBEEF */
+ 		while (isalnum(str[i]))
+ 			i++;
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index deacc52d7ff18..8d12198eaa949 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -154,15 +154,25 @@ void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
+ }
+ EXPORT_SYMBOL(bt_sock_unlink);
+ 
+-void bt_accept_enqueue(struct sock *parent, struct sock *sk)
++void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
+ {
+ 	BT_DBG("parent %p, sk %p", parent, sk);
+ 
+ 	sock_hold(sk);
+-	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
++
++	if (bh)
++		bh_lock_sock_nested(sk);
++	else
++		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
++
+ 	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
+ 	bt_sk(sk)->parent = parent;
+-	release_sock(sk);
++
++	if (bh)
++		bh_unlock_sock(sk);
++	else
++		release_sock(sk);
++
+ 	parent->sk_ack_backlog++;
+ }
+ EXPORT_SYMBOL(bt_accept_enqueue);
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 686bdc6b35b03..a3a2cd55e23a9 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1252,7 +1252,7 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+ 
+ 	l2cap_sock_init(sk, parent);
+ 
+-	bt_accept_enqueue(parent, sk);
++	bt_accept_enqueue(parent, sk, false);
+ 
+ 	release_sock(parent);
+ 
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index d606e92122916..c044ff2f73e6c 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -988,7 +988,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
+ 	rfcomm_pi(sk)->channel = channel;
+ 
+ 	sk->sk_state = BT_CONFIG;
+-	bt_accept_enqueue(parent, sk);
++	bt_accept_enqueue(parent, sk, true);
+ 
+ 	/* Accept connection and return socket DLC */
+ 	*d = rfcomm_pi(sk)->dlc;
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 8f0f9279eac9f..a4ca55df73908 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -193,7 +193,7 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
+ 	conn->sk = sk;
+ 
+ 	if (parent)
+-		bt_accept_enqueue(parent, sk);
++		bt_accept_enqueue(parent, sk, true);
+ }
+ 
+ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
+diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
+index 188d693cb251a..e2fd8baec65f3 100644
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -256,7 +256,6 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
+ 	for_each_possible_cpu(i) {
+ 		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
+ 
+-		qstats->qlen = 0;
+ 		qstats->backlog += qcpu->backlog;
+ 		qstats->drops += qcpu->drops;
+ 		qstats->requeues += qcpu->requeues;
+@@ -272,7 +271,6 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+ 	if (cpu) {
+ 		__gnet_stats_copy_queue_cpu(qstats, cpu);
+ 	} else {
+-		qstats->qlen = q->qlen;
+ 		qstats->backlog = q->backlog;
+ 		qstats->drops = q->drops;
+ 		qstats->requeues = q->requeues;
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index bd67c4d0fcfdf..2aabb7eb08541 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1547,6 +1547,9 @@ static int register_queue_kobjects(struct net_device *dev)
+ error:
+ 	netdev_queue_update_kobjects(dev, txq, 0);
+ 	net_rx_queue_update_kobjects(dev, rxq, 0);
++#ifdef CONFIG_SYSFS
++	kset_unregister(dev->queues_kset);
++#endif
+ 	return error;
+ }
+ 
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 777fa3b7fb13d..f0165c5f376b3 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -667,7 +667,8 @@ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
+ 	case CIPSO_V4_MAP_PASS:
+ 		return 0;
+ 	case CIPSO_V4_MAP_TRANS:
+-		if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
++		if ((level < doi_def->map.std->lvl.cipso_size) &&
++		    (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
+ 			return 0;
+ 		break;
+ 	}
+@@ -1735,13 +1736,26 @@ validate_return:
+  */
+ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
+ {
++	unsigned char optbuf[sizeof(struct ip_options) + 40];
++	struct ip_options *opt = (struct ip_options *)optbuf;
++
+ 	if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
+ 		return;
+ 
++	/*
++	 * We might be called above the IP layer,
++	 * so we can not use icmp_send and IPCB here.
++	 */
++
++	memset(opt, 0, sizeof(struct ip_options));
++	opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
++	if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
++		return;
++
+ 	if (gateway)
+-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
++		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
+ 	else
+-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
++		__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
+ }
+ 
+ /**
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 958e185a8e8d1..dae743b649c1d 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -700,6 +700,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+ 		case RTA_GATEWAY:
+ 			cfg->fc_gw = nla_get_be32(attr);
+ 			break;
++		case RTA_VIA:
++			NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
++			err = -EINVAL;
++			goto errout;
+ 		case RTA_PRIORITY:
+ 			cfg->fc_priority = nla_get_u32(attr);
+ 			break;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 695979b7ef6d0..ad75c468ecfb2 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -570,7 +570,8 @@ relookup_failed:
+  *			MUST reply to only the first fragment.
+  */
+ 
+-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
++		 const struct ip_options *opt)
+ {
+ 	struct iphdr *iph;
+ 	int room;
+@@ -691,7 +692,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+ 					  iph->tos;
+ 	mark = IP4_REPLY_MARK(net, skb_in->mark);
+ 
+-	if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
++	if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
+ 		goto out_unlock;
+ 
+ 
+@@ -742,7 +743,7 @@ out_bh_enable:
+ 	local_bh_enable();
+ out:;
+ }
+-EXPORT_SYMBOL(icmp_send);
++EXPORT_SYMBOL(__icmp_send);
+ 
+ 
+ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index 6f977b0fef545..bd8ef4f87c795 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -308,11 +308,10 @@ drop:
+ }
+ 
+ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
+-			      struct sk_buff *skb)
++			      struct sk_buff *skb, struct net_device *dev)
+ {
+ 	const struct iphdr *iph = ip_hdr(skb);
+ 	int (*edemux)(struct sk_buff *skb);
+-	struct net_device *dev = skb->dev;
+ 	struct rtable *rt;
+ 	int err;
+ 
+@@ -401,6 +400,7 @@ drop_error:
+ 
+ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
++	struct net_device *dev = skb->dev;
+ 	int ret;
+ 
+ 	/* if ingress device is enslaved to an L3 master device pass the
+@@ -410,7 +410,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ 	if (!skb)
+ 		return NET_RX_SUCCESS;
+ 
+-	ret = ip_rcv_finish_core(net, sk, skb);
++	ret = ip_rcv_finish_core(net, sk, skb, dev);
+ 	if (ret != NET_RX_DROP)
+ 		ret = dst_input(skb);
+ 	return ret;
+@@ -550,6 +550,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
+ 
+ 	INIT_LIST_HEAD(&sublist);
+ 	list_for_each_entry_safe(skb, next, head, list) {
++		struct net_device *dev = skb->dev;
+ 		struct dst_entry *dst;
+ 
+ 		skb_list_del_init(skb);
+@@ -559,7 +560,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
+ 		skb = l3mdev_ip_rcv(skb);
+ 		if (!skb)
+ 			continue;
+-		if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
++		if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
+ 			continue;
+ 
+ 		dst = skb_dst(skb);
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index ed194d46c00e3..32a35043c9f59 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -251,8 +251,9 @@ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
+  * If opt == NULL, then skb->data should point to IP header.
+  */
+ 
+-int ip_options_compile(struct net *net,
+-		       struct ip_options *opt, struct sk_buff *skb)
++int __ip_options_compile(struct net *net,
++			 struct ip_options *opt, struct sk_buff *skb,
++			 __be32 *info)
+ {
+ 	__be32 spec_dst = htonl(INADDR_ANY);
+ 	unsigned char *pp_ptr = NULL;
+@@ -468,11 +469,22 @@ eol:
+ 		return 0;
+ 
+ error:
+-	if (skb) {
+-		icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
+-	}
++	if (info)
++		*info = htonl((pp_ptr-iph)<<24);
+ 	return -EINVAL;
+ }
++
++int ip_options_compile(struct net *net,
++		       struct ip_options *opt, struct sk_buff *skb)
++{
++	int ret;
++	__be32 info;
++
++	ret = __ip_options_compile(net, opt, skb, &info);
++	if (ret != 0 && skb)
++		icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
++	return ret;
++}
+ EXPORT_SYMBOL(ip_options_compile);
+ 
+ /*
+diff --git a/net/ipv4/netlink.c b/net/ipv4/netlink.c
+index f86bb4f066095..d8e3a1fb8e826 100644
+--- a/net/ipv4/netlink.c
++++ b/net/ipv4/netlink.c
+@@ -3,9 +3,10 @@
+ #include <linux/types.h>
+ #include <net/net_namespace.h>
+ #include <net/netlink.h>
++#include <linux/in6.h>
+ #include <net/ip.h>
+ 
+-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
++int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
+ 				struct netlink_ext_ack *extack)
+ {
+ 	*ip_proto = nla_get_u8(attr);
+@@ -13,11 +14,19 @@ int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+ 	switch (*ip_proto) {
+ 	case IPPROTO_TCP:
+ 	case IPPROTO_UDP:
++		return 0;
+ 	case IPPROTO_ICMP:
++		if (family != AF_INET)
++			break;
++		return 0;
++#if IS_ENABLED(CONFIG_IPV6)
++	case IPPROTO_ICMPV6:
++		if (family != AF_INET6)
++			break;
+ 		return 0;
+-	default:
+-		NL_SET_ERR_MSG(extack, "Unsupported ip proto");
+-		return -EOPNOTSUPP;
++#endif
+ 	}
++	NL_SET_ERR_MSG(extack, "Unsupported ip proto");
++	return -EOPNOTSUPP;
+ }
+ EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 436b46c0e687f..ca87bb6784e5d 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2814,7 +2814,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 
+ 	if (tb[RTA_IP_PROTO]) {
+ 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
+-						  &ip_proto, extack);
++						  &ip_proto, AF_INET, extack);
+ 		if (err)
+ 			return err;
+ 	}
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 10aafea3af0f1..35e7092eceb37 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -1954,10 +1954,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+ 
+ static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
+-	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-			IPSTATS_MIB_OUTFORWDATAGRAMS);
+-	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+-			IPSTATS_MIB_OUTOCTETS, skb->len);
++	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
++		      IPSTATS_MIB_OUTFORWDATAGRAMS);
++	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
++		      IPSTATS_MIB_OUTOCTETS, skb->len);
+ 	return dst_output(net, sk, skb);
+ }
+ 
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 08c4516ae4a4d..ba59a9c14e02a 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -4213,6 +4213,10 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
+ 		cfg->fc_flags |= RTF_GATEWAY;
+ 	}
++	if (tb[RTA_VIA]) {
++		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
++		goto errout;
++	}
+ 
+ 	if (tb[RTA_DST]) {
+ 		int plen = (rtm->rtm_dst_len + 7) >> 3;
+@@ -4907,7 +4911,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ 
+ 	if (tb[RTA_IP_PROTO]) {
+ 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
+-						  &fl6.flowi6_proto, extack);
++						  &fl6.flowi6_proto, AF_INET6,
++						  extack);
+ 		if (err)
+ 			goto errout;
+ 	}
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index da6d5a3f53995..868d7da7a0cb3 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1873,6 +1873,7 @@ static int __net_init sit_init_net(struct net *net)
+ 
+ err_reg_dev:
+ 	ipip6_dev_free(sitn->fb_tunnel_dev);
++	free_netdev(sitn->fb_tunnel_dev);
+ err_alloc_dev:
+ 	return err;
+ }
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index 8fbe6cdbe255d..d5a4db5b3fe7b 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -1822,6 +1822,9 @@ static int rtm_to_route_config(struct sk_buff *skb,
+ 				goto errout;
+ 			break;
+ 		}
++		case RTA_GATEWAY:
++			NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
++			goto errout;
+ 		case RTA_VIA:
+ 		{
+ 			if (nla_get_via(nla, &cfg->rc_via_alen,
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index ea7c67050792c..ee3e5b6471a69 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -903,7 +903,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
+ 		    (state == 0 && (byte & bitmask) == 0))
+ 			return bit_spot;
+ 
+-		bit_spot++;
++		if (++bit_spot >= bitmap_len)
++			return -1;
+ 		bitmask >>= 1;
+ 		if (bitmask == 0) {
+ 			byte = bitmap[++byte_offset];
+diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
+index 6a196e438b6c0..d1fc019e932e0 100644
+--- a/net/nfc/llcp_commands.c
++++ b/net/nfc/llcp_commands.c
+@@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+ 						      sock->service_name,
+ 						      sock->service_name_len,
+ 						      &service_name_tlv_length);
++		if (!service_name_tlv) {
++			err = -ENOMEM;
++			goto error_tlv;
++		}
+ 		size += service_name_tlv_length;
+ 	}
+ 
+@@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
+ 
+ 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+ 				      &miux_tlv_length);
++	if (!miux_tlv) {
++		err = -ENOMEM;
++		goto error_tlv;
++	}
+ 	size += miux_tlv_length;
+ 
+ 	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
++	if (!rw_tlv) {
++		err = -ENOMEM;
++		goto error_tlv;
++	}
+ 	size += rw_tlv_length;
+ 
+ 	pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
+@@ -484,9 +496,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
+ 
+ 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
+ 				      &miux_tlv_length);
++	if (!miux_tlv) {
++		err = -ENOMEM;
++		goto error_tlv;
++	}
+ 	size += miux_tlv_length;
+ 
+ 	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
++	if (!rw_tlv) {
++		err = -ENOMEM;
++		goto error_tlv;
++	}
+ 	size += rw_tlv_length;
+ 
+ 	skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
+diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
+index ef4026a23e802..4fa015208aab1 100644
+--- a/net/nfc/llcp_core.c
++++ b/net/nfc/llcp_core.c
+@@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
+ 
+ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
+ {
+-	u8 *gb_cur, *version_tlv, version, version_length;
+-	u8 *lto_tlv, lto_length;
+-	u8 *wks_tlv, wks_length;
+-	u8 *miux_tlv, miux_length;
++	u8 *gb_cur, version, version_length;
++	u8 lto_length, wks_length, miux_length;
++	u8 *version_tlv = NULL, *lto_tlv = NULL,
++	   *wks_tlv = NULL, *miux_tlv = NULL;
+ 	__be16 wks = cpu_to_be16(local->local_wks);
+ 	u8 gb_len = 0;
+ 	int ret = 0;
+@@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
+ 	version = LLCP_VERSION_11;
+ 	version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
+ 					 1, &version_length);
++	if (!version_tlv) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 	gb_len += version_length;
+ 
+ 	lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
++	if (!lto_tlv) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 	gb_len += lto_length;
+ 
+ 	pr_debug("Local wks 0x%lx\n", local->local_wks);
+ 	wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
++	if (!wks_tlv) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 	gb_len += wks_length;
+ 
+ 	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
+ 				      &miux_length);
++	if (!miux_tlv) {
++		ret = -ENOMEM;
++		goto out;
++	}
+ 	gb_len += miux_length;
+ 
+ 	gb_len += ARRAY_SIZE(llcp_magic);
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index 8525de8116163..334f3a0576713 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -199,8 +199,7 @@ err3:
+ err2:
+ 	kfree(tname);
+ err1:
+-	if (ret == ACT_P_CREATED)
+-		tcf_idr_release(*a, bind);
++	tcf_idr_release(*a, bind);
+ 	return err;
+ }
+ 
+diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
+index 73e44ce2a8837..86d90fc5e97ea 100644
+--- a/net/sched/act_skbedit.c
++++ b/net/sched/act_skbedit.c
+@@ -191,8 +191,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
+ 
+ 	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+ 	if (unlikely(!params_new)) {
+-		if (ret == ACT_P_CREATED)
+-			tcf_idr_release(*a, bind);
++		tcf_idr_release(*a, bind);
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index 0f6601fdf8899..72d9c432e8b42 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -377,7 +377,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+ 	return ret;
+ 
+ release_tun_meta:
+-	dst_release(&metadata->dst);
++	if (metadata)
++		dst_release(&metadata->dst);
+ 
+ err_out:
+ 	if (exists)
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 69078c82963ef..77b289da77636 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -68,7 +68,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+ 			skb = __skb_dequeue(&q->skb_bad_txq);
+ 			if (qdisc_is_percpu_stats(q)) {
+ 				qdisc_qstats_cpu_backlog_dec(q, skb);
+-				qdisc_qstats_cpu_qlen_dec(q);
++				qdisc_qstats_atomic_qlen_dec(q);
+ 			} else {
+ 				qdisc_qstats_backlog_dec(q, skb);
+ 				q->q.qlen--;
+@@ -108,7 +108,7 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
+ 
+ 	if (qdisc_is_percpu_stats(q)) {
+ 		qdisc_qstats_cpu_backlog_inc(q, skb);
+-		qdisc_qstats_cpu_qlen_inc(q);
++		qdisc_qstats_atomic_qlen_inc(q);
+ 	} else {
+ 		qdisc_qstats_backlog_inc(q, skb);
+ 		q->q.qlen++;
+@@ -147,7 +147,7 @@ static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
+ 
+ 		qdisc_qstats_cpu_requeues_inc(q);
+ 		qdisc_qstats_cpu_backlog_inc(q, skb);
+-		qdisc_qstats_cpu_qlen_inc(q);
++		qdisc_qstats_atomic_qlen_inc(q);
+ 
+ 		skb = next;
+ 	}
+@@ -252,7 +252,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
+ 			skb = __skb_dequeue(&q->gso_skb);
+ 			if (qdisc_is_percpu_stats(q)) {
+ 				qdisc_qstats_cpu_backlog_dec(q, skb);
+-				qdisc_qstats_cpu_qlen_dec(q);
++				qdisc_qstats_atomic_qlen_dec(q);
+ 			} else {
+ 				qdisc_qstats_backlog_dec(q, skb);
+ 				q->q.qlen--;
+@@ -633,7 +633,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
+ 	if (unlikely(err))
+ 		return qdisc_drop_cpu(skb, qdisc, to_free);
+ 
+-	qdisc_qstats_cpu_qlen_inc(qdisc);
++	qdisc_qstats_atomic_qlen_inc(qdisc);
+ 	/* Note: skb can not be used after skb_array_produce(),
+ 	 * so we better not use qdisc_qstats_cpu_backlog_inc()
+ 	 */
+@@ -658,7 +658,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
+ 	if (likely(skb)) {
+ 		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
+ 		qdisc_bstats_cpu_update(qdisc, skb);
+-		qdisc_qstats_cpu_qlen_dec(qdisc);
++		qdisc_qstats_atomic_qlen_dec(qdisc);
+ 	}
+ 
+ 	return skb;
+@@ -702,7 +702,6 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
+ 		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
+ 
+ 		q->backlog = 0;
+-		q->qlen = 0;
+ 	}
+ }
+ 
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 74c0f656f28c5..4dfe10b9f96c8 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -440,6 +440,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	int nb = 0;
+ 	int count = 1;
+ 	int rc = NET_XMIT_SUCCESS;
++	int rc_drop = NET_XMIT_DROP;
+ 
+ 	/* Do not fool qdisc_drop_all() */
+ 	skb->prev = NULL;
+@@ -479,6 +480,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		q->duplicate = 0;
+ 		rootq->enqueue(skb2, rootq, to_free);
+ 		q->duplicate = dupsave;
++		rc_drop = NET_XMIT_SUCCESS;
+ 	}
+ 
+ 	/*
+@@ -491,7 +493,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		if (skb_is_gso(skb)) {
+ 			segs = netem_segment(skb, sch, to_free);
+ 			if (!segs)
+-				return NET_XMIT_DROP;
++				return rc_drop;
+ 		} else {
+ 			segs = skb;
+ 		}
+@@ -514,8 +516,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 			1<<(prandom_u32() % 8);
+ 	}
+ 
+-	if (unlikely(sch->q.qlen >= sch->limit))
+-		return qdisc_drop_all(skb, sch, to_free);
++	if (unlikely(sch->q.qlen >= sch->limit)) {
++		qdisc_drop_all(skb, sch, to_free);
++		return rc_drop;
++	}
+ 
+ 	qdisc_qstats_backlog_inc(sch, skb);
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index e5e70cff5bb30..1b16250c5718d 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1884,6 +1884,7 @@ static int sctp_sendmsg_check_sflags(struct sctp_association *asoc,
+ 
+ 		pr_debug("%s: aborting association:%p\n", __func__, asoc);
+ 		sctp_primitive_ABORT(net, asoc, chunk);
++		iov_iter_revert(&msg->msg_iter, msg_len);
+ 
+ 		return 0;
+ 	}
+diff --git a/net/socket.c b/net/socket.c
+index 5c820212ba815..18d27b8c25116 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -577,6 +577,7 @@ static void __sock_release(struct socket *sock, struct inode *inode)
+ 		if (inode)
+ 			inode_lock(inode);
+ 		sock->ops->release(sock);
++		sock->sk = NULL;
+ 		if (inode)
+ 			inode_unlock(inode);
+ 		sock->ops = NULL;
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index e1bdaf056c8f7..88c307ef13181 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -377,11 +377,13 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
+ 
+ #define tipc_wait_for_cond(sock_, timeo_, condition_)			       \
+ ({                                                                             \
++	DEFINE_WAIT_FUNC(wait_, woken_wake_function);                          \
+ 	struct sock *sk_;						       \
+ 	int rc_;							       \
+ 									       \
+ 	while ((rc_ = !(condition_))) {					       \
+-		DEFINE_WAIT_FUNC(wait_, woken_wake_function);	               \
++		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */            \
++		smp_rmb();                                                     \
+ 		sk_ = (sock_)->sk;					       \
+ 		rc_ = tipc_sk_sock_err((sock_), timeo_);		       \
+ 		if (rc_)						       \
+@@ -1318,7 +1320,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
+ 
+ 	if (unlikely(!dest)) {
+ 		dest = &tsk->peer;
+-		if (!syn || dest->family != AF_TIPC)
++		if (!syn && dest->family != AF_TIPC)
+ 			return -EDESTADDRREQ;
+ 	}
+ 
+@@ -1961,6 +1963,8 @@ static void tipc_sk_proto_rcv(struct sock *sk,
+ 		return;
+ 	case SOCK_WAKEUP:
+ 		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
++		/* coupled with smp_rmb() in tipc_wait_for_cond() */
++		smp_wmb();
+ 		tsk->cong_link_cnt--;
+ 		wakeup = true;
+ 		break;
+diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
+index 6c5f1b2ffb745..1cbb12e284a68 100755
+--- a/tools/testing/selftests/firmware/fw_lib.sh
++++ b/tools/testing/selftests/firmware/fw_lib.sh
+@@ -91,7 +91,7 @@ verify_reqs()
+ 	if [ "$TEST_REQS_FW_SYSFS_FALLBACK" = "yes" ]; then
+ 		if [ ! "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
+ 			echo "usermode helper disabled so ignoring test"
+-			exit $ksft_skip
++			exit 0
+ 		fi
+ 	fi
+ }
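
One of the more self-contained fixes above is the net/netlabel change to
netlbl_bitmap_walk(): the bit index is now bounded by the bitmap length, so
the scan can no longer run past the end of the buffer when no matching bit
exists. The standalone C sketch below illustrates the same MSB-first walk
with that bounds check; it is an illustration only, not the kernel code, and
the helper name bitmap_walk() plus the sample data are invented for the
example.

/*
 * Walk a bitmap MSB-first within each byte and return the index of the
 * first bit whose value equals `state` (0 or 1), or -1 if no such bit
 * exists within the first `bitmap_len` bits.
 */
#include <stdio.h>

static int bitmap_walk(const unsigned char *bitmap, unsigned int bitmap_len,
                       unsigned int offset, int state)
{
	unsigned int bit_spot = offset;
	unsigned int byte_offset = offset / 8;
	unsigned char bitmask;
	unsigned char byte;

	if (offset >= bitmap_len)
		return -1;

	bitmask = 0x80 >> (offset % 8);
	byte = bitmap[byte_offset];

	while (bit_spot < bitmap_len) {
		if ((state && (byte & bitmask)) ||
		    (!state && !(byte & bitmask)))
			return (int)bit_spot;

		/* the fix: stop before stepping past the last valid bit */
		if (++bit_spot >= bitmap_len)
			return -1;
		bitmask >>= 1;
		if (bitmask == 0) {
			byte = bitmap[++byte_offset];
			bitmask = 0x80;
		}
	}
	return -1;
}

int main(void)
{
	const unsigned char map[] = { 0x00, 0x10 };	/* only bit 11 is set */

	printf("%d\n", bitmap_walk(map, 16, 0, 1));	/* prints 11 */
	printf("%d\n", bitmap_walk(map, 16, 12, 1));	/* prints -1 */
	return 0;
}

Checking the index right after the increment means the byte reload that
follows can never read beyond the bitmap, which is the property the kernel
change adds.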

