public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Thu,  5 Mar 2020 16:23:26 +0000 (UTC)	[thread overview]
Message-ID: <1583425387.836621b120225f053a815f0660ead626b1a5ebb4.mpagano@gentoo> (raw)

commit:     836621b120225f053a815f0660ead626b1a5ebb4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar  5 16:23:07 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar  5 16:23:07 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=836621b1

Linux patch 4.19.108

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1107_linux-4.19.108.patch | 2999 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3003 insertions(+)

diff --git a/0000_README b/0000_README
index 7d48aad..65259b7 100644
--- a/0000_README
+++ b/0000_README
@@ -467,6 +467,10 @@ Patch:  1106_linux-4.19.107.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.107
 
+Patch:  1107_linux-4.19.108.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.108
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1107_linux-4.19.108.patch b/1107_linux-4.19.108.patch
new file mode 100644
index 0000000..f4ed81e
--- /dev/null
+++ b/1107_linux-4.19.108.patch
@@ -0,0 +1,2999 @@
+diff --git a/Documentation/networking/nf_flowtable.txt b/Documentation/networking/nf_flowtable.txt
+index 54128c50d508..b01c91893481 100644
+--- a/Documentation/networking/nf_flowtable.txt
++++ b/Documentation/networking/nf_flowtable.txt
+@@ -76,7 +76,7 @@ flowtable and add one rule to your forward chain.
+ 
+         table inet x {
+ 		flowtable f {
+-			hook ingress priority 0 devices = { eth0, eth1 };
++			hook ingress priority 0; devices = { eth0, eth1 };
+ 		}
+                 chain y {
+                         type filter hook forward priority 0; policy accept;
+diff --git a/Makefile b/Makefile
+index 69e2527a6968..313f0c8dd66f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 107
++SUBLEVEL = 108
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
+index 4dedfcb0fcb3..ac42d3c6bda0 100644
+--- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
++++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
+@@ -45,7 +45,7 @@
+ 			/* DAC */
+ 			format = "i2s";
+ 			mclk-fs = <256>;
+-			frame-inversion = <1>;
++			frame-inversion;
+ 			cpu {
+ 				sound-dai = <&sti_uni_player2>;
+ 			};
+diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
+index 0bef238d2c0c..0d5f9c8f5bda 100644
+--- a/arch/mips/kernel/vpe.c
++++ b/arch/mips/kernel/vpe.c
+@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
+ {
+ 	list_del(&v->list);
+ 	if (v->load_addr)
+-		release_progmem(v);
++		release_progmem(v->load_addr);
+ 	kfree(v);
+ }
+ 
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 0219693bf08e..3f0565e1a7a8 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1298,6 +1298,47 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
+ 				    control->pause_filter_count, old);
+ }
+ 
++/*
++ * The default MMIO mask is a single bit (excluding the present bit),
++ * which could conflict with the memory encryption bit. Check for
++ * memory encryption support and override the default MMIO mask if
++ * memory encryption is enabled.
++ */
++static __init void svm_adjust_mmio_mask(void)
++{
++	unsigned int enc_bit, mask_bit;
++	u64 msr, mask;
++
++	/* If there is no memory encryption support, use existing mask */
++	if (cpuid_eax(0x80000000) < 0x8000001f)
++		return;
++
++	/* If memory encryption is not enabled, use existing mask */
++	rdmsrl(MSR_K8_SYSCFG, msr);
++	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
++		return;
++
++	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
++	mask_bit = boot_cpu_data.x86_phys_bits;
++
++	/* Increment the mask bit if it is the same as the encryption bit */
++	if (enc_bit == mask_bit)
++		mask_bit++;
++
++	/*
++	 * If the mask bit location is below 52, then some bits above the
++	 * physical addressing limit will always be reserved, so use the
++	 * rsvd_bits() function to generate the mask. This mask, along with
++	 * the present bit, will be used to generate a page fault with
++	 * PFER.RSV = 1.
++	 *
++	 * If the mask bit location is 52 (or above), then clear the mask.
++	 */
++	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
++
++	kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
++}
++
+ static __init int svm_hardware_setup(void)
+ {
+ 	int cpu;
+@@ -1352,6 +1393,8 @@ static __init int svm_hardware_setup(void)
+ 		}
+ 	}
+ 
++	svm_adjust_mmio_mask();
++
+ 	for_each_possible_cpu(cpu) {
+ 		r = svm_cpu_init(cpu);
+ 		if (r)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 9c48484dbe23..a81d7d9ce9d6 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -13724,6 +13724,7 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
+ 	else
+ 		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
+ 
++	/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */
+ 	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
+ }
+ 
+@@ -13753,6 +13754,20 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ 	case x86_intercept_outs:
+ 		return vmx_check_intercept_io(vcpu, info);
+ 
++	case x86_intercept_lgdt:
++	case x86_intercept_lidt:
++	case x86_intercept_lldt:
++	case x86_intercept_ltr:
++	case x86_intercept_sgdt:
++	case x86_intercept_sidt:
++	case x86_intercept_sldt:
++	case x86_intercept_str:
++		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
++			return X86EMUL_CONTINUE;
++
++		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */
++		break;
++
+ 	/* TODO: check more intercepts... */
+ 	default:
+ 		break;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index ade694f94a49..2cb379e261c0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8693,12 +8693,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+ 
+ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+-	vcpu->arch.apf.msr_val = 0;
+-
+-	vcpu_load(vcpu);
+-	kvm_mmu_unload(vcpu);
+-	vcpu_put(vcpu);
+-
+ 	kvm_arch_vcpu_free(vcpu);
+ }
+ 
+diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
+index 95600309ce42..23cde3d8e8fb 100644
+--- a/drivers/acpi/acpi_watchdog.c
++++ b/drivers/acpi/acpi_watchdog.c
+@@ -129,12 +129,11 @@ void __init acpi_watchdog_init(void)
+ 		gas = &entries[i].register_region;
+ 
+ 		res.start = gas->address;
++		res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
+ 		if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ 			res.flags = IORESOURCE_MEM;
+-			res.end = res.start + ALIGN(gas->access_width, 4) - 1;
+ 		} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ 			res.flags = IORESOURCE_IO;
+-			res.end = res.start + gas->access_width - 1;
+ 		} else {
+ 			pr_warn("Unsupported address space: %u\n",
+ 				gas->space_id);
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index af44db2dfb68..fec679433f72 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -735,10 +735,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ 	msg = ssif_info->curr_msg;
+ 	if (msg) {
++		if (data) {
++			if (len > IPMI_MAX_MSG_LENGTH)
++				len = IPMI_MAX_MSG_LENGTH;
++			memcpy(msg->rsp, data, len);
++		} else {
++			len = 0;
++		}
+ 		msg->rsp_size = len;
+-		if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
+-			msg->rsp_size = IPMI_MAX_MSG_LENGTH;
+-		memcpy(msg->rsp, data, msg->rsp_size);
+ 		ssif_info->curr_msg = NULL;
+ 	}
+ 
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 8122d0e0d4c4..06a981c72246 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -600,7 +600,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ {
+ 	struct devfreq *devfreq;
+ 	struct devfreq_governor *governor;
+-	static atomic_t devfreq_no = ATOMIC_INIT(-1);
+ 	int err = 0;
+ 
+ 	if (!dev || !profile || !governor_name) {
+@@ -661,8 +660,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ 	}
+ 	devfreq->max_freq = devfreq->scaling_max_freq;
+ 
+-	dev_set_name(&devfreq->dev, "devfreq%d",
+-				atomic_inc_return(&devfreq_no));
++	dev_set_name(&devfreq->dev, "%s", dev_name(dev));
+ 	err = device_register(&devfreq->dev);
+ 	if (err) {
+ 		mutex_unlock(&devfreq->lock);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index bb5a47a45790..5c76a815396d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -97,6 +97,7 @@ struct amdgpu_gmc {
+ 	uint32_t                srbm_soft_reset;
+ 	bool			prt_warning;
+ 	uint64_t		stolen_size;
++	uint32_t		sdpif_register;
+ 	/* apertures */
+ 	u64			shared_aperture_start;
+ 	u64			shared_aperture_end;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index ede27dab675f..8b25940c1367 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -992,6 +992,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
+ 	}
+ }
+ 
++/**
++ * gmc_v9_0_restore_registers - restores regs
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * This restores register values, saved at suspend.
++ */
++static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
++{
++	if (adev->asic_type == CHIP_RAVEN)
++		WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
++}
++
+ /**
+  * gmc_v9_0_gart_enable - gart enable
+  *
+@@ -1080,6 +1093,20 @@ static int gmc_v9_0_hw_init(void *handle)
+ 	return r;
+ }
+ 
++/**
++ * gmc_v9_0_save_registers - saves regs
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * This saves potential register values that should be
++ * restored upon resume
++ */
++static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
++{
++	if (adev->asic_type == CHIP_RAVEN)
++		adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
++}
++
+ /**
+  * gmc_v9_0_gart_disable - gart disable
+  *
+@@ -1112,9 +1139,16 @@ static int gmc_v9_0_hw_fini(void *handle)
+ 
+ static int gmc_v9_0_suspend(void *handle)
+ {
++	int r;
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
+-	return gmc_v9_0_hw_fini(adev);
++	r = gmc_v9_0_hw_fini(adev);
++	if (r)
++		return r;
++
++	gmc_v9_0_save_registers(adev);
++
++	return 0;
+ }
+ 
+ static int gmc_v9_0_resume(void *handle)
+@@ -1122,6 +1156,7 @@ static int gmc_v9_0_resume(void *handle)
+ 	int r;
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	gmc_v9_0_restore_registers(adev);
+ 	r = gmc_v9_0_hw_init(adev);
+ 	if (r)
+ 		return r;
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+index b6f74bf4af02..27bb8c1ab858 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+@@ -7376,6 +7376,8 @@
+ #define mmCRTC4_CRTC_DRR_CONTROL                                                                       0x0f3e
+ #define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX                                                              2
+ 
++#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                                                  0x395d
++#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                                                         2
+ 
+ // addressBlock: dce_dc_fmt4_dispdec
+ // base address: 0x2000
+diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
+index 51ed99a37803..6053f5a93f58 100644
+--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
++++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
+@@ -95,12 +95,12 @@ static void dmabuf_gem_object_free(struct kref *kref)
+ 			dmabuf_obj = container_of(pos,
+ 					struct intel_vgpu_dmabuf_obj, list);
+ 			if (dmabuf_obj == obj) {
++				list_del(pos);
+ 				intel_gvt_hypervisor_put_vfio_device(vgpu);
+ 				idr_remove(&vgpu->object_idr,
+ 					   dmabuf_obj->dmabuf_id);
+ 				kfree(dmabuf_obj->info);
+ 				kfree(dmabuf_obj);
+-				list_del(pos);
+ 				break;
+ 			}
+ 		}
+diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
+index c628be05fbfe..69bba88906cd 100644
+--- a/drivers/gpu/drm/i915/gvt/vgpu.c
++++ b/drivers/gpu/drm/i915/gvt/vgpu.c
+@@ -556,9 +556,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+ 
+ 		intel_vgpu_reset_mmio(vgpu, dmlr);
+ 		populate_pvinfo_page(vgpu);
+-		intel_vgpu_reset_display(vgpu);
+ 
+ 		if (dmlr) {
++			intel_vgpu_reset_display(vgpu);
+ 			intel_vgpu_reset_cfg_space(vgpu);
+ 			/* only reset the failsafe mode when dmlr reset */
+ 			vgpu->failsafe = false;
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index dbfd2c006f74..6f81de85fb86 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -492,6 +492,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
+ 	if (ret)
+ 		goto err_msm_uninit;
+ 
++	if (!dev->dma_parms) {
++		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
++					      GFP_KERNEL);
++		if (!dev->dma_parms)
++			return -ENOMEM;
++	}
++	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
++
+ 	msm_gem_shrinker_init(ddev);
+ 
+ 	switch (get_mdp_ver(pdev)) {
+diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
+index 3cd7229b6e54..895f49b565ee 100644
+--- a/drivers/hid/hid-alps.c
++++ b/drivers/hid/hid-alps.c
+@@ -734,7 +734,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ 	if (data->has_sp) {
+ 		input2 = input_allocate_device();
+ 		if (!input2) {
+-			input_free_device(input2);
++			ret = -ENOMEM;
+ 			goto exit;
+ 		}
+ 
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index e723156057a6..2c85d075daee 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1566,7 +1566,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
+ 
+ 	rsize = ((report->size - 1) >> 3) + 1;
+ 
+-	if (rsize > HID_MAX_BUFFER_SIZE)
++	if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
++		rsize = HID_MAX_BUFFER_SIZE - 1;
++	else if (rsize > HID_MAX_BUFFER_SIZE)
+ 		rsize = HID_MAX_BUFFER_SIZE;
+ 
+ 	if (csize < rsize) {
+diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
+index 2ce1eb0c9212..f2e23f81601e 100644
+--- a/drivers/hid/hid-ite.c
++++ b/drivers/hid/hid-ite.c
+@@ -44,8 +44,9 @@ static const struct hid_device_id ite_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+ 	/* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
+-			 USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
++	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++		     USB_VENDOR_ID_SYNAPTICS,
++		     USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, ite_devices);
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index da000195b79a..c34ef95d7cef 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -954,9 +954,9 @@ void hiddev_disconnect(struct hid_device *hid)
+ 	hiddev->exist = 0;
+ 
+ 	if (hiddev->open) {
+-		mutex_unlock(&hiddev->existancelock);
+ 		hid_hw_close(hiddev->hid);
+ 		wake_up_interruptible(&hiddev->wait);
++		mutex_unlock(&hiddev->existancelock);
+ 	} else {
+ 		mutex_unlock(&hiddev->existancelock);
+ 		kfree(hiddev);
+diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
+index f5e1941e65b5..a1cdcfc74acf 100644
+--- a/drivers/i2c/busses/i2c-altera.c
++++ b/drivers/i2c/busses/i2c-altera.c
+@@ -182,7 +182,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
+ 	/* SCL Low Time */
+ 	writel(t_low, idev->base + ALTR_I2C_SCL_LOW);
+ 	/* SDA Hold Time, 300ns */
+-	writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD);
++	writel(3 * clk_mhz / 10, idev->base + ALTR_I2C_SDA_HOLD);
+ 
+ 	/* Mask all master interrupt bits */
+ 	altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
+diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
+index 30132c3957cd..41ca9ff7b5da 100644
+--- a/drivers/i2c/busses/i2c-jz4780.c
++++ b/drivers/i2c/busses/i2c-jz4780.c
+@@ -82,25 +82,6 @@
+ #define JZ4780_I2C_STA_TFNF		BIT(1)
+ #define JZ4780_I2C_STA_ACT		BIT(0)
+ 
+-static const char * const jz4780_i2c_abrt_src[] = {
+-	"ABRT_7B_ADDR_NOACK",
+-	"ABRT_10ADDR1_NOACK",
+-	"ABRT_10ADDR2_NOACK",
+-	"ABRT_XDATA_NOACK",
+-	"ABRT_GCALL_NOACK",
+-	"ABRT_GCALL_READ",
+-	"ABRT_HS_ACKD",
+-	"SBYTE_ACKDET",
+-	"ABRT_HS_NORSTRT",
+-	"SBYTE_NORSTRT",
+-	"ABRT_10B_RD_NORSTRT",
+-	"ABRT_MASTER_DIS",
+-	"ARB_LOST",
+-	"SLVFLUSH_TXFIFO",
+-	"SLV_ARBLOST",
+-	"SLVRD_INTX",
+-};
+-
+ #define JZ4780_I2C_INTST_IGC		BIT(11)
+ #define JZ4780_I2C_INTST_ISTT		BIT(10)
+ #define JZ4780_I2C_INTST_ISTP		BIT(9)
+@@ -538,21 +519,8 @@ done:
+ 
+ static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src)
+ {
+-	int i;
+-
+-	dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src);
+-	dev_err(&i2c->adap.dev, "device addr=%x\n",
+-		jz4780_i2c_readw(i2c, JZ4780_I2C_TAR));
+-	dev_err(&i2c->adap.dev, "send cmd count:%d  %d\n",
+-		i2c->cmd, i2c->cmd_buf[i2c->cmd]);
+-	dev_err(&i2c->adap.dev, "receive data count:%d  %d\n",
+-		i2c->cmd, i2c->data_buf[i2c->cmd]);
+-
+-	for (i = 0; i < 16; i++) {
+-		if (src & BIT(i))
+-			dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n",
+-				i, jz4780_i2c_abrt_src[i]);
+-	}
++	dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n",
++		src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]);
+ }
+ 
+ static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index bf7b69449b43..f9b73336a39e 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -208,7 +208,7 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
+ 
+ static struct its_collection *valid_col(struct its_collection *col)
+ {
+-	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
++	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
+ 		return NULL;
+ 
+ 	return col;
+diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
+index 8c744578122a..a0d87ed9da69 100644
+--- a/drivers/macintosh/therm_windtunnel.c
++++ b/drivers/macintosh/therm_windtunnel.c
+@@ -300,9 +300,11 @@ static int control_loop(void *dummy)
+ /*	i2c probing and setup						*/
+ /************************************************************************/
+ 
+-static int
+-do_attach( struct i2c_adapter *adapter )
++static void do_attach(struct i2c_adapter *adapter)
+ {
++	struct i2c_board_info info = { };
++	struct device_node *np;
++
+ 	/* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */
+ 	static const unsigned short scan_ds1775[] = {
+ 		0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+@@ -313,25 +315,24 @@ do_attach( struct i2c_adapter *adapter )
+ 		I2C_CLIENT_END
+ 	};
+ 
+-	if( strncmp(adapter->name, "uni-n", 5) )
+-		return 0;
+-
+-	if( !x.running ) {
+-		struct i2c_board_info info;
++	if (x.running || strncmp(adapter->name, "uni-n", 5))
++		return;
+ 
+-		memset(&info, 0, sizeof(struct i2c_board_info));
+-		strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE);
++	np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775");
++	if (np) {
++		of_node_put(np);
++	} else {
++		strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
+ 		i2c_new_probed_device(adapter, &info, scan_ds1775, NULL);
++	}
+ 
+-		strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE);
++	np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
++	if (np) {
++		of_node_put(np);
++	} else {
++		strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
+ 		i2c_new_probed_device(adapter, &info, scan_adm1030, NULL);
+-
+-		if( x.thermostat && x.fan ) {
+-			x.running = 1;
+-			x.poll_task = kthread_run(control_loop, NULL, "g4fand");
+-		}
+ 	}
+-	return 0;
+ }
+ 
+ static int
+@@ -404,8 +405,8 @@ out:
+ enum chip { ds1775, adm1030 };
+ 
+ static const struct i2c_device_id therm_windtunnel_id[] = {
+-	{ "therm_ds1775", ds1775 },
+-	{ "therm_adm1030", adm1030 },
++	{ "MAC,ds1775", ds1775 },
++	{ "MAC,adm1030", adm1030 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
+@@ -414,6 +415,7 @@ static int
+ do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
+ {
+ 	struct i2c_adapter *adapter = cl->adapter;
++	int ret = 0;
+ 
+ 	if( !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA
+ 				     | I2C_FUNC_SMBUS_WRITE_BYTE) )
+@@ -421,11 +423,19 @@ do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
+ 
+ 	switch (id->driver_data) {
+ 	case adm1030:
+-		return attach_fan( cl );
++		ret = attach_fan(cl);
++		break;
+ 	case ds1775:
+-		return attach_thermostat(cl);
++		ret = attach_thermostat(cl);
++		break;
+ 	}
+-	return 0;
++
++	if (!x.running && x.thermostat && x.fan) {
++		x.running = 1;
++		x.poll_task = kthread_run(control_loop, NULL, "g4fand");
++	}
++
++	return ret;
+ }
+ 
+ static struct i2c_driver g4fan_driver = {
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 92261c946e2a..3afc0e59a2bd 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -201,6 +201,11 @@ static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+ 					  u16 command_id, bool capture)
+ {
++	if (unlikely(!queue->comp_ctx)) {
++		pr_err("Completion context is NULL\n");
++		return NULL;
++	}
++
+ 	if (unlikely(command_id >= queue->q_depth)) {
+ 		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ 		       command_id, queue->q_depth);
+@@ -842,6 +847,24 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
+ 				      0);
+ }
+ 
++static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
++{
++	struct ena_admin_feature_rss_flow_hash_control *hash_key =
++		(ena_dev->rss).hash_key;
++
++	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
++	/* The key is stored in the device in u32 array
++	 * as well as the API requires the key to be passed in this
++	 * format. Thus the size of our array should be divided by 4
++	 */
++	hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
++}
++
++int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
++{
++	return ena_dev->rss.hash_func;
++}
++
+ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+ {
+ 	struct ena_rss *rss = &ena_dev->rss;
+@@ -2075,15 +2098,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ 
+ 	switch (func) {
+ 	case ENA_ADMIN_TOEPLITZ:
+-		if (key_len > sizeof(hash_key->key)) {
+-			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
+-			       key_len, sizeof(hash_key->key));
+-			return -EINVAL;
++		if (key) {
++			if (key_len != sizeof(hash_key->key)) {
++				pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
++				       key_len, sizeof(hash_key->key));
++				return -EINVAL;
++			}
++			memcpy(hash_key->key, key, key_len);
++			rss->hash_init_val = init_val;
++			hash_key->keys_num = key_len >> 2;
+ 		}
+-
+-		memcpy(hash_key->key, key, key_len);
+-		rss->hash_init_val = init_val;
+-		hash_key->keys_num = key_len >> 2;
+ 		break;
+ 	case ENA_ADMIN_CRC32:
+ 		rss->hash_init_val = init_val;
+@@ -2120,7 +2144,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ 	if (unlikely(rc))
+ 		return rc;
+ 
+-	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
++	/* ffs() returns 1 in case the lsb is set */
++	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
++	if (rss->hash_func)
++		rss->hash_func--;
++
+ 	if (func)
+ 		*func = rss->hash_func;
+ 
+@@ -2408,6 +2436,8 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
+ 	if (unlikely(rc))
+ 		goto err_hash_key;
+ 
++	ena_com_hash_key_fill_default_key(ena_dev);
++
+ 	rc = ena_com_hash_ctrl_init(ena_dev);
+ 	if (unlikely(rc))
+ 		goto err_hash_ctrl;
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
+index 7b784f8a06a6..7272fb0d858d 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.h
++++ b/drivers/net/ethernet/amazon/ena/ena_com.h
+@@ -42,6 +42,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/types.h>
+ #include <linux/wait.h>
++#include <linux/netdevice.h>
+ 
+ #include "ena_common_defs.h"
+ #include "ena_admin_defs.h"
+@@ -631,6 +632,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
+  */
+ void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+ 
++/* ena_com_get_current_hash_function - Get RSS hash function
++ * @ena_dev: ENA communication layer struct
++ *
++ * Return the current hash function.
++ * @return: 0 or one of the ena_admin_hash_functions values.
++ */
++int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
++
+ /* ena_com_fill_hash_function - Fill RSS hash function
+  * @ena_dev: ENA communication layer struct
+  * @func: The hash function (Toeplitz or crc)
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index eb9e07fa427e..66f992510e0e 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -649,6 +649,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
+ 	return ENA_HASH_KEY_SIZE;
+ }
+ 
++static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
++{
++	struct ena_com_dev *ena_dev = adapter->ena_dev;
++	int i, rc;
++
++	if (!indir)
++		return 0;
++
++	rc = ena_com_indirect_table_get(ena_dev, indir);
++	if (rc)
++		return rc;
++
++	/* Our internal representation of the indices is: even indices
++	 * for Tx and uneven indices for Rx. We need to convert the Rx
++	 * indices to be consecutive
++	 */
++	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
++		indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
++
++	return rc;
++}
++
+ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ 			u8 *hfunc)
+ {
+@@ -657,11 +679,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ 	u8 func;
+ 	int rc;
+ 
+-	rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
++	rc = ena_indirection_table_get(adapter, indir);
+ 	if (rc)
+ 		return rc;
+ 
++	/* We call this function in order to check if the device
++	 * supports getting/setting the hash function.
++	 */
+ 	rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
++
++	if (rc) {
++		if (rc == -EOPNOTSUPP) {
++			key = NULL;
++			hfunc = NULL;
++			rc = 0;
++		}
++
++		return rc;
++	}
++
+ 	if (rc)
+ 		return rc;
+ 
+@@ -670,7 +706,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ 		func = ETH_RSS_HASH_TOP;
+ 		break;
+ 	case ENA_ADMIN_CRC32:
+-		func = ETH_RSS_HASH_XOR;
++		func = ETH_RSS_HASH_CRC32;
+ 		break;
+ 	default:
+ 		netif_err(adapter, drv, netdev,
+@@ -713,10 +749,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
+ 	}
+ 
+ 	switch (hfunc) {
++	case ETH_RSS_HASH_NO_CHANGE:
++		func = ena_com_get_current_hash_function(ena_dev);
++		break;
+ 	case ETH_RSS_HASH_TOP:
+ 		func = ENA_ADMIN_TOEPLITZ;
+ 		break;
+-	case ETH_RSS_HASH_XOR:
++	case ETH_RSS_HASH_CRC32:
+ 		func = ENA_ADMIN_CRC32;
+ 		break;
+ 	default:
+@@ -817,6 +856,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
+ 	.get_channels		= ena_get_channels,
+ 	.get_tunable		= ena_get_tunable,
+ 	.set_tunable		= ena_set_tunable,
++	.get_ts_info            = ethtool_op_get_ts_info,
+ };
+ 
+ void ena_set_ethtool_ops(struct net_device *netdev)
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 9afb19ebba58..8736718b1735 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -2847,8 +2847,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+ 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ 		return;
+ 
+-	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
+-					   adapter->keep_alive_timeout);
++	keep_alive_expired = adapter->last_keep_alive_jiffies +
++			     adapter->keep_alive_timeout;
+ 	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
+ 		netif_err(adapter, drv, adapter->netdev,
+ 			  "Keep alive watchdog timeout.\n");
+@@ -2950,7 +2950,7 @@ static void ena_timer_service(struct timer_list *t)
+ 	}
+ 
+ 	/* Reset the timer */
+-	mod_timer(&adapter->timer_service, jiffies + HZ);
++	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
+ }
+ 
+ static int ena_calc_io_queue_num(struct pci_dev *pdev,
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+index 7c7ae56c52cf..f4783effe5c0 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -113,6 +113,8 @@
+ 
+ #define ENA_IO_TXQ_IDX(q)	(2 * (q))
+ #define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
++#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q)	((q) / 2)
++#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q)	(((q) - 1) / 2)
+ 
+ #define ENA_MGMNT_IRQ_IDX		0
+ #define ENA_IO_IRQ_FIRST_IDX		1
+diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+index 50dd6bf176d0..3a489b2b99c9 100644
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+@@ -2034,7 +2034,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
+ 	int ret;
+ 
+ 	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
+-				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
++				  XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
+ 	if (!ndev)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index 8cc34b0bedc3..15dcfb6704e5 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -399,8 +399,10 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
+ 				     dx_buff->len,
+ 				     DMA_TO_DEVICE);
+ 
+-	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
++	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
++		ret = 0;
+ 		goto exit;
++	}
+ 
+ 	first = dx_buff;
+ 	dx_buff->len_pkt = skb->len;
+@@ -530,10 +532,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
+ 	if (likely(frags)) {
+ 		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
+ 						       ring, frags);
+-		if (err >= 0) {
+-			++ring->stats.tx.packets;
+-			ring->stats.tx.bytes += skb->len;
+-		}
+ 	} else {
+ 		err = NETDEV_TX_BUSY;
+ 	}
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index b3c7994d73eb..b03e5fd4327e 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -162,9 +162,12 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
+ 			}
+ 		}
+ 
+-		if (unlikely(buff->is_eop))
+-			dev_kfree_skb_any(buff->skb);
++		if (unlikely(buff->is_eop)) {
++			++self->stats.rx.packets;
++			self->stats.tx.bytes += buff->skb->len;
+ 
++			dev_kfree_skb_any(buff->skb);
++		}
+ 		buff->pa = 0U;
+ 		buff->eop_index = 0xffffU;
+ 		self->sw_head = aq_ring_next_dx(self, self->sw_head);
+diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
+index 3cdf63e35b53..4054cf9db818 100644
+--- a/drivers/net/ethernet/mscc/ocelot_board.c
++++ b/drivers/net/ethernet/mscc/ocelot_board.c
+@@ -105,6 +105,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
+ 		if (err != 4)
+ 			break;
+ 
++		/* At this point the IFH was read correctly, so it is safe to
++		 * presume that there is no error. The err needs to be reset
++		 * otherwise a frame could come in CPU queue between the while
++		 * condition and the check for error later on. And in that case
++		 * the new frame is just removed and not processed.
++		 */
++		err = 0;
++
+ 		ocelot_parse_ifh(ifh, &info);
+ 
+ 		dev = ocelot->ports[info.port]->dev;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
+index d242a5724069..dc3be8a4acf4 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede.h
++++ b/drivers/net/ethernet/qlogic/qede/qede.h
+@@ -162,6 +162,8 @@ struct qede_rdma_dev {
+ 	struct list_head entry;
+ 	struct list_head rdma_event_list;
+ 	struct workqueue_struct *rdma_wq;
++	struct kref refcnt;
++	struct completion event_comp;
+ };
+ 
+ struct qede_ptp;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+index 1900bf7e67d1..cd12fb919ad5 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+@@ -57,6 +57,9 @@ static void _qede_rdma_dev_add(struct qede_dev *edev)
+ static int qede_rdma_create_wq(struct qede_dev *edev)
+ {
+ 	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
++	kref_init(&edev->rdma_info.refcnt);
++	init_completion(&edev->rdma_info.event_comp);
++
+ 	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
+ 	if (!edev->rdma_info.rdma_wq) {
+ 		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
+@@ -81,8 +84,23 @@ static void qede_rdma_cleanup_event(struct qede_dev *edev)
+ 	}
+ }
+ 
++static void qede_rdma_complete_event(struct kref *ref)
++{
++	struct qede_rdma_dev *rdma_dev =
++		container_of(ref, struct qede_rdma_dev, refcnt);
++
++	/* no more events will be added after this */
++	complete(&rdma_dev->event_comp);
++}
++
+ static void qede_rdma_destroy_wq(struct qede_dev *edev)
+ {
++	/* Avoid race with add_event flow, make sure it finishes before
++	 * we start accessing the list and cleaning up the work
++	 */
++	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
++	wait_for_completion(&edev->rdma_info.event_comp);
++
+ 	qede_rdma_cleanup_event(edev);
+ 	destroy_workqueue(edev->rdma_info.rdma_wq);
+ }
+@@ -287,15 +305,24 @@ static void qede_rdma_add_event(struct qede_dev *edev,
+ 	if (!edev->rdma_info.qedr_dev)
+ 		return;
+ 
++	/* We don't want the cleanup flow to start while we're allocating and
++	 * scheduling the work
++	 */
++	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
++		return; /* already being destroyed */
++
+ 	event_node = qede_rdma_get_free_event_node(edev);
+ 	if (!event_node)
+-		return;
++		goto out;
+ 
+ 	event_node->event = event;
+ 	event_node->ptr = edev;
+ 
+ 	INIT_WORK(&event_node->work, qede_rdma_handle_event);
+ 	queue_work(edev->rdma_info.rdma_wq, &event_node->work);
++
++out:
++	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
+ }
+ 
+ void qede_rdma_dev_event_open(struct qede_dev *edev)
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index dbfd3a0c97d3..77a9a753d979 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -110,7 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
+ 
+ 	init_waitqueue_head(&net_device->wait_drain);
+ 	net_device->destroy = false;
+-	net_device->tx_disable = false;
++	net_device->tx_disable = true;
+ 
+ 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
+ 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 7ab576d8b622..bdb55db4523b 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -984,6 +984,7 @@ static int netvsc_attach(struct net_device *ndev,
+ 	}
+ 
+ 	/* In any case device is now ready */
++	nvdev->tx_disable = false;
+ 	netif_device_attach(ndev);
+ 
+ 	/* Note: enable and attach happen when sub-channels setup */
+@@ -2336,6 +2337,8 @@ static int netvsc_probe(struct hv_device *dev,
+ 	else
+ 		net->max_mtu = ETH_DATA_LEN;
+ 
++	nvdev->tx_disable = false;
++
+ 	ret = register_netdevice(net);
+ 	if (ret != 0) {
+ 		pr_err("Unable to register netdev.\n");
+diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
+index 46fe1ae919a3..51ce3ea17fb3 100644
+--- a/drivers/net/phy/mdio-bcm-iproc.c
++++ b/drivers/net/phy/mdio-bcm-iproc.c
+@@ -188,6 +188,23 @@ static int iproc_mdio_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_PM_SLEEP
++int iproc_mdio_resume(struct device *dev)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
++
++	/* restore the mii clock configuration */
++	iproc_mdio_config_clk(priv->base);
++
++	return 0;
++}
++
++static const struct dev_pm_ops iproc_mdio_pm_ops = {
++	.resume = iproc_mdio_resume
++};
++#endif /* CONFIG_PM_SLEEP */
++
+ static const struct of_device_id iproc_mdio_of_match[] = {
+ 	{ .compatible = "brcm,iproc-mdio", },
+ 	{ /* sentinel */ },
+@@ -198,6 +215,9 @@ static struct platform_driver iproc_mdio_driver = {
+ 	.driver = {
+ 		.name = "iproc-mdio",
+ 		.of_match_table = iproc_mdio_of_match,
++#ifdef CONFIG_PM_SLEEP
++		.pm = &iproc_mdio_pm_ops,
++#endif
+ 	},
+ 	.probe = iproc_mdio_probe,
+ 	.remove = iproc_mdio_remove,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 13c8788e3b6b..a04f8577d9f2 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -63,7 +63,6 @@ enum qmi_wwan_flags {
+ 
+ enum qmi_wwan_quirks {
+ 	QMI_WWAN_QUIRK_DTR = 1 << 0,	/* needs "set DTR" request */
+-	QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1,	/* check num. endpoints */
+ };
+ 
+ struct qmimux_hdr {
+@@ -853,16 +852,6 @@ static const struct driver_info	qmi_wwan_info_quirk_dtr = {
+ 	.data           = QMI_WWAN_QUIRK_DTR,
+ };
+ 
+-static const struct driver_info	qmi_wwan_info_quirk_quectel_dyncfg = {
+-	.description	= "WWAN/QMI device",
+-	.flags		= FLAG_WWAN | FLAG_SEND_ZLP,
+-	.bind		= qmi_wwan_bind,
+-	.unbind		= qmi_wwan_unbind,
+-	.manage_power	= qmi_wwan_manage_power,
+-	.rx_fixup       = qmi_wwan_rx_fixup,
+-	.data           = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
+-};
+-
+ #define HUAWEI_VENDOR_ID	0x12D1
+ 
+ /* map QMI/wwan function by a fixed interface number */
+@@ -883,14 +872,18 @@ static const struct driver_info	qmi_wwan_info_quirk_quectel_dyncfg = {
+ #define QMI_GOBI_DEVICE(vend, prod) \
+ 	QMI_FIXED_INTF(vend, prod, 0)
+ 
+-/* Quectel does not use fixed interface numbers on at least some of their
+- * devices. We need to check the number of endpoints to ensure that we bind to
+- * the correct interface.
++/* Many devices have QMI and DIAG functions which are distinguishable
++ * from other vendor specific functions by class, subclass and
++ * protocol all being 0xff. The DIAG function has exactly 2 endpoints
++ * and is silently rejected when probed.
++ *
++ * This makes it possible to match dynamically numbered QMI functions
++ * as seen on e.g. many Quectel modems.
+  */
+-#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
++#define QMI_MATCH_FF_FF_FF(vend, prod) \
+ 	USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
+ 				      USB_SUBCLASS_VENDOR_SPEC, 0xff), \
+-	.driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
++	.driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
+ 
+ static const struct usb_device_id products[] = {
+ 	/* 1. CDC ECM like devices match on the control interface */
+@@ -996,10 +989,10 @@ static const struct usb_device_id products[] = {
+ 		USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
+ 		.driver_info = (unsigned long)&qmi_wwan_info,
+ 	},
+-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
+-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)},	/* Quectel EP06/EG06/EM06 */
+-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)},	/* Quectel EG12/EM12 */
+-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)},	/* Quectel RM500Q-GL */
++	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)},	/* Quectel EC25, EC20 R2.0  Mini PCIe */
++	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)},	/* Quectel EP06/EG06/EM06 */
++	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)},	/* Quectel EG12/EM12 */
++	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)},	/* Quectel RM500Q-GL */
+ 
+ 	/* 3. Combined interface devices matching on interface number */
+ 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
+@@ -1298,6 +1291,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
+ 	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
+ 	{QMI_FIXED_INTF(0x413c, 0x81d7, 0)},	/* Dell Wireless 5821e */
++	{QMI_FIXED_INTF(0x413c, 0x81d7, 1)},	/* Dell Wireless 5821e preproduction config */
+ 	{QMI_FIXED_INTF(0x413c, 0x81e0, 0)},	/* Dell Wireless 5821e with eSIM support*/
+ 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+ 	{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},	/* HP lt4120 Snapdragon X5 LTE */
+@@ -1389,7 +1383,6 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ {
+ 	struct usb_device_id *id = (struct usb_device_id *)prod;
+ 	struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
+-	const struct driver_info *info;
+ 
+ 	/* Workaround to enable dynamic IDs.  This disables usbnet
+ 	 * blacklisting functionality.  Which, if required, can be
+@@ -1425,12 +1418,8 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ 	 * different. Ignore the current interface if the number of endpoints
+ 	 * equals the number for the diag interface (two).
+ 	 */
+-	info = (void *)id->driver_info;
+-
+-	if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
+-		if (desc->bNumEndpoints == 2)
+-			return -ENODEV;
+-	}
++	if (desc->bNumEndpoints == 2)
++		return -ENODEV;
+ 
+ 	return usbnet_probe(intf, id);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 4f5571123f70..24da49615135 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -3283,6 +3283,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ 	spin_lock_init(&trans_pcie->reg_lock);
+ 	mutex_init(&trans_pcie->mutex);
+ 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
++
++	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
++						   WQ_HIGHPRI | WQ_UNBOUND, 1);
++	if (!trans_pcie->rba.alloc_wq) {
++		ret = -ENOMEM;
++		goto out_free_trans;
++	}
++	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
++
+ 	trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
+ 	if (!trans_pcie->tso_hdr_page) {
+ 		ret = -ENOMEM;
+@@ -3485,10 +3494,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ 		trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ 	 }
+ 
+-	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+-						   WQ_HIGHPRI | WQ_UNBOUND, 1);
+-	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+-
+ #ifdef CONFIG_IWLWIFI_PCIE_RTPM
+ 	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+ #else
+@@ -3501,6 +3506,8 @@ out_free_ict:
+ 	iwl_pcie_free_ict(trans);
+ out_no_pci:
+ 	free_percpu(trans_pcie->tso_hdr_page);
++	destroy_workqueue(trans_pcie->rba.alloc_wq);
++out_free_trans:
+ 	iwl_trans_free(trans);
+ 	return ERR_PTR(ret);
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
+index e39bb5c42c9a..7e526014b638 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -1294,19 +1294,6 @@ mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len)
+ 	return pos;
+ }
+ 
+-/* This function return interface number with the same bss_type.
+- */
+-static inline u8
+-mwifiex_get_intf_num(struct mwifiex_adapter *adapter, u8 bss_type)
+-{
+-	u8 i, num = 0;
+-
+-	for (i = 0; i < adapter->priv_num; i++)
+-		if (adapter->priv[i] && adapter->priv[i]->bss_type == bss_type)
+-			num++;
+-	return num;
+-}
+-
+ /*
+  * This function returns the correct private structure pointer based
+  * upon the BSS type and BSS number.
+diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
+index 6058c48d56dc..b6b7bbe168eb 100644
+--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
++++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
+@@ -897,7 +897,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ 	u8 *peer, *pos, *end;
+ 	u8 i, action, basic;
+ 	u16 cap = 0;
+-	int ie_len = 0;
++	int ies_len = 0;
+ 
+ 	if (len < (sizeof(struct ethhdr) + 3))
+ 		return;
+@@ -919,7 +919,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ 		pos = buf + sizeof(struct ethhdr) + 4;
+ 		/* payload 1+ category 1 + action 1 + dialog 1 */
+ 		cap = get_unaligned_le16(pos);
+-		ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
++		ies_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
+ 		pos += 2;
+ 		break;
+ 
+@@ -929,7 +929,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ 		/* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
+ 		pos = buf + sizeof(struct ethhdr) + 6;
+ 		cap = get_unaligned_le16(pos);
+-		ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
++		ies_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
+ 		pos += 2;
+ 		break;
+ 
+@@ -937,7 +937,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ 		if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
+ 			return;
+ 		pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
+-		ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
++		ies_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
+ 		break;
+ 	default:
+ 		mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
+@@ -950,33 +950,33 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ 
+ 	sta_ptr->tdls_cap.capab = cpu_to_le16(cap);
+ 
+-	for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
+-		if (pos + 2 + pos[1] > end)
++	for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) {
++		u8 ie_len = pos[1];
++
++		if (pos + 2 + ie_len > end)
+ 			break;
+ 
+ 		switch (*pos) {
+ 		case WLAN_EID_SUPP_RATES:
+-			if (pos[1] > 32)
++			if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
+ 				return;
+-			sta_ptr->tdls_cap.rates_len = pos[1];
+-			for (i = 0; i < pos[1]; i++)
++			sta_ptr->tdls_cap.rates_len = ie_len;
++			for (i = 0; i < ie_len; i++)
+ 				sta_ptr->tdls_cap.rates[i] = pos[i + 2];
+ 			break;
+ 
+ 		case WLAN_EID_EXT_SUPP_RATES:
+-			if (pos[1] > 32)
++			if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
+ 				return;
+ 			basic = sta_ptr->tdls_cap.rates_len;
+-			if (pos[1] > 32 - basic)
++			if (ie_len > sizeof(sta_ptr->tdls_cap.rates) - basic)
+ 				return;
+-			for (i = 0; i < pos[1]; i++)
++			for (i = 0; i < ie_len; i++)
+ 				sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
+-			sta_ptr->tdls_cap.rates_len += pos[1];
++			sta_ptr->tdls_cap.rates_len += ie_len;
+ 			break;
+ 		case WLAN_EID_HT_CAPABILITY:
+-			if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
+-				return;
+-			if (pos[1] != sizeof(struct ieee80211_ht_cap))
++			if (ie_len != sizeof(struct ieee80211_ht_cap))
+ 				return;
+ 			/* copy the ie's value into ht_capb*/
+ 			memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
+@@ -984,59 +984,45 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ 			sta_ptr->is_11n_enabled = 1;
+ 			break;
+ 		case WLAN_EID_HT_OPERATION:
+-			if (pos > end -
+-			    sizeof(struct ieee80211_ht_operation) - 2)
+-				return;
+-			if (pos[1] != sizeof(struct ieee80211_ht_operation))
++			if (ie_len != sizeof(struct ieee80211_ht_operation))
+ 				return;
+ 			/* copy the ie's value into ht_oper*/
+ 			memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
+ 			       sizeof(struct ieee80211_ht_operation));
+ 			break;
+ 		case WLAN_EID_BSS_COEX_2040:
+-			if (pos > end - 3)
+-				return;
+-			if (pos[1] != 1)
++			if (ie_len != sizeof(pos[2]))
+ 				return;
+ 			sta_ptr->tdls_cap.coex_2040 = pos[2];
+ 			break;
+ 		case WLAN_EID_EXT_CAPABILITY:
+-			if (pos > end - sizeof(struct ieee_types_header))
+-				return;
+-			if (pos[1] < sizeof(struct ieee_types_header))
++			if (ie_len < sizeof(struct ieee_types_header))
+ 				return;
+-			if (pos[1] > 8)
++			if (ie_len > 8)
+ 				return;
+ 			memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
+ 			       sizeof(struct ieee_types_header) +
+-			       min_t(u8, pos[1], 8));
++			       min_t(u8, ie_len, 8));
+ 			break;
+ 		case WLAN_EID_RSN:
+-			if (pos > end - sizeof(struct ieee_types_header))
++			if (ie_len < sizeof(struct ieee_types_header))
+ 				return;
+-			if (pos[1] < sizeof(struct ieee_types_header))
+-				return;
+-			if (pos[1] > IEEE_MAX_IE_SIZE -
++			if (ie_len > IEEE_MAX_IE_SIZE -
+ 			    sizeof(struct ieee_types_header))
+ 				return;
+ 			memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
+ 			       sizeof(struct ieee_types_header) +
+-			       min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
++			       min_t(u8, ie_len, IEEE_MAX_IE_SIZE -
+ 				     sizeof(struct ieee_types_header)));
+ 			break;
+ 		case WLAN_EID_QOS_CAPA:
+-			if (pos > end - 3)
+-				return;
+-			if (pos[1] != 1)
++			if (ie_len != sizeof(pos[2]))
+ 				return;
+ 			sta_ptr->tdls_cap.qos_info = pos[2];
+ 			break;
+ 		case WLAN_EID_VHT_OPERATION:
+ 			if (priv->adapter->is_hw_11ac_capable) {
+-				if (pos > end -
+-				    sizeof(struct ieee80211_vht_operation) - 2)
+-					return;
+-				if (pos[1] !=
++				if (ie_len !=
+ 				    sizeof(struct ieee80211_vht_operation))
+ 					return;
+ 				/* copy the ie's value into vhtoper*/
+@@ -1046,10 +1032,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ 			break;
+ 		case WLAN_EID_VHT_CAPABILITY:
+ 			if (priv->adapter->is_hw_11ac_capable) {
+-				if (pos > end -
+-				    sizeof(struct ieee80211_vht_cap) - 2)
+-					return;
+-				if (pos[1] != sizeof(struct ieee80211_vht_cap))
++				if (ie_len != sizeof(struct ieee80211_vht_cap))
+ 					return;
+ 				/* copy the ie's value into vhtcap*/
+ 				memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
+@@ -1059,9 +1042,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ 			break;
+ 		case WLAN_EID_AID:
+ 			if (priv->adapter->is_hw_11ac_capable) {
+-				if (pos > end - 4)
+-					return;
+-				if (pos[1] != 2)
++				if (ie_len != sizeof(u16))
+ 					return;
+ 				sta_ptr->tdls_cap.aid =
+ 					get_unaligned_le16((pos + 2));
+diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
+index d0207f8e68b7..dcef73eb4120 100644
+--- a/drivers/nfc/pn544/i2c.c
++++ b/drivers/nfc/pn544/i2c.c
+@@ -236,6 +236,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
+ 
+ out:
+ 	gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity);
++	usleep_range(10000, 15000);
+ }
+ 
+ static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
+diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
+index c6e710a713d3..527b87959742 100644
+--- a/drivers/pwm/pwm-omap-dmtimer.c
++++ b/drivers/pwm/pwm-omap-dmtimer.c
+@@ -259,7 +259,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
+ 	if (!timer_pdev) {
+ 		dev_err(&pdev->dev, "Unable to find Timer pdev\n");
+ 		ret = -ENODEV;
+-		goto put;
++		goto err_find_timer_pdev;
+ 	}
+ 
+ 	timer_pdata = dev_get_platdata(&timer_pdev->dev);
+@@ -267,7 +267,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
+ 		dev_dbg(&pdev->dev,
+ 			 "dmtimer pdata structure NULL, deferring probe\n");
+ 		ret = -EPROBE_DEFER;
+-		goto put;
++		goto err_platdata;
+ 	}
+ 
+ 	pdata = timer_pdata->timer_ops;
+@@ -286,19 +286,19 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
+ 	    !pdata->write_counter) {
+ 		dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
+ 		ret = -EINVAL;
+-		goto put;
++		goto err_platdata;
+ 	}
+ 
+ 	if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
+ 		dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
+ 		ret = -ENODEV;
+-		goto put;
++		goto err_timer_property;
+ 	}
+ 
+ 	dm_timer = pdata->request_by_node(timer);
+ 	if (!dm_timer) {
+ 		ret = -EPROBE_DEFER;
+-		goto put;
++		goto err_request_timer;
+ 	}
+ 
+ 	omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
+@@ -355,7 +355,14 @@ err_pwmchip_add:
+ err_alloc_omap:
+ 
+ 	pdata->free(dm_timer);
+-put:
++err_request_timer:
++
++err_timer_property:
++err_platdata:
++
++	put_device(&timer_pdev->dev);
++err_find_timer_pdev:
++
+ 	of_node_put(timer);
+ 
+ 	return ret;
+@@ -375,6 +382,8 @@ static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
+ 
+ 	omap->pdata->free(omap->dm_timer);
+ 
++	put_device(&omap->dm_timer_pdev->dev);
++
+ 	mutex_destroy(&omap->mutex);
+ 
+ 	return 0;
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
+index 7e85d238767b..1c799ddd9709 100644
+--- a/drivers/s390/crypto/ap_bus.h
++++ b/drivers/s390/crypto/ap_bus.h
+@@ -158,7 +158,7 @@ struct ap_card {
+ 	unsigned int functions;		/* AP device function bitfield. */
+ 	int queue_depth;		/* AP queue depth.*/
+ 	int id;				/* AP card number. */
+-	atomic_t total_request_count;	/* # requests ever for this AP device.*/
++	atomic64_t total_request_count;	/* # requests ever for this AP device.*/
+ };
+ 
+ #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
+@@ -175,7 +175,7 @@ struct ap_queue {
+ 	enum ap_state state;		/* State of the AP device. */
+ 	int pendingq_count;		/* # requests on pendingq list. */
+ 	int requestq_count;		/* # requests on requestq list. */
+-	int total_request_count;	/* # requests ever for this AP device.*/
++	u64 total_request_count;	/* # requests ever for this AP device.*/
+ 	int request_timeout;		/* Request timeout in jiffies. */
+ 	struct timer_list timeout;	/* Timer for request timeouts. */
+ 	struct list_head pendingq;	/* List of message sent to AP queue. */
+diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
+index 63b4cc6cd7e5..e85bfca1ed16 100644
+--- a/drivers/s390/crypto/ap_card.c
++++ b/drivers/s390/crypto/ap_card.c
+@@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev,
+ 				  char *buf)
+ {
+ 	struct ap_card *ac = to_ap_card(dev);
+-	unsigned int req_cnt;
++	u64 req_cnt;
+ 
+ 	req_cnt = 0;
+ 	spin_lock_bh(&ap_list_lock);
+-	req_cnt = atomic_read(&ac->total_request_count);
++	req_cnt = atomic64_read(&ac->total_request_count);
+ 	spin_unlock_bh(&ap_list_lock);
+-	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
++	return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ }
+ 
+ static ssize_t request_count_store(struct device *dev,
+@@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev,
+ 	for_each_ap_queue(aq, ac)
+ 		aq->total_request_count = 0;
+ 	spin_unlock_bh(&ap_list_lock);
+-	atomic_set(&ac->total_request_count, 0);
++	atomic64_set(&ac->total_request_count, 0);
+ 
+ 	return count;
+ }
+diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
+index 576ac08777c5..e1647da122f7 100644
+--- a/drivers/s390/crypto/ap_queue.c
++++ b/drivers/s390/crypto/ap_queue.c
+@@ -470,12 +470,12 @@ static ssize_t request_count_show(struct device *dev,
+ 				  char *buf)
+ {
+ 	struct ap_queue *aq = to_ap_queue(dev);
+-	unsigned int req_cnt;
++	u64 req_cnt;
+ 
+ 	spin_lock_bh(&aq->lock);
+ 	req_cnt = aq->total_request_count;
+ 	spin_unlock_bh(&aq->lock);
+-	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
++	return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
+ }
+ 
+ static ssize_t request_count_store(struct device *dev,
+@@ -667,7 +667,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+ 	list_add_tail(&ap_msg->list, &aq->requestq);
+ 	aq->requestq_count++;
+ 	aq->total_request_count++;
+-	atomic_inc(&aq->card->total_request_count);
++	atomic64_inc(&aq->card->total_request_count);
+ 	/* Send/receive as many request from the queue as possible. */
+ 	ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
+ 	spin_unlock_bh(&aq->lock);
+diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
+index b2737bfeb8bb..23c24a699cef 100644
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -190,8 +190,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
+ 	weight += atomic_read(&zc->load);
+ 	pref_weight += atomic_read(&pref_zc->load);
+ 	if (weight == pref_weight)
+-		return atomic_read(&zc->card->total_request_count) >
+-			atomic_read(&pref_zc->card->total_request_count);
++		return atomic64_read(&zc->card->total_request_count) >
++			atomic64_read(&pref_zc->card->total_request_count);
+ 	return weight > pref_weight;
+ }
+ 
+@@ -719,11 +719,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
+ 	spin_unlock(&zcrypt_list_lock);
+ }
+ 
+-static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
++static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
+ {
+ 	struct zcrypt_card *zc;
+ 	struct zcrypt_queue *zq;
+ 	int card;
++	u64 cnt;
+ 
+ 	memset(reqcnt, 0, sizeof(int) * max_adapters);
+ 	spin_lock(&zcrypt_list_lock);
+@@ -735,8 +736,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
+ 			    || card >= max_adapters)
+ 				continue;
+ 			spin_lock(&zq->queue->lock);
+-			reqcnt[card] = zq->queue->total_request_count;
++			cnt = zq->queue->total_request_count;
+ 			spin_unlock(&zq->queue->lock);
++			reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
+ 		}
+ 	}
+ 	local_bh_enable();
+@@ -907,9 +909,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 		return 0;
+ 	}
+ 	case ZCRYPT_PERDEV_REQCNT: {
+-		int *reqcnt;
++		u32 *reqcnt;
+ 
+-		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
++		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
+ 		if (!reqcnt)
+ 			return -ENOMEM;
+ 		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
+@@ -966,7 +968,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ 	}
+ 	case Z90STAT_PERDEV_REQCNT: {
+ 		/* the old ioctl supports only 64 adapters */
+-		int reqcnt[MAX_ZDEV_CARDIDS];
++		u32 reqcnt[MAX_ZDEV_CARDIDS];
+ 
+ 		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
+ 		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index aa90004f49e2..eb917e93fa72 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -2148,15 +2148,14 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
+ 
+ 	QETH_CARD_TEXT(card, 2, "vniccsch");
+ 
+-	/* do not change anything if BridgePort is enabled */
+-	if (qeth_bridgeport_is_in_use(card))
+-		return -EBUSY;
+-
+ 	/* check if characteristic and enable/disable are supported */
+ 	if (!(card->options.vnicc.sup_chars & vnicc) ||
+ 	    !(card->options.vnicc.set_char_sup & vnicc))
+ 		return -EOPNOTSUPP;
+ 
++	if (qeth_bridgeport_is_in_use(card))
++		return -EBUSY;
++
+ 	/* set enable/disable command and store wanted characteristic */
+ 	if (state) {
+ 		cmd = IPA_VNICC_ENABLE;
+@@ -2202,14 +2201,13 @@ int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
+ 
+ 	QETH_CARD_TEXT(card, 2, "vniccgch");
+ 
+-	/* do not get anything if BridgePort is enabled */
+-	if (qeth_bridgeport_is_in_use(card))
+-		return -EBUSY;
+-
+ 	/* check if characteristic is supported */
+ 	if (!(card->options.vnicc.sup_chars & vnicc))
+ 		return -EOPNOTSUPP;
+ 
++	if (qeth_bridgeport_is_in_use(card))
++		return -EBUSY;
++
+ 	/* if card is ready, query current VNICC state */
+ 	if (qeth_card_hw_is_reachable(card))
+ 		rc = qeth_l2_vnicc_query_chars(card);
+@@ -2227,15 +2225,14 @@ int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
+ 
+ 	QETH_CARD_TEXT(card, 2, "vniccsto");
+ 
+-	/* do not change anything if BridgePort is enabled */
+-	if (qeth_bridgeport_is_in_use(card))
+-		return -EBUSY;
+-
+ 	/* check if characteristic and set_timeout are supported */
+ 	if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
+ 	    !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
+ 		return -EOPNOTSUPP;
+ 
++	if (qeth_bridgeport_is_in_use(card))
++		return -EBUSY;
++
+ 	/* do we need to do anything? */
+ 	if (card->options.vnicc.learning_timeout == timeout)
+ 		return rc;
+@@ -2264,14 +2261,14 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
+ 
+ 	QETH_CARD_TEXT(card, 2, "vniccgto");
+ 
+-	/* do not get anything if BridgePort is enabled */
+-	if (qeth_bridgeport_is_in_use(card))
+-		return -EBUSY;
+-
+ 	/* check if characteristic and get_timeout are supported */
+ 	if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
+ 	    !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
+ 		return -EOPNOTSUPP;
++
++	if (qeth_bridgeport_is_in_use(card))
++		return -EBUSY;
++
+ 	/* if card is ready, get timeout. Otherwise, just return stored value */
+ 	*timeout = card->options.vnicc.learning_timeout;
+ 	if (qeth_card_hw_is_reachable(card))
+diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
+index 257e254c6137..0ec6385eb15e 100644
+--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
++++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
+@@ -47,7 +47,8 @@
+     defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+     defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+     defined(CONFIG_ARCH_TEGRA_210_SOC) || \
+-    defined(CONFIG_ARCH_TEGRA_186_SOC)
++    defined(CONFIG_ARCH_TEGRA_186_SOC) || \
++    defined(CONFIG_ARCH_TEGRA_194_SOC)
+ static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
+ {
+ 	if (WARN_ON(!fuse->base))
+diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
+index 1919f91fa756..8d16a41eacae 100644
+--- a/drivers/thermal/broadcom/brcmstb_thermal.c
++++ b/drivers/thermal/broadcom/brcmstb_thermal.c
+@@ -58,7 +58,7 @@
+ #define AVS_TMON_TP_TEST_ENABLE		0x20
+ 
+ /* Default coefficients */
+-#define AVS_TMON_TEMP_SLOPE		-487
++#define AVS_TMON_TEMP_SLOPE		487
+ #define AVS_TMON_TEMP_OFFSET		410040
+ 
+ /* HW related temperature constants */
+@@ -117,23 +117,12 @@ struct brcmstb_thermal_priv {
+ 	struct thermal_zone_device *thermal;
+ };
+ 
+-static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope,
+-				int *offset)
+-{
+-	*slope = thermal_zone_get_slope(tz);
+-	*offset = thermal_zone_get_offset(tz);
+-}
+-
+ /* Convert a HW code to a temperature reading (millidegree celsius) */
+ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
+ 					u32 code)
+ {
+-	const int val = code & AVS_TMON_TEMP_MASK;
+-	int slope, offset;
+-
+-	avs_tmon_get_coeffs(tz, &slope, &offset);
+-
+-	return slope * val + offset;
++	return (AVS_TMON_TEMP_OFFSET -
++		(int)((code & AVS_TMON_TEMP_MAX) * AVS_TMON_TEMP_SLOPE));
+ }
+ 
+ /*
+@@ -145,20 +134,18 @@ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
+ static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz,
+ 					int temp, bool low)
+ {
+-	int slope, offset;
+-
+ 	if (temp < AVS_TMON_TEMP_MIN)
+-		return AVS_TMON_TEMP_MAX; /* Maximum code value */
+-
+-	avs_tmon_get_coeffs(tz, &slope, &offset);
++		return AVS_TMON_TEMP_MAX;	/* Maximum code value */
+ 
+-	if (temp >= offset)
++	if (temp >= AVS_TMON_TEMP_OFFSET)
+ 		return 0;	/* Minimum code value */
+ 
+ 	if (low)
+-		return (u32)(DIV_ROUND_UP(offset - temp, abs(slope)));
++		return (u32)(DIV_ROUND_UP(AVS_TMON_TEMP_OFFSET - temp,
++					  AVS_TMON_TEMP_SLOPE));
+ 	else
+-		return (u32)((offset - temp) / abs(slope));
++		return (u32)((AVS_TMON_TEMP_OFFSET - temp) /
++			      AVS_TMON_TEMP_SLOPE);
+ }
+ 
+ static int brcmstb_get_temp(void *data, int *temp)
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 06ed20dd01ba..cee0274806c5 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -546,7 +546,6 @@ void __handle_sysrq(int key, bool check_mask)
+ 	 */
+ 	orig_log_level = console_loglevel;
+ 	console_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+-	pr_info("SysRq : ");
+ 
+         op_p = __sysrq_get_key_op(key);
+         if (op_p) {
+@@ -555,14 +554,15 @@ void __handle_sysrq(int key, bool check_mask)
+ 		 * should not) and is the invoked operation enabled?
+ 		 */
+ 		if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
+-			pr_cont("%s\n", op_p->action_msg);
++			pr_info("%s\n", op_p->action_msg);
+ 			console_loglevel = orig_log_level;
+ 			op_p->handler(key);
+ 		} else {
+-			pr_cont("This sysrq operation is disabled.\n");
++			pr_info("This sysrq operation is disabled.\n");
++			console_loglevel = orig_log_level;
+ 		}
+ 	} else {
+-		pr_cont("HELP : ");
++		pr_info("HELP : ");
+ 		/* Only print the help msg once per handler */
+ 		for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
+ 			if (sysrq_key_table[i]) {
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 124356dc39e1..88c8c158ec25 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -1187,10 +1187,6 @@ static int vhost_net_release(struct inode *inode, struct file *f)
+ 
+ static struct socket *get_raw_socket(int fd)
+ {
+-	struct {
+-		struct sockaddr_ll sa;
+-		char  buf[MAX_ADDR_LEN];
+-	} uaddr;
+ 	int r;
+ 	struct socket *sock = sockfd_lookup(fd, &r);
+ 
+@@ -1203,11 +1199,7 @@ static struct socket *get_raw_socket(int fd)
+ 		goto err;
+ 	}
+ 
+-	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
+-	if (r < 0)
+-		goto err;
+-
+-	if (uaddr.sa.sll_family != AF_PACKET) {
++	if (sock->sk->sk_family != AF_PACKET) {
+ 		r = -EPFNOSUPPORT;
+ 		goto err;
+ 	}
+diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
+index 56ad19608a9b..9d91ed59615d 100644
+--- a/drivers/watchdog/wdat_wdt.c
++++ b/drivers/watchdog/wdat_wdt.c
+@@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
+ 
+ 		memset(&r, 0, sizeof(r));
+ 		r.start = gas->address;
+-		r.end = r.start + gas->access_width - 1;
++		r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
+ 		if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ 			r.flags = IORESOURCE_MEM;
+ 		} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index 1d377b7f2860..130bdca9e568 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -603,7 +603,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
+ 			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
+ 		*pmode |= (S_IXUGO & (*pbits_to_set));
+ 
+-	cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
++	cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
+ 	return;
+ }
+ 
+@@ -632,7 +632,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
+ 	if (mode & S_IXUGO)
+ 		*pace_flags |= SET_FILE_EXEC_RIGHTS;
+ 
+-	cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
++	cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
+ 		 mode, *pace_flags);
+ 	return;
+ }
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 6c62ce40608a..975f800b9dd4 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3794,7 +3794,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ 	cifs_sb->mnt_gid = pvolume_info->linux_gid;
+ 	cifs_sb->mnt_file_mode = pvolume_info->file_mode;
+ 	cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
+-	cifs_dbg(FYI, "file mode: 0x%hx  dir mode: 0x%hx\n",
++	cifs_dbg(FYI, "file mode: %04ho  dir mode: %04ho\n",
+ 		 cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
+ 
+ 	cifs_sb->actimeo = pvolume_info->actimeo;
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 26154db6c87f..fbebf241dbf2 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -1579,7 +1579,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
+ 	struct TCP_Server_Info *server;
+ 	char *full_path;
+ 
+-	cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n",
++	cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
+ 		 mode, inode);
+ 
+ 	cifs_sb = CIFS_SB(inode->i_sb);
+diff --git a/fs/dax.c b/fs/dax.c
+index f0d932fa39c2..d09701aa6f2f 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1301,6 +1301,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
+ 		lockdep_assert_held(&inode->i_rwsem);
+ 	}
+ 
++	if (iocb->ki_flags & IOCB_NOWAIT)
++		flags |= IOMAP_NOWAIT;
++
+ 	while (iov_iter_count(iter)) {
+ 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
+ 				iter, dax_iomap_actor);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index cb797489b2d8..d44fc3f579e1 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2298,7 +2298,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct flex_groups **old_groups, **new_groups;
+-	int size, i;
++	int size, i, j;
+ 
+ 	if (!sbi->s_log_groups_per_flex)
+ 		return 0;
+@@ -2319,8 +2319,8 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+ 					 sizeof(struct flex_groups)),
+ 					 GFP_KERNEL);
+ 		if (!new_groups[i]) {
+-			for (i--; i >= sbi->s_flex_groups_allocated; i--)
+-				kvfree(new_groups[i]);
++			for (j = sbi->s_flex_groups_allocated; j < i; j++)
++				kvfree(new_groups[j]);
+ 			kvfree(new_groups);
+ 			ext4_msg(sb, KERN_ERR,
+ 				 "not enough memory for %d flex groups", size);
+diff --git a/fs/namei.c b/fs/namei.c
+index c00a7e1da4c0..327844fedf3d 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1368,7 +1368,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+ 			nd->path.dentry = parent;
+ 			nd->seq = seq;
+ 			if (unlikely(!path_connected(&nd->path)))
+-				return -ENOENT;
++				return -ECHILD;
+ 			break;
+ 		} else {
+ 			struct mount *mnt = real_mount(nd->path.mnt);
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index 66ceb12ebc63..2939a6cd7fec 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -528,11 +528,12 @@ typedef u64 acpi_integer;
+ #define ACPI_MAKE_RSDP_SIG(dest)        (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+ 
+ /*
+- * Algorithm to obtain access bit width.
++ * Algorithm to obtain access bit or byte width.
+  * Can be used with access_width of struct acpi_generic_address and access_size of
+  * struct acpi_resource_generic_register.
+  */
+ #define ACPI_ACCESS_BIT_WIDTH(size)     (1 << ((size) + 2))
++#define ACPI_ACCESS_BYTE_WIDTH(size)    (1 << ((size) - 1))
+ 
+ /*******************************************************************************
+  *
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 8b3e5e8a72fb..8506637f070d 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -495,7 +495,7 @@ struct hid_report_enum {
+ };
+ 
+ #define HID_MIN_BUFFER_SIZE	64		/* make sure there is at least a packet size of space */
+-#define HID_MAX_BUFFER_SIZE	4096		/* 4kb */
++#define HID_MAX_BUFFER_SIZE	8192		/* 8kb */
+ #define HID_CONTROL_FIFO_SIZE	256		/* to init devices with >100 reports */
+ #define HID_OUTPUT_FIFO_SIZE	64
+ 
+diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
+index 4618cbbe3632..99f8580344d0 100644
+--- a/include/net/flow_dissector.h
++++ b/include/net/flow_dissector.h
+@@ -5,6 +5,7 @@
+ #include <linux/types.h>
+ #include <linux/in6.h>
+ #include <linux/siphash.h>
++#include <linux/string.h>
+ #include <uapi/linux/if_ether.h>
+ 
+ /**
+@@ -306,4 +307,12 @@ static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissec
+ 	return ((char *)target_container) + flow_dissector->offset[key_id];
+ }
+ 
++static inline void
++flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
++			 struct flow_dissector_key_basic *key_basic)
++{
++	memset(key_control, 0, sizeof(*key_control));
++	memset(key_basic, 0, sizeof(*key_basic));
++}
++
+ #endif
+diff --git a/include/uapi/linux/usb/charger.h b/include/uapi/linux/usb/charger.h
+index 5f72af35b3ed..ad22079125bf 100644
+--- a/include/uapi/linux/usb/charger.h
++++ b/include/uapi/linux/usb/charger.h
+@@ -14,18 +14,18 @@
+  * ACA (Accessory Charger Adapters)
+  */
+ enum usb_charger_type {
+-	UNKNOWN_TYPE,
+-	SDP_TYPE,
+-	DCP_TYPE,
+-	CDP_TYPE,
+-	ACA_TYPE,
++	UNKNOWN_TYPE = 0,
++	SDP_TYPE = 1,
++	DCP_TYPE = 2,
++	CDP_TYPE = 3,
++	ACA_TYPE = 4,
+ };
+ 
+ /* USB charger state */
+ enum usb_charger_state {
+-	USB_CHARGER_DEFAULT,
+-	USB_CHARGER_PRESENT,
+-	USB_CHARGER_ABSENT,
++	USB_CHARGER_DEFAULT = 0,
++	USB_CHARGER_PRESENT = 1,
++	USB_CHARGER_ABSENT = 2,
+ };
+ 
+ #endif /* _UAPI__LINUX_USB_CHARGER_H */
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 2a8058764aa6..1f08c38e604a 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1106,13 +1106,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
+ 	audit_log_end(ab);
+ }
+ 
+-static int audit_set_feature(struct sk_buff *skb)
++static int audit_set_feature(struct audit_features *uaf)
+ {
+-	struct audit_features *uaf;
+ 	int i;
+ 
+ 	BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names));
+-	uaf = nlmsg_data(nlmsg_hdr(skb));
+ 
+ 	/* if there is ever a version 2 we should handle that here */
+ 
+@@ -1180,6 +1178,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ {
+ 	u32			seq;
+ 	void			*data;
++	int			data_len;
+ 	int			err;
+ 	struct audit_buffer	*ab;
+ 	u16			msg_type = nlh->nlmsg_type;
+@@ -1193,6 +1192,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 
+ 	seq  = nlh->nlmsg_seq;
+ 	data = nlmsg_data(nlh);
++	data_len = nlmsg_len(nlh);
+ 
+ 	switch (msg_type) {
+ 	case AUDIT_GET: {
+@@ -1216,7 +1216,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 		struct audit_status	s;
+ 		memset(&s, 0, sizeof(s));
+ 		/* guard against past and future API changes */
+-		memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
++		memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
+ 		if (s.mask & AUDIT_STATUS_ENABLED) {
+ 			err = audit_set_enabled(s.enabled);
+ 			if (err < 0)
+@@ -1320,7 +1320,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 			return err;
+ 		break;
+ 	case AUDIT_SET_FEATURE:
+-		err = audit_set_feature(skb);
++		if (data_len < sizeof(struct audit_features))
++			return -EINVAL;
++		err = audit_set_feature(data);
+ 		if (err)
+ 			return err;
+ 		break;
+@@ -1332,6 +1334,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 
+ 		err = audit_filter(msg_type, AUDIT_FILTER_USER);
+ 		if (err == 1) { /* match or error */
++			char *str = data;
++
+ 			err = 0;
+ 			if (msg_type == AUDIT_USER_TTY) {
+ 				err = tty_audit_push();
+@@ -1339,26 +1343,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 					break;
+ 			}
+ 			audit_log_common_recv_msg(&ab, msg_type);
+-			if (msg_type != AUDIT_USER_TTY)
++			if (msg_type != AUDIT_USER_TTY) {
++				/* ensure NULL termination */
++				str[data_len - 1] = '\0';
+ 				audit_log_format(ab, " msg='%.*s'",
+ 						 AUDIT_MESSAGE_TEXT_MAX,
+-						 (char *)data);
+-			else {
+-				int size;
+-
++						 str);
++			} else {
+ 				audit_log_format(ab, " data=");
+-				size = nlmsg_len(nlh);
+-				if (size > 0 &&
+-				    ((unsigned char *)data)[size - 1] == '\0')
+-					size--;
+-				audit_log_n_untrustedstring(ab, data, size);
++				if (data_len > 0 && str[data_len - 1] == '\0')
++					data_len--;
++				audit_log_n_untrustedstring(ab, str, data_len);
+ 			}
+ 			audit_log_end(ab);
+ 		}
+ 		break;
+ 	case AUDIT_ADD_RULE:
+ 	case AUDIT_DEL_RULE:
+-		if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
++		if (data_len < sizeof(struct audit_rule_data))
+ 			return -EINVAL;
+ 		if (audit_enabled == AUDIT_LOCKED) {
+ 			audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
+@@ -1366,7 +1368,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 			audit_log_end(ab);
+ 			return -EPERM;
+ 		}
+-		err = audit_rule_change(msg_type, seq, data, nlmsg_len(nlh));
++		err = audit_rule_change(msg_type, seq, data, data_len);
+ 		break;
+ 	case AUDIT_LIST_RULES:
+ 		err = audit_list_rules_send(skb, seq);
+@@ -1380,7 +1382,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 	case AUDIT_MAKE_EQUIV: {
+ 		void *bufp = data;
+ 		u32 sizes[2];
+-		size_t msglen = nlmsg_len(nlh);
++		size_t msglen = data_len;
+ 		char *old, *new;
+ 
+ 		err = -EINVAL;
+@@ -1456,7 +1458,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 
+ 		memset(&s, 0, sizeof(s));
+ 		/* guard against past and future API changes */
+-		memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
++		memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
+ 		/* check if new data is valid */
+ 		if ((s.enabled != 0 && s.enabled != 1) ||
+ 		    (s.log_passwd != 0 && s.log_passwd != 1))
+diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
+index 425c67e4f568..1c8a48abda80 100644
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -452,6 +452,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ 	bufp = data->buf;
+ 	for (i = 0; i < data->field_count; i++) {
+ 		struct audit_field *f = &entry->rule.fields[i];
++		u32 f_val;
+ 
+ 		err = -EINVAL;
+ 
+@@ -460,12 +461,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ 			goto exit_free;
+ 
+ 		f->type = data->fields[i];
+-		f->val = data->values[i];
++		f_val = data->values[i];
+ 
+ 		/* Support legacy tests for a valid loginuid */
+-		if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
++		if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) {
+ 			f->type = AUDIT_LOGINUID_SET;
+-			f->val = 0;
++			f_val = 0;
+ 			entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
+ 		}
+ 
+@@ -481,7 +482,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ 		case AUDIT_SUID:
+ 		case AUDIT_FSUID:
+ 		case AUDIT_OBJ_UID:
+-			f->uid = make_kuid(current_user_ns(), f->val);
++			f->uid = make_kuid(current_user_ns(), f_val);
+ 			if (!uid_valid(f->uid))
+ 				goto exit_free;
+ 			break;
+@@ -490,11 +491,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ 		case AUDIT_SGID:
+ 		case AUDIT_FSGID:
+ 		case AUDIT_OBJ_GID:
+-			f->gid = make_kgid(current_user_ns(), f->val);
++			f->gid = make_kgid(current_user_ns(), f_val);
+ 			if (!gid_valid(f->gid))
+ 				goto exit_free;
+ 			break;
+ 		case AUDIT_ARCH:
++			f->val = f_val;
+ 			entry->rule.arch_f = f;
+ 			break;
+ 		case AUDIT_SUBJ_USER:
+@@ -507,11 +509,13 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ 		case AUDIT_OBJ_TYPE:
+ 		case AUDIT_OBJ_LEV_LOW:
+ 		case AUDIT_OBJ_LEV_HIGH:
+-			str = audit_unpack_string(&bufp, &remain, f->val);
+-			if (IS_ERR(str))
++			str = audit_unpack_string(&bufp, &remain, f_val);
++			if (IS_ERR(str)) {
++				err = PTR_ERR(str);
+ 				goto exit_free;
+-			entry->rule.buflen += f->val;
+-
++			}
++			entry->rule.buflen += f_val;
++			f->lsm_str = str;
+ 			err = security_audit_rule_init(f->type, f->op, str,
+ 						       (void **)&f->lsm_rule);
+ 			/* Keep currently invalid fields around in case they
+@@ -520,68 +524,71 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
+ 				pr_warn("audit rule for LSM \'%s\' is invalid\n",
+ 					str);
+ 				err = 0;
+-			}
+-			if (err) {
+-				kfree(str);
++			} else if (err)
+ 				goto exit_free;
+-			} else
+-				f->lsm_str = str;
+ 			break;
+ 		case AUDIT_WATCH:
+-			str = audit_unpack_string(&bufp, &remain, f->val);
+-			if (IS_ERR(str))
++			str = audit_unpack_string(&bufp, &remain, f_val);
++			if (IS_ERR(str)) {
++				err = PTR_ERR(str);
+ 				goto exit_free;
+-			entry->rule.buflen += f->val;
+-
+-			err = audit_to_watch(&entry->rule, str, f->val, f->op);
++			}
++			err = audit_to_watch(&entry->rule, str, f_val, f->op);
+ 			if (err) {
+ 				kfree(str);
+ 				goto exit_free;
+ 			}
++			entry->rule.buflen += f_val;
+ 			break;
+ 		case AUDIT_DIR:
+-			str = audit_unpack_string(&bufp, &remain, f->val);
+-			if (IS_ERR(str))
++			str = audit_unpack_string(&bufp, &remain, f_val);
++			if (IS_ERR(str)) {
++				err = PTR_ERR(str);
+ 				goto exit_free;
+-			entry->rule.buflen += f->val;
+-
++			}
+ 			err = audit_make_tree(&entry->rule, str, f->op);
+ 			kfree(str);
+ 			if (err)
+ 				goto exit_free;
++			entry->rule.buflen += f_val;
+ 			break;
+ 		case AUDIT_INODE:
++			f->val = f_val;
+ 			err = audit_to_inode(&entry->rule, f);
+ 			if (err)
+ 				goto exit_free;
+ 			break;
+ 		case AUDIT_FILTERKEY:
+-			if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
++			if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN)
+ 				goto exit_free;
+-			str = audit_unpack_string(&bufp, &remain, f->val);
+-			if (IS_ERR(str))
++			str = audit_unpack_string(&bufp, &remain, f_val);
++			if (IS_ERR(str)) {
++				err = PTR_ERR(str);
+ 				goto exit_free;
+-			entry->rule.buflen += f->val;
++			}
++			entry->rule.buflen += f_val;
+ 			entry->rule.filterkey = str;
+ 			break;
+ 		case AUDIT_EXE:
+-			if (entry->rule.exe || f->val > PATH_MAX)
++			if (entry->rule.exe || f_val > PATH_MAX)
+ 				goto exit_free;
+-			str = audit_unpack_string(&bufp, &remain, f->val);
++			str = audit_unpack_string(&bufp, &remain, f_val);
+ 			if (IS_ERR(str)) {
+ 				err = PTR_ERR(str);
+ 				goto exit_free;
+ 			}
+-			entry->rule.buflen += f->val;
+-
+-			audit_mark = audit_alloc_mark(&entry->rule, str, f->val);
++			audit_mark = audit_alloc_mark(&entry->rule, str, f_val);
+ 			if (IS_ERR(audit_mark)) {
+ 				kfree(str);
+ 				err = PTR_ERR(audit_mark);
+ 				goto exit_free;
+ 			}
++			entry->rule.buflen += f_val;
+ 			entry->rule.exe = audit_mark;
+ 			break;
++		default:
++			f->val = f_val;
++			break;
+ 		}
+ 	}
+ 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index f4e4095ec7ea..00050a22f6a1 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -523,6 +523,8 @@ static void do_unoptimize_kprobes(void)
+ 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+ 	/* Loop free_list for disarming */
+ 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
++		/* Switching from detour code to origin */
++		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ 		/* Disarm probes if marked disabled */
+ 		if (kprobe_disabled(&op->kp))
+ 			arch_disarm_kprobe(&op->kp);
+@@ -662,6 +664,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
+ {
+ 	lockdep_assert_cpus_held();
+ 	arch_unoptimize_kprobe(op);
++	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ 	if (kprobe_disabled(&op->kp))
+ 		arch_disarm_kprobe(&op->kp);
+ }
+@@ -689,7 +692,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
+ 		return;
+ 	}
+ 
+-	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ 	if (!list_empty(&op->list)) {
+ 		/* Dequeue from the optimization queue */
+ 		list_del_init(&op->list);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 7f4f4ab5bfef..86ccaaf0c1bf 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -353,6 +353,18 @@ static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+ {
+ 	if (cfs_rq->on_list) {
++		struct rq *rq = rq_of(cfs_rq);
++
++		/*
++		 * With cfs_rq being unthrottled/throttled during an enqueue,
++		 * it can happen the tmp_alone_branch points the a leaf that
++		 * we finally want to del. In this case, tmp_alone_branch moves
++		 * to the prev element but it will point to rq->leaf_cfs_rq_list
++		 * at the end of the enqueue.
++		 */
++		if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
++			rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
++
+ 		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
+ 		cfs_rq->on_list = 0;
+ 	}
+@@ -363,9 +375,10 @@ static inline void assert_list_leaf_cfs_rq(struct rq *rq)
+ 	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
+ }
+ 
+-/* Iterate through all cfs_rq's on a runqueue in bottom-up order */
+-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
+-	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
++/* Iterate thr' all leaf cfs_rq's on a runqueue */
++#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
++	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
++				 leaf_cfs_rq_list)
+ 
+ /* Do the two (enqueued) entities belong to the same group ? */
+ static inline struct cfs_rq *
+@@ -462,8 +475,8 @@ static inline void assert_list_leaf_cfs_rq(struct rq *rq)
+ {
+ }
+ 
+-#define for_each_leaf_cfs_rq(rq, cfs_rq)	\
+-		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
++#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
++		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
+ 
+ static inline struct sched_entity *parent_entity(struct sched_entity *se)
+ {
+@@ -4441,6 +4454,10 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
+ 		/* adjust cfs_rq_clock_task() */
+ 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
+ 					     cfs_rq->throttled_clock_task;
++
++		/* Add cfs_rq with already running entity in the list */
++		if (cfs_rq->nr_running >= 1)
++			list_add_leaf_cfs_rq(cfs_rq);
+ 	}
+ 
+ 	return 0;
+@@ -4452,8 +4469,10 @@ static int tg_throttle_down(struct task_group *tg, void *data)
+ 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+ 
+ 	/* group is entering throttled state, stop time */
+-	if (!cfs_rq->throttle_count)
++	if (!cfs_rq->throttle_count) {
+ 		cfs_rq->throttled_clock_task = rq_clock_task(rq);
++		list_del_leaf_cfs_rq(cfs_rq);
++	}
+ 	cfs_rq->throttle_count++;
+ 
+ 	return 0;
+@@ -4556,6 +4575,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
+ 			break;
+ 	}
+ 
++	assert_list_leaf_cfs_rq(rq);
++
+ 	if (!se)
+ 		add_nr_running(rq, task_delta);
+ 
+@@ -7441,10 +7462,27 @@ static inline bool others_have_blocked(struct rq *rq)
+ 
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ 
++static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
++{
++	if (cfs_rq->load.weight)
++		return false;
++
++	if (cfs_rq->avg.load_sum)
++		return false;
++
++	if (cfs_rq->avg.util_sum)
++		return false;
++
++	if (cfs_rq->avg.runnable_load_sum)
++		return false;
++
++	return true;
++}
++
+ static void update_blocked_averages(int cpu)
+ {
+ 	struct rq *rq = cpu_rq(cpu);
+-	struct cfs_rq *cfs_rq;
++	struct cfs_rq *cfs_rq, *pos;
+ 	const struct sched_class *curr_class;
+ 	struct rq_flags rf;
+ 	bool done = true;
+@@ -7456,13 +7494,9 @@ static void update_blocked_averages(int cpu)
+ 	 * Iterates the task_group tree in a bottom up fashion, see
+ 	 * list_add_leaf_cfs_rq() for details.
+ 	 */
+-	for_each_leaf_cfs_rq(rq, cfs_rq) {
++	for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
+ 		struct sched_entity *se;
+ 
+-		/* throttled entities do not contribute to load */
+-		if (throttled_hierarchy(cfs_rq))
+-			continue;
+-
+ 		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+ 			update_tg_load_avg(cfs_rq, 0);
+ 
+@@ -7471,6 +7505,13 @@ static void update_blocked_averages(int cpu)
+ 		if (se && !skip_blocked_update(se))
+ 			update_load_avg(cfs_rq_of(se), se, 0);
+ 
++		/*
++		 * There can be a lot of idle CPU cgroups.  Don't let fully
++		 * decayed cfs_rqs linger on the list.
++		 */
++		if (cfs_rq_is_decayed(cfs_rq))
++			list_del_leaf_cfs_rq(cfs_rq);
++
+ 		/* Don't need periodic decay once load/util_avg are null */
+ 		if (cfs_rq_has_blocked(cfs_rq))
+ 			done = false;
+@@ -10256,10 +10297,10 @@ const struct sched_class fair_sched_class = {
+ #ifdef CONFIG_SCHED_DEBUG
+ void print_cfs_stats(struct seq_file *m, int cpu)
+ {
+-	struct cfs_rq *cfs_rq;
++	struct cfs_rq *cfs_rq, *pos;
+ 
+ 	rcu_read_lock();
+-	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
++	for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
+ 		print_cfs_rq(m, cpu, cfs_rq);
+ 	rcu_read_unlock();
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index e61aa1c68e99..c41f7d1ab5fa 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1549,6 +1549,7 @@ static __init int init_trace_selftests(void)
+ 
+ 	pr_info("Running postponed tracer tests:\n");
+ 
++	tracing_selftest_running = true;
+ 	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
+ 		ret = run_tracer_selftest(p->type);
+ 		/* If the test fails, then warn and remove from available_tracers */
+@@ -1567,6 +1568,7 @@ static __init int init_trace_selftests(void)
+ 		list_del(&p->list);
+ 		kfree(p);
+ 	}
++	tracing_selftest_running = false;
+ 
+  out:
+ 	mutex_unlock(&trace_types_lock);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 5bb93cf18009..146998357bed 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -173,16 +173,13 @@ static ssize_t enabled_store(struct kobject *kobj,
+ {
+ 	ssize_t ret = count;
+ 
+-	if (!memcmp("always", buf,
+-		    min(sizeof("always")-1, count))) {
++	if (sysfs_streq(buf, "always")) {
+ 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ 		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+-	} else if (!memcmp("madvise", buf,
+-			   min(sizeof("madvise")-1, count))) {
++	} else if (sysfs_streq(buf, "madvise")) {
+ 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ 		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+-	} else if (!memcmp("never", buf,
+-			   min(sizeof("never")-1, count))) {
++	} else if (sysfs_streq(buf, "never")) {
+ 		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ 	} else
+@@ -246,32 +243,27 @@ static ssize_t defrag_store(struct kobject *kobj,
+ 			    struct kobj_attribute *attr,
+ 			    const char *buf, size_t count)
+ {
+-	if (!memcmp("always", buf,
+-		    min(sizeof("always")-1, count))) {
++	if (sysfs_streq(buf, "always")) {
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+-	} else if (!memcmp("defer+madvise", buf,
+-		    min(sizeof("defer+madvise")-1, count))) {
++	} else if (sysfs_streq(buf, "defer+madvise")) {
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+-	} else if (!memcmp("defer", buf,
+-		    min(sizeof("defer")-1, count))) {
++	} else if (sysfs_streq(buf, "defer")) {
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+-	} else if (!memcmp("madvise", buf,
+-			   min(sizeof("madvise")-1, count))) {
++	} else if (sysfs_streq(buf, "madvise")) {
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+ 		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+-	} else if (!memcmp("never", buf,
+-			   min(sizeof("never")-1, count))) {
++	} else if (sysfs_streq(buf, "never")) {
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
+ 		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+@@ -2661,7 +2653,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 	unsigned long flags;
+ 	pgoff_t end;
+ 
+-	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
++	VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
+ 	VM_BUG_ON_PAGE(!PageLocked(page), page);
+ 	VM_BUG_ON_PAGE(!PageCompound(page), page);
+ 
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index 0ff3953f64aa..8916c5d9b3b3 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -968,7 +968,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
+ 
+ 	frh = nlmsg_data(nlh);
+ 	frh->family = ops->family;
+-	frh->table = rule->table;
++	frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
+ 	if (nla_put_u32(skb, FRA_TABLE, rule->table))
+ 		goto nla_put_failure;
+ 	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 7091568b9f63..5e8979c1f76d 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -981,8 +981,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
+ 					found++;
+ 					break;
+ 				}
+-				if (rt_can_ecmp)
+-					fallback_ins = fallback_ins ?: ins;
++				fallback_ins = fallback_ins ?: ins;
+ 				goto next_iter;
+ 			}
+ 
+@@ -1025,7 +1024,9 @@ next_iter:
+ 	}
+ 
+ 	if (fallback_ins && !found) {
+-		/* No ECMP-able route found, replace first non-ECMP one */
++		/* No matching route with same ecmp-able-ness found, replace
++		 * first matching route
++		 */
+ 		ins = fallback_ins;
+ 		iter = rcu_dereference_protected(*ins,
+ 				    lockdep_is_held(&rt->fib6_table->tb6_lock));
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index f8fe4c9ead4d..9c36a743ddbc 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -4514,6 +4514,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
+ 		 */
+ 		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+ 						     NLM_F_REPLACE);
++		cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+ 		nhn++;
+ 	}
+ 
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index f101a6460b44..7fa9871b1db9 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -945,16 +945,22 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+ 				elem_parse_failed = true;
+ 			break;
+ 		case WLAN_EID_VHT_OPERATION:
+-			if (elen >= sizeof(struct ieee80211_vht_operation))
++			if (elen >= sizeof(struct ieee80211_vht_operation)) {
+ 				elems->vht_operation = (void *)pos;
+-			else
+-				elem_parse_failed = true;
++				if (calc_crc)
++					crc = crc32_be(crc, pos - 2, elen + 2);
++				break;
++			}
++			elem_parse_failed = true;
+ 			break;
+ 		case WLAN_EID_OPMODE_NOTIF:
+-			if (elen > 0)
++			if (elen > 0) {
+ 				elems->opmode_notif = pos;
+-			else
+-				elem_parse_failed = true;
++				if (calc_crc)
++					crc = crc32_be(crc, pos - 2, elen + 2);
++				break;
++			}
++			elem_parse_failed = true;
+ 			break;
+ 		case WLAN_EID_MESH_ID:
+ 			elems->mesh_id = pos;
+diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
+index 5e66042ac346..1c6d15ea76d4 100644
+--- a/net/netfilter/nft_tunnel.c
++++ b/net/netfilter/nft_tunnel.c
+@@ -467,8 +467,8 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
+ static int nft_tunnel_ports_dump(struct sk_buff *skb,
+ 				 struct ip_tunnel_info *info)
+ {
+-	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, htons(info->key.tp_src)) < 0 ||
+-	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, htons(info->key.tp_dst)) < 0)
++	if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
++	    nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
+ 		return -1;
+ 
+ 	return 0;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 930d17fa906c..4a1b1bb39b4b 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1029,7 +1029,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ 	if (nlk->netlink_bind && groups) {
+ 		int group;
+ 
+-		for (group = 0; group < nlk->ngroups; group++) {
++		/* nl_groups is a u32, so cap the maximum groups we can bind */
++		for (group = 0; group < BITS_PER_TYPE(u32); group++) {
+ 			if (!test_bit(group, &groups))
+ 				continue;
+ 			err = nlk->netlink_bind(net, group + 1);
+@@ -1048,7 +1049,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ 			netlink_insert(sk, nladdr->nl_pid) :
+ 			netlink_autobind(sock);
+ 		if (err) {
+-			netlink_undo_bind(nlk->ngroups, groups, sk);
++			netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
+ 			goto unlock;
+ 		}
+ 	}
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index c006d3b89ba3..44ca31f8538d 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -196,6 +196,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ 	struct fl_flow_key skb_mkey;
+ 
+ 	list_for_each_entry_rcu(mask, &head->masks, list) {
++		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
+ 		fl_clear_masked_range(&skb_key, mask);
+ 
+ 		skb_key.indev_ifindex = skb->skb_iif;
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 559f09ac0b22..9f4d325f3a79 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -185,6 +185,16 @@ static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
+ 	return true;
+ }
+ 
++/* Check for format error in an ABORT chunk */
++static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
++{
++	struct sctp_errhdr *err;
++
++	sctp_walk_errors(err, chunk->chunk_hdr);
++
++	return (void *)err == (void *)chunk->chunk_end;
++}
++
+ /**********************************************************
+  * These are the state functions for handling chunk events.
+  **********************************************************/
+@@ -2270,6 +2280,9 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort(
+ 		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+ 		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ 
++	if (!sctp_err_chunk_valid(chunk))
++		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+ 	return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
+ }
+ 
+@@ -2313,6 +2326,9 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort(
+ 		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+ 		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ 
++	if (!sctp_err_chunk_valid(chunk))
++		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+ 	/* Stop the T2-shutdown timer. */
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ 			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+@@ -2580,6 +2596,9 @@ enum sctp_disposition sctp_sf_do_9_1_abort(
+ 		    sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+ 		return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ 
++	if (!sctp_err_chunk_valid(chunk))
++		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+ 	return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
+ }
+ 
+@@ -2597,16 +2616,8 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort(
+ 
+ 	/* See if we have an error cause code in the chunk.  */
+ 	len = ntohs(chunk->chunk_hdr->length);
+-	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
+-		struct sctp_errhdr *err;
+-
+-		sctp_walk_errors(err, chunk->chunk_hdr);
+-		if ((void *)err != (void *)chunk->chunk_end)
+-			return sctp_sf_pdiscard(net, ep, asoc, type, arg,
+-						commands);
+-
++	if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
+ 		error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
+-	}
+ 
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
+ 	/* ASSOC_FAILED will DELETE_TCB. */
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index 52241d679cc9..aa9a17ac1f7b 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -364,7 +364,9 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
+ 	dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
+ 	dclc.hdr.version = SMC_CLC_V1;
+ 	dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
+-	memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
++	if (smc->conn.lgr && !smc->conn.lgr->is_smcd)
++		memcpy(dclc.id_for_peer, local_systemid,
++		       sizeof(local_systemid));
+ 	dclc.peer_diagnosis = htonl(peer_diag_info);
+ 	memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+ 
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 0a613e0ef3bf..8f40bbfd60ea 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -506,7 +506,7 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
+ 				       u32 seq, u64 *p_record_sn)
+ {
+ 	u64 record_sn = context->hint_record_sn;
+-	struct tls_record_info *info;
++	struct tls_record_info *info, *last;
+ 
+ 	info = context->retransmit_hint;
+ 	if (!info ||
+@@ -516,6 +516,25 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
+ 		 */
+ 		info = list_first_entry(&context->records_list,
+ 					struct tls_record_info, list);
++
++		/* send the start_marker record if seq number is before the
++		 * tls offload start marker sequence number. This record is
++		 * required to handle TCP packets which are before TLS offload
++		 * started.
++		 *  And if it's not start marker, look if this seq number
++		 * belongs to the list.
++		 */
++		if (likely(!tls_record_is_start_marker(info))) {
++			/* we have the first record, get the last record to see
++			 * if this seq number belongs to the list.
++			 */
++			last = list_last_entry(&context->records_list,
++					       struct tls_record_info, list);
++
++			if (!between(seq, tls_record_start_seq(info),
++				     last->end_seq))
++				return NULL;
++		}
+ 		record_sn = context->unacked_record_sn;
+ 	}
+ 
+diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
+index a9c0f368db5d..24e18405cdb4 100644
+--- a/net/wireless/ethtool.c
++++ b/net/wireless/ethtool.c
+@@ -7,9 +7,13 @@
+ void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+ {
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
++	struct device *pdev = wiphy_dev(wdev->wiphy);
+ 
+-	strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
+-		sizeof(info->driver));
++	if (pdev->driver)
++		strlcpy(info->driver, pdev->driver->name,
++			sizeof(info->driver));
++	else
++		strlcpy(info->driver, "N/A", sizeof(info->driver));
+ 
+ 	strlcpy(info->version, init_utsname()->release, sizeof(info->version));
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 823dea187691..dfde06b8d25d 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -323,6 +323,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 	[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
+ 	[NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG },
+ 	[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
++	[NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
+ 	[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
+ 	[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
+ 	[NL80211_ATTR_PID] = { .type = NLA_U32 },
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 692d2fa31c35..ed34902022c1 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -2931,6 +2931,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
+ 
+ 				continue;
+ 			}
++			actions->ms.map = map;
+ 			top = pstack__peek(browser->pstack);
+ 			if (top == &browser->hists->dso_filter) {
+ 				/*
+diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
+index bbb0e042d8e5..59475287e2e1 100644
+--- a/tools/perf/util/stat-shadow.c
++++ b/tools/perf/util/stat-shadow.c
+@@ -209,12 +209,12 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
+ 				    int cpu, struct runtime_stat *st)
+ {
+ 	int ctx = evsel_context(counter);
++	u64 count_ns = count;
+ 
+ 	count *= counter->scale;
+ 
+-	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
+-	    perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
+-		update_runtime_stat(st, STAT_NSECS, 0, cpu, count);
++	if (perf_evsel__is_clock(counter))
++		update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
+ 	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
+ 		update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
+ 	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index c0885fb65767..7d1a7c0dc56a 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -848,6 +848,12 @@ ipv6_rt_replace_mpath()
+ 	check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024"
+ 	log_test $? 0 "Multipath with single path via multipath attribute"
+ 
++	# multipath with dev-only
++	add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
++	run_cmd "$IP -6 ro replace 2001:db8:104::/64 dev veth1"
++	check_route6 "2001:db8:104::/64 dev veth1 metric 1024"
++	log_test $? 0 "Multipath with dev-only"
++
+ 	# route replace fails - invalid nexthop 1
+ 	add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
+ 	run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3"
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index beec19fcf8cd..4e499b78569b 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2024,12 +2024,12 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ 	if (slots->generation != ghc->generation)
+ 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
+ 
+-	if (unlikely(!ghc->memslot))
+-		return kvm_write_guest(kvm, gpa, data, len);
+-
+ 	if (kvm_is_error_hva(ghc->hva))
+ 		return -EFAULT;
+ 
++	if (unlikely(!ghc->memslot))
++		return kvm_write_guest(kvm, gpa, data, len);
++
+ 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
+ 	if (r)
+ 		return -EFAULT;
+@@ -2057,12 +2057,12 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ 	if (slots->generation != ghc->generation)
+ 		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
+ 
+-	if (unlikely(!ghc->memslot))
+-		return kvm_read_guest(kvm, ghc->gpa, data, len);
+-
+ 	if (kvm_is_error_hva(ghc->hva))
+ 		return -EFAULT;
+ 
++	if (unlikely(!ghc->memslot))
++		return kvm_read_guest(kvm, ghc->gpa, data, len);
++
+ 	r = __copy_from_user(data, (void __user *)ghc->hva, len);
+ 	if (r)
+ 		return -EFAULT;


             reply	other threads:[~2020-03-05 16:23 UTC|newest]

Thread overview: 332+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-03-05 16:23 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2024-04-18  3:06 [gentoo-commits] proj/linux-patches:4.19 commit in: / Alice Ferrazzi
2023-09-02  9:59 Mike Pagano
2023-08-30 15:00 Mike Pagano
2023-08-16 16:59 Mike Pagano
2023-08-11 11:58 Mike Pagano
2023-08-08 18:43 Mike Pagano
2023-07-24 20:30 Mike Pagano
2023-06-28 10:29 Mike Pagano
2023-06-21 14:55 Alice Ferrazzi
2023-06-14 10:21 Mike Pagano
2023-06-09 11:32 Mike Pagano
2023-05-30 12:57 Mike Pagano
2023-05-17 11:14 Mike Pagano
2023-05-17 11:01 Mike Pagano
2023-05-10 17:59 Mike Pagano
2023-04-26  9:35 Alice Ferrazzi
2023-04-20 11:17 Alice Ferrazzi
2023-04-05 11:41 Mike Pagano
2023-03-22 14:16 Alice Ferrazzi
2023-03-17 10:46 Mike Pagano
2023-03-13 11:35 Alice Ferrazzi
2023-03-11 16:01 Mike Pagano
2023-03-03 12:31 Mike Pagano
2023-02-25 11:41 Mike Pagano
2023-02-24  3:19 Alice Ferrazzi
2023-02-24  3:15 Alice Ferrazzi
2023-02-22 14:51 Alice Ferrazzi
2023-02-06 12:49 Mike Pagano
2023-01-24  7:16 Alice Ferrazzi
2023-01-18 11:11 Mike Pagano
2022-12-14 12:15 Mike Pagano
2022-12-08 12:14 Alice Ferrazzi
2022-11-25 17:04 Mike Pagano
2022-11-23  9:39 Alice Ferrazzi
2022-11-10 17:58 Mike Pagano
2022-11-03 15:11 Mike Pagano
2022-11-01 19:48 Mike Pagano
2022-10-26 11:41 Mike Pagano
2022-10-05 11:59 Mike Pagano
2022-09-28  9:18 Mike Pagano
2022-09-20 12:03 Mike Pagano
2022-09-15 11:09 Mike Pagano
2022-09-05 12:06 Mike Pagano
2022-08-25 10:35 Mike Pagano
2022-08-11 12:36 Mike Pagano
2022-07-29 15:28 Mike Pagano
2022-07-21 20:12 Mike Pagano
2022-07-12 16:01 Mike Pagano
2022-07-07 16:18 Mike Pagano
2022-07-02 16:07 Mike Pagano
2022-06-25 10:22 Mike Pagano
2022-06-16 11:40 Mike Pagano
2022-06-14 16:02 Mike Pagano
2022-06-06 11:05 Mike Pagano
2022-05-27 12:24 Mike Pagano
2022-05-25 11:55 Mike Pagano
2022-05-18  9:50 Mike Pagano
2022-05-15 22:12 Mike Pagano
2022-05-12 11:30 Mike Pagano
2022-05-01 17:04 Mike Pagano
2022-04-27 12:03 Mike Pagano
2022-04-20 12:09 Mike Pagano
2022-04-15 13:11 Mike Pagano
2022-04-12 19:24 Mike Pagano
2022-03-28 10:59 Mike Pagano
2022-03-23 11:57 Mike Pagano
2022-03-16 13:27 Mike Pagano
2022-03-11 10:56 Mike Pagano
2022-03-08 18:30 Mike Pagano
2022-03-02 13:08 Mike Pagano
2022-02-26 21:14 Mike Pagano
2022-02-23 12:39 Mike Pagano
2022-02-16 12:47 Mike Pagano
2022-02-11 12:53 Mike Pagano
2022-02-11 12:46 Mike Pagano
2022-02-11 12:45 Mike Pagano
2022-02-11 12:37 Mike Pagano
2022-02-08 17:56 Mike Pagano
2022-01-29 17:45 Mike Pagano
2022-01-27 11:39 Mike Pagano
2022-01-11 13:14 Mike Pagano
2022-01-05 12:55 Mike Pagano
2021-12-29 13:11 Mike Pagano
2021-12-22 14:07 Mike Pagano
2021-12-14 10:36 Mike Pagano
2021-12-08 12:55 Mike Pagano
2021-12-01 12:51 Mike Pagano
2021-11-26 11:59 Mike Pagano
2021-11-12 14:16 Mike Pagano
2021-11-06 13:26 Mike Pagano
2021-11-02 19:32 Mike Pagano
2021-10-27 11:59 Mike Pagano
2021-10-20 13:26 Mike Pagano
2021-10-17 13:12 Mike Pagano
2021-10-13 15:00 Alice Ferrazzi
2021-10-09 21:33 Mike Pagano
2021-10-06 14:06 Mike Pagano
2021-09-26 14:13 Mike Pagano
2021-09-22 11:40 Mike Pagano
2021-09-20 22:05 Mike Pagano
2021-09-03 11:22 Mike Pagano
2021-09-03 10:08 Alice Ferrazzi
2021-08-26 14:06 Mike Pagano
2021-08-25 22:45 Mike Pagano
2021-08-25 20:41 Mike Pagano
2021-08-15 20:07 Mike Pagano
2021-08-12 11:51 Mike Pagano
2021-08-08 13:39 Mike Pagano
2021-08-04 11:54 Mike Pagano
2021-08-03 12:26 Mike Pagano
2021-07-31 10:34 Alice Ferrazzi
2021-07-28 12:37 Mike Pagano
2021-07-20 15:35 Alice Ferrazzi
2021-07-13 12:38 Mike Pagano
2021-07-11 14:45 Mike Pagano
2021-06-30 14:25 Mike Pagano
2021-06-16 12:22 Mike Pagano
2021-06-10 11:46 Mike Pagano
2021-06-03 10:32 Alice Ferrazzi
2021-05-26 12:05 Mike Pagano
2021-05-22 10:03 Mike Pagano
2021-05-07 11:40 Alice Ferrazzi
2021-04-30 19:02 Mike Pagano
2021-04-28 18:31 Mike Pagano
2021-04-28 11:44 Alice Ferrazzi
2021-04-16 11:15 Alice Ferrazzi
2021-04-14 11:22 Alice Ferrazzi
2021-04-10 13:24 Mike Pagano
2021-04-07 12:21 Mike Pagano
2021-03-30 14:17 Mike Pagano
2021-03-24 12:08 Mike Pagano
2021-03-22 15:50 Mike Pagano
2021-03-20 14:26 Mike Pagano
2021-03-17 16:21 Mike Pagano
2021-03-11 14:05 Mike Pagano
2021-03-07 15:15 Mike Pagano
2021-03-04 12:08 Mike Pagano
2021-02-23 14:31 Alice Ferrazzi
2021-02-13 15:28 Alice Ferrazzi
2021-02-10 10:03 Alice Ferrazzi
2021-02-07 14:40 Alice Ferrazzi
2021-02-03 23:43 Mike Pagano
2021-01-30 13:34 Alice Ferrazzi
2021-01-27 11:15 Mike Pagano
2021-01-23 16:36 Mike Pagano
2021-01-19 20:34 Mike Pagano
2021-01-17 16:20 Mike Pagano
2021-01-12 20:06 Mike Pagano
2021-01-09 12:57 Mike Pagano
2021-01-06 14:15 Mike Pagano
2020-12-30 12:52 Mike Pagano
2020-12-11 12:56 Mike Pagano
2020-12-08 12:06 Mike Pagano
2020-12-02 12:49 Mike Pagano
2020-11-24 14:40 Mike Pagano
2020-11-22 19:26 Mike Pagano
2020-11-18 19:56 Mike Pagano
2020-11-11 15:43 Mike Pagano
2020-11-10 13:56 Mike Pagano
2020-11-05 12:35 Mike Pagano
2020-11-01 20:29 Mike Pagano
2020-10-29 11:18 Mike Pagano
2020-10-17 10:17 Mike Pagano
2020-10-14 20:36 Mike Pagano
2020-10-07 12:50 Mike Pagano
2020-10-01 12:45 Mike Pagano
2020-09-26 22:07 Mike Pagano
2020-09-26 22:00 Mike Pagano
2020-09-24 15:58 Mike Pagano
2020-09-23 12:07 Mike Pagano
2020-09-17 15:01 Mike Pagano
2020-09-17 14:55 Mike Pagano
2020-09-12 17:59 Mike Pagano
2020-09-09 17:59 Mike Pagano
2020-09-03 11:37 Mike Pagano
2020-08-26 11:15 Mike Pagano
2020-08-21 10:49 Alice Ferrazzi
2020-08-19  9:36 Alice Ferrazzi
2020-08-12 23:36 Alice Ferrazzi
2020-08-07 19:16 Mike Pagano
2020-08-05 14:51 Thomas Deutschmann
2020-07-31 18:00 Mike Pagano
2020-07-29 12:33 Mike Pagano
2020-07-22 12:42 Mike Pagano
2020-07-16 11:17 Mike Pagano
2020-07-09 12:12 Mike Pagano
2020-07-01 12:14 Mike Pagano
2020-06-29 17:41 Mike Pagano
2020-06-25 15:07 Mike Pagano
2020-06-22 14:47 Mike Pagano
2020-06-10 21:27 Mike Pagano
2020-06-07 21:52 Mike Pagano
2020-06-03 11:41 Mike Pagano
2020-05-27 16:25 Mike Pagano
2020-05-20 11:30 Mike Pagano
2020-05-20 11:27 Mike Pagano
2020-05-14 11:30 Mike Pagano
2020-05-13 12:33 Mike Pagano
2020-05-11 22:50 Mike Pagano
2020-05-09 22:20 Mike Pagano
2020-05-06 11:46 Mike Pagano
2020-05-02 19:24 Mike Pagano
2020-04-29 17:57 Mike Pagano
2020-04-23 11:44 Mike Pagano
2020-04-21 11:15 Mike Pagano
2020-04-17 11:45 Mike Pagano
2020-04-15 17:09 Mike Pagano
2020-04-13 11:34 Mike Pagano
2020-04-02 15:24 Mike Pagano
2020-03-25 14:58 Mike Pagano
2020-03-20 11:57 Mike Pagano
2020-03-18 14:21 Mike Pagano
2020-03-16 12:23 Mike Pagano
2020-03-11 17:20 Mike Pagano
2020-02-28 16:38 Mike Pagano
2020-02-24 11:06 Mike Pagano
2020-02-19 23:45 Mike Pagano
2020-02-14 23:52 Mike Pagano
2020-02-11 16:20 Mike Pagano
2020-02-05 17:05 Mike Pagano
2020-02-01 10:37 Mike Pagano
2020-02-01 10:30 Mike Pagano
2020-01-29 16:16 Mike Pagano
2020-01-27 14:25 Mike Pagano
2020-01-23 11:07 Mike Pagano
2020-01-17 19:56 Mike Pagano
2020-01-14 22:30 Mike Pagano
2020-01-12 15:00 Mike Pagano
2020-01-09 11:15 Mike Pagano
2020-01-04 19:50 Mike Pagano
2019-12-31 17:46 Mike Pagano
2019-12-21 15:03 Mike Pagano
2019-12-17 21:56 Mike Pagano
2019-12-13 12:35 Mike Pagano
2019-12-05 12:03 Alice Ferrazzi
2019-12-01 14:06 Thomas Deutschmann
2019-11-24 15:44 Mike Pagano
2019-11-20 19:36 Mike Pagano
2019-11-12 21:00 Mike Pagano
2019-11-10 16:20 Mike Pagano
2019-11-06 14:26 Mike Pagano
2019-10-29 12:04 Mike Pagano
2019-10-17 22:27 Mike Pagano
2019-10-11 17:04 Mike Pagano
2019-10-07 17:42 Mike Pagano
2019-10-05 11:42 Mike Pagano
2019-10-01 10:10 Mike Pagano
2019-09-21 17:11 Mike Pagano
2019-09-19 12:34 Mike Pagano
2019-09-19 10:04 Mike Pagano
2019-09-16 12:26 Mike Pagano
2019-09-10 11:12 Mike Pagano
2019-09-06 17:25 Mike Pagano
2019-08-29 14:15 Mike Pagano
2019-08-25 17:37 Mike Pagano
2019-08-23 22:18 Mike Pagano
2019-08-16 12:26 Mike Pagano
2019-08-16 12:13 Mike Pagano
2019-08-09 17:45 Mike Pagano
2019-08-06 19:19 Mike Pagano
2019-08-04 16:15 Mike Pagano
2019-07-31 15:09 Mike Pagano
2019-07-31 10:22 Mike Pagano
2019-07-28 16:27 Mike Pagano
2019-07-26 11:35 Mike Pagano
2019-07-21 14:41 Mike Pagano
2019-07-14 15:44 Mike Pagano
2019-07-10 11:05 Mike Pagano
2019-07-03 11:34 Mike Pagano
2019-06-25 10:53 Mike Pagano
2019-06-22 19:06 Mike Pagano
2019-06-19 17:17 Thomas Deutschmann
2019-06-17 19:22 Mike Pagano
2019-06-15 15:07 Mike Pagano
2019-06-11 12:42 Mike Pagano
2019-06-10 19:43 Mike Pagano
2019-06-09 16:19 Mike Pagano
2019-06-04 11:11 Mike Pagano
2019-05-31 15:02 Mike Pagano
2019-05-26 17:10 Mike Pagano
2019-05-22 11:02 Mike Pagano
2019-05-16 23:03 Mike Pagano
2019-05-14 21:00 Mike Pagano
2019-05-10 19:40 Mike Pagano
2019-05-08 10:06 Mike Pagano
2019-05-05 13:42 Mike Pagano
2019-05-04 18:28 Mike Pagano
2019-05-02 10:13 Mike Pagano
2019-04-27 17:36 Mike Pagano
2019-04-20 11:09 Mike Pagano
2019-04-19 19:51 Mike Pagano
2019-04-05 21:46 Mike Pagano
2019-04-03 10:59 Mike Pagano
2019-03-27 10:22 Mike Pagano
2019-03-23 20:23 Mike Pagano
2019-03-19 16:58 Mike Pagano
2019-03-13 22:08 Mike Pagano
2019-03-10 14:15 Mike Pagano
2019-03-06 19:06 Mike Pagano
2019-03-05 18:04 Mike Pagano
2019-02-27 11:23 Mike Pagano
2019-02-23 11:35 Mike Pagano
2019-02-23  0:46 Mike Pagano
2019-02-20 11:19 Mike Pagano
2019-02-16  0:42 Mike Pagano
2019-02-15 12:39 Mike Pagano
2019-02-12 20:53 Mike Pagano
2019-02-06 17:08 Mike Pagano
2019-01-31 11:28 Mike Pagano
2019-01-26 15:09 Mike Pagano
2019-01-22 23:06 Mike Pagano
2019-01-16 23:32 Mike Pagano
2019-01-13 19:29 Mike Pagano
2019-01-09 17:54 Mike Pagano
2018-12-29 18:55 Mike Pagano
2018-12-29  1:08 Mike Pagano
2018-12-21 14:58 Mike Pagano
2018-12-19 19:09 Mike Pagano
2018-12-17 11:42 Mike Pagano
2018-12-13 11:40 Mike Pagano
2018-12-08 13:17 Mike Pagano
2018-12-08 13:17 Mike Pagano
2018-12-05 20:16 Mike Pagano
2018-12-01 15:08 Mike Pagano
2018-11-27 16:16 Mike Pagano
2018-11-23 12:42 Mike Pagano
2018-11-21 12:30 Mike Pagano
2018-11-14  0:47 Mike Pagano
2018-11-14  0:47 Mike Pagano
2018-11-13 20:44 Mike Pagano
2018-11-04 16:22 Alice Ferrazzi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1583425387.836621b120225f053a815f0660ead626b1a5ebb4.mpagano@gentoo \
    --to=mpagano@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox.