From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Sun,  7 Jun 2020 21:53:55 +0000 (UTC)
Message-ID: <1591566820.8b1238f2f0e800b2e46758123c46f6e8f4c91df7.mpagano@gentoo>

commit:     8b1238f2f0e800b2e46758123c46f6e8f4c91df7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jun  7 21:53:40 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jun  7 21:53:40 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8b1238f2

Linux patch 5.4.45

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1044_linux-5.4.45.patch | 1836 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1840 insertions(+)

diff --git a/0000_README b/0000_README
index a5f1550..60c9c07 100644
--- a/0000_README
+++ b/0000_README
@@ -219,6 +219,10 @@ Patch:  1043_linux-5.4.44.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.4.44
 
+Patch:  1044_linux-5.4.45.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.4.45
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1044_linux-5.4.45.patch b/1044_linux-5.4.45.patch
new file mode 100644
index 0000000..235a18c
--- /dev/null
+++ b/1044_linux-5.4.45.patch
@@ -0,0 +1,1836 @@
+diff --git a/Makefile b/Makefile
+index ef4697fcb8ea..d57c443d9073 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 44
++SUBLEVEL = 45
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
+index 7ee89dc61f6e..23dc002aa574 100644
+--- a/arch/arc/kernel/setup.c
++++ b/arch/arc/kernel/setup.c
+@@ -12,6 +12,7 @@
+ #include <linux/clocksource.h>
+ #include <linux/console.h>
+ #include <linux/module.h>
++#include <linux/sizes.h>
+ #include <linux/cpu.h>
+ #include <linux/of_fdt.h>
+ #include <linux/of.h>
+@@ -409,12 +410,12 @@ static void arc_chk_core_config(void)
+ 	if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
+ 		panic("Linux built with incorrect DCCM Base address\n");
+ 
+-	if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
++	if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz)
+ 		panic("Linux built with incorrect DCCM Size\n");
+ #endif
+ 
+ #ifdef CONFIG_ARC_HAS_ICCM
+-	if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
++	if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz)
+ 		panic("Linux built with incorrect ICCM Size\n");
+ #endif
+ 
+diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig
+index a931d0a256d0..a645bca5899a 100644
+--- a/arch/arc/plat-eznps/Kconfig
++++ b/arch/arc/plat-eznps/Kconfig
+@@ -6,6 +6,7 @@
+ 
+ menuconfig ARC_PLAT_EZNPS
+ 	bool "\"EZchip\" ARC dev platform"
++	depends on ISA_ARCOMPACT
+ 	select CPU_BIG_ENDIAN
+ 	select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
+ 	select EZNPS_GIC
+diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
+index 7ccc5c85c74e..000b350d4060 100644
+--- a/arch/powerpc/platforms/powernv/opal-imc.c
++++ b/arch/powerpc/platforms/powernv/opal-imc.c
+@@ -59,10 +59,6 @@ static void export_imc_mode_and_cmd(struct device_node *node,
+ 
+ 	imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
+ 
+-	/*
+-	 * Return here, either because 'imc' directory already exists,
+-	 * Or failed to create a new one.
+-	 */
+ 	if (!imc_debugfs_parent)
+ 		return;
+ 
+@@ -135,7 +131,6 @@ static int imc_get_mem_addr_nest(struct device_node *node,
+ 	}
+ 
+ 	pmu_ptr->imc_counter_mmaped = true;
+-	export_imc_mode_and_cmd(node, pmu_ptr);
+ 	kfree(base_addr_arr);
+ 	kfree(chipid_arr);
+ 	return 0;
+@@ -151,7 +146,7 @@ error:
+  *		    and domain as the inputs.
+  * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets
+  */
+-static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
++static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+ {
+ 	int ret = 0;
+ 	struct imc_pmu *pmu_ptr;
+@@ -159,27 +154,23 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+ 
+ 	/* Return for unknown domain */
+ 	if (domain < 0)
+-		return -EINVAL;
++		return NULL;
+ 
+ 	/* memory for pmu */
+ 	pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
+ 	if (!pmu_ptr)
+-		return -ENOMEM;
++		return NULL;
+ 
+ 	/* Set the domain */
+ 	pmu_ptr->domain = domain;
+ 
+ 	ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
+-	if (ret) {
+-		ret = -EINVAL;
++	if (ret)
+ 		goto free_pmu;
+-	}
+ 
+ 	if (!of_property_read_u32(parent, "offset", &offset)) {
+-		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) {
+-			ret = -EINVAL;
++		if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
+ 			goto free_pmu;
+-		}
+ 	}
+ 
+ 	/* Function to register IMC pmu */
+@@ -190,14 +181,14 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+ 		if (pmu_ptr->domain == IMC_DOMAIN_NEST)
+ 			kfree(pmu_ptr->mem_info);
+ 		kfree(pmu_ptr);
+-		return ret;
++		return NULL;
+ 	}
+ 
+-	return 0;
++	return pmu_ptr;
+ 
+ free_pmu:
+ 	kfree(pmu_ptr);
+-	return ret;
++	return NULL;
+ }
+ 
+ static void disable_nest_pmu_counters(void)
+@@ -254,6 +245,7 @@ int get_max_nest_dev(void)
+ static int opal_imc_counters_probe(struct platform_device *pdev)
+ {
+ 	struct device_node *imc_dev = pdev->dev.of_node;
++	struct imc_pmu *pmu;
+ 	int pmu_count = 0, domain;
+ 	bool core_imc_reg = false, thread_imc_reg = false;
+ 	u32 type;
+@@ -269,6 +261,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
++		pmu = NULL;
+ 		if (of_property_read_u32(imc_dev, "type", &type)) {
+ 			pr_warn("IMC Device without type property\n");
+ 			continue;
+@@ -300,9 +293,13 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
+ 			break;
+ 		}
+ 
+-		if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
+-			if (domain == IMC_DOMAIN_NEST)
++		pmu = imc_pmu_create(imc_dev, pmu_count, domain);
++		if (pmu != NULL) {
++			if (domain == IMC_DOMAIN_NEST) {
++				if (!imc_debugfs_parent)
++					export_imc_mode_and_cmd(imc_dev, pmu);
+ 				pmu_count++;
++			}
+ 			if (domain == IMC_DOMAIN_CORE)
+ 				core_imc_reg = true;
+ 			if (domain == IMC_DOMAIN_THREAD)
+@@ -310,10 +307,6 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	/* If none of the nest units are registered, remove debugfs interface */
+-	if (pmu_count == 0)
+-		debugfs_remove_recursive(imc_debugfs_parent);
+-
+ 	/* If core imc is not registered, unregister thread-imc */
+ 	if (!core_imc_reg && thread_imc_reg)
+ 		unregister_thread_imc();
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 8057aafd5f5e..6d130c89fbd8 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -25,6 +25,7 @@
+ #include <linux/nmi.h>
+ #include <linux/ctype.h>
+ #include <linux/highmem.h>
++#include <linux/security.h>
+ 
+ #include <asm/debugfs.h>
+ #include <asm/ptrace.h>
+@@ -187,6 +188,8 @@ static void dump_tlb_44x(void);
+ static void dump_tlb_book3e(void);
+ #endif
+ 
++static void clear_all_bpt(void);
++
+ #ifdef CONFIG_PPC64
+ #define REG		"%.16lx"
+ #else
+@@ -283,10 +286,38 @@ Commands:\n\
+ "  U	show uptime information\n"
+ "  ?	help\n"
+ "  # n	limit output to n lines per page (for dp, dpa, dl)\n"
+-"  zr	reboot\n\
+-  zh	halt\n"
++"  zr	reboot\n"
++"  zh	halt\n"
+ ;
+ 
++#ifdef CONFIG_SECURITY
++static bool xmon_is_locked_down(void)
++{
++	static bool lockdown;
++
++	if (!lockdown) {
++		lockdown = !!security_locked_down(LOCKDOWN_XMON_RW);
++		if (lockdown) {
++			printf("xmon: Disabled due to kernel lockdown\n");
++			xmon_is_ro = true;
++		}
++	}
++
++	if (!xmon_is_ro) {
++		xmon_is_ro = !!security_locked_down(LOCKDOWN_XMON_WR);
++		if (xmon_is_ro)
++			printf("xmon: Read-only due to kernel lockdown\n");
++	}
++
++	return lockdown;
++}
++#else /* CONFIG_SECURITY */
++static inline bool xmon_is_locked_down(void)
++{
++	return false;
++}
++#endif
++
+ static struct pt_regs *xmon_regs;
+ 
+ static inline void sync(void)
+@@ -438,7 +469,10 @@ static bool wait_for_other_cpus(int ncpus)
+ 
+ 	return false;
+ }
+-#endif /* CONFIG_SMP */
++#else /* CONFIG_SMP */
++static inline void get_output_lock(void) {}
++static inline void release_output_lock(void) {}
++#endif
+ 
+ static inline int unrecoverable_excp(struct pt_regs *regs)
+ {
+@@ -455,6 +489,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
+ 	int cmd = 0;
+ 	struct bpt *bp;
+ 	long recurse_jmp[JMP_BUF_LEN];
++	bool locked_down;
+ 	unsigned long offset;
+ 	unsigned long flags;
+ #ifdef CONFIG_SMP
+@@ -465,6 +500,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
+ 	local_irq_save(flags);
+ 	hard_irq_disable();
+ 
++	locked_down = xmon_is_locked_down();
++
+ 	if (!fromipi) {
+ 		tracing_enabled = tracing_is_on();
+ 		tracing_off();
+@@ -518,7 +555,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
+ 
+ 	if (!fromipi) {
+ 		get_output_lock();
+-		excprint(regs);
++		if (!locked_down)
++			excprint(regs);
+ 		if (bp) {
+ 			printf("cpu 0x%x stopped at breakpoint 0x%tx (",
+ 			       cpu, BP_NUM(bp));
+@@ -570,10 +608,14 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
+ 		}
+ 		remove_bpts();
+ 		disable_surveillance();
+-		/* for breakpoint or single step, print the current instr. */
+-		if (bp || TRAP(regs) == 0xd00)
+-			ppc_inst_dump(regs->nip, 1, 0);
+-		printf("enter ? for help\n");
++
++		if (!locked_down) {
++			/* for breakpoint or single step, print curr insn */
++			if (bp || TRAP(regs) == 0xd00)
++				ppc_inst_dump(regs->nip, 1, 0);
++			printf("enter ? for help\n");
++		}
++
+ 		mb();
+ 		xmon_gate = 1;
+ 		barrier();
+@@ -597,8 +639,9 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
+ 			spin_cpu_relax();
+ 			touch_nmi_watchdog();
+ 		} else {
+-			cmd = cmds(regs);
+-			if (cmd != 0) {
++			if (!locked_down)
++				cmd = cmds(regs);
++			if (locked_down || cmd != 0) {
+ 				/* exiting xmon */
+ 				insert_bpts();
+ 				xmon_gate = 0;
+@@ -635,13 +678,16 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
+ 			       "can't continue\n");
+ 		remove_bpts();
+ 		disable_surveillance();
+-		/* for breakpoint or single step, print the current instr. */
+-		if (bp || TRAP(regs) == 0xd00)
+-			ppc_inst_dump(regs->nip, 1, 0);
+-		printf("enter ? for help\n");
++		if (!locked_down) {
++			/* for breakpoint or single step, print current insn */
++			if (bp || TRAP(regs) == 0xd00)
++				ppc_inst_dump(regs->nip, 1, 0);
++			printf("enter ? for help\n");
++		}
+ 	}
+ 
+-	cmd = cmds(regs);
++	if (!locked_down)
++		cmd = cmds(regs);
+ 
+ 	insert_bpts();
+ 	in_xmon = 0;
+@@ -670,7 +716,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
+ 		}
+ 	}
+ #endif
+-	insert_cpu_bpts();
++	if (locked_down)
++		clear_all_bpt();
++	else
++		insert_cpu_bpts();
+ 
+ 	touch_nmi_watchdog();
+ 	local_irq_restore(flags);
+@@ -3761,6 +3810,11 @@ static void xmon_init(int enable)
+ #ifdef CONFIG_MAGIC_SYSRQ
+ static void sysrq_handle_xmon(int key)
+ {
++	if (xmon_is_locked_down()) {
++		clear_all_bpt();
++		xmon_init(0);
++		return;
++	}
+ 	/* ensure xmon is enabled */
+ 	xmon_init(1);
+ 	debugger(get_irq_regs());
+@@ -3782,7 +3836,6 @@ static int __init setup_xmon_sysrq(void)
+ device_initcall(setup_xmon_sysrq);
+ #endif /* CONFIG_MAGIC_SYSRQ */
+ 
+-#ifdef CONFIG_DEBUG_FS
+ static void clear_all_bpt(void)
+ {
+ 	int i;
+@@ -3800,18 +3853,22 @@ static void clear_all_bpt(void)
+ 		iabr = NULL;
+ 		dabr.enabled = 0;
+ 	}
+-
+-	printf("xmon: All breakpoints cleared\n");
+ }
+ 
++#ifdef CONFIG_DEBUG_FS
+ static int xmon_dbgfs_set(void *data, u64 val)
+ {
+ 	xmon_on = !!val;
+ 	xmon_init(xmon_on);
+ 
+ 	/* make sure all breakpoints removed when disabling */
+-	if (!xmon_on)
++	if (!xmon_on) {
+ 		clear_all_bpt();
++		get_output_lock();
++		printf("xmon: All breakpoints cleared\n");
++		release_output_lock();
++	}
++
+ 	return 0;
+ }
+ 
+@@ -3837,7 +3894,11 @@ static int xmon_early __initdata;
+ 
+ static int __init early_parse_xmon(char *p)
+ {
+-	if (!p || strncmp(p, "early", 5) == 0) {
++	if (xmon_is_locked_down()) {
++		xmon_init(0);
++		xmon_early = 0;
++		xmon_on = 0;
++	} else if (!p || strncmp(p, "early", 5) == 0) {
+ 		/* just "xmon" is equivalent to "xmon=early" */
+ 		xmon_init(1);
+ 		xmon_early = 1;
+diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
+index 3431b2d5e334..f942341429b1 100644
+--- a/arch/s390/kernel/mcount.S
++++ b/arch/s390/kernel/mcount.S
+@@ -41,6 +41,7 @@ EXPORT_SYMBOL(_mcount)
+ ENTRY(ftrace_caller)
+ 	.globl	ftrace_regs_caller
+ 	.set	ftrace_regs_caller,ftrace_caller
++	stg	%r14,(__SF_GPRS+8*8)(%r15)	# save traced function caller
+ 	lgr	%r1,%r15
+ #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
+ 	aghi	%r0,MCOUNT_RETURN_FIXUP
+diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
+index 5674710a4841..7dfae86afa47 100644
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -159,10 +159,13 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ 		rste &= ~_SEGMENT_ENTRY_NOEXEC;
+ 
+ 	/* Set correct table type for 2G hugepages */
+-	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+-		rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
+-	else
++	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
++		if (likely(pte_present(pte)))
++			rste |= _REGION3_ENTRY_LARGE;
++		rste |= _REGION_ENTRY_TYPE_R3;
++	} else if (likely(pte_present(pte)))
+ 		rste |= _SEGMENT_ENTRY_LARGE;
++
+ 	clear_huge_pte_skeys(mm, rste);
+ 	pte_val(*ptep) = rste;
+ }
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 1e6bb4c25334..ea85f23d9e22 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -253,6 +253,7 @@ static inline int pmd_large(pmd_t pte)
+ }
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
++/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */
+ static inline int pmd_trans_huge(pmd_t pmd)
+ {
+ 	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
+index b8ef8557d4b3..2a36902d418c 100644
+--- a/arch/x86/mm/mmio-mod.c
++++ b/arch/x86/mm/mmio-mod.c
+@@ -372,7 +372,7 @@ static void enter_uniprocessor(void)
+ 	int cpu;
+ 	int err;
+ 
+-	if (downed_cpus == NULL &&
++	if (!cpumask_available(downed_cpus) &&
+ 	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
+ 		pr_notice("Failed to allocate mask\n");
+ 		goto out;
+@@ -402,7 +402,7 @@ static void leave_uniprocessor(void)
+ 	int cpu;
+ 	int err;
+ 
+-	if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0)
++	if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
+ 		return;
+ 	pr_notice("Re-enabling CPUs...\n");
+ 	for_each_cpu(cpu, downed_cpus) {
+diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
+index 3d7fdea872f8..2553e05e0725 100644
+--- a/drivers/block/null_blk_zoned.c
++++ b/drivers/block/null_blk_zoned.c
+@@ -20,6 +20,10 @@ int null_zone_init(struct nullb_device *dev)
+ 		pr_err("zone_size must be power-of-two\n");
+ 		return -EINVAL;
+ 	}
++	if (dev->zone_size > dev->size) {
++		pr_err("Zone size larger than device capacity\n");
++		return -EINVAL;
++	}
+ 
+ 	dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
+ 	dev->nr_zones = dev_size >>
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index ea2849338d6c..9b69e55ad701 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -191,10 +191,11 @@ static const struct edid_quirk {
+ 	{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+ 	{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
+ 
+-	/* Oculus Rift DK1, DK2, and CV1 VR Headsets */
++	/* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
+ 	{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
+ 	{ "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
+ 	{ "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
++	{ "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP },
+ 
+ 	/* Windows Mixed Reality Headsets */
+ 	{ "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 9b15ac4f2fb6..4ab6531a4a74 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -7218,11 +7218,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ 		intel_connector->get_hw_state = intel_connector_get_hw_state;
+ 
+ 	/* init MST on ports that can support it */
+-	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
+-	    (port == PORT_B || port == PORT_C ||
+-	     port == PORT_D || port == PORT_F))
+-		intel_dp_mst_encoder_init(intel_dig_port,
+-					  intel_connector->base.base.id);
++	intel_dp_mst_encoder_init(intel_dig_port,
++				  intel_connector->base.base.id);
+ 
+ 	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
+ 		intel_dp_aux_fini(intel_dp);
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index 600873c796d0..74d45a0eecb8 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -653,21 +653,31 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
+ int
+ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
+ {
++	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
+ 	struct intel_dp *intel_dp = &intel_dig_port->dp;
+-	struct drm_device *dev = intel_dig_port->base.base.dev;
++	enum port port = intel_dig_port->base.port;
+ 	int ret;
+ 
+-	intel_dp->can_mst = true;
++	if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
++		return 0;
++
++	if (INTEL_GEN(i915) < 12 && port == PORT_A)
++		return 0;
++
++	if (INTEL_GEN(i915) < 11 && port == PORT_E)
++		return 0;
++
+ 	intel_dp->mst_mgr.cbs = &mst_cbs;
+ 
+ 	/* create encoders */
+ 	intel_dp_create_fake_mst_encoders(intel_dig_port);
+-	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev,
++	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
+ 					   &intel_dp->aux, 16, 3, conn_base_id);
+-	if (ret) {
+-		intel_dp->can_mst = false;
++	if (ret)
+ 		return ret;
+-	}
++
++	intel_dp->can_mst = true;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 03c720b47306..39e4da7468e1 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -69,6 +69,7 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_ASUS_CUSTOM_UP		BIT(17)
+ #define MT_QUIRK_WIN8_PTP_BUTTONS	BIT(18)
+ #define MT_QUIRK_SEPARATE_APP_REPORT	BIT(19)
++#define MT_QUIRK_FORCE_MULTI_INPUT	BIT(20)
+ 
+ #define MT_INPUTMODE_TOUCHSCREEN	0x02
+ #define MT_INPUTMODE_TOUCHPAD		0x03
+@@ -189,6 +190,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
+ #define MT_CLS_WIN_8				0x0012
+ #define MT_CLS_EXPORT_ALL_INPUTS		0x0013
+ #define MT_CLS_WIN_8_DUAL			0x0014
++#define MT_CLS_WIN_8_FORCE_MULTI_INPUT		0x0015
+ 
+ /* vendor specific classes */
+ #define MT_CLS_3M				0x0101
+@@ -279,6 +281,15 @@ static const struct mt_class mt_classes[] = {
+ 			MT_QUIRK_CONTACT_CNT_ACCURATE |
+ 			MT_QUIRK_WIN8_PTP_BUTTONS,
+ 		.export_all_inputs = true },
++	{ .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++		.quirks = MT_QUIRK_ALWAYS_VALID |
++			MT_QUIRK_IGNORE_DUPLICATES |
++			MT_QUIRK_HOVERING |
++			MT_QUIRK_CONTACT_CNT_ACCURATE |
++			MT_QUIRK_STICKY_FINGERS |
++			MT_QUIRK_WIN8_PTP_BUTTONS |
++			MT_QUIRK_FORCE_MULTI_INPUT,
++		.export_all_inputs = true },
+ 
+ 	/*
+ 	 * vendor specific classes
+@@ -1714,6 +1725,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
+ 		hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ 
++	if (mtclass->quirks & MT_QUIRK_FORCE_MULTI_INPUT) {
++		hdev->quirks &= ~HID_QUIRK_INPUT_PER_APP;
++		hdev->quirks |= HID_QUIRK_MULTI_INPUT;
++	}
++
+ 	timer_setup(&td->release_timer, mt_expired_timeout, 0);
+ 
+ 	ret = hid_parse(hdev);
+@@ -1926,6 +1942,11 @@ static const struct hid_device_id mt_devices[] = {
+ 		MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ 			USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
+ 
++	/* Elan devices */
++	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			USB_VENDOR_ID_ELAN, 0x313a) },
++
+ 	/* Elitegroup panel */
+ 	{ .driver_data = MT_CLS_SERIAL,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
+@@ -2056,6 +2077,11 @@ static const struct hid_device_id mt_devices[] = {
+ 		MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
+ 			USB_DEVICE_ID_MTP_STM)},
+ 
++	/* Synaptics devices */
++	{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			USB_VENDOR_ID_SYNAPTICS, 0xce08) },
++
+ 	/* TopSeed panels */
+ 	{ .driver_data = MT_CLS_TOPSEED,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index 4c6ed6ef31f1..2f073f536070 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -867,6 +867,23 @@ static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
+ 	if (sc->quirks & PS3REMOTE)
+ 		return ps3remote_fixup(hdev, rdesc, rsize);
+ 
++	/*
++	 * Some knock-off USB dongles incorrectly report their button count
++	 * as 13 instead of 16 causing three non-functional buttons.
++	 */
++	if ((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize >= 45 &&
++		/* Report Count (13) */
++		rdesc[23] == 0x95 && rdesc[24] == 0x0D &&
++		/* Usage Maximum (13) */
++		rdesc[37] == 0x29 && rdesc[38] == 0x0D &&
++		/* Report Count (3) */
++		rdesc[43] == 0x95 && rdesc[44] == 0x03) {
++		hid_info(hdev, "Fixing up USB dongle report descriptor\n");
++		rdesc[24] = 0x10;
++		rdesc[38] = 0x10;
++		rdesc[44] = 0x00;
++	}
++
+ 	return rdesc;
+ }
+ 
+diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+index a66f08041a1a..ec142bc8c1da 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+@@ -389,6 +389,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ 		},
+ 		.driver_data = (void *)&sipodev_desc
+ 	},
++	{
++		.ident = "Schneider SCL142ALM",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SCHNEIDER"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SCL142ALM"),
++		},
++		.driver_data = (void *)&sipodev_desc
++	},
+ 	{ }	/* Terminate list */
+ };
+ 
+diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
+index 92d2c706c2a7..a60042431370 100644
+--- a/drivers/i2c/busses/i2c-altera.c
++++ b/drivers/i2c/busses/i2c-altera.c
+@@ -70,6 +70,7 @@
+  * @isr_mask: cached copy of local ISR enables.
+  * @isr_status: cached copy of local ISR status.
+  * @lock: spinlock for IRQ synchronization.
++ * @isr_mutex: mutex for IRQ thread.
+  */
+ struct altr_i2c_dev {
+ 	void __iomem *base;
+@@ -86,6 +87,7 @@ struct altr_i2c_dev {
+ 	u32 isr_mask;
+ 	u32 isr_status;
+ 	spinlock_t lock;	/* IRQ synchronization */
++	struct mutex isr_mutex;
+ };
+ 
+ static void
+@@ -245,10 +247,11 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
+ 	struct altr_i2c_dev *idev = _dev;
+ 	u32 status = idev->isr_status;
+ 
++	mutex_lock(&idev->isr_mutex);
+ 	if (!idev->msg) {
+ 		dev_warn(idev->dev, "unexpected interrupt\n");
+ 		altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ);
+-		return IRQ_HANDLED;
++		goto out;
+ 	}
+ 	read = (idev->msg->flags & I2C_M_RD) != 0;
+ 
+@@ -301,6 +304,8 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
+ 		complete(&idev->msg_complete);
+ 		dev_dbg(idev->dev, "Message Complete\n");
+ 	}
++out:
++	mutex_unlock(&idev->isr_mutex);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -312,6 +317,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
+ 	u32 value;
+ 	u8 addr = i2c_8bit_addr_from_msg(msg);
+ 
++	mutex_lock(&idev->isr_mutex);
+ 	idev->msg = msg;
+ 	idev->msg_len = msg->len;
+ 	idev->buf = msg->buf;
+@@ -336,6 +342,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
+ 		altr_i2c_int_enable(idev, imask, true);
+ 		altr_i2c_fill_tx_fifo(idev);
+ 	}
++	mutex_unlock(&idev->isr_mutex);
+ 
+ 	time_left = wait_for_completion_timeout(&idev->msg_complete,
+ 						ALTR_I2C_XFER_TIMEOUT);
+@@ -409,6 +416,7 @@ static int altr_i2c_probe(struct platform_device *pdev)
+ 	idev->dev = &pdev->dev;
+ 	init_completion(&idev->msg_complete);
+ 	spin_lock_init(&idev->lock);
++	mutex_init(&idev->isr_mutex);
+ 
+ 	ret = device_property_read_u32(idev->dev, "fifo-size",
+ 				       &idev->fifo_size);
+diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
+index b462eaca1ee3..4494dab8c3d8 100644
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -360,7 +360,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
+ 	xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);
+ 
+ 	if (IS_IWARP(dev)) {
+-		xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
++		xa_init(&dev->qps);
+ 		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+ 	}
+ 
+diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
+index 0cfd849b13d6..8e927f6c1520 100644
+--- a/drivers/infiniband/hw/qedr/qedr.h
++++ b/drivers/infiniband/hw/qedr/qedr.h
+@@ -40,6 +40,7 @@
+ #include <linux/qed/qed_rdma_if.h>
+ #include <linux/qed/qede_rdma.h>
+ #include <linux/qed/roce_common.h>
++#include <linux/completion.h>
+ #include "qedr_hsi_rdma.h"
+ 
+ #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
+@@ -377,10 +378,20 @@ enum qedr_qp_err_bitmap {
+ 	QEDR_QP_ERR_RQ_PBL_FULL = 32,
+ };
+ 
++enum qedr_qp_create_type {
++	QEDR_QP_CREATE_NONE,
++	QEDR_QP_CREATE_USER,
++	QEDR_QP_CREATE_KERNEL,
++};
++
++enum qedr_iwarp_cm_flags {
++	QEDR_IWARP_CM_WAIT_FOR_CONNECT    = BIT(0),
++	QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1),
++};
++
+ struct qedr_qp {
+ 	struct ib_qp ibqp;	/* must be first */
+ 	struct qedr_dev *dev;
+-	struct qedr_iw_ep *ep;
+ 	struct qedr_qp_hwq_info sq;
+ 	struct qedr_qp_hwq_info rq;
+ 
+@@ -395,6 +406,7 @@ struct qedr_qp {
+ 	u32 id;
+ 	struct qedr_pd *pd;
+ 	enum ib_qp_type qp_type;
++	enum qedr_qp_create_type create_type;
+ 	struct qed_rdma_qp *qed_qp;
+ 	u32 qp_id;
+ 	u16 icid;
+@@ -437,8 +449,11 @@ struct qedr_qp {
+ 	/* Relevant to qps created from user space only (applications) */
+ 	struct qedr_userq usq;
+ 	struct qedr_userq urq;
+-	atomic_t refcnt;
+-	bool destroyed;
++
++	/* synchronization objects used with iwarp ep */
++	struct kref refcnt;
++	struct completion iwarp_cm_comp;
++	unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
+ };
+ 
+ struct qedr_ah {
+@@ -531,7 +546,7 @@ struct qedr_iw_ep {
+ 	struct iw_cm_id	*cm_id;
+ 	struct qedr_qp	*qp;
+ 	void		*qed_context;
+-	u8		during_connect;
++	struct kref	refcnt;
+ };
+ 
+ static inline
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index 22881d4442b9..5e9732990be5 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -79,6 +79,27 @@ qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
+ 	}
+ }
+ 
++static void qedr_iw_free_qp(struct kref *ref)
++{
++	struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
++
++	kfree(qp);
++}
++
++static void
++qedr_iw_free_ep(struct kref *ref)
++{
++	struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt);
++
++	if (ep->qp)
++		kref_put(&ep->qp->refcnt, qedr_iw_free_qp);
++
++	if (ep->cm_id)
++		ep->cm_id->rem_ref(ep->cm_id);
++
++	kfree(ep);
++}
++
+ static void
+ qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
+ {
+@@ -93,6 +114,7 @@ qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
+ 
+ 	ep->dev = dev;
+ 	ep->qed_context = params->ep_context;
++	kref_init(&ep->refcnt);
+ 
+ 	memset(&event, 0, sizeof(event));
+ 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
+@@ -141,12 +163,10 @@ qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
+ {
+ 	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ 
+-	if (ep->cm_id) {
++	if (ep->cm_id)
+ 		qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);
+ 
+-		ep->cm_id->rem_ref(ep->cm_id);
+-		ep->cm_id = NULL;
+-	}
++	kref_put(&ep->refcnt, qedr_iw_free_ep);
+ }
+ 
+ static void
+@@ -186,11 +206,13 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
+ 	struct qedr_qp *qp = ep->qp;
+ 	struct iw_cm_event event;
+ 
+-	if (qp->destroyed) {
+-		kfree(dwork);
+-		qedr_iw_qp_rem_ref(&qp->ibqp);
+-		return;
+-	}
++	/* The qp won't be released until we release the ep.
++	 * the ep's refcnt was increased before calling this
++	 * function, therefore it is safe to access qp
++	 */
++	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
++			     &qp->iwarp_cm_flags))
++		goto out;
+ 
+ 	memset(&event, 0, sizeof(event));
+ 	event.status = dwork->status;
+@@ -204,7 +226,6 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
+ 	else
+ 		qp_params.new_state = QED_ROCE_QP_STATE_SQD;
+ 
+-	kfree(dwork);
+ 
+ 	if (ep->cm_id)
+ 		ep->cm_id->event_handler(ep->cm_id, &event);
+@@ -214,7 +235,10 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
+ 
+ 	dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
+ 
+-	qedr_iw_qp_rem_ref(&qp->ibqp);
++	complete(&ep->qp->iwarp_cm_comp);
++out:
++	kfree(dwork);
++	kref_put(&ep->refcnt, qedr_iw_free_ep);
+ }
+ 
+ static void
+@@ -224,13 +248,17 @@ qedr_iw_disconnect_event(void *context,
+ 	struct qedr_discon_work *work;
+ 	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ 	struct qedr_dev *dev = ep->dev;
+-	struct qedr_qp *qp = ep->qp;
+ 
+ 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ 	if (!work)
+ 		return;
+ 
+-	qedr_iw_qp_add_ref(&qp->ibqp);
++	/* We can't get a close event before disconnect, but since
++	 * we're scheduling a work queue we need to make sure close
++	 * won't delete the ep, so we increase the refcnt
++	 */
++	kref_get(&ep->refcnt);
++
+ 	work->ep = ep;
+ 	work->event = params->event;
+ 	work->status = params->status;
+@@ -252,16 +280,30 @@ qedr_iw_passive_complete(void *context,
+ 	if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
+ 		DP_DEBUG(dev, QEDR_MSG_IWARP,
+ 			 "PASSIVE connection refused releasing ep...\n");
+-		kfree(ep);
++		kref_put(&ep->refcnt, qedr_iw_free_ep);
+ 		return;
+ 	}
+ 
++	complete(&ep->qp->iwarp_cm_comp);
+ 	qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);
+ 
+ 	if (params->status < 0)
+ 		qedr_iw_close_event(context, params);
+ }
+ 
++static void
++qedr_iw_active_complete(void *context,
++			struct qed_iwarp_cm_event_params *params)
++{
++	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
++
++	complete(&ep->qp->iwarp_cm_comp);
++	qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY);
++
++	if (params->status < 0)
++		kref_put(&ep->refcnt, qedr_iw_free_ep);
++}
++
+ static int
+ qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
+ {
+@@ -288,27 +330,15 @@ qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
+ 		qedr_iw_mpa_reply(context, params);
+ 		break;
+ 	case QED_IWARP_EVENT_PASSIVE_COMPLETE:
+-		ep->during_connect = 0;
+ 		qedr_iw_passive_complete(context, params);
+ 		break;
+-
+ 	case QED_IWARP_EVENT_ACTIVE_COMPLETE:
+-		ep->during_connect = 0;
+-		qedr_iw_issue_event(context,
+-				    params,
+-				    IW_CM_EVENT_CONNECT_REPLY);
+-		if (params->status < 0) {
+-			struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+-
+-			ep->cm_id->rem_ref(ep->cm_id);
+-			ep->cm_id = NULL;
+-		}
++		qedr_iw_active_complete(context, params);
+ 		break;
+ 	case QED_IWARP_EVENT_DISCONNECT:
+ 		qedr_iw_disconnect_event(context, params);
+ 		break;
+ 	case QED_IWARP_EVENT_CLOSE:
+-		ep->during_connect = 0;
+ 		qedr_iw_close_event(context, params);
+ 		break;
+ 	case QED_IWARP_EVENT_RQ_EMPTY:
+@@ -476,6 +506,19 @@ qedr_addr6_resolve(struct qedr_dev *dev,
+ 	return rc;
+ }
+ 
++struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn)
++{
++	struct qedr_qp *qp;
++
++	xa_lock(&dev->qps);
++	qp = xa_load(&dev->qps, qpn);
++	if (qp)
++		kref_get(&qp->refcnt);
++	xa_unlock(&dev->qps);
++
++	return qp;
++}
++
+ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ {
+ 	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+@@ -491,10 +534,6 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	int rc = 0;
+ 	int i;
+ 
+-	qp = xa_load(&dev->qps, conn_param->qpn);
+-	if (unlikely(!qp))
+-		return -EINVAL;
+-
+ 	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ 	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ 	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+@@ -516,8 +555,15 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 		return -ENOMEM;
+ 
+ 	ep->dev = dev;
++	kref_init(&ep->refcnt);
++
++	qp = qedr_iw_load_qp(dev, conn_param->qpn);
++	if (!qp) {
++		rc = -EINVAL;
++		goto err;
++	}
++
+ 	ep->qp = qp;
+-	qp->ep = ep;
+ 	cm_id->add_ref(cm_id);
+ 	ep->cm_id = cm_id;
+ 
+@@ -580,16 +626,20 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	in_params.qp = qp->qed_qp;
+ 	memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
+ 
+-	ep->during_connect = 1;
++	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
++			     &qp->iwarp_cm_flags))
++		goto err; /* QP already being destroyed */
++
+ 	rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
+-	if (rc)
++	if (rc) {
++		complete(&qp->iwarp_cm_comp);
+ 		goto err;
++	}
+ 
+ 	return rc;
+ 
+ err:
+-	cm_id->rem_ref(cm_id);
+-	kfree(ep);
++	kref_put(&ep->refcnt, qedr_iw_free_ep);
+ 	return rc;
+ }
+ 
+@@ -677,18 +727,17 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	struct qedr_dev *dev = ep->dev;
+ 	struct qedr_qp *qp;
+ 	struct qed_iwarp_accept_in params;
+-	int rc;
++	int rc = 0;
+ 
+ 	DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
+ 
+-	qp = xa_load(&dev->qps, conn_param->qpn);
++	qp = qedr_iw_load_qp(dev, conn_param->qpn);
+ 	if (!qp) {
+ 		DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
+ 		return -EINVAL;
+ 	}
+ 
+ 	ep->qp = qp;
+-	qp->ep = ep;
+ 	cm_id->add_ref(cm_id);
+ 	ep->cm_id = cm_id;
+ 
+@@ -700,15 +749,21 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ 	params.ird = conn_param->ird;
+ 	params.ord = conn_param->ord;
+ 
+-	ep->during_connect = 1;
++	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
++			     &qp->iwarp_cm_flags))
++		goto err; /* QP already destroyed */
++
+ 	rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
+-	if (rc)
++	if (rc) {
++		complete(&qp->iwarp_cm_comp);
+ 		goto err;
++	}
+ 
+ 	return rc;
++
+ err:
+-	ep->during_connect = 0;
+-	cm_id->rem_ref(cm_id);
++	kref_put(&ep->refcnt, qedr_iw_free_ep);
++
+ 	return rc;
+ }
+ 
+@@ -731,17 +786,14 @@ void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
+ {
+ 	struct qedr_qp *qp = get_qedr_qp(ibqp);
+ 
+-	atomic_inc(&qp->refcnt);
++	kref_get(&qp->refcnt);
+ }
+ 
+ void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
+ {
+ 	struct qedr_qp *qp = get_qedr_qp(ibqp);
+ 
+-	if (atomic_dec_and_test(&qp->refcnt)) {
+-		xa_erase_irq(&qp->dev->qps, qp->qp_id);
+-		kfree(qp);
+-	}
++	kref_put(&qp->refcnt, qedr_iw_free_qp);
+ }
+ 
+ struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index a7ccca3c4f89..8b4240c1cc76 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -51,6 +51,7 @@
+ #include "verbs.h"
+ #include <rdma/qedr-abi.h>
+ #include "qedr_roce_cm.h"
++#include "qedr_iw_cm.h"
+ 
+ #define QEDR_SRQ_WQE_ELEM_SIZE	sizeof(union rdma_srq_elm)
+ #define	RDMA_MAX_SGE_PER_SRQ	(4)
+@@ -1193,7 +1194,10 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
+ 				      struct ib_qp_init_attr *attrs)
+ {
+ 	spin_lock_init(&qp->q_lock);
+-	atomic_set(&qp->refcnt, 1);
++	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
++		kref_init(&qp->refcnt);
++		init_completion(&qp->iwarp_cm_comp);
++	}
+ 	qp->pd = pd;
+ 	qp->qp_type = attrs->qp_type;
+ 	qp->max_inline_data = attrs->cap.max_inline_data;
+@@ -1600,6 +1604,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
+ 	int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
+ 	int rc = -EINVAL;
+ 
++	qp->create_type = QEDR_QP_CREATE_USER;
+ 	memset(&ureq, 0, sizeof(ureq));
+ 	rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
+ 	if (rc) {
+@@ -1813,6 +1818,7 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
+ 	u32 n_sq_entries;
+ 
+ 	memset(&in_params, 0, sizeof(in_params));
++	qp->create_type = QEDR_QP_CREATE_KERNEL;
+ 
+ 	/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
+ 	 * the ring. The ring should allow at least a single WR, even if the
+@@ -1926,7 +1932,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+ 	qp->ibqp.qp_num = qp->qp_id;
+ 
+ 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+-		rc = xa_insert_irq(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
++		rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
+ 		if (rc)
+ 			goto err;
+ 	}
+@@ -2445,7 +2451,7 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
+ 			return rc;
+ 	}
+ 
+-	if (udata)
++	if (qp->create_type == QEDR_QP_CREATE_USER)
+ 		qedr_cleanup_user(dev, qp);
+ 	else
+ 		qedr_cleanup_kernel(dev, qp);
+@@ -2475,34 +2481,44 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ 			qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ 		}
+ 	} else {
+-		/* Wait for the connect/accept to complete */
+-		if (qp->ep) {
+-			int wait_count = 1;
+-
+-			while (qp->ep->during_connect) {
+-				DP_DEBUG(dev, QEDR_MSG_QP,
+-					 "Still in during connect/accept\n");
+-
+-				msleep(100);
+-				if (wait_count++ > 200) {
+-					DP_NOTICE(dev,
+-						  "during connect timeout\n");
+-					break;
+-				}
+-			}
+-		}
++		/* If connection establishment started the WAIT_FOR_CONNECT
++		 * bit will be on and we need to Wait for the establishment
++		 * to complete before destroying the qp.
++		 */
++		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
++				     &qp->iwarp_cm_flags))
++			wait_for_completion(&qp->iwarp_cm_comp);
++
++		/* If graceful disconnect started, the WAIT_FOR_DISCONNECT
++		 * bit will be on, and we need to wait for the disconnect to
++		 * complete before continuing. We can use the same completion,
++		 * iwarp_cm_comp, since this is the only place that waits for
++		 * this completion and it is sequential. In addition,
++		 * disconnect can't occur before the connection is fully
++		 * established, therefore if WAIT_FOR_DISCONNECT is on it
++		 * means WAIT_FOR_CONNECT is also on and the completion for
++		 * CONNECT already occurred.
++		 */
++		if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
++				     &qp->iwarp_cm_flags))
++			wait_for_completion(&qp->iwarp_cm_comp);
+ 	}
+ 
+ 	if (qp->qp_type == IB_QPT_GSI)
+ 		qedr_destroy_gsi_qp(dev);
+ 
++	/* We need to remove the entry from the xarray before we release the
++	 * qp_id to avoid a race of the qp_id being reallocated and failing
++	 * on xa_insert
++	 */
++	if (rdma_protocol_iwarp(&dev->ibdev, 1))
++		xa_erase(&dev->qps, qp->qp_id);
++
+ 	qedr_free_qp_resources(dev, qp, udata);
+ 
+-	if (atomic_dec_and_test(&qp->refcnt) &&
+-	    rdma_protocol_iwarp(&dev->ibdev, 1)) {
+-		xa_erase_irq(&dev->qps, qp->qp_id);
+-		kfree(qp);
+-	}
++	if (rdma_protocol_iwarp(&dev->ibdev, 1))
++		qedr_iw_qp_rem_ref(&qp->ibqp);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 6027bb65f7f6..dc9a3bb24114 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -818,10 +818,15 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
+ 		   PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
+ 
+ 	/* Trapped into security mode allows packet forwarding through VLAN
+-	 * table lookup.
++	 * table lookup. CPU port is set to fallback mode to let untagged
++	 * frames pass through.
+ 	 */
+-	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+-		   MT7530_PORT_SECURITY_MODE);
++	if (dsa_is_cpu_port(ds, port))
++		mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
++			   MT7530_PORT_FALLBACK_MODE);
++	else
++		mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
++			   MT7530_PORT_SECURITY_MODE);
+ 
+ 	/* Set the port as a user port which is to be able to recognize VID
+ 	 * from incoming packets before fetching entry within the VLAN table.
+diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
+index 0e7e36d8f994..3ef7b5a6fc22 100644
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -148,6 +148,12 @@ enum mt7530_port_mode {
+ 	/* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */
+ 	MT7530_PORT_MATRIX_MODE = PORT_VLAN(0),
+ 
++	/* Fallback Mode: Forward received frames with ingress ports that do
++	 * not belong to the VLAN member. Frames whose VID is not listed on
++	 * the VLAN table are forwarded by the PCR_MATRIX members.
++	 */
++	MT7530_PORT_FALLBACK_MODE = PORT_VLAN(1),
++
+ 	/* Security Mode: Discard any frame due to ingress membership
+ 	 * violation or VID missed on the VLAN table.
+ 	 */
+diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
+index a58185b1d8bf..3e3711b60d01 100644
+--- a/drivers/net/ethernet/apple/bmac.c
++++ b/drivers/net/ethernet/apple/bmac.c
+@@ -1182,7 +1182,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea)
+ 	int i;
+ 	unsigned short data;
+ 
+-	for (i = 0; i < 6; i++)
++	for (i = 0; i < 3; i++)
+ 		{
+ 			reset_and_select_srom(dev);
+ 			data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
+diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
+index f839fa94ebdd..d3b8ce734c1b 100644
+--- a/drivers/net/ethernet/freescale/ucc_geth.c
++++ b/drivers/net/ethernet/freescale/ucc_geth.c
+@@ -42,6 +42,7 @@
+ #include <soc/fsl/qe/ucc.h>
+ #include <soc/fsl/qe/ucc_fast.h>
+ #include <asm/machdep.h>
++#include <net/sch_generic.h>
+ 
+ #include "ucc_geth.h"
+ 
+@@ -1548,11 +1549,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
+ 
+ static void ugeth_quiesce(struct ucc_geth_private *ugeth)
+ {
+-	/* Prevent any further xmits, plus detach the device. */
+-	netif_device_detach(ugeth->ndev);
+-
+-	/* Wait for any current xmits to finish. */
+-	netif_tx_disable(ugeth->ndev);
++	/* Prevent any further xmits */
++	netif_tx_stop_all_queues(ugeth->ndev);
+ 
+ 	/* Disable the interrupt to avoid NAPI rescheduling. */
+ 	disable_irq(ugeth->ug_info->uf_info.irq);
+@@ -1565,7 +1563,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
+ {
+ 	napi_enable(&ugeth->napi);
+ 	enable_irq(ugeth->ug_info->uf_info.irq);
+-	netif_device_attach(ugeth->ndev);
++
++	/* allow to xmit again  */
++	netif_tx_wake_all_queues(ugeth->ndev);
++	__netdev_watchdog_up(ugeth->ndev);
+ }
+ 
+ /* Called every time the controller might need to be made
+diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
+index 38068fc34141..c7bdada4d1b9 100644
+--- a/drivers/net/ethernet/smsc/smsc911x.c
++++ b/drivers/net/ethernet/smsc/smsc911x.c
+@@ -2502,20 +2502,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
+ 
+ 	retval = smsc911x_init(dev);
+ 	if (retval < 0)
+-		goto out_disable_resources;
++		goto out_init_fail;
+ 
+ 	netif_carrier_off(dev);
+ 
+ 	retval = smsc911x_mii_init(pdev, dev);
+ 	if (retval) {
+ 		SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
+-		goto out_disable_resources;
++		goto out_init_fail;
+ 	}
+ 
+ 	retval = register_netdev(dev);
+ 	if (retval) {
+ 		SMSC_WARN(pdata, probe, "Error %i registering device", retval);
+-		goto out_disable_resources;
++		goto out_init_fail;
+ 	} else {
+ 		SMSC_TRACE(pdata, probe,
+ 			   "Network interface: \"%s\"", dev->name);
+@@ -2556,9 +2556,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ 
+-out_disable_resources:
++out_init_fail:
+ 	pm_runtime_put(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
++out_disable_resources:
+ 	(void)smsc911x_disable_resources(pdev);
+ out_enable_resources_fail:
+ 	smsc911x_free_resources(pdev);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+index 0d21082ceb93..4d75158c64b2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+@@ -318,6 +318,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
+ 	/* Enable PTP clock */
+ 	regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ 	val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
++	switch (gmac->phy_mode) {
++	case PHY_INTERFACE_MODE_RGMII:
++		val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
++			NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
++		break;
++	case PHY_INTERFACE_MODE_SGMII:
++		val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
++				NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
++		break;
++	default:
++		/* We don't get here; the switch above will have errored out */
++		unreachable();
++	}
+ 	regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+ 
+ 	if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
+index c4c8f1b62e1e..da0d3834b5f0 100644
+--- a/drivers/net/wireless/cisco/airo.c
++++ b/drivers/net/wireless/cisco/airo.c
+@@ -1925,6 +1925,10 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb,
+ 		airo_print_err(dev->name, "%s: skb == NULL!",__func__);
+ 		return NETDEV_TX_OK;
+ 	}
++	if (skb_padto(skb, ETH_ZLEN)) {
++		dev->stats.tx_dropped++;
++		return NETDEV_TX_OK;
++	}
+ 	npacks = skb_queue_len (&ai->txq);
+ 
+ 	if (npacks >= MAXTXQ - 1) {
+@@ -2127,6 +2131,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
+ 		airo_print_err(dev->name, "%s: skb == NULL!", __func__);
+ 		return NETDEV_TX_OK;
+ 	}
++	if (skb_padto(skb, ETH_ZLEN)) {
++		dev->stats.tx_dropped++;
++		return NETDEV_TX_OK;
++	}
+ 
+ 	/* Find a vacant FID */
+ 	for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ );
+@@ -2201,6 +2209,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
+ 		airo_print_err(dev->name, "%s: skb == NULL!", __func__);
+ 		return NETDEV_TX_OK;
+ 	}
++	if (skb_padto(skb, ETH_ZLEN)) {
++		dev->stats.tx_dropped++;
++		return NETDEV_TX_OK;
++	}
+ 
+ 	/* Find a vacant FID */
+ 	for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ );
+diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
+index b94764c88750..ff0e30c0c14c 100644
+--- a/drivers/net/wireless/intersil/p54/p54usb.c
++++ b/drivers/net/wireless/intersil/p54/p54usb.c
+@@ -61,6 +61,7 @@ static const struct usb_device_id p54u_table[] = {
+ 	{USB_DEVICE(0x0db0, 0x6826)},	/* MSI UB54G (MS-6826) */
+ 	{USB_DEVICE(0x107b, 0x55f2)},	/* Gateway WGU-210 (Gemtek) */
+ 	{USB_DEVICE(0x124a, 0x4023)},	/* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
++	{USB_DEVICE(0x124a, 0x4026)},	/* AirVasT USB wireless device */
+ 	{USB_DEVICE(0x1435, 0x0210)},	/* Inventel UR054G */
+ 	{USB_DEVICE(0x15a9, 0x0002)},	/* Gemtek WUBI-100GW 802.11g */
+ 	{USB_DEVICE(0x1630, 0x0005)},	/* 2Wire 802.11g USB (v1) / Z-Com */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
+index e858bba8c8ff..0075fba93546 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
+@@ -212,6 +212,7 @@ static inline bool is_mt76x0(struct mt76x02_dev *dev)
+ static inline bool is_mt76x2(struct mt76x02_dev *dev)
+ {
+ 	return mt76_chip(&dev->mt76) == 0x7612 ||
++	       mt76_chip(&dev->mt76) == 0x7632 ||
+ 	       mt76_chip(&dev->mt76) == 0x7662 ||
+ 	       mt76_chip(&dev->mt76) == 0x7602;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+index 8b26c6108186..96a2b7ba6764 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+@@ -18,6 +18,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
+ 	{ USB_DEVICE(0x7392, 0xb711) },	/* Edimax EW 7722 UAC */
+ 	{ USB_DEVICE(0x0846, 0x9053) },	/* Netgear A6210 */
+ 	{ USB_DEVICE(0x045e, 0x02e6) },	/* XBox One Wireless Adapter */
++	{ USB_DEVICE(0x045e, 0x02fe) },	/* XBox One Wireless Adapter */
+ 	{ },
+ };
+ 
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index 849335d76cf6..6f4692f0d714 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -974,12 +974,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
+ 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ 	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
+ 	struct asd_sas_port *sas_port = sas_phy->port;
+-	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
++	struct hisi_sas_port *port;
+ 	unsigned long flags;
+ 
+ 	if (!sas_port)
+ 		return;
+ 
++	port = to_hisi_sas_port(sas_port);
+ 	spin_lock_irqsave(&hisi_hba->lock, flags);
+ 	port->port_attached = 1;
+ 	port->id = phy->port_id;
+diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
+index 3717eea37ecb..5f0ad8b32e3a 100644
+--- a/drivers/scsi/scsi_pm.c
++++ b/drivers/scsi/scsi_pm.c
+@@ -80,6 +80,10 @@ static int scsi_dev_type_resume(struct device *dev,
+ 	dev_dbg(dev, "scsi resume: %d\n", err);
+ 
+ 	if (err == 0) {
++		bool was_runtime_suspended;
++
++		was_runtime_suspended = pm_runtime_suspended(dev);
++
+ 		pm_runtime_disable(dev);
+ 		err = pm_runtime_set_active(dev);
+ 		pm_runtime_enable(dev);
+@@ -93,8 +97,10 @@ static int scsi_dev_type_resume(struct device *dev,
+ 		 */
+ 		if (!err && scsi_is_sdev_device(dev)) {
+ 			struct scsi_device *sdev = to_scsi_device(dev);
+-
+-			blk_set_runtime_active(sdev->request_queue);
++			if (was_runtime_suspended)
++				blk_post_runtime_resume(sdev->request_queue, 0);
++			else
++				blk_set_runtime_active(sdev->request_queue);
+ 		}
+ 	}
+ 
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 13ab1494c384..bc73181b0405 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -2480,6 +2480,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+ 
+ 	err = ufshcd_map_sg(hba, lrbp);
+ 	if (err) {
++		ufshcd_release(hba);
+ 		lrbp->cmd = NULL;
+ 		clear_bit_unlock(tag, &hba->lrb_in_use);
+ 		goto out;
+diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
+index 11cac7e10663..d2ca3b357cfe 100644
+--- a/drivers/spi/spi-dw.c
++++ b/drivers/spi/spi-dw.c
+@@ -297,6 +297,9 @@ static int dw_spi_transfer_one(struct spi_controller *master,
+ 	dws->len = transfer->len;
+ 	spin_unlock_irqrestore(&dws->buf_lock, flags);
+ 
++	/* Ensure dw->rx and dw->rx_end are visible */
++	smp_mb();
++
+ 	spi_enable_chip(dws, 0);
+ 
+ 	/* Handle per transfer options for bpw and speed */
+diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
+index 0b1cb9f9cbd1..1bfa8c86132a 100644
+--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
++++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
+@@ -450,7 +450,7 @@ struct ipu3_uapi_awb_fr_config_s {
+ 	__u32 bayer_sign;
+ 	__u8 bayer_nf;
+ 	__u8 reserved2[7];
+-} __attribute__((aligned(32))) __packed;
++} __packed;
+ 
+ /**
+  * struct ipu3_uapi_4a_config - 4A config
+@@ -466,7 +466,8 @@ struct ipu3_uapi_4a_config {
+ 	struct ipu3_uapi_ae_grid_config ae_grd_config;
+ 	__u8 padding[20];
+ 	struct ipu3_uapi_af_config_s af_config;
+-	struct ipu3_uapi_awb_fr_config_s awb_fr_config;
++	struct ipu3_uapi_awb_fr_config_s awb_fr_config
++		__attribute__((aligned(32)));
+ } __packed;
+ 
+ /**
+@@ -2472,7 +2473,7 @@ struct ipu3_uapi_acc_param {
+ 	struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32)));
+ 	struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32)));
+ 	struct ipu3_uapi_anr_config anr;
+-	struct ipu3_uapi_awb_fr_config_s awb_fr __attribute__((aligned(32)));
++	struct ipu3_uapi_awb_fr_config_s awb_fr;
+ 	struct ipu3_uapi_ae_config ae;
+ 	struct ipu3_uapi_af_config_s af;
+ 	struct ipu3_uapi_awb_config awb;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index b2ccb908f6b6..2050100e6e84 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -409,6 +409,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
+ 	}
+ 
+ 	ctx->flags = p->flags;
++	init_waitqueue_head(&ctx->sqo_wait);
+ 	init_waitqueue_head(&ctx->cq_wait);
+ 	init_completion(&ctx->ctx_done);
+ 	init_completion(&ctx->sqo_thread_started);
+@@ -3237,7 +3238,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
+ {
+ 	int ret;
+ 
+-	init_waitqueue_head(&ctx->sqo_wait);
+ 	mmgrab(current->mm);
+ 	ctx->sqo_mm = current->mm;
+ 
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 9df7547afc0c..fd022768e91d 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -117,12 +117,14 @@ enum lockdown_reason {
+ 	LOCKDOWN_MODULE_PARAMETERS,
+ 	LOCKDOWN_MMIOTRACE,
+ 	LOCKDOWN_DEBUGFS,
++	LOCKDOWN_XMON_WR,
+ 	LOCKDOWN_INTEGRITY_MAX,
+ 	LOCKDOWN_KCORE,
+ 	LOCKDOWN_KPROBES,
+ 	LOCKDOWN_BPF_READ,
+ 	LOCKDOWN_PERF,
+ 	LOCKDOWN_TRACEFS,
++	LOCKDOWN_XMON_RW,
+ 	LOCKDOWN_CONFIDENTIALITY_MAX,
+ };
+ 
+diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
+index 00c08120f3ba..27a39847d55c 100644
+--- a/include/uapi/linux/mmc/ioctl.h
++++ b/include/uapi/linux/mmc/ioctl.h
+@@ -3,6 +3,7 @@
+ #define LINUX_MMC_IOCTL_H
+ 
+ #include <linux/types.h>
++#include <linux/major.h>
+ 
+ struct mmc_ioc_cmd {
+ 	/*
+diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
+index ca19b4c8acf5..4a942d4e9763 100644
+--- a/kernel/cgroup/rstat.c
++++ b/kernel/cgroup/rstat.c
+@@ -33,12 +33,9 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
+ 		return;
+ 
+ 	/*
+-	 * Paired with the one in cgroup_rstat_cpu_pop_upated().  Either we
+-	 * see NULL updated_next or they see our updated stat.
+-	 */
+-	smp_mb();
+-
+-	/*
++	 * Speculative already-on-list test. This may race leading to
++	 * temporary inaccuracies, which is fine.
++	 *
+ 	 * Because @parent's updated_children is terminated with @parent
+ 	 * instead of NULL, we can tell whether @cgrp is on the list by
+ 	 * testing the next pointer for NULL.
+@@ -134,13 +131,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
+ 		*nextp = rstatc->updated_next;
+ 		rstatc->updated_next = NULL;
+ 
+-		/*
+-		 * Paired with the one in cgroup_rstat_cpu_updated().
+-		 * Either they see NULL updated_next or we see their
+-		 * updated stat.
+-		 */
+-		smp_mb();
+-
+ 		return pos;
+ 	}
+ 
+diff --git a/kernel/relay.c b/kernel/relay.c
+index ade14fb7ce2e..4b760ec16342 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -581,6 +581,11 @@ struct rchan *relay_open(const char *base_filename,
+ 		return NULL;
+ 
+ 	chan->buf = alloc_percpu(struct rchan_buf *);
++	if (!chan->buf) {
++		kfree(chan);
++		return NULL;
++	}
++
+ 	chan->version = RELAYFS_CHANNEL_VERSION;
+ 	chan->n_subbufs = n_subbufs;
+ 	chan->subbuf_size = subbuf_size;
+diff --git a/mm/mremap.c b/mm/mremap.c
+index 245bf9c555b2..8005d0b2b843 100644
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -266,7 +266,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
+ 		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
+ 		if (!new_pmd)
+ 			break;
+-		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
++		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) {
+ 			if (extent == HPAGE_PMD_SIZE) {
+ 				bool moved;
+ 				/* See comment in move_ptes() */
+diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
+index cc826c2767a3..fbc2ee6d46fc 100644
+--- a/security/integrity/evm/evm_crypto.c
++++ b/security/integrity/evm/evm_crypto.c
+@@ -209,7 +209,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
+ 	data->hdr.length = crypto_shash_digestsize(desc->tfm);
+ 
+ 	error = -ENODATA;
+-	list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
++	list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ 		bool is_ima = false;
+ 
+ 		if (strcmp(xattr->name, XATTR_NAME_IMA) == 0)
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index f9a81b187fae..a2c393385db0 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -99,7 +99,7 @@ static int evm_find_protected_xattrs(struct dentry *dentry)
+ 	if (!(inode->i_opflags & IOP_XATTR))
+ 		return -EOPNOTSUPP;
+ 
+-	list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
++	list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ 		error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0);
+ 		if (error < 0) {
+ 			if (error == -ENODATA)
+@@ -230,7 +230,7 @@ static int evm_protected_xattr(const char *req_xattr_name)
+ 	struct xattr_list *xattr;
+ 
+ 	namelen = strlen(req_xattr_name);
+-	list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
++	list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ 		if ((strlen(xattr->name) == namelen)
+ 		    && (strncmp(req_xattr_name, xattr->name, namelen) == 0)) {
+ 			found = 1;
+diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
+index c11c1f7b3ddd..0f37ef27268d 100644
+--- a/security/integrity/evm/evm_secfs.c
++++ b/security/integrity/evm/evm_secfs.c
+@@ -234,7 +234,14 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
+ 		goto out;
+ 	}
+ 
+-	/* Guard against races in evm_read_xattrs */
++	/*
++	 * xattr_list_mutex guards against races in evm_read_xattrs().
++	 * Entries are only added to the evm_config_xattrnames list
++	 * and never deleted. Therefore, the list is traversed
++	 * using list_for_each_entry_lockless() without holding
++	 * the mutex in evm_calc_hmac_or_hash(), evm_find_protected_xattrs()
++	 * and evm_protected_xattr().
++	 */
+ 	mutex_lock(&xattr_list_mutex);
+ 	list_for_each_entry(tmp, &evm_config_xattrnames, list) {
+ 		if (strcmp(xattr->name, tmp->name) == 0) {
+diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
+index 40b790536def..b2f87015d6e9 100644
+--- a/security/lockdown/lockdown.c
++++ b/security/lockdown/lockdown.c
+@@ -32,12 +32,14 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
+ 	[LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
+ 	[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
+ 	[LOCKDOWN_DEBUGFS] = "debugfs access",
++	[LOCKDOWN_XMON_WR] = "xmon write access",
+ 	[LOCKDOWN_INTEGRITY_MAX] = "integrity",
+ 	[LOCKDOWN_KCORE] = "/proc/kcore access",
+ 	[LOCKDOWN_KPROBES] = "use of kprobes",
+ 	[LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
+ 	[LOCKDOWN_PERF] = "unsafe use of perf",
+ 	[LOCKDOWN_TRACEFS] = "use of tracefs",
++	[LOCKDOWN_XMON_RW] = "xmon read and write access",
+ 	[LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
+ };
+ 
+diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+index 67b276a65a8d..8ad31c91fc75 100644
+--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
++++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+@@ -626,7 +626,7 @@ static int kabylake_card_late_probe(struct snd_soc_card *card)
+  * kabylake audio machine driver for  MAX98927 + RT5514 + RT5663
+  */
+ static struct snd_soc_card kabylake_audio_card = {
+-	.name = "kbl_r5514_5663_max",
++	.name = "kbl-r5514-5663-max",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = kabylake_dais,
+ 	.num_links = ARRAY_SIZE(kabylake_dais),
+diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+index 1778acdc367c..e8d676c192f6 100644
+--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
++++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+@@ -90,7 +90,7 @@ skl_hda_add_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *link)
+ }
+ 
+ static struct snd_soc_card hda_soc_card = {
+-	.name = "skl_hda_card",
++	.name = "hda-dsp",
+ 	.owner = THIS_MODULE,
+ 	.dai_link = skl_hda_be_dai_links,
+ 	.dapm_widgets = skl_hda_widgets,
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index 06b7d6c6c9a0..302ca1920791 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -374,7 +374,7 @@ static int dmic_init(struct snd_soc_pcm_runtime *rtd)
+ 
+ /* sof audio machine driver for rt5682 codec */
+ static struct snd_soc_card sof_audio_card_rt5682 = {
+-	.name = "sof_rt5682",
++	.name = "rt5682", /* the sof- prefix is added by the core */
+ 	.owner = THIS_MODULE,
+ 	.controls = sof_controls,
+ 	.num_controls = ARRAY_SIZE(sof_controls),
+diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+index 24dd8ed48580..b025daea062d 100755
+--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
++++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+@@ -300,7 +300,7 @@ test_uc_aware()
+ 	local i
+ 
+ 	for ((i = 0; i < attempts; ++i)); do
+-		if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 0.1; then
++		if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 1; then
+ 			((passes++))
+ 		fi
+ 



Thread overview: 305+ messages
2020-06-07 21:53 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2024-04-18  3:06 [gentoo-commits] proj/linux-patches:5.4 commit in: / Alice Ferrazzi
2023-10-05 14:24 Mike Pagano
2023-09-23 10:18 Mike Pagano
2023-09-02  9:58 Mike Pagano
2023-08-30 14:56 Mike Pagano
2023-08-16 17:00 Mike Pagano
2023-08-11 11:57 Mike Pagano
2023-08-08 18:42 Mike Pagano
2023-07-27 11:51 Mike Pagano
2023-07-24 20:29 Mike Pagano
2023-06-28 10:28 Mike Pagano
2023-06-21 14:55 Alice Ferrazzi
2023-06-14 10:20 Mike Pagano
2023-06-09 11:32 Mike Pagano
2023-06-05 11:50 Mike Pagano
2023-05-30 12:56 Mike Pagano
2023-05-17 11:21 Mike Pagano
2023-05-17 11:00 Mike Pagano
2023-05-10 17:58 Mike Pagano
2023-04-26  9:51 Alice Ferrazzi
2023-04-20 11:17 Alice Ferrazzi
2023-04-05 10:01 Alice Ferrazzi
2023-03-30 13:41 Alice Ferrazzi
2023-03-22 14:16 Alice Ferrazzi
2023-03-17 10:46 Mike Pagano
2023-03-13 11:34 Alice Ferrazzi
2023-03-11 16:20 Mike Pagano
2023-03-03 12:31 Mike Pagano
2023-02-25 11:42 Mike Pagano
2023-02-24  3:08 Alice Ferrazzi
2023-02-22 14:41 Alice Ferrazzi
2023-02-06 12:48 Mike Pagano
2023-02-02 19:15 Mike Pagano
2023-01-24  7:25 Alice Ferrazzi
2023-01-18 11:10 Mike Pagano
2022-12-19 12:27 Alice Ferrazzi
2022-12-14 12:14 Mike Pagano
2022-12-08 12:13 Alice Ferrazzi
2022-11-25 17:05 Mike Pagano
2022-11-10 17:59 Mike Pagano
2022-11-03 15:13 Mike Pagano
2022-11-01 19:47 Mike Pagano
2022-10-29  9:52 Mike Pagano
2022-10-26 11:44 Mike Pagano
2022-10-17 16:48 Mike Pagano
2022-10-15 10:06 Mike Pagano
2022-10-07 11:12 Mike Pagano
2022-10-05 11:58 Mike Pagano
2022-09-28  9:26 Mike Pagano
2022-09-20 12:02 Mike Pagano
2022-09-15 10:31 Mike Pagano
2022-09-05 12:04 Mike Pagano
2022-08-25 10:34 Mike Pagano
2022-08-11 12:35 Mike Pagano
2022-08-03 14:51 Alice Ferrazzi
2022-07-29 15:29 Mike Pagano
2022-07-21 20:09 Mike Pagano
2022-07-15 10:04 Mike Pagano
2022-07-12 16:01 Mike Pagano
2022-07-07 16:18 Mike Pagano
2022-07-02 16:08 Mike Pagano
2022-06-29 11:09 Mike Pagano
2022-06-27 19:03 Mike Pagano
2022-06-25 19:46 Mike Pagano
2022-06-22 13:50 Mike Pagano
2022-06-22 13:25 Mike Pagano
2022-06-22 12:47 Mike Pagano
2022-06-16 11:43 Mike Pagano
2022-06-14 17:12 Mike Pagano
2022-06-06 11:04 Mike Pagano
2022-05-25 11:55 Mike Pagano
2022-05-18  9:49 Mike Pagano
2022-05-15 22:11 Mike Pagano
2022-05-12 11:30 Mike Pagano
2022-05-09 10:55 Mike Pagano
2022-04-27 12:21 Mike Pagano
2022-04-20 12:08 Mike Pagano
2022-04-15 13:10 Mike Pagano
2022-04-12 19:21 Mike Pagano
2022-03-28 10:58 Mike Pagano
2022-03-23 11:57 Mike Pagano
2022-03-19 13:21 Mike Pagano
2022-03-16 13:31 Mike Pagano
2022-03-11 10:55 Mike Pagano
2022-03-08 18:31 Mike Pagano
2022-03-02 13:07 Mike Pagano
2022-02-23 12:38 Mike Pagano
2022-02-16 12:46 Mike Pagano
2022-02-11 12:36 Mike Pagano
2022-02-08 17:55 Mike Pagano
2022-02-05 12:14 Mike Pagano
2022-02-01 17:24 Mike Pagano
2022-01-31 13:01 Mike Pagano
2022-01-29 17:44 Mike Pagano
2022-01-27 11:38 Mike Pagano
2022-01-20 10:00 Mike Pagano
2022-01-16 10:22 Mike Pagano
2022-01-11 14:34 Mike Pagano
2022-01-05 12:54 Mike Pagano
2021-12-29 13:07 Mike Pagano
2021-12-22 14:06 Mike Pagano
2021-12-17 11:55 Mike Pagano
2021-12-16 16:51 Mike Pagano
2021-12-14 14:19 Mike Pagano
2021-12-08 12:54 Mike Pagano
2021-12-01 12:50 Mike Pagano
2021-11-26 11:58 Mike Pagano
2021-11-21 20:44 Mike Pagano
2021-11-17 12:00 Mike Pagano
2021-11-12 14:14 Mike Pagano
2021-11-06 13:26 Mike Pagano
2021-11-04 11:23 Mike Pagano
2021-11-02 19:31 Mike Pagano
2021-10-27 15:51 Mike Pagano
2021-10-27 11:58 Mike Pagano
2021-10-20 13:24 Mike Pagano
2021-10-17 13:12 Mike Pagano
2021-10-13 14:55 Alice Ferrazzi
2021-10-09 21:32 Mike Pagano
2021-10-06 14:06 Mike Pagano
2021-09-30 10:49 Mike Pagano
2021-09-26 14:13 Mike Pagano
2021-09-22 11:39 Mike Pagano
2021-09-20 22:03 Mike Pagano
2021-09-16 11:19 Mike Pagano
2021-09-15 12:00 Mike Pagano
2021-09-12 14:38 Mike Pagano
2021-09-03 11:21 Mike Pagano
2021-09-03  9:39 Alice Ferrazzi
2021-08-26 14:36 Mike Pagano
2021-08-18 12:46 Mike Pagano
2021-08-15 20:06 Mike Pagano
2021-08-12 11:52 Mike Pagano
2021-08-08 13:38 Mike Pagano
2021-08-04 11:53 Mike Pagano
2021-08-03 12:23 Mike Pagano
2021-07-31 10:32 Alice Ferrazzi
2021-07-28 12:36 Mike Pagano
2021-07-25 17:27 Mike Pagano
2021-07-20 15:39 Alice Ferrazzi
2021-07-19 11:18 Mike Pagano
2021-07-14 16:22 Mike Pagano
2021-07-13 12:37 Mike Pagano
2021-07-11 14:44 Mike Pagano
2021-07-07 13:13 Mike Pagano
2021-06-30 14:24 Mike Pagano
2021-06-23 15:11 Mike Pagano
2021-06-18 11:38 Mike Pagano
2021-06-16 12:23 Mike Pagano
2021-06-10 11:59 Mike Pagano
2021-06-07 11:23 Mike Pagano
2021-06-03 10:28 Alice Ferrazzi
2021-05-28 12:03 Alice Ferrazzi
2021-05-26 12:06 Mike Pagano
2021-05-22 10:04 Mike Pagano
2021-05-19 12:23 Mike Pagano
2021-05-14 14:10 Alice Ferrazzi
2021-05-11 14:20 Mike Pagano
2021-05-07 11:44 Alice Ferrazzi
2021-05-07 11:37 Mike Pagano
2021-05-02 16:02 Mike Pagano
2021-05-02 16:00 Mike Pagano
2021-04-30 19:01 Mike Pagano
2021-04-28 11:52 Alice Ferrazzi
2021-04-21 11:42 Mike Pagano
2021-04-16 11:14 Alice Ferrazzi
2021-04-14 11:20 Alice Ferrazzi
2021-04-10 13:25 Mike Pagano
2021-04-07 13:27 Mike Pagano
2021-03-30 13:12 Alice Ferrazzi
2021-03-24 12:09 Mike Pagano
2021-03-22 15:55 Mike Pagano
2021-03-20 14:32 Mike Pagano
2021-03-17 18:43 Mike Pagano
2021-03-16 16:04 Mike Pagano
2021-03-11 14:08 Mike Pagano
2021-03-09 12:18 Mike Pagano
2021-03-07 15:16 Mike Pagano
2021-03-04 14:51 Mike Pagano
2021-03-04 12:06 Alice Ferrazzi
2021-03-01 23:49 Mike Pagano
2021-03-01 23:44 Mike Pagano
2021-02-27 14:16 Mike Pagano
2021-02-26 10:01 Alice Ferrazzi
2021-02-23 17:01 Mike Pagano
2021-02-23 14:28 Alice Ferrazzi
2021-02-17 11:39 Alice Ferrazzi
2021-02-13 14:46 Alice Ferrazzi
2021-02-10  9:53 Alice Ferrazzi
2021-02-07 15:24 Alice Ferrazzi
2021-02-03 23:48 Mike Pagano
2021-01-30 13:37 Alice Ferrazzi
2021-01-27 11:13 Mike Pagano
2021-01-23 17:50 Mike Pagano
2021-01-23 16:37 Mike Pagano
2021-01-19 20:32 Mike Pagano
2021-01-17 16:19 Mike Pagano
2021-01-12 20:05 Mike Pagano
2021-01-09 17:51 Mike Pagano
2021-01-08 16:08 Mike Pagano
2021-01-06 14:14 Mike Pagano
2020-12-30 12:53 Mike Pagano
2020-12-21 13:27 Mike Pagano
2020-12-16 23:14 Mike Pagano
2020-12-11 12:56 Mike Pagano
2020-12-08 12:07 Mike Pagano
2020-12-02 12:50 Mike Pagano
2020-11-26 14:27 Mike Pagano
2020-11-24 14:44 Mike Pagano
2020-11-22 19:31 Mike Pagano
2020-11-18 20:19 Mike Pagano
2020-11-18 20:10 Mike Pagano
2020-11-18 20:03 Mike Pagano
2020-11-13 12:16 Mike Pagano
2020-11-11 15:48 Mike Pagano
2020-11-10 13:57 Mike Pagano
2020-11-05 12:36 Mike Pagano
2020-11-01 20:31 Mike Pagano
2020-10-29 11:19 Mike Pagano
2020-10-17 10:18 Mike Pagano
2020-10-14 20:37 Mike Pagano
2020-10-07 12:48 Mike Pagano
2020-10-01 12:49 Mike Pagano
2020-09-26 21:59 Mike Pagano
2020-09-24 15:38 Mike Pagano
2020-09-24 15:38 Mike Pagano
2020-09-24 15:38 Mike Pagano
2020-09-23 12:10 Mike Pagano
2020-09-17 14:56 Mike Pagano
2020-09-12 18:08 Mike Pagano
2020-09-09 18:00 Mike Pagano
2020-09-08 22:26 Mike Pagano
2020-09-05 10:47 Mike Pagano
2020-09-03 11:38 Mike Pagano
2020-08-26 11:16 Mike Pagano
2020-08-21 13:25 Alice Ferrazzi
2020-08-19  9:28 Alice Ferrazzi
2020-08-12 23:30 Alice Ferrazzi
2020-08-07 12:16 Alice Ferrazzi
2020-08-05 14:45 Thomas Deutschmann
2020-08-01 19:45 Mike Pagano
2020-07-31 18:28 Mike Pagano
2020-07-31 18:04 Mike Pagano
2020-07-30 14:58 Mike Pagano
2020-07-29 12:40 Mike Pagano
2020-07-22 12:53 Mike Pagano
2020-07-16 11:19 Mike Pagano
2020-07-09 12:13 Mike Pagano
2020-07-01 12:23 Mike Pagano
2020-06-29 17:40 Mike Pagano
2020-06-24 16:49 Mike Pagano
2020-06-22 14:48 Mike Pagano
2020-06-17 16:40 Mike Pagano
2020-06-10 19:42 Mike Pagano
2020-06-03 11:43 Mike Pagano
2020-06-02 11:37 Mike Pagano
2020-05-27 16:31 Mike Pagano
2020-05-20 11:37 Mike Pagano
2020-05-20 11:33 Mike Pagano
2020-05-14 11:32 Mike Pagano
2020-05-13 12:18 Mike Pagano
2020-05-11 22:49 Mike Pagano
2020-05-09 22:12 Mike Pagano
2020-05-06 11:47 Mike Pagano
2020-05-02 19:24 Mike Pagano
2020-05-02 13:25 Mike Pagano
2020-04-29 17:56 Mike Pagano
2020-04-23 11:55 Mike Pagano
2020-04-21 11:19 Mike Pagano
2020-04-17 11:46 Mike Pagano
2020-04-15 15:52 Mike Pagano
2020-04-13 11:18 Mike Pagano
2020-04-08 12:42 Mike Pagano
2020-04-02 15:26 Mike Pagano
2020-04-01 12:03 Mike Pagano
2020-03-25 15:01 Mike Pagano
2020-03-21 18:58 Mike Pagano
2020-03-18 14:23 Mike Pagano
2020-03-12 14:04 Mike Pagano
2020-03-05 16:26 Mike Pagano
2020-02-28 16:41 Mike Pagano
2020-02-24 11:09 Mike Pagano
2020-02-19 23:48 Mike Pagano
2020-02-14 23:55 Mike Pagano
2020-02-11 15:35 Mike Pagano
2020-02-06 11:07 Mike Pagano
2020-02-01 10:53 Mike Pagano
2020-02-01 10:31 Mike Pagano
2020-01-29 16:18 Mike Pagano
2020-01-26 12:27 Mike Pagano
2020-01-23 11:09 Mike Pagano
2020-01-17 19:57 Mike Pagano
2020-01-14 22:33 Mike Pagano
2020-01-12 15:01 Mike Pagano
2020-01-09 11:17 Mike Pagano
2020-01-04 19:59 Mike Pagano
2019-12-31 17:48 Mike Pagano
2019-12-30 23:03 Mike Pagano
2019-12-21 15:01 Mike Pagano
2019-12-18 19:30 Mike Pagano
2019-12-17 21:57 Mike Pagano
2019-12-13 12:39 Mike Pagano
2019-12-05  1:04 Thomas Deutschmann
2019-11-29 21:21 Thomas Deutschmann
