From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sat, 20 Apr 2019 11:09:58 +0000 (UTC)
Message-ID: <1555758574.466fe7cb06476e8a577628c1620bdaff6551d949.mpagano@gentoo>

commit:     466fe7cb06476e8a577628c1620bdaff6551d949
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 20 11:09:34 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Apr 20 11:09:34 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=466fe7cb

Linux patch 4.19.36

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1035_linux-4.19.36.patch | 7962 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7966 insertions(+)

diff --git a/0000_README b/0000_README
index fbfea55..043542a 100644
--- a/0000_README
+++ b/0000_README
@@ -183,6 +183,10 @@ Patch:  1034_linux-4.19.35.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.19.35
 
+Patch:  1035_linux-4.19.36.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.19.36
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1035_linux-4.19.36.patch b/1035_linux-4.19.36.patch
new file mode 100644
index 0000000..a57f768
--- /dev/null
+++ b/1035_linux-4.19.36.patch
@@ -0,0 +1,7962 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 9e9b19ecf6f7..11a59e82d92e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -7320,6 +7320,12 @@ L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+ S:	Supported
+ F:	sound/soc/intel/
+ 
++INTEL ATOMISP2 DUMMY / POWER-MANAGEMENT DRIVER
++M:	Hans de Goede <hdegoede@redhat.com>
++L:	platform-driver-x86@vger.kernel.org
++S:	Maintained
++F:	drivers/platform/x86/intel_atomisp2_pm.c
++
+ INTEL C600 SERIES SAS CONTROLLER DRIVER
+ M:	Intel SCU Linux support <intel-linux-scu@intel.com>
+ M:	Artur Paszkiewicz <artur.paszkiewicz@intel.com>
+diff --git a/Makefile b/Makefile
+index f4229975b48c..3fac08f6a11e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 35
++SUBLEVEL = 36
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
+index 2a1d2cbfee1a..651fa7978e51 100644
+--- a/arch/arc/configs/hsdk_defconfig
++++ b/arch/arc/configs/hsdk_defconfig
+@@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y
+ # CONFIG_UTS_NS is not set
+ # CONFIG_PID_NS is not set
+ CONFIG_BLK_DEV_INITRD=y
++CONFIG_BLK_DEV_RAM=y
+ CONFIG_EMBEDDED=y
+ CONFIG_PERF_EVENTS=y
+ # CONFIG_VM_EVENT_COUNTERS is not set
+diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
+index 1f945d0f40da..208bf2c9e7b0 100644
+--- a/arch/arc/kernel/head.S
++++ b/arch/arc/kernel/head.S
+@@ -107,6 +107,7 @@ ENTRY(stext)
+ 	;    r2 = pointer to uboot provided cmdline or external DTB in mem
+ 	; These are handled later in handle_uboot_args()
+ 	st	r0, [@uboot_tag]
++	st      r1, [@uboot_magic]
+ 	st	r2, [@uboot_arg]
+ #endif
+ 
+diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
+index 3320ca2fe20f..a1218937abd6 100644
+--- a/arch/arc/kernel/setup.c
++++ b/arch/arc/kernel/setup.c
+@@ -35,6 +35,7 @@ unsigned int intr_to_DE_cnt;
+ 
+ /* Part of U-boot ABI: see head.S */
+ int __initdata uboot_tag;
++int __initdata uboot_magic;
+ char __initdata *uboot_arg;
+ 
+ const struct machine_desc *machine_desc;
+@@ -484,6 +485,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
+ #define UBOOT_TAG_NONE		0
+ #define UBOOT_TAG_CMDLINE	1
+ #define UBOOT_TAG_DTB		2
++/* We always pass 0 as magic from U-boot */
++#define UBOOT_MAGIC_VALUE	0
+ 
+ void __init handle_uboot_args(void)
+ {
+@@ -499,6 +502,11 @@ void __init handle_uboot_args(void)
+ 		goto ignore_uboot_args;
+ 	}
+ 
++	if (uboot_magic != UBOOT_MAGIC_VALUE) {
++		pr_warn(IGNORE_ARGS "non zero uboot magic\n");
++		goto ignore_uboot_args;
++	}
++
+ 	if (uboot_tag != UBOOT_TAG_NONE &&
+             uboot_arg_invalid((unsigned long)uboot_arg)) {
+ 		pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl
+index b9ec44060ed3..a03cf4dfb781 100644
+--- a/arch/arm/crypto/sha256-armv4.pl
++++ b/arch/arm/crypto/sha256-armv4.pl
+@@ -212,10 +212,11 @@ K256:
+ .global	sha256_block_data_order
+ .type	sha256_block_data_order,%function
+ sha256_block_data_order:
++.Lsha256_block_data_order:
+ #if __ARM_ARCH__<7
+ 	sub	r3,pc,#8		@ sha256_block_data_order
+ #else
+-	adr	r3,sha256_block_data_order
++	adr	r3,.Lsha256_block_data_order
+ #endif
+ #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ 	ldr	r12,.LOPENSSL_armcap
+diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
+index 3b58300d611c..054aae0edfce 100644
+--- a/arch/arm/crypto/sha256-core.S_shipped
++++ b/arch/arm/crypto/sha256-core.S_shipped
+@@ -93,10 +93,11 @@ K256:
+ .global	sha256_block_data_order
+ .type	sha256_block_data_order,%function
+ sha256_block_data_order:
++.Lsha256_block_data_order:
+ #if __ARM_ARCH__<7
+ 	sub	r3,pc,#8		@ sha256_block_data_order
+ #else
+-	adr	r3,sha256_block_data_order
++	adr	r3,.Lsha256_block_data_order
+ #endif
+ #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ 	ldr	r12,.LOPENSSL_armcap
+diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl
+index fb5d15048c0b..788c17b56ecc 100644
+--- a/arch/arm/crypto/sha512-armv4.pl
++++ b/arch/arm/crypto/sha512-armv4.pl
+@@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+ .global	sha512_block_data_order
+ .type	sha512_block_data_order,%function
+ sha512_block_data_order:
++.Lsha512_block_data_order:
+ #if __ARM_ARCH__<7
+ 	sub	r3,pc,#8		@ sha512_block_data_order
+ #else
+-	adr	r3,sha512_block_data_order
++	adr	r3,.Lsha512_block_data_order
+ #endif
+ #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ 	ldr	r12,.LOPENSSL_armcap
+diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
+index b1c334a49cda..710ea309769e 100644
+--- a/arch/arm/crypto/sha512-core.S_shipped
++++ b/arch/arm/crypto/sha512-core.S_shipped
+@@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+ .global	sha512_block_data_order
+ .type	sha512_block_data_order,%function
+ sha512_block_data_order:
++.Lsha512_block_data_order:
+ #if __ARM_ARCH__<7
+ 	sub	r3,pc,#8		@ sha512_block_data_order
+ #else
+-	adr	r3,sha512_block_data_order
++	adr	r3,.Lsha512_block_data_order
+ #endif
+ #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+ 	ldr	r12,.LOPENSSL_armcap
+diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
+index a50dc00d79a2..d0a05a3bdb96 100644
+--- a/arch/arm/kernel/patch.c
++++ b/arch/arm/kernel/patch.c
+@@ -16,7 +16,7 @@ struct patch {
+ 	unsigned int insn;
+ };
+ 
+-static DEFINE_SPINLOCK(patch_lock);
++static DEFINE_RAW_SPINLOCK(patch_lock);
+ 
+ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ 	__acquires(&patch_lock)
+@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+ 		return addr;
+ 
+ 	if (flags)
+-		spin_lock_irqsave(&patch_lock, *flags);
++		raw_spin_lock_irqsave(&patch_lock, *flags);
+ 	else
+ 		__acquire(&patch_lock);
+ 
+@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
+ 	clear_fixmap(fixmap);
+ 
+ 	if (flags)
+-		spin_unlock_irqrestore(&patch_lock, *flags);
++		raw_spin_unlock_irqrestore(&patch_lock, *flags);
+ 	else
+ 		__release(&patch_lock);
+ }
+diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
+index b600e38364eb..377ff9cda667 100644
+--- a/arch/arm/plat-samsung/Kconfig
++++ b/arch/arm/plat-samsung/Kconfig
+@@ -256,7 +256,7 @@ config S3C_PM_DEBUG_LED_SMDK
+ 
+ config SAMSUNG_PM_CHECK
+ 	bool "S3C2410 PM Suspend Memory CRC"
+-	depends on PM
++	depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210)
+ 	select CRC32
+ 	help
+ 	  Enable the PM code's memory area checksum over sleep. This option
+diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
+index 44d66c33d59d..23b88b923f06 100644
+--- a/arch/powerpc/kernel/rtasd.c
++++ b/arch/powerpc/kernel/rtasd.c
+@@ -274,27 +274,16 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
+ }
+ 
+ #ifdef CONFIG_PPC_PSERIES
+-static s32 prrn_update_scope;
+-
+-static void prrn_work_fn(struct work_struct *work)
++static void handle_prrn_event(s32 scope)
+ {
+ 	/*
+ 	 * For PRRN, we must pass the negative of the scope value in
+ 	 * the RTAS event.
+ 	 */
+-	pseries_devicetree_update(-prrn_update_scope);
++	pseries_devicetree_update(-scope);
+ 	numa_update_cpu_topology(false);
+ }
+ 
+-static DECLARE_WORK(prrn_work, prrn_work_fn);
+-
+-static void prrn_schedule_update(u32 scope)
+-{
+-	flush_work(&prrn_work);
+-	prrn_update_scope = scope;
+-	schedule_work(&prrn_work);
+-}
+-
+ static void handle_rtas_event(const struct rtas_error_log *log)
+ {
+ 	if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled())
+@@ -303,7 +292,7 @@ static void handle_rtas_event(const struct rtas_error_log *log)
+ 	/* For PRRN Events the extended log length is used to denote
+ 	 * the scope for calling rtas update-nodes.
+ 	 */
+-	prrn_schedule_update(rtas_error_extended_log_length(log));
++	handle_prrn_event(rtas_error_extended_log_length(log));
+ }
+ 
+ #else
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index 87abd5145cc9..3fb855155286 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -101,9 +101,13 @@ static int hv_cpu_init(unsigned int cpu)
+ 	u64 msr_vp_index;
+ 	struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
+ 	void **input_arg;
++	struct page *pg;
+ 
+ 	input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
+-	*input_arg = page_address(alloc_page(GFP_KERNEL));
++	pg = alloc_page(GFP_KERNEL);
++	if (unlikely(!pg))
++		return -ENOMEM;
++	*input_arg = page_address(pg);
+ 
+ 	hv_get_vp_index(msr_vp_index);
+ 
+diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
+index 2c4d5ece7456..93426c5fc70f 100644
+--- a/arch/x86/kernel/aperture_64.c
++++ b/arch/x86/kernel/aperture_64.c
+@@ -14,6 +14,7 @@
+ #define pr_fmt(fmt) "AGP: " fmt
+ 
+ #include <linux/kernel.h>
++#include <linux/kcore.h>
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/memblock.h>
+@@ -57,7 +58,7 @@ int fallback_aper_force __initdata;
+ 
+ int fix_aperture __initdata = 1;
+ 
+-#ifdef CONFIG_PROC_VMCORE
++#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
+ /*
+  * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
+  * use the same range because it will remain configured in the northbridge.
+@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1;
+  */
+ static unsigned long aperture_pfn_start, aperture_page_count;
+ 
+-static int gart_oldmem_pfn_is_ram(unsigned long pfn)
++static int gart_mem_pfn_is_ram(unsigned long pfn)
+ {
+ 	return likely((pfn < aperture_pfn_start) ||
+ 		      (pfn >= aperture_pfn_start + aperture_page_count));
+ }
+ 
+-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
++static void __init exclude_from_core(u64 aper_base, u32 aper_order)
+ {
+ 	aperture_pfn_start = aper_base >> PAGE_SHIFT;
+ 	aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
+-	WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
++#ifdef CONFIG_PROC_VMCORE
++	WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram));
++#endif
++#ifdef CONFIG_PROC_KCORE
++	WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram));
++#endif
+ }
+ #else
+-static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
++static void exclude_from_core(u64 aper_base, u32 aper_order)
+ {
+ }
+ #endif
+@@ -469,7 +475,7 @@ out:
+ 			 * may have allocated the range over its e820 RAM
+ 			 * and fixed up the northbridge
+ 			 */
+-			exclude_from_vmcore(last_aper_base, last_aper_order);
++			exclude_from_core(last_aper_base, last_aper_order);
+ 
+ 			return 1;
+ 		}
+@@ -515,7 +521,7 @@ out:
+ 	 * overlap with the first kernel's memory. We can't access the
+ 	 * range through vmcore even though it should be part of the dump.
+ 	 */
+-	exclude_from_vmcore(aper_alloc, aper_order);
++	exclude_from_core(aper_alloc, aper_order);
+ 
+ 	/* Fix up the north bridges */
+ 	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
+diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
+index 8949b7ae6d92..fa61c870ada9 100644
+--- a/arch/x86/kernel/cpu/cyrix.c
++++ b/arch/x86/kernel/cpu/cyrix.c
+@@ -124,7 +124,7 @@ static void set_cx86_reorder(void)
+ 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
+ 
+ 	/* Load/Store Serialize to mem access disable (=reorder it) */
+-	setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
++	setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
+ 	/* set load/store serialize from 1GB to 4GB */
+ 	ccr3 |= 0xe0;
+ 	setCx86(CX86_CCR3, ccr3);
+@@ -135,11 +135,11 @@ static void set_cx86_memwb(void)
+ 	pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
+ 
+ 	/* CCR2 bit 2: unlock NW bit */
+-	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
++	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
+ 	/* set 'Not Write-through' */
+ 	write_cr0(read_cr0() | X86_CR0_NW);
+ 	/* CCR2 bit 2: lock NW bit and set WT1 */
+-	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
++	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
+ }
+ 
+ /*
+@@ -153,14 +153,14 @@ static void geode_configure(void)
+ 	local_irq_save(flags);
+ 
+ 	/* Suspend on halt power saving and enable #SUSP pin */
+-	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);
++	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+ 
+ 	ccr3 = getCx86(CX86_CCR3);
+ 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
+ 
+ 
+ 	/* FPU fast, DTE cache, Mem bypass */
+-	setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
++	setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
+ 	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
+ 
+ 	set_cx86_memwb();
+@@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
+ 		/* GXm supports extended cpuid levels 'ala' AMD */
+ 		if (c->cpuid_level == 2) {
+ 			/* Enable cxMMX extensions (GX1 Datasheet 54) */
+-			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);
++			setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
+ 
+ 			/*
+ 			 * GXm : 0x30 ... 0x5f GXm  datasheet 51
+@@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
+ 		if (dir1 > 7) {
+ 			dir0_msn++;  /* M II */
+ 			/* Enable MMX extensions (App note 108) */
+-			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
++			setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
+ 		} else {
+ 			/* A 6x86MX - it has the bug. */
+ 			set_cpu_bug(c, X86_BUG_COMA);
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index b0acb22e5a46..1e3f1f140ffb 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -909,6 +909,8 @@ int __init hpet_enable(void)
+ 		return 0;
+ 
+ 	hpet_set_mapping();
++	if (!hpet_virt_address)
++		return 0;
+ 
+ 	/*
+ 	 * Read the period and check for a sane value:
+diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
+index 34a5c1715148..2882fe1d2a78 100644
+--- a/arch/x86/kernel/hw_breakpoint.c
++++ b/arch/x86/kernel/hw_breakpoint.c
+@@ -357,6 +357,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
+ #endif
+ 	default:
+ 		WARN_ON_ONCE(1);
++		return -EINVAL;
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
+index f1c5eb99d445..ddb1ca6923b1 100644
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -599,8 +599,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
+ 			mpf_base = base;
+ 			mpf_found = true;
+ 
+-			pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
+-				base, base + sizeof(*mpf) - 1, mpf);
++			pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
++				base, base + sizeof(*mpf) - 1);
+ 
+ 			memblock_reserve(base, sizeof(*mpf));
+ 			if (mpf->physptr)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 6b6bcafd1d2c..3380a312d186 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -13181,24 +13181,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ 	kvm_clear_interrupt_queue(vcpu);
+ }
+ 
+-static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
+-			struct vmcs12 *vmcs12)
+-{
+-	u32 entry_failure_code;
+-
+-	nested_ept_uninit_mmu_context(vcpu);
+-
+-	/*
+-	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
+-	 * couldn't have changed.
+-	 */
+-	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+-		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+-
+-	if (!enable_ept)
+-		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+-}
+-
+ /*
+  * A part of what we need to when the nested L2 guest exits and we want to
+  * run its L1 parent, is to reset L1's guest state to the host state specified
+@@ -13212,6 +13194,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ 				   struct vmcs12 *vmcs12)
+ {
+ 	struct kvm_segment seg;
++	u32 entry_failure_code;
+ 
+ 	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
+ 		vcpu->arch.efer = vmcs12->host_ia32_efer;
+@@ -13238,7 +13221,17 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+ 	vmx_set_cr4(vcpu, vmcs12->host_cr4);
+ 
+-	load_vmcs12_mmu_host_state(vcpu, vmcs12);
++	nested_ept_uninit_mmu_context(vcpu);
++
++	/*
++	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
++	 * couldn't have changed.
++	 */
++	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
++		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
++
++	if (!enable_ept)
++		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+ 
+ 	/*
+ 	 * If vmcs01 don't use VPID, CPU flushes TLB on every
+@@ -13334,6 +13327,140 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ 		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
+ }
+ 
++static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
++{
++	struct shared_msr_entry *efer_msr;
++	unsigned int i;
++
++	if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
++		return vmcs_read64(GUEST_IA32_EFER);
++
++	if (cpu_has_load_ia32_efer)
++		return host_efer;
++
++	for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
++		if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
++			return vmx->msr_autoload.guest.val[i].value;
++	}
++
++	efer_msr = find_msr_entry(vmx, MSR_EFER);
++	if (efer_msr)
++		return efer_msr->data;
++
++	return host_efer;
++}
++
++static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
++{
++	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
++	struct vcpu_vmx *vmx = to_vmx(vcpu);
++	struct vmx_msr_entry g, h;
++	struct msr_data msr;
++	gpa_t gpa;
++	u32 i, j;
++
++	vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
++
++	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
++		/*
++		 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
++		 * as vmcs01.GUEST_DR7 contains a userspace defined value
++		 * and vcpu->arch.dr7 is not squirreled away before the
++		 * nested VMENTER (not worth adding a variable in nested_vmx).
++		 */
++		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
++			kvm_set_dr(vcpu, 7, DR7_FIXED_1);
++		else
++			WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
++	}
++
++	/*
++	 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
++	 * handle a variety of side effects to KVM's software model.
++	 */
++	vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
++
++	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
++	vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
++
++	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
++	vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
++
++	nested_ept_uninit_mmu_context(vcpu);
++	vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
++	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
++
++	/*
++	 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
++	 * from vmcs01 (if necessary).  The PDPTRs are not loaded on
++	 * VMFail, like everything else we just need to ensure our
++	 * software model is up-to-date.
++	 */
++	ept_save_pdptrs(vcpu);
++
++	kvm_mmu_reset_context(vcpu);
++
++	if (cpu_has_vmx_msr_bitmap())
++		vmx_update_msr_bitmap(vcpu);
++
++	/*
++	 * This nasty bit of open coding is a compromise between blindly
++	 * loading L1's MSRs using the exit load lists (incorrect emulation
++	 * of VMFail), leaving the nested VM's MSRs in the software model
++	 * (incorrect behavior) and snapshotting the modified MSRs (too
++	 * expensive since the lists are unbound by hardware).  For each
++	 * MSR that was (prematurely) loaded from the nested VMEntry load
++	 * list, reload it from the exit load list if it exists and differs
++	 * from the guest value.  The intent is to stuff host state as
++	 * silently as possible, not to fully process the exit load list.
++	 */
++	msr.host_initiated = false;
++	for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
++		gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
++		if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
++			pr_debug_ratelimited(
++				"%s read MSR index failed (%u, 0x%08llx)\n",
++				__func__, i, gpa);
++			goto vmabort;
++		}
++
++		for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
++			gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
++			if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
++				pr_debug_ratelimited(
++					"%s read MSR failed (%u, 0x%08llx)\n",
++					__func__, j, gpa);
++				goto vmabort;
++			}
++			if (h.index != g.index)
++				continue;
++			if (h.value == g.value)
++				break;
++
++			if (nested_vmx_load_msr_check(vcpu, &h)) {
++				pr_debug_ratelimited(
++					"%s check failed (%u, 0x%x, 0x%x)\n",
++					__func__, j, h.index, h.reserved);
++				goto vmabort;
++			}
++
++			msr.index = h.index;
++			msr.data = h.value;
++			if (kvm_set_msr(vcpu, &msr)) {
++				pr_debug_ratelimited(
++					"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
++					__func__, j, h.index, h.value);
++				goto vmabort;
++			}
++		}
++	}
++
++	return;
++
++vmabort:
++	nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
++}
++
+ /*
+  * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
+  * and modify vmcs12 to make it see what it would expect to see there if
+@@ -13478,7 +13605,13 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ 	 */
+ 	nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ 
+-	load_vmcs12_mmu_host_state(vcpu, vmcs12);
++	/*
++	 * Restore L1's host state to KVM's software model.  We're here
++	 * because a consistency check was caught by hardware, which
++	 * means some amount of guest state has been propagated to KVM's
++	 * model and needs to be unwound to the host's state.
++	 */
++	nested_vmx_restore_host_state(vcpu);
+ 
+ 	/*
+ 	 * The emulated instruction was already skipped in
+diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
+index b154e057ca67..6b8396ccb5c4 100644
+--- a/block/blk-iolatency.c
++++ b/block/blk-iolatency.c
+@@ -75,6 +75,7 @@
+ #include <linux/blk-mq.h>
+ #include "blk-rq-qos.h"
+ #include "blk-stat.h"
++#include "blk.h"
+ 
+ #define DEFAULT_SCALE_COOKIE 1000000U
+ 
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index d4e5610e09c5..49e16f009095 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -194,6 +194,7 @@ static struct workqueue_struct *ec_query_wq;
+ static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
+ static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
+ static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
++static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
+ 
+ /* --------------------------------------------------------------------------
+  *                           Logging/Debugging
+@@ -499,6 +500,26 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
+ 		ec_log_drv("event blocked");
+ }
+ 
++/*
++ * Process _Q events that might have accumulated in the EC.
++ * Run with locked ec mutex.
++ */
++static void acpi_ec_clear(struct acpi_ec *ec)
++{
++	int i, status;
++	u8 value = 0;
++
++	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
++		status = acpi_ec_query(ec, &value);
++		if (status || !value)
++			break;
++	}
++	if (unlikely(i == ACPI_EC_CLEAR_MAX))
++		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
++	else
++		pr_info("%d stale EC events cleared\n", i);
++}
++
+ static void acpi_ec_enable_event(struct acpi_ec *ec)
+ {
+ 	unsigned long flags;
+@@ -507,6 +528,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
+ 	if (acpi_ec_started(ec))
+ 		__acpi_ec_enable_event(ec);
+ 	spin_unlock_irqrestore(&ec->lock, flags);
++
++	/* Drain additional events if hardware requires that */
++	if (EC_FLAGS_CLEAR_ON_RESUME)
++		acpi_ec_clear(ec);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -1034,6 +1059,18 @@ void acpi_ec_unblock_transactions(void)
+ 		acpi_ec_start(first_ec, true);
+ }
+ 
++void acpi_ec_mark_gpe_for_wake(void)
++{
++	if (first_ec && !ec_no_wakeup)
++		acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
++}
++
++void acpi_ec_set_gpe_wake_mask(u8 action)
++{
++	if (first_ec && !ec_no_wakeup)
++		acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
++}
++
+ void acpi_ec_dispatch_gpe(void)
+ {
+ 	if (first_ec)
+@@ -1808,6 +1845,31 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id)
+ }
+ #endif
+ 
++/*
++ * On some hardware it is necessary to clear events accumulated by the EC during
++ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
++ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
++ *
++ * Ideally, the EC should also be instructed NOT to accumulate events during
++ * sleep (which Windows seems to do somehow), but the interface to control this
++ * behaviour is not known at this time.
++ *
++ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
++ * however it is very likely that other Samsung models are affected.
++ *
++ * On systems which don't accumulate _Q events during sleep, this extra check
++ * should be harmless.
++ */
++static int ec_clear_on_resume(const struct dmi_system_id *id)
++{
++	pr_debug("Detected system needing EC poll on resume.\n");
++	EC_FLAGS_CLEAR_ON_RESUME = 1;
++	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
++	return 0;
++}
++
+ /*
+  * Some ECDTs contain wrong register addresses.
+  * MSI MS-171F
+@@ -1857,6 +1919,9 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
+ 	ec_honor_ecdt_gpe, "ASUS X580VD", {
+ 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 	DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
++	{
++	ec_clear_on_resume, "Samsung hardware", {
++	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
+ 	{},
+ };
+ 
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 530a3f675490..f59d0b9e2683 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -188,6 +188,8 @@ int acpi_ec_ecdt_probe(void);
+ int acpi_ec_dsdt_probe(void);
+ void acpi_ec_block_transactions(void);
+ void acpi_ec_unblock_transactions(void);
++void acpi_ec_mark_gpe_for_wake(void);
++void acpi_ec_set_gpe_wake_mask(u8 action);
+ void acpi_ec_dispatch_gpe(void);
+ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+ 			      acpi_handle handle, acpi_ec_query_func func,
+diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
+index 295b59271189..96c5e27967f4 100644
+--- a/drivers/acpi/sbs.c
++++ b/drivers/acpi/sbs.c
+@@ -441,9 +441,13 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
+ 
+ 	/*
+ 	 * The spec requires that bit 4 always be 1. If it's not set, assume
+-	 * that the implementation doesn't support an SBS charger
++	 * that the implementation doesn't support an SBS charger.
++	 *
++	 * And on some MacBooks a status of 0xffff is always returned, no
++	 * matter whether the charger is plugged in or not, which is also
++	 * wrong, so ignore the SBS charger for those too.
+ 	 */
+-	if (!((status >> 4) & 0x1))
++	if (!((status >> 4) & 0x1) || status == 0xffff)
+ 		return -ENODEV;
+ 
+ 	sbs->charger_present = (status >> 15) & 0x1;
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 754d59f95500..74c489047f57 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -940,6 +940,8 @@ static int lps0_device_attach(struct acpi_device *adev,
+ 
+ 		acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
+ 				  bitmask);
++
++		acpi_ec_mark_gpe_for_wake();
+ 	} else {
+ 		acpi_handle_debug(adev->handle,
+ 				  "_DSM function 0 evaluation failed\n");
+@@ -968,11 +970,16 @@ static int acpi_s2idle_prepare(void)
+ 	if (lps0_device_handle) {
+ 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
+ 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
++
++		acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
+ 	}
+ 
+ 	if (acpi_sci_irq_valid())
+ 		enable_irq_wake(acpi_sci_irq);
+ 
++	/* Change the configuration of GPEs to avoid spurious wakeup. */
++	acpi_enable_all_wakeup_gpes();
++	acpi_os_wait_events_complete();
+ 	return 0;
+ }
+ 
+@@ -1017,10 +1024,14 @@ static void acpi_s2idle_sync(void)
+ 
+ static void acpi_s2idle_restore(void)
+ {
++	acpi_enable_all_runtime_gpes();
++
+ 	if (acpi_sci_irq_valid())
+ 		disable_irq_wake(acpi_sci_irq);
+ 
+ 	if (lps0_device_handle) {
++		acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
++
+ 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
+ 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
+ 	}
+diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
+index 78db97687f26..c4b06cc075f9 100644
+--- a/drivers/acpi/utils.c
++++ b/drivers/acpi/utils.c
+@@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
+ 	match.hrv = hrv;
+ 
+ 	dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
++	put_device(dev);
+ 	return !!dev;
+ }
+ EXPORT_SYMBOL(acpi_dev_present);
+diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
+index f1a42f0f1ded..df3da49ff9e8 100644
+--- a/drivers/auxdisplay/hd44780.c
++++ b/drivers/auxdisplay/hd44780.c
+@@ -299,6 +299,8 @@ static int hd44780_remove(struct platform_device *pdev)
+ 	struct charlcd *lcd = platform_get_drvdata(pdev);
+ 
+ 	charlcd_unregister(lcd);
++
++	kfree(lcd);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index 1ac4c36e13bb..c3968e2d0a98 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -197,11 +197,16 @@ static ssize_t node_read_vmstat(struct device *dev,
+ 			     sum_zone_numa_state(nid, i));
+ #endif
+ 
+-	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
++	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
++		/* Skip hidden vmstat items. */
++		if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
++				 NR_VM_NUMA_STAT_ITEMS] == '\0')
++			continue;
+ 		n += sprintf(buf+n, "%s %lu\n",
+ 			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
+ 			     NR_VM_NUMA_STAT_ITEMS],
+ 			     node_page_state(pgdat, i));
++	}
+ 
+ 	return n;
+ }
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 4b5714199490..bf5be0bfaf77 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1388,12 +1388,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ 	if (IS_ERR(gpd_data))
+ 		return PTR_ERR(gpd_data);
+ 
+-	genpd_lock(genpd);
+-
+ 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
+ 	if (ret)
+ 		goto out;
+ 
++	genpd_lock(genpd);
++
+ 	dev_pm_domain_set(dev, &genpd->domain);
+ 
+ 	genpd->device_count++;
+@@ -1401,9 +1401,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ 
+ 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+ 
+- out:
+ 	genpd_unlock(genpd);
+-
++ out:
+ 	if (ret)
+ 		genpd_free_dev_data(dev, gpd_data);
+ 	else
+@@ -1452,15 +1451,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
+ 	genpd->device_count--;
+ 	genpd->max_off_time_changed = true;
+ 
+-	if (genpd->detach_dev)
+-		genpd->detach_dev(genpd, dev);
+-
+ 	dev_pm_domain_set(dev, NULL);
+ 
+ 	list_del_init(&pdd->list_node);
+ 
+ 	genpd_unlock(genpd);
+ 
++	if (genpd->detach_dev)
++		genpd->detach_dev(genpd, dev);
++
+ 	genpd_free_dev_data(dev, gpd_data);
+ 
+ 	return 0;
+diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
+index 7f07a5085e9b..fdcdc751d03b 100644
+--- a/drivers/crypto/axis/artpec6_crypto.c
++++ b/drivers/crypto/axis/artpec6_crypto.c
+@@ -284,6 +284,7 @@ enum artpec6_crypto_hash_flags {
+ 
+ struct artpec6_crypto_req_common {
+ 	struct list_head list;
++	struct list_head complete_in_progress;
+ 	struct artpec6_crypto_dma_descriptors *dma;
+ 	struct crypto_async_request *req;
+ 	void (*complete)(struct crypto_async_request *req);
+@@ -2046,7 +2047,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
+ 	return artpec6_crypto_dma_map_descs(common);
+ }
+ 
+-static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
++static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
++	    struct list_head *completions)
+ {
+ 	struct artpec6_crypto_req_common *req;
+ 
+@@ -2057,7 +2059,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
+ 		list_move_tail(&req->list, &ac->pending);
+ 		artpec6_crypto_start_dma(req);
+ 
+-		req->req->complete(req->req, -EINPROGRESS);
++		list_add_tail(&req->complete_in_progress, completions);
+ 	}
+ 
+ 	/*
+@@ -2087,6 +2089,11 @@ static void artpec6_crypto_task(unsigned long data)
+ 	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
+ 	struct artpec6_crypto_req_common *req;
+ 	struct artpec6_crypto_req_common *n;
++	struct list_head complete_done;
++	struct list_head complete_in_progress;
++
++	INIT_LIST_HEAD(&complete_done);
++	INIT_LIST_HEAD(&complete_in_progress);
+ 
+ 	if (list_empty(&ac->pending)) {
+ 		pr_debug("Spurious IRQ\n");
+@@ -2120,19 +2127,30 @@ static void artpec6_crypto_task(unsigned long data)
+ 
+ 		pr_debug("Completing request %p\n", req);
+ 
+-		list_del(&req->list);
++		list_move_tail(&req->list, &complete_done);
+ 
+ 		artpec6_crypto_dma_unmap_all(req);
+ 		artpec6_crypto_copy_bounce_buffers(req);
+ 
+ 		ac->pending_count--;
+ 		artpec6_crypto_common_destroy(req);
+-		req->complete(req->req);
+ 	}
+ 
+-	artpec6_crypto_process_queue(ac);
++	artpec6_crypto_process_queue(ac, &complete_in_progress);
+ 
+ 	spin_unlock_bh(&ac->queue_lock);
++
++	/* Perform the completion callbacks without holding the queue lock
++	 * to allow new request submissions from the callbacks.
++	 */
++	list_for_each_entry_safe(req, n, &complete_done, list) {
++		req->complete(req->req);
++	}
++
++	list_for_each_entry_safe(req, n, &complete_in_progress,
++				 complete_in_progress) {
++		req->req->complete(req->req, -EINPROGRESS);
++	}
+ }
+ 
+ static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
+diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
+index eb27fa76e8fc..bcc6be4a5cb2 100644
+--- a/drivers/gpio/gpio-pxa.c
++++ b/drivers/gpio/gpio-pxa.c
+@@ -777,6 +777,9 @@ static int pxa_gpio_suspend(void)
+ 	struct pxa_gpio_bank *c;
+ 	int gpio;
+ 
++	if (!pchip)
++		return 0;
++
+ 	for_each_gpio_bank(gpio, c, pchip) {
+ 		c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
+ 		c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
+@@ -795,6 +798,9 @@ static void pxa_gpio_resume(void)
+ 	struct pxa_gpio_bank *c;
+ 	int gpio;
+ 
++	if (!pchip)
++		return;
++
+ 	for_each_gpio_bank(gpio, c, pchip) {
+ 		/* restore level with set/clear */
+ 		writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index 47243165a082..ae90a99909ef 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+ 		struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ 		struct queue_properties *q)
+ {
+-	uint64_t addr;
+-	struct cik_mqd *m;
+-	int retval;
+-
+-	retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+-					mqd_mem_obj);
+-
+-	if (retval != 0)
+-		return -ENOMEM;
+-
+-	m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
+-	addr = (*mqd_mem_obj)->gpu_addr;
+-
+-	memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
+-
+-	m->header = 0xC0310800;
+-	m->compute_pipelinestat_enable = 1;
+-	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+-	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+-	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+-	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+-
+-	m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
+-					PRELOAD_REQ;
+-	m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
+-				QUANTUM_DURATION(10);
+-
+-	m->cp_mqd_control             = MQD_CONTROL_PRIV_STATE_EN;
+-	m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+-	m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
+-
+-	m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
+-
+-	/*
+-	 * Pipe Priority
+-	 * Identifies the pipe relative priority when this queue is connected
+-	 * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
+-	 * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
+-	 * 0 = CS_LOW (typically below GFX)
+-	 * 1 = CS_MEDIUM (typically between HP3D and GFX
+-	 * 2 = CS_HIGH (typically above HP3D)
+-	 */
+-	m->cp_hqd_pipe_priority = 1;
+-	m->cp_hqd_queue_priority = 15;
+-
+-	*mqd = m;
+-	if (gart_addr)
+-		*gart_addr = addr;
+-	retval = mm->update_mqd(mm, m, q);
+-
+-	return retval;
++	return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
+ }
+ 
+ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
+diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
+index ce9db7aab225..a29f87e98d9d 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
++++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
+@@ -146,7 +146,7 @@ struct cirrus_device {
+ 
+ struct cirrus_fbdev {
+ 	struct drm_fb_helper helper;
+-	struct drm_framebuffer gfb;
++	struct drm_framebuffer *gfb;
+ 	void *sysram;
+ 	int size;
+ 	int x1, y1, x2, y2; /* dirty rect */
+diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+index b643ac92801c..82cc82e0bd80 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
++++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+@@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
+ 	struct drm_gem_object *obj;
+ 	struct cirrus_bo *bo;
+ 	int src_offset, dst_offset;
+-	int bpp = afbdev->gfb.format->cpp[0];
++	int bpp = afbdev->gfb->format->cpp[0];
+ 	int ret = -EBUSY;
+ 	bool unmap = false;
+ 	bool store_for_later = false;
+ 	int x2, y2;
+ 	unsigned long flags;
+ 
+-	obj = afbdev->gfb.obj[0];
++	obj = afbdev->gfb->obj[0];
+ 	bo = gem_to_cirrus_bo(obj);
+ 
+ 	/*
+@@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
+ 	}
+ 	for (i = y; i < y + height; i++) {
+ 		/* assume equal stride for now */
+-		src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
++		src_offset = dst_offset = i * afbdev->gfb->pitches[0] + (x * bpp);
+ 		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+ 
+ 	}
+@@ -192,23 +192,26 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
+ 		return -ENOMEM;
+ 
+ 	info = drm_fb_helper_alloc_fbi(helper);
+-	if (IS_ERR(info))
+-		return PTR_ERR(info);
++	if (IS_ERR(info)) {
++		ret = PTR_ERR(info);
++		goto err_vfree;
++	}
+ 
+ 	info->par = gfbdev;
+ 
+-	ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
++	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
++	if (!fb) {
++		ret = -ENOMEM;
++		goto err_drm_gem_object_put_unlocked;
++	}
++
++	ret = cirrus_framebuffer_init(cdev->dev, fb, &mode_cmd, gobj);
+ 	if (ret)
+-		return ret;
++		goto err_kfree;
+ 
+ 	gfbdev->sysram = sysram;
+ 	gfbdev->size = size;
+-
+-	fb = &gfbdev->gfb;
+-	if (!fb) {
+-		DRM_INFO("fb is NULL\n");
+-		return -EINVAL;
+-	}
++	gfbdev->gfb = fb;
+ 
+ 	/* setup helper */
+ 	gfbdev->helper.fb = fb;
+@@ -241,24 +244,27 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
+ 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
+ 
+ 	return 0;
++
++err_kfree:
++	kfree(fb);
++err_drm_gem_object_put_unlocked:
++	drm_gem_object_put_unlocked(gobj);
++err_vfree:
++	vfree(sysram);
++	return ret;
+ }
+ 
+ static int cirrus_fbdev_destroy(struct drm_device *dev,
+ 				struct cirrus_fbdev *gfbdev)
+ {
+-	struct drm_framebuffer *gfb = &gfbdev->gfb;
++	struct drm_framebuffer *gfb = gfbdev->gfb;
+ 
+ 	drm_fb_helper_unregister_fbi(&gfbdev->helper);
+ 
+-	if (gfb->obj[0]) {
+-		drm_gem_object_put_unlocked(gfb->obj[0]);
+-		gfb->obj[0] = NULL;
+-	}
+-
+ 	vfree(gfbdev->sysram);
+ 	drm_fb_helper_fini(&gfbdev->helper);
+-	drm_framebuffer_unregister_private(gfb);
+-	drm_framebuffer_cleanup(gfb);
++	if (gfb)
++		drm_framebuffer_put(gfb);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
+index 336bfda40125..90a4e641d3fb 100644
+--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
++++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
+@@ -127,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
+ 		return ret;
+ 	}
+ 
+-	if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
++	if (cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
+ 		/* if pushing console in kmap it */
+ 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ 		if (ret)
+diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
+index ffbf4a950f69..522d6c46d7b1 100644
+--- a/drivers/gpu/drm/exynos/exynos_mixer.c
++++ b/drivers/gpu/drm/exynos/exynos_mixer.c
+@@ -20,6 +20,7 @@
+ #include "regs-vp.h"
+ 
+ #include <linux/kernel.h>
++#include <linux/ktime.h>
+ #include <linux/spinlock.h>
+ #include <linux/wait.h>
+ #include <linux/i2c.h>
+@@ -337,15 +338,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx)
+ 	mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
+ }
+ 
+-static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
++static bool mixer_is_synced(struct mixer_context *ctx)
+ {
+-	/* block update on vsync */
+-	mixer_reg_writemask(ctx, MXR_STATUS, enable ?
+-			MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
++	u32 base, shadow;
+ 
++	if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
++	    ctx->mxr_ver == MXR_VER_128_0_0_184)
++		return !(mixer_reg_read(ctx, MXR_CFG) &
++			 MXR_CFG_LAYER_UPDATE_COUNT_MASK);
++
++	if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
++	    vp_reg_read(ctx, VP_SHADOW_UPDATE))
++		return false;
++
++	base = mixer_reg_read(ctx, MXR_CFG);
++	shadow = mixer_reg_read(ctx, MXR_CFG_S);
++	if (base != shadow)
++		return false;
++
++	base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
++	shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
++	if (base != shadow)
++		return false;
++
++	base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
++	shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
++	if (base != shadow)
++		return false;
++
++	return true;
++}
++
++static int mixer_wait_for_sync(struct mixer_context *ctx)
++{
++	ktime_t timeout = ktime_add_us(ktime_get(), 100000);
++
++	while (!mixer_is_synced(ctx)) {
++		usleep_range(1000, 2000);
++		if (ktime_compare(ktime_get(), timeout) > 0)
++			return -ETIMEDOUT;
++	}
++	return 0;
++}
++
++static void mixer_disable_sync(struct mixer_context *ctx)
++{
++	mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
++}
++
++static void mixer_enable_sync(struct mixer_context *ctx)
++{
++	if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
++	    ctx->mxr_ver == MXR_VER_128_0_0_184)
++		mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
++	mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
+ 	if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
+-		vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
+-			VP_SHADOW_UPDATE_ENABLE : 0);
++		vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
+ }
+ 
+ static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
+@@ -482,7 +530,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
+ 
+ 	spin_lock_irqsave(&ctx->reg_slock, flags);
+ 
+-	vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
+ 	/* interlace or progressive scan mode */
+ 	val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
+ 	vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
+@@ -537,11 +584,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
+ 	vp_regs_dump(ctx);
+ }
+ 
+-static void mixer_layer_update(struct mixer_context *ctx)
+-{
+-	mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+-}
+-
+ static void mixer_graph_buffer(struct mixer_context *ctx,
+ 			       struct exynos_drm_plane *plane)
+ {
+@@ -618,11 +660,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
+ 	mixer_cfg_layer(ctx, win, priority, true);
+ 	mixer_cfg_gfx_blend(ctx, win, fb->format->has_alpha);
+ 
+-	/* layer update mandatory for mixer 16.0.33.0 */
+-	if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
+-		ctx->mxr_ver == MXR_VER_128_0_0_184)
+-		mixer_layer_update(ctx);
+-
+ 	spin_unlock_irqrestore(&ctx->reg_slock, flags);
+ 
+ 	mixer_regs_dump(ctx);
+@@ -687,7 +724,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
+ static irqreturn_t mixer_irq_handler(int irq, void *arg)
+ {
+ 	struct mixer_context *ctx = arg;
+-	u32 val, base, shadow;
++	u32 val;
+ 
+ 	spin_lock(&ctx->reg_slock);
+ 
+@@ -701,26 +738,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
+ 		val &= ~MXR_INT_STATUS_VSYNC;
+ 
+ 		/* interlace scan need to check shadow register */
+-		if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
+-			if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
+-			    vp_reg_read(ctx, VP_SHADOW_UPDATE))
+-				goto out;
+-
+-			base = mixer_reg_read(ctx, MXR_CFG);
+-			shadow = mixer_reg_read(ctx, MXR_CFG_S);
+-			if (base != shadow)
+-				goto out;
+-
+-			base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
+-			shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
+-			if (base != shadow)
+-				goto out;
+-
+-			base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
+-			shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
+-			if (base != shadow)
+-				goto out;
+-		}
++		if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
++		    && !mixer_is_synced(ctx))
++			goto out;
+ 
+ 		drm_crtc_handle_vblank(&ctx->crtc->base);
+ 	}
+@@ -895,12 +915,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
+ 
+ static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
+ {
+-	struct mixer_context *mixer_ctx = crtc->ctx;
++	struct mixer_context *ctx = crtc->ctx;
+ 
+-	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
++	if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
+ 		return;
+ 
+-	mixer_vsync_set_update(mixer_ctx, false);
++	if (mixer_wait_for_sync(ctx))
++		dev_err(ctx->dev, "timeout waiting for VSYNC\n");
++	mixer_disable_sync(ctx);
+ }
+ 
+ static void mixer_update_plane(struct exynos_drm_crtc *crtc,
+@@ -942,7 +964,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
+ 	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
+ 		return;
+ 
+-	mixer_vsync_set_update(mixer_ctx, true);
++	mixer_enable_sync(mixer_ctx);
+ 	exynos_crtc_handle_event(crtc);
+ }
+ 
+@@ -957,7 +979,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
+ 
+ 	exynos_drm_pipe_clk_enable(crtc, true);
+ 
+-	mixer_vsync_set_update(ctx, false);
++	mixer_disable_sync(ctx);
+ 
+ 	mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+ 
+@@ -970,7 +992,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
+ 
+ 	mixer_commit(ctx);
+ 
+-	mixer_vsync_set_update(ctx, true);
++	mixer_enable_sync(ctx);
+ 
+ 	set_bit(MXR_BIT_POWERED, &ctx->flags);
+ }
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+index 8a0f85f5fc1a..6a765682fbfa 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+@@ -38,6 +38,7 @@ int nvkm_volt_set_id(struct nvkm_volt *, u8 id, u8 min_id, u8 temp,
+ 
+ int nv40_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+ int gf100_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
++int gf117_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+ int gk104_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+ int gk20a_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+ int gm20b_volt_new(struct nvkm_device *, int, struct nvkm_volt **);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+index 9109b69cd052..9635704a1d86 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+@@ -161,7 +161,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
+ 	}
+ 
+ 	ret = pm_runtime_get_sync(drm->dev);
+-	if (IS_ERR_VALUE(ret) && ret != -EACCES)
++	if (ret < 0 && ret != -EACCES)
+ 		return ret;
+ 	ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
+ 	pm_runtime_put_autosuspend(drm->dev);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+index e294013426ce..347a6a4cb339 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+@@ -1613,7 +1613,7 @@ nvd7_chipset = {
+ 	.pci = gf106_pci_new,
+ 	.therm = gf119_therm_new,
+ 	.timer = nv41_timer_new,
+-	.volt = gf100_volt_new,
++	.volt = gf117_volt_new,
+ 	.ce[0] = gf100_ce_new,
+ 	.disp = gf119_disp_new,
+ 	.dma = gf119_dma_new,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+index bcd179ba11d0..146adcdd316a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/Kbuild
+@@ -2,6 +2,7 @@ nvkm-y += nvkm/subdev/volt/base.o
+ nvkm-y += nvkm/subdev/volt/gpio.o
+ nvkm-y += nvkm/subdev/volt/nv40.o
+ nvkm-y += nvkm/subdev/volt/gf100.o
++nvkm-y += nvkm/subdev/volt/gf117.o
+ nvkm-y += nvkm/subdev/volt/gk104.o
+ nvkm-y += nvkm/subdev/volt/gk20a.o
+ nvkm-y += nvkm/subdev/volt/gm20b.o
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
+new file mode 100644
+index 000000000000..547a58f0aeac
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gf117.c
+@@ -0,0 +1,60 @@
++/*
++ * Copyright 2019 Ilia Mirkin
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ilia Mirkin
++ */
++#include "priv.h"
++
++#include <subdev/fuse.h>
++
++static int
++gf117_volt_speedo_read(struct nvkm_volt *volt)
++{
++	struct nvkm_device *device = volt->subdev.device;
++	struct nvkm_fuse *fuse = device->fuse;
++
++	if (!fuse)
++		return -EINVAL;
++
++	return nvkm_fuse_read(fuse, 0x3a8);
++}
++
++static const struct nvkm_volt_func
++gf117_volt = {
++	.oneinit = gf100_volt_oneinit,
++	.vid_get = nvkm_voltgpio_get,
++	.vid_set = nvkm_voltgpio_set,
++	.speedo_read = gf117_volt_speedo_read,
++};
++
++int
++gf117_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
++{
++	struct nvkm_volt *volt;
++	int ret;
++
++	ret = nvkm_volt_new_(&gf117_volt, device, index, &volt);
++	*pvolt = volt;
++	if (ret)
++		return ret;
++
++	return nvkm_voltgpio_init(volt);
++}
+diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+index 72edb334d997..88c7d035ace6 100644
+--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
++++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+@@ -70,18 +70,12 @@ static inline struct innolux_panel *to_innolux_panel(struct drm_panel *panel)
+ static int innolux_panel_disable(struct drm_panel *panel)
+ {
+ 	struct innolux_panel *innolux = to_innolux_panel(panel);
+-	int err;
+ 
+ 	if (!innolux->enabled)
+ 		return 0;
+ 
+ 	backlight_disable(innolux->backlight);
+ 
+-	err = mipi_dsi_dcs_set_display_off(innolux->link);
+-	if (err < 0)
+-		DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+-			      err);
+-
+ 	innolux->enabled = false;
+ 
+ 	return 0;
+@@ -95,6 +89,11 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
+ 	if (!innolux->prepared)
+ 		return 0;
+ 
++	err = mipi_dsi_dcs_set_display_off(innolux->link);
++	if (err < 0)
++		DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
++			      err);
++
+ 	err = mipi_dsi_dcs_enter_sleep_mode(innolux->link);
+ 	if (err < 0) {
+ 		DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 7c484729f9b2..268f5a3b3122 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1445,7 +1445,6 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
+ 		container_of(kobj, struct ttm_bo_global, kobj);
+ 
+ 	__free_page(glob->dummy_read_page);
+-	kfree(glob);
+ }
+ 
+ void ttm_bo_global_release(struct drm_global_reference *ref)
+diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
+index 450387c92b63..df73d5ff84a8 100644
+--- a/drivers/gpu/drm/ttm/ttm_memory.c
++++ b/drivers/gpu/drm/ttm/ttm_memory.c
+@@ -216,14 +216,6 @@ static ssize_t ttm_mem_global_store(struct kobject *kobj,
+ 	return size;
+ }
+ 
+-static void ttm_mem_global_kobj_release(struct kobject *kobj)
+-{
+-	struct ttm_mem_global *glob =
+-		container_of(kobj, struct ttm_mem_global, kobj);
+-
+-	kfree(glob);
+-}
+-
+ static struct attribute *ttm_mem_global_attrs[] = {
+ 	&ttm_mem_global_lower_mem_limit,
+ 	NULL
+@@ -235,7 +227,6 @@ static const struct sysfs_ops ttm_mem_global_ops = {
+ };
+ 
+ static struct kobj_type ttm_mem_glob_kobj_type = {
+-	.release = &ttm_mem_global_kobj_release,
+ 	.sysfs_ops = &ttm_mem_global_ops,
+ 	.default_attrs = ttm_mem_global_attrs,
+ };
+diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
+index d5a23295dd80..bb7b58407039 100644
+--- a/drivers/gpu/drm/udl/udl_gem.c
++++ b/drivers/gpu/drm/udl/udl_gem.c
+@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
+ 	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
+ 
+ out:
+-	drm_gem_object_put(&gobj->base);
++	drm_gem_object_put_unlocked(&gobj->base);
+ unlock:
+ 	mutex_unlock(&udl->gem_lock);
+ 	return ret;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index b7870e7e41d4..97d33b8ed36c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -348,6 +348,7 @@
+ #define USB_DEVICE_ID_DMI_ENC		0x5fab
+ 
+ #define USB_VENDOR_ID_DRAGONRISE		0x0079
++#define USB_DEVICE_ID_REDRAGON_SEYMUR2		0x0006
+ #define USB_DEVICE_ID_DRAGONRISE_WIIU		0x1800
+ #define USB_DEVICE_ID_DRAGONRISE_PS3		0x1801
+ #define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR	0x1803
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 77316f022c5a..94088c0ed68a 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -70,6 +70,7 @@ static const struct hid_device_id hid_quirks[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
+index 832d8f9aaba2..099e1ce2f234 100644
+--- a/drivers/hid/i2c-hid/Makefile
++++ b/drivers/hid/i2c-hid/Makefile
+@@ -3,3 +3,6 @@
+ #
+ 
+ obj-$(CONFIG_I2C_HID)				+= i2c-hid.o
++
++i2c-hid-objs					=  i2c-hid-core.o
++i2c-hid-$(CONFIG_DMI)				+= i2c-hid-dmi-quirks.o
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+new file mode 100644
+index 000000000000..3cde7c1b9c33
+--- /dev/null
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -0,0 +1,1348 @@
++/*
++ * HID over I2C protocol implementation
++ *
++ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
++ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
++ * Copyright (c) 2012 Red Hat, Inc
++ *
++ * This code is partly based on "USB HID support for Linux":
++ *
++ *  Copyright (c) 1999 Andreas Gal
++ *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
++ *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
++ *  Copyright (c) 2007-2008 Oliver Neukum
++ *  Copyright (c) 2006-2010 Jiri Kosina
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License.  See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++#include <linux/module.h>
++#include <linux/i2c.h>
++#include <linux/interrupt.h>
++#include <linux/input.h>
++#include <linux/irq.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/pm.h>
++#include <linux/pm_runtime.h>
++#include <linux/device.h>
++#include <linux/wait.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/list.h>
++#include <linux/jiffies.h>
++#include <linux/kernel.h>
++#include <linux/hid.h>
++#include <linux/mutex.h>
++#include <linux/acpi.h>
++#include <linux/of.h>
++#include <linux/regulator/consumer.h>
++
++#include <linux/platform_data/i2c-hid.h>
++
++#include "../hid-ids.h"
++#include "i2c-hid.h"
++
++/* quirks to control the device */
++#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV	BIT(0)
++#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET	BIT(1)
++#define I2C_HID_QUIRK_NO_RUNTIME_PM		BIT(2)
++#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP		BIT(3)
++
++/* flags */
++#define I2C_HID_STARTED		0
++#define I2C_HID_RESET_PENDING	1
++#define I2C_HID_READ_PENDING	2
++
++#define I2C_HID_PWR_ON		0x00
++#define I2C_HID_PWR_SLEEP	0x01
++
++/* debug option */
++static bool debug;
++module_param(debug, bool, 0444);
++MODULE_PARM_DESC(debug, "print a lot of debug information");
++
++#define i2c_hid_dbg(ihid, fmt, arg...)					  \
++do {									  \
++	if (debug)							  \
++		dev_printk(KERN_DEBUG, &(ihid)->client->dev, fmt, ##arg); \
++} while (0)
++
++struct i2c_hid_desc {
++	__le16 wHIDDescLength;
++	__le16 bcdVersion;
++	__le16 wReportDescLength;
++	__le16 wReportDescRegister;
++	__le16 wInputRegister;
++	__le16 wMaxInputLength;
++	__le16 wOutputRegister;
++	__le16 wMaxOutputLength;
++	__le16 wCommandRegister;
++	__le16 wDataRegister;
++	__le16 wVendorID;
++	__le16 wProductID;
++	__le16 wVersionID;
++	__le32 reserved;
++} __packed;
++
++struct i2c_hid_cmd {
++	unsigned int registerIndex;
++	__u8 opcode;
++	unsigned int length;
++	bool wait;
++};
++
++union command {
++	u8 data[0];
++	struct cmd {
++		__le16 reg;
++		__u8 reportTypeID;
++		__u8 opcode;
++	} __packed c;
++};
++
++#define I2C_HID_CMD(opcode_) \
++	.opcode = opcode_, .length = 4, \
++	.registerIndex = offsetof(struct i2c_hid_desc, wCommandRegister)
++
++/* fetch HID descriptor */
++static const struct i2c_hid_cmd hid_descr_cmd = { .length = 2 };
++/* fetch report descriptors */
++static const struct i2c_hid_cmd hid_report_descr_cmd = {
++		.registerIndex = offsetof(struct i2c_hid_desc,
++			wReportDescRegister),
++		.opcode = 0x00,
++		.length = 2 };
++/* commands */
++static const struct i2c_hid_cmd hid_reset_cmd =		{ I2C_HID_CMD(0x01),
++							  .wait = true };
++static const struct i2c_hid_cmd hid_get_report_cmd =	{ I2C_HID_CMD(0x02) };
++static const struct i2c_hid_cmd hid_set_report_cmd =	{ I2C_HID_CMD(0x03) };
++static const struct i2c_hid_cmd hid_set_power_cmd =	{ I2C_HID_CMD(0x08) };
++static const struct i2c_hid_cmd hid_no_cmd =		{ .length = 0 };
++
++/*
++ * These definitions are not used here, but are defined by the spec.
++ * Keeping them here for documentation purposes.
++ *
++ * static const struct i2c_hid_cmd hid_get_idle_cmd = { I2C_HID_CMD(0x04) };
++ * static const struct i2c_hid_cmd hid_set_idle_cmd = { I2C_HID_CMD(0x05) };
++ * static const struct i2c_hid_cmd hid_get_protocol_cmd = { I2C_HID_CMD(0x06) };
++ * static const struct i2c_hid_cmd hid_set_protocol_cmd = { I2C_HID_CMD(0x07) };
++ */
++
++/* The main device structure */
++struct i2c_hid {
++	struct i2c_client	*client;	/* i2c client */
++	struct hid_device	*hid;	/* pointer to corresponding HID dev */
++	union {
++		__u8 hdesc_buffer[sizeof(struct i2c_hid_desc)];
++		struct i2c_hid_desc hdesc;	/* the HID Descriptor */
++	};
++	__le16			wHIDDescRegister; /* location of the i2c
++						   * register of the HID
++						   * descriptor. */
++	unsigned int		bufsize;	/* i2c buffer size */
++	u8			*inbuf;		/* Input buffer */
++	u8			*rawbuf;	/* Raw Input buffer */
++	u8			*cmdbuf;	/* Command buffer */
++	u8			*argsbuf;	/* Command arguments buffer */
++
++	unsigned long		flags;		/* device flags */
++	unsigned long		quirks;		/* Various quirks */
++
++	wait_queue_head_t	wait;		/* For waiting the interrupt */
++
++	struct i2c_hid_platform_data pdata;
++
++	bool			irq_wake_enabled;
++	struct mutex		reset_lock;
++
++	unsigned long		sleep_delay;
++};
++
++static const struct i2c_hid_quirks {
++	__u16 idVendor;
++	__u16 idProduct;
++	__u32 quirks;
++} i2c_hid_quirks[] = {
++	{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
++		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
++	{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
++		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
++	{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
++		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
++		I2C_HID_QUIRK_NO_RUNTIME_PM },
++	{ I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
++		I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
++	{ 0, 0 }
++};
++
++/*
++ * i2c_hid_lookup_quirk: return any quirks associated with an I2C HID device
++ * @idVendor: the 16-bit vendor ID
++ * @idProduct: the 16-bit product ID
++ *
++ * Returns: a u32 quirks value.
++ */
++static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
++{
++	u32 quirks = 0;
++	int n;
++
++	for (n = 0; i2c_hid_quirks[n].idVendor; n++)
++		if (i2c_hid_quirks[n].idVendor == idVendor &&
++		    (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
++		     i2c_hid_quirks[n].idProduct == idProduct))
++			quirks = i2c_hid_quirks[n].quirks;
++
++	return quirks;
++}
++
++static int __i2c_hid_command(struct i2c_client *client,
++		const struct i2c_hid_cmd *command, u8 reportID,
++		u8 reportType, u8 *args, int args_len,
++		unsigned char *buf_recv, int data_len)
++{
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	union command *cmd = (union command *)ihid->cmdbuf;
++	int ret;
++	struct i2c_msg msg[2];
++	int msg_num = 1;
++
++	int length = command->length;
++	bool wait = command->wait;
++	unsigned int registerIndex = command->registerIndex;
++
++	/* special case for hid_descr_cmd */
++	if (command == &hid_descr_cmd) {
++		cmd->c.reg = ihid->wHIDDescRegister;
++	} else {
++		cmd->data[0] = ihid->hdesc_buffer[registerIndex];
++		cmd->data[1] = ihid->hdesc_buffer[registerIndex + 1];
++	}
++
++	if (length > 2) {
++		cmd->c.opcode = command->opcode;
++		cmd->c.reportTypeID = reportID | reportType << 4;
++	}
++
++	memcpy(cmd->data + length, args, args_len);
++	length += args_len;
++
++	i2c_hid_dbg(ihid, "%s: cmd=%*ph\n", __func__, length, cmd->data);
++
++	msg[0].addr = client->addr;
++	msg[0].flags = client->flags & I2C_M_TEN;
++	msg[0].len = length;
++	msg[0].buf = cmd->data;
++	if (data_len > 0) {
++		msg[1].addr = client->addr;
++		msg[1].flags = client->flags & I2C_M_TEN;
++		msg[1].flags |= I2C_M_RD;
++		msg[1].len = data_len;
++		msg[1].buf = buf_recv;
++		msg_num = 2;
++		set_bit(I2C_HID_READ_PENDING, &ihid->flags);
++	}
++
++	if (wait)
++		set_bit(I2C_HID_RESET_PENDING, &ihid->flags);
++
++	ret = i2c_transfer(client->adapter, msg, msg_num);
++
++	if (data_len > 0)
++		clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
++
++	if (ret != msg_num)
++		return ret < 0 ? ret : -EIO;
++
++	ret = 0;
++
++	if (wait && (ihid->quirks & I2C_HID_QUIRK_NO_IRQ_AFTER_RESET)) {
++		msleep(100);
++	} else if (wait) {
++		i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
++		if (!wait_event_timeout(ihid->wait,
++				!test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
++				msecs_to_jiffies(5000)))
++			ret = -ENODATA;
++		i2c_hid_dbg(ihid, "%s: finished.\n", __func__);
++	}
++
++	return ret;
++}
++
++static int i2c_hid_command(struct i2c_client *client,
++		const struct i2c_hid_cmd *command,
++		unsigned char *buf_recv, int data_len)
++{
++	return __i2c_hid_command(client, command, 0, 0, NULL, 0,
++				buf_recv, data_len);
++}
++
++static int i2c_hid_get_report(struct i2c_client *client, u8 reportType,
++		u8 reportID, unsigned char *buf_recv, int data_len)
++{
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	u8 args[3];
++	int ret;
++	int args_len = 0;
++	u16 readRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
++
++	i2c_hid_dbg(ihid, "%s\n", __func__);
++
++	if (reportID >= 0x0F) {
++		args[args_len++] = reportID;
++		reportID = 0x0F;
++	}
++
++	args[args_len++] = readRegister & 0xFF;
++	args[args_len++] = readRegister >> 8;
++
++	ret = __i2c_hid_command(client, &hid_get_report_cmd, reportID,
++		reportType, args, args_len, buf_recv, data_len);
++	if (ret) {
++		dev_err(&client->dev,
++			"failed to retrieve report from device.\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++/**
++ * i2c_hid_set_or_send_report: forward an incoming report to the device
++ * @client: the i2c_client of the device
++ * @reportType: 0x03 for HID_FEATURE_REPORT; 0x02 for HID_OUTPUT_REPORT
++ * @reportID: the report ID
++ * @buf: the actual data to transfer, without the report ID
++ * @data_len: size of buf
++ * @use_data: true: use SET_REPORT HID command, false: send plain OUTPUT report
++ */
++static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
++		u8 reportID, unsigned char *buf, size_t data_len, bool use_data)
++{
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	u8 *args = ihid->argsbuf;
++	const struct i2c_hid_cmd *hidcmd;
++	int ret;
++	u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
++	u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
++	u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
++	u16 size;
++	int args_len;
++	int index = 0;
++
++	i2c_hid_dbg(ihid, "%s\n", __func__);
++
++	if (data_len > ihid->bufsize)
++		return -EINVAL;
++
++	size =		2			/* size */ +
++			(reportID ? 1 : 0)	/* reportID */ +
++			data_len		/* buf */;
++	args_len =	(reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
++			2			/* dataRegister */ +
++			size			/* args */;
++
++	if (!use_data && maxOutputLength == 0)
++		return -ENOSYS;
++
++	if (reportID >= 0x0F) {
++		args[index++] = reportID;
++		reportID = 0x0F;
++	}
++
++	/*
++	 * use the data register for feature reports or if the device does not
++	 * support the output register
++	 */
++	if (use_data) {
++		args[index++] = dataRegister & 0xFF;
++		args[index++] = dataRegister >> 8;
++		hidcmd = &hid_set_report_cmd;
++	} else {
++		args[index++] = outputRegister & 0xFF;
++		args[index++] = outputRegister >> 8;
++		hidcmd = &hid_no_cmd;
++	}
++
++	args[index++] = size & 0xFF;
++	args[index++] = size >> 8;
++
++	if (reportID)
++		args[index++] = reportID;
++
++	memcpy(&args[index], buf, data_len);
++
++	ret = __i2c_hid_command(client, hidcmd, reportID,
++		reportType, args, args_len, NULL, 0);
++	if (ret) {
++		dev_err(&client->dev, "failed to set a report to device.\n");
++		return ret;
++	}
++
++	return data_len;
++}
++
++static int i2c_hid_set_power(struct i2c_client *client, int power_state)
++{
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	int ret;
++	unsigned long now, delay;
++
++	i2c_hid_dbg(ihid, "%s\n", __func__);
++
++	/*
++	 * Some devices require a command to wake up before power on.
++	 * The call may return an error (EREMOTEIO), but the device will be
++	 * triggered and activated. After that, it behaves like a normal device.
++	 */
++	if (power_state == I2C_HID_PWR_ON &&
++	    ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
++		ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
++
++		/* Device was already activated */
++		if (!ret)
++			goto set_pwr_exit;
++	}
++
++	if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
++	    power_state == I2C_HID_PWR_ON) {
++		now = jiffies;
++		if (time_after(ihid->sleep_delay, now)) {
++			delay = jiffies_to_usecs(ihid->sleep_delay - now);
++			usleep_range(delay, delay + 1);
++		}
++	}
++
++	ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
++		0, NULL, 0, NULL, 0);
++
++	if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
++	    power_state == I2C_HID_PWR_SLEEP)
++		ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
++
++	if (ret)
++		dev_err(&client->dev, "failed to change power setting.\n");
++
++set_pwr_exit:
++	return ret;
++}
++
++static int i2c_hid_hwreset(struct i2c_client *client)
++{
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	int ret;
++
++	i2c_hid_dbg(ihid, "%s\n", __func__);
++
++	/*
++	 * This prevents sending feature reports while the device is
++	 * being reset. Otherwise we may lose the reset complete
++	 * interrupt.
++	 */
++	mutex_lock(&ihid->reset_lock);
++
++	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
++	if (ret)
++		goto out_unlock;
++
++	/*
++	 * The HID over I2C specification states that if a DEVICE needs time
++	 * after the PWR_ON request, it should utilise CLOCK stretching.
++	 * However, it has been observed that the Windows driver provides a
++	 * 1ms sleep between the PWR_ON and RESET requests and that some devices
++	 * rely on this.
++	 */
++	usleep_range(1000, 5000);
++
++	i2c_hid_dbg(ihid, "resetting...\n");
++
++	ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
++	if (ret) {
++		dev_err(&client->dev, "failed to reset device.\n");
++		i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
++	}
++
++out_unlock:
++	mutex_unlock(&ihid->reset_lock);
++	return ret;
++}
++
++static void i2c_hid_get_input(struct i2c_hid *ihid)
++{
++	int ret;
++	u32 ret_size;
++	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
++
++	if (size > ihid->bufsize)
++		size = ihid->bufsize;
++
++	ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
++	if (ret != size) {
++		if (ret < 0)
++			return;
++
++		dev_err(&ihid->client->dev, "%s: got %d data instead of %d\n",
++			__func__, ret, size);
++		return;
++	}
++
++	ret_size = ihid->inbuf[0] | ihid->inbuf[1] << 8;
++
++	if (!ret_size) {
++		/* host or device initiated RESET completed */
++		if (test_and_clear_bit(I2C_HID_RESET_PENDING, &ihid->flags))
++			wake_up(&ihid->wait);
++		return;
++	}
++
++	if ((ret_size > size) || (ret_size < 2)) {
++		dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
++			__func__, size, ret_size);
++		return;
++	}
++
++	i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
++
++	if (test_bit(I2C_HID_STARTED, &ihid->flags))
++		hid_input_report(ihid->hid, HID_INPUT_REPORT, ihid->inbuf + 2,
++				ret_size - 2, 1);
++
++	return;
++}
++
++static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
++{
++	struct i2c_hid *ihid = dev_id;
++
++	if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
++		return IRQ_HANDLED;
++
++	i2c_hid_get_input(ihid);
++
++	return IRQ_HANDLED;
++}
++
++static int i2c_hid_get_report_length(struct hid_report *report)
++{
++	return ((report->size - 1) >> 3) + 1 +
++		report->device->report_enum[report->type].numbered + 2;
++}
++
++/*
++ * Traverse the supplied list of reports and find the longest
++ */
++static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
++		unsigned int *max)
++{
++	struct hid_report *report;
++	unsigned int size;
++
++	/* We should not rely on wMaxInputLength, as some devices may set it to
++	 * a wrong length. */
++	list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
++		size = i2c_hid_get_report_length(report);
++		if (*max < size)
++			*max = size;
++	}
++}
++
++static void i2c_hid_free_buffers(struct i2c_hid *ihid)
++{
++	kfree(ihid->inbuf);
++	kfree(ihid->rawbuf);
++	kfree(ihid->argsbuf);
++	kfree(ihid->cmdbuf);
++	ihid->inbuf = NULL;
++	ihid->rawbuf = NULL;
++	ihid->cmdbuf = NULL;
++	ihid->argsbuf = NULL;
++	ihid->bufsize = 0;
++}
++
++static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
++{
++	/* the worst case is computed from the set_report command with a
++	 * reportID > 15 and the maximum report length */
++	int args_len = sizeof(__u8) + /* ReportID */
++		       sizeof(__u8) + /* optional ReportID byte */
++		       sizeof(__u16) + /* data register */
++		       sizeof(__u16) + /* size of the report */
++		       report_size; /* report */
++
++	ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
++	ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
++	ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
++	ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
++
++	if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
++		i2c_hid_free_buffers(ihid);
++		return -ENOMEM;
++	}
++
++	ihid->bufsize = report_size;
++
++	return 0;
++}
++
++static int i2c_hid_get_raw_report(struct hid_device *hid,
++		unsigned char report_number, __u8 *buf, size_t count,
++		unsigned char report_type)
++{
++	struct i2c_client *client = hid->driver_data;
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	size_t ret_count, ask_count;
++	int ret;
++
++	if (report_type == HID_OUTPUT_REPORT)
++		return -EINVAL;
++
++	/* +2 bytes to include the size of the reply in the query buffer */
++	ask_count = min(count + 2, (size_t)ihid->bufsize);
++
++	ret = i2c_hid_get_report(client,
++			report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
++			report_number, ihid->rawbuf, ask_count);
++
++	if (ret < 0)
++		return ret;
++
++	ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
++
++	if (ret_count <= 2)
++		return 0;
++
++	ret_count = min(ret_count, ask_count);
++
++	/* The query buffer contains the size; drop it from the reply */
++	count = min(count, ret_count - 2);
++	memcpy(buf, ihid->rawbuf + 2, count);
++
++	return count;
++}
++
++static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
++		size_t count, unsigned char report_type, bool use_data)
++{
++	struct i2c_client *client = hid->driver_data;
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	int report_id = buf[0];
++	int ret;
++
++	if (report_type == HID_INPUT_REPORT)
++		return -EINVAL;
++
++	mutex_lock(&ihid->reset_lock);
++
++	if (report_id) {
++		buf++;
++		count--;
++	}
++
++	ret = i2c_hid_set_or_send_report(client,
++				report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
++				report_id, buf, count, use_data);
++
++	if (report_id && ret >= 0)
++		ret++; /* add report_id to the number of transferred bytes */
++
++	mutex_unlock(&ihid->reset_lock);
++
++	return ret;
++}
++
++static int i2c_hid_output_report(struct hid_device *hid, __u8 *buf,
++		size_t count)
++{
++	return i2c_hid_output_raw_report(hid, buf, count, HID_OUTPUT_REPORT,
++			false);
++}
++
++static int i2c_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
++			       __u8 *buf, size_t len, unsigned char rtype,
++			       int reqtype)
++{
++	switch (reqtype) {
++	case HID_REQ_GET_REPORT:
++		return i2c_hid_get_raw_report(hid, reportnum, buf, len, rtype);
++	case HID_REQ_SET_REPORT:
++		if (buf[0] != reportnum)
++			return -EINVAL;
++		return i2c_hid_output_raw_report(hid, buf, len, rtype, true);
++	default:
++		return -EIO;
++	}
++}
++
++static int i2c_hid_parse(struct hid_device *hid)
++{
++	struct i2c_client *client = hid->driver_data;
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	struct i2c_hid_desc *hdesc = &ihid->hdesc;
++	unsigned int rsize;
++	char *rdesc;
++	int ret;
++	int tries = 3;
++	char *use_override;
++
++	i2c_hid_dbg(ihid, "entering %s\n", __func__);
++
++	rsize = le16_to_cpu(hdesc->wReportDescLength);
++	if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
++		dbg_hid("weird size of report descriptor (%u)\n", rsize);
++		return -EINVAL;
++	}
++
++	do {
++		ret = i2c_hid_hwreset(client);
++		if (ret)
++			msleep(1000);
++	} while (tries-- > 0 && ret);
++
++	if (ret)
++		return ret;
++
++	use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
++								&rsize);
++
++	if (use_override) {
++		rdesc = use_override;
++		i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
++	} else {
++		rdesc = kzalloc(rsize, GFP_KERNEL);
++
++		if (!rdesc) {
++			dbg_hid("couldn't allocate rdesc memory\n");
++			return -ENOMEM;
++		}
++
++		i2c_hid_dbg(ihid, "asking HID report descriptor\n");
++
++		ret = i2c_hid_command(client, &hid_report_descr_cmd,
++				      rdesc, rsize);
++		if (ret) {
++			hid_err(hid, "reading report descriptor failed\n");
++			kfree(rdesc);
++			return -EIO;
++		}
++	}
++
++	i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
++
++	ret = hid_parse_report(hid, rdesc, rsize);
++	if (!use_override)
++		kfree(rdesc);
++
++	if (ret) {
++		dbg_hid("parsing report descriptor failed\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++static int i2c_hid_start(struct hid_device *hid)
++{
++	struct i2c_client *client = hid->driver_data;
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	int ret;
++	unsigned int bufsize = HID_MIN_BUFFER_SIZE;
++
++	i2c_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
++	i2c_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
++	i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
++
++	if (bufsize > ihid->bufsize) {
++		disable_irq(client->irq);
++		i2c_hid_free_buffers(ihid);
++
++		ret = i2c_hid_alloc_buffers(ihid, bufsize);
++		enable_irq(client->irq);
++
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
++static void i2c_hid_stop(struct hid_device *hid)
++{
++	hid->claimed = 0;
++}
++
++static int i2c_hid_open(struct hid_device *hid)
++{
++	struct i2c_client *client = hid->driver_data;
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	int ret = 0;
++
++	ret = pm_runtime_get_sync(&client->dev);
++	if (ret < 0)
++		return ret;
++
++	set_bit(I2C_HID_STARTED, &ihid->flags);
++	return 0;
++}
++
++static void i2c_hid_close(struct hid_device *hid)
++{
++	struct i2c_client *client = hid->driver_data;
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++
++	clear_bit(I2C_HID_STARTED, &ihid->flags);
++
++	/* Save some power */
++	pm_runtime_put(&client->dev);
++}
++
++static int i2c_hid_power(struct hid_device *hid, int lvl)
++{
++	struct i2c_client *client = hid->driver_data;
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++
++	i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
++
++	switch (lvl) {
++	case PM_HINT_FULLON:
++		pm_runtime_get_sync(&client->dev);
++		break;
++	case PM_HINT_NORMAL:
++		pm_runtime_put(&client->dev);
++		break;
++	}
++	return 0;
++}
++
++struct hid_ll_driver i2c_hid_ll_driver = {
++	.parse = i2c_hid_parse,
++	.start = i2c_hid_start,
++	.stop = i2c_hid_stop,
++	.open = i2c_hid_open,
++	.close = i2c_hid_close,
++	.power = i2c_hid_power,
++	.output_report = i2c_hid_output_report,
++	.raw_request = i2c_hid_raw_request,
++};
++EXPORT_SYMBOL_GPL(i2c_hid_ll_driver);
++
++static int i2c_hid_init_irq(struct i2c_client *client)
++{
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	unsigned long irqflags = 0;
++	int ret;
++
++	dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
++
++	if (!irq_get_trigger_type(client->irq))
++		irqflags = IRQF_TRIGGER_LOW;
++
++	ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
++				   irqflags | IRQF_ONESHOT, client->name, ihid);
++	if (ret < 0) {
++		dev_warn(&client->dev,
++			"Could not register for %s interrupt, irq = %d,"
++			" ret = %d\n",
++			client->name, client->irq, ret);
++
++		return ret;
++	}
++
++	return 0;
++}
++
++static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
++{
++	struct i2c_client *client = ihid->client;
++	struct i2c_hid_desc *hdesc = &ihid->hdesc;
++	unsigned int dsize;
++	int ret;
++
++	/* i2c hid fetch using a fixed descriptor size (30 bytes) */
++	if (i2c_hid_get_dmi_i2c_hid_desc_override(client->name)) {
++		i2c_hid_dbg(ihid, "Using a HID descriptor override\n");
++		ihid->hdesc =
++			*i2c_hid_get_dmi_i2c_hid_desc_override(client->name);
++	} else {
++		i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
++		ret = i2c_hid_command(client, &hid_descr_cmd,
++				      ihid->hdesc_buffer,
++				      sizeof(struct i2c_hid_desc));
++		if (ret) {
++			dev_err(&client->dev, "hid_descr_cmd failed\n");
++			return -ENODEV;
++		}
++	}
++
++	/* Validate the length of the HID descriptor using the first 4 bytes:
++	 * bytes 0-1 -> length
++	 * bytes 2-3 -> bcdVersion (has to be 1.00) */
++	/* check bcdVersion == 1.0 */
++	if (le16_to_cpu(hdesc->bcdVersion) != 0x0100) {
++		dev_err(&client->dev,
++			"unexpected HID descriptor bcdVersion (0x%04hx)\n",
++			le16_to_cpu(hdesc->bcdVersion));
++		return -ENODEV;
++	}
++
++	/* Descriptor length should be 30 bytes as per the specification */
++	dsize = le16_to_cpu(hdesc->wHIDDescLength);
++	if (dsize != sizeof(struct i2c_hid_desc)) {
++		dev_err(&client->dev, "weird size of HID descriptor (%u)\n",
++			dsize);
++		return -ENODEV;
++	}
++	i2c_hid_dbg(ihid, "HID Descriptor: %*ph\n", dsize, ihid->hdesc_buffer);
++	return 0;
++}
++
++#ifdef CONFIG_ACPI
++static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
++	/*
++	 * The CHPN0001 ACPI device, which is used to describe the Chipone
++	 * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
++	 */
++	{"CHPN0001", 0 },
++	{ },
++};
++
++static int i2c_hid_acpi_pdata(struct i2c_client *client,
++		struct i2c_hid_platform_data *pdata)
++{
++	static guid_t i2c_hid_guid =
++		GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
++			  0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
++	union acpi_object *obj;
++	struct acpi_device *adev;
++	acpi_handle handle;
++
++	handle = ACPI_HANDLE(&client->dev);
++	if (!handle || acpi_bus_get_device(handle, &adev)) {
++		dev_err(&client->dev, "Error could not get ACPI device\n");
++		return -ENODEV;
++	}
++
++	if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
++		return -ENODEV;
++
++	obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
++				      ACPI_TYPE_INTEGER);
++	if (!obj) {
++		dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
++		return -ENODEV;
++	}
++
++	pdata->hid_descriptor_address = obj->integer.value;
++	ACPI_FREE(obj);
++
++	return 0;
++}
++
++static void i2c_hid_acpi_fix_up_power(struct device *dev)
++{
++	struct acpi_device *adev;
++
++	adev = ACPI_COMPANION(dev);
++	if (adev)
++		acpi_device_fix_up_power(adev);
++}
++
++static const struct acpi_device_id i2c_hid_acpi_match[] = {
++	{"ACPI0C50", 0 },
++	{"PNP0C50", 0 },
++	{ },
++};
++MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
++#else
++static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
++		struct i2c_hid_platform_data *pdata)
++{
++	return -ENODEV;
++}
++
++static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
++#endif
++
++#ifdef CONFIG_OF
++static int i2c_hid_of_probe(struct i2c_client *client,
++		struct i2c_hid_platform_data *pdata)
++{
++	struct device *dev = &client->dev;
++	u32 val;
++	int ret;
++
++	ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
++	if (ret) {
++		dev_err(&client->dev, "HID register address not provided\n");
++		return -ENODEV;
++	}
++	if (val >> 16) {
++		dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
++			val);
++		return -EINVAL;
++	}
++	pdata->hid_descriptor_address = val;
++
++	return 0;
++}
++
++static const struct of_device_id i2c_hid_of_match[] = {
++	{ .compatible = "hid-over-i2c" },
++	{},
++};
++MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
++#else
++static inline int i2c_hid_of_probe(struct i2c_client *client,
++		struct i2c_hid_platform_data *pdata)
++{
++	return -ENODEV;
++}
++#endif
++
++static void i2c_hid_fwnode_probe(struct i2c_client *client,
++				 struct i2c_hid_platform_data *pdata)
++{
++	u32 val;
++
++	if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
++				      &val))
++		pdata->post_power_delay_ms = val;
++}
++
++static int i2c_hid_probe(struct i2c_client *client,
++			 const struct i2c_device_id *dev_id)
++{
++	int ret;
++	struct i2c_hid *ihid;
++	struct hid_device *hid;
++	__u16 hidRegister;
++	struct i2c_hid_platform_data *platform_data = client->dev.platform_data;
++
++	dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
++
++	if (!client->irq) {
++		dev_err(&client->dev,
++			"HID over i2c has not been provided an Int IRQ\n");
++		return -EINVAL;
++	}
++
++	if (client->irq < 0) {
++		if (client->irq != -EPROBE_DEFER)
++			dev_err(&client->dev,
++				"HID over i2c doesn't have a valid IRQ\n");
++		return client->irq;
++	}
++
++	ihid = devm_kzalloc(&client->dev, sizeof(*ihid), GFP_KERNEL);
++	if (!ihid)
++		return -ENOMEM;
++
++	if (client->dev.of_node) {
++		ret = i2c_hid_of_probe(client, &ihid->pdata);
++		if (ret)
++			return ret;
++	} else if (!platform_data) {
++		ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
++		if (ret)
++			return ret;
++	} else {
++		ihid->pdata = *platform_data;
++	}
++
++	/* Parse platform agnostic common properties from ACPI / device tree */
++	i2c_hid_fwnode_probe(client, &ihid->pdata);
++
++	ihid->pdata.supplies[0].supply = "vdd";
++	ihid->pdata.supplies[1].supply = "vddl";
++
++	ret = devm_regulator_bulk_get(&client->dev,
++				      ARRAY_SIZE(ihid->pdata.supplies),
++				      ihid->pdata.supplies);
++	if (ret)
++		return ret;
++
++	ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
++				    ihid->pdata.supplies);
++	if (ret < 0)
++		return ret;
++
++	if (ihid->pdata.post_power_delay_ms)
++		msleep(ihid->pdata.post_power_delay_ms);
++
++	i2c_set_clientdata(client, ihid);
++
++	ihid->client = client;
++
++	hidRegister = ihid->pdata.hid_descriptor_address;
++	ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
++
++	init_waitqueue_head(&ihid->wait);
++	mutex_init(&ihid->reset_lock);
++
++	/* we need to allocate the command buffer without knowing the maximum
++	 * size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
++	 * real computation later. */
++	ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
++	if (ret < 0)
++		goto err_regulator;
++
++	i2c_hid_acpi_fix_up_power(&client->dev);
++
++	pm_runtime_get_noresume(&client->dev);
++	pm_runtime_set_active(&client->dev);
++	pm_runtime_enable(&client->dev);
++	device_enable_async_suspend(&client->dev);
++
++	/* Make sure there is something at this address */
++	ret = i2c_smbus_read_byte(client);
++	if (ret < 0) {
++		dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
++		ret = -ENXIO;
++		goto err_pm;
++	}
++
++	ret = i2c_hid_fetch_hid_descriptor(ihid);
++	if (ret < 0)
++		goto err_pm;
++
++	ret = i2c_hid_init_irq(client);
++	if (ret < 0)
++		goto err_pm;
++
++	hid = hid_allocate_device();
++	if (IS_ERR(hid)) {
++		ret = PTR_ERR(hid);
++		goto err_irq;
++	}
++
++	ihid->hid = hid;
++
++	hid->driver_data = client;
++	hid->ll_driver = &i2c_hid_ll_driver;
++	hid->dev.parent = &client->dev;
++	hid->bus = BUS_I2C;
++	hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
++	hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
++	hid->product = le16_to_cpu(ihid->hdesc.wProductID);
++
++	snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
++		 client->name, hid->vendor, hid->product);
++	strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
++
++	ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
++
++	ret = hid_add_device(hid);
++	if (ret) {
++		if (ret != -ENODEV)
++			hid_err(client, "can't add hid device: %d\n", ret);
++		goto err_mem_free;
++	}
++
++	if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
++		pm_runtime_put(&client->dev);
++
++	return 0;
++
++err_mem_free:
++	hid_destroy_device(hid);
++
++err_irq:
++	free_irq(client->irq, ihid);
++
++err_pm:
++	pm_runtime_put_noidle(&client->dev);
++	pm_runtime_disable(&client->dev);
++
++err_regulator:
++	regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
++			       ihid->pdata.supplies);
++	i2c_hid_free_buffers(ihid);
++	return ret;
++}
++
++static int i2c_hid_remove(struct i2c_client *client)
++{
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	struct hid_device *hid;
++
++	if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
++		pm_runtime_get_sync(&client->dev);
++	pm_runtime_disable(&client->dev);
++	pm_runtime_set_suspended(&client->dev);
++	pm_runtime_put_noidle(&client->dev);
++
++	hid = ihid->hid;
++	hid_destroy_device(hid);
++
++	free_irq(client->irq, ihid);
++
++	if (ihid->bufsize)
++		i2c_hid_free_buffers(ihid);
++
++	regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
++			       ihid->pdata.supplies);
++
++	return 0;
++}
++
++static void i2c_hid_shutdown(struct i2c_client *client)
++{
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++
++	i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
++	free_irq(client->irq, ihid);
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int i2c_hid_suspend(struct device *dev)
++{
++	struct i2c_client *client = to_i2c_client(dev);
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	struct hid_device *hid = ihid->hid;
++	int ret;
++	int wake_status;
++
++	if (hid->driver && hid->driver->suspend) {
++		/*
++		 * Wake up the device so that I/O issued by the
++		 * HID driver's suspend code can succeed.
++		 */
++		ret = pm_runtime_resume(dev);
++		if (ret < 0)
++			return ret;
++
++		ret = hid->driver->suspend(hid, PMSG_SUSPEND);
++		if (ret < 0)
++			return ret;
++	}
++
++	if (!pm_runtime_suspended(dev)) {
++		/* Save some power */
++		i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
++
++		disable_irq(client->irq);
++	}
++
++	if (device_may_wakeup(&client->dev)) {
++		wake_status = enable_irq_wake(client->irq);
++		if (!wake_status)
++			ihid->irq_wake_enabled = true;
++		else
++			hid_warn(hid, "Failed to enable irq wake: %d\n",
++				wake_status);
++	} else {
++		regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
++				       ihid->pdata.supplies);
++	}
++
++	return 0;
++}
++
++static int i2c_hid_resume(struct device *dev)
++{
++	int ret;
++	struct i2c_client *client = to_i2c_client(dev);
++	struct i2c_hid *ihid = i2c_get_clientdata(client);
++	struct hid_device *hid = ihid->hid;
++	int wake_status;
++
++	if (!device_may_wakeup(&client->dev)) {
++		ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
++					    ihid->pdata.supplies);
++		if (ret)
++			hid_warn(hid, "Failed to enable supplies: %d\n", ret);
++
++		if (ihid->pdata.post_power_delay_ms)
++			msleep(ihid->pdata.post_power_delay_ms);
++	} else if (ihid->irq_wake_enabled) {
++		wake_status = disable_irq_wake(client->irq);
++		if (!wake_status)
++			ihid->irq_wake_enabled = false;
++		else
++			hid_warn(hid, "Failed to disable irq wake: %d\n",
++				wake_status);
++	}
++
++	/* We'll resume to full power */
++	pm_runtime_disable(dev);
++	pm_runtime_set_active(dev);
++	pm_runtime_enable(dev);
++
++	enable_irq(client->irq);
++
++	/* Instead of resetting the device, simply power it on. This
++	 * solves "incomplete reports" on Raydium devices 2386:3118 and
++	 * 2386:4B33 and fixes various SIS touchscreens no longer sending
++	 * data after a suspend/resume.
++	 */
++	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
++	if (ret)
++		return ret;
++
++	if (hid->driver && hid->driver->reset_resume) {
++		ret = hid->driver->reset_resume(hid);
++		return ret;
++	}
++
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_PM
++static int i2c_hid_runtime_suspend(struct device *dev)
++{
++	struct i2c_client *client = to_i2c_client(dev);
++
++	i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
++	disable_irq(client->irq);
++	return 0;
++}
++
++static int i2c_hid_runtime_resume(struct device *dev)
++{
++	struct i2c_client *client = to_i2c_client(dev);
++
++	enable_irq(client->irq);
++	i2c_hid_set_power(client, I2C_HID_PWR_ON);
++	return 0;
++}
++#endif
++
++static const struct dev_pm_ops i2c_hid_pm = {
++	SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
++	SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
++			   NULL)
++};
++
++static const struct i2c_device_id i2c_hid_id_table[] = {
++	{ "hid", 0 },
++	{ "hid-over-i2c", 0 },
++	{ },
++};
++MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
++
++
++static struct i2c_driver i2c_hid_driver = {
++	.driver = {
++		.name	= "i2c_hid",
++		.pm	= &i2c_hid_pm,
++		.acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
++		.of_match_table = of_match_ptr(i2c_hid_of_match),
++	},
++
++	.probe		= i2c_hid_probe,
++	.remove		= i2c_hid_remove,
++	.shutdown	= i2c_hid_shutdown,
++	.id_table	= i2c_hid_id_table,
++};
++
++module_i2c_driver(i2c_hid_driver);
++
++MODULE_DESCRIPTION("HID over I2C core driver");
++MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+new file mode 100644
+index 000000000000..1d645c9ab417
+--- /dev/null
++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+@@ -0,0 +1,376 @@
++// SPDX-License-Identifier: GPL-2.0+
++
++/*
++ * Quirks for I2C-HID devices that do not supply proper descriptors
++ *
++ * Copyright (c) 2018 Julian Sax <jsbc@gmx.de>
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/dmi.h>
++#include <linux/mod_devicetable.h>
++
++#include "i2c-hid.h"
++
++
++struct i2c_hid_desc_override {
++	union {
++		struct i2c_hid_desc *i2c_hid_desc;
++		uint8_t             *i2c_hid_desc_buffer;
++	};
++	uint8_t              *hid_report_desc;
++	unsigned int          hid_report_desc_size;
++	uint8_t              *i2c_name;
++};
++
++
++/*
++ * descriptors for the SIPODEV SP1064 touchpad
++ *
++ * This device does not supply any descriptors; on Windows, a filter
++ * driver operates between the i2c-hid layer and the device and injects
++ * these descriptors when the device is prompted. The descriptors were
++ * extracted by listening to the i2c-hid traffic that occurs between the
++ * Windows filter driver and the Windows i2c-hid driver.
++ */
++
++static const struct i2c_hid_desc_override sipodev_desc = {
++	.i2c_hid_desc_buffer = (uint8_t [])
++	{0x1e, 0x00,                  /* Length of descriptor                 */
++	 0x00, 0x01,                  /* Version of descriptor                */
++	 0xdb, 0x01,                  /* Length of report descriptor          */
++	 0x21, 0x00,                  /* Location of report descriptor        */
++	 0x24, 0x00,                  /* Location of input report             */
++	 0x1b, 0x00,                  /* Max input report length              */
++	 0x25, 0x00,                  /* Location of output report            */
++	 0x11, 0x00,                  /* Max output report length             */
++	 0x22, 0x00,                  /* Location of command register         */
++	 0x23, 0x00,                  /* Location of data register            */
++	 0x11, 0x09,                  /* Vendor ID                            */
++	 0x88, 0x52,                  /* Product ID                           */
++	 0x06, 0x00,                  /* Version ID                           */
++	 0x00, 0x00, 0x00, 0x00       /* Reserved                             */
++	},
++
++	.hid_report_desc = (uint8_t [])
++	{0x05, 0x01,                  /* Usage Page (Desktop),                */
++	 0x09, 0x02,                  /* Usage (Mouse),                       */
++	 0xA1, 0x01,                  /* Collection (Application),            */
++	 0x85, 0x01,                  /*     Report ID (1),                   */
++	 0x09, 0x01,                  /*     Usage (Pointer),                 */
++	 0xA1, 0x00,                  /*     Collection (Physical),           */
++	 0x05, 0x09,                  /*         Usage Page (Button),         */
++	 0x19, 0x01,                  /*         Usage Minimum (01h),         */
++	 0x29, 0x02,                  /*         Usage Maximum (02h),         */
++	 0x25, 0x01,                  /*         Logical Maximum (1),         */
++	 0x75, 0x01,                  /*         Report Size (1),             */
++	 0x95, 0x02,                  /*         Report Count (2),            */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x95, 0x06,                  /*         Report Count (6),            */
++	 0x81, 0x01,                  /*         Input (Constant),            */
++	 0x05, 0x01,                  /*         Usage Page (Desktop),        */
++	 0x09, 0x30,                  /*         Usage (X),                   */
++	 0x09, 0x31,                  /*         Usage (Y),                   */
++	 0x15, 0x81,                  /*         Logical Minimum (-127),      */
++	 0x25, 0x7F,                  /*         Logical Maximum (127),       */
++	 0x75, 0x08,                  /*         Report Size (8),             */
++	 0x95, 0x02,                  /*         Report Count (2),            */
++	 0x81, 0x06,                  /*         Input (Variable, Relative),  */
++	 0xC0,                        /*     End Collection,                  */
++	 0xC0,                        /* End Collection,                      */
++	 0x05, 0x0D,                  /* Usage Page (Digitizer),              */
++	 0x09, 0x05,                  /* Usage (Touchpad),                    */
++	 0xA1, 0x01,                  /* Collection (Application),            */
++	 0x85, 0x04,                  /*     Report ID (4),                   */
++	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
++	 0x09, 0x22,                  /*     Usage (Finger),                  */
++	 0xA1, 0x02,                  /*     Collection (Logical),            */
++	 0x15, 0x00,                  /*         Logical Minimum (0),         */
++	 0x25, 0x01,                  /*         Logical Maximum (1),         */
++	 0x09, 0x47,                  /*         Usage (Touch Valid),         */
++	 0x09, 0x42,                  /*         Usage (Tip Switch),          */
++	 0x95, 0x02,                  /*         Report Count (2),            */
++	 0x75, 0x01,                  /*         Report Size (1),             */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x95, 0x01,                  /*         Report Count (1),            */
++	 0x75, 0x03,                  /*         Report Size (3),             */
++	 0x25, 0x05,                  /*         Logical Maximum (5),         */
++	 0x09, 0x51,                  /*         Usage (Contact Identifier),  */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x75, 0x01,                  /*         Report Size (1),             */
++	 0x95, 0x03,                  /*         Report Count (3),            */
++	 0x81, 0x03,                  /*         Input (Constant, Variable),  */
++	 0x05, 0x01,                  /*         Usage Page (Desktop),        */
++	 0x26, 0x44, 0x0A,            /*         Logical Maximum (2628),      */
++	 0x75, 0x10,                  /*         Report Size (16),            */
++	 0x55, 0x0E,                  /*         Unit Exponent (14),          */
++	 0x65, 0x11,                  /*         Unit (Centimeter),           */
++	 0x09, 0x30,                  /*         Usage (X),                   */
++	 0x46, 0x1A, 0x04,            /*         Physical Maximum (1050),     */
++	 0x95, 0x01,                  /*         Report Count (1),            */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x46, 0xBC, 0x02,            /*         Physical Maximum (700),      */
++	 0x26, 0x34, 0x05,            /*         Logical Maximum (1332),      */
++	 0x09, 0x31,                  /*         Usage (Y),                   */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0xC0,                        /*     End Collection,                  */
++	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
++	 0x09, 0x22,                  /*     Usage (Finger),                  */
++	 0xA1, 0x02,                  /*     Collection (Logical),            */
++	 0x25, 0x01,                  /*         Logical Maximum (1),         */
++	 0x09, 0x47,                  /*         Usage (Touch Valid),         */
++	 0x09, 0x42,                  /*         Usage (Tip Switch),          */
++	 0x95, 0x02,                  /*         Report Count (2),            */
++	 0x75, 0x01,                  /*         Report Size (1),             */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x95, 0x01,                  /*         Report Count (1),            */
++	 0x75, 0x03,                  /*         Report Size (3),             */
++	 0x25, 0x05,                  /*         Logical Maximum (5),         */
++	 0x09, 0x51,                  /*         Usage (Contact Identifier),  */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x75, 0x01,                  /*         Report Size (1),             */
++	 0x95, 0x03,                  /*         Report Count (3),            */
++	 0x81, 0x03,                  /*         Input (Constant, Variable),  */
++	 0x05, 0x01,                  /*         Usage Page (Desktop),        */
++	 0x26, 0x44, 0x0A,            /*         Logical Maximum (2628),      */
++	 0x75, 0x10,                  /*         Report Size (16),            */
++	 0x09, 0x30,                  /*         Usage (X),                   */
++	 0x46, 0x1A, 0x04,            /*         Physical Maximum (1050),     */
++	 0x95, 0x01,                  /*         Report Count (1),            */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x46, 0xBC, 0x02,            /*         Physical Maximum (700),      */
++	 0x26, 0x34, 0x05,            /*         Logical Maximum (1332),      */
++	 0x09, 0x31,                  /*         Usage (Y),                   */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0xC0,                        /*     End Collection,                  */
++	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
++	 0x09, 0x22,                  /*     Usage (Finger),                  */
++	 0xA1, 0x02,                  /*     Collection (Logical),            */
++	 0x25, 0x01,                  /*         Logical Maximum (1),         */
++	 0x09, 0x47,                  /*         Usage (Touch Valid),         */
++	 0x09, 0x42,                  /*         Usage (Tip Switch),          */
++	 0x95, 0x02,                  /*         Report Count (2),            */
++	 0x75, 0x01,                  /*         Report Size (1),             */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x95, 0x01,                  /*         Report Count (1),            */
++	 0x75, 0x03,                  /*         Report Size (3),             */
++	 0x25, 0x05,                  /*         Logical Maximum (5),         */
++	 0x09, 0x51,                  /*         Usage (Contact Identifier),  */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x75, 0x01,                  /*         Report Size (1),             */
++	 0x95, 0x03,                  /*         Report Count (3),            */
++	 0x81, 0x03,                  /*         Input (Constant, Variable),  */
++	 0x05, 0x01,                  /*         Usage Page (Desktop),        */
++	 0x26, 0x44, 0x0A,            /*         Logical Maximum (2628),      */
++	 0x75, 0x10,                  /*         Report Size (16),            */
++	 0x09, 0x30,                  /*         Usage (X),                   */
++	 0x46, 0x1A, 0x04,            /*         Physical Maximum (1050),     */
++	 0x95, 0x01,                  /*         Report Count (1),            */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x46, 0xBC, 0x02,            /*         Physical Maximum (700),      */
++	 0x26, 0x34, 0x05,            /*         Logical Maximum (1332),      */
++	 0x09, 0x31,                  /*         Usage (Y),                   */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0xC0,                        /*     End Collection,                  */
++	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
++	 0x09, 0x22,                  /*     Usage (Finger),                  */
++	 0xA1, 0x02,                  /*     Collection (Logical),            */
++	 0x25, 0x01,                  /*         Logical Maximum (1),         */
++	 0x09, 0x47,                  /*         Usage (Touch Valid),         */
++	 0x09, 0x42,                  /*         Usage (Tip Switch),          */
++	 0x95, 0x02,                  /*         Report Count (2),            */
++	 0x75, 0x01,                  /*         Report Size (1),             */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x95, 0x01,                  /*         Report Count (1),            */
++	 0x75, 0x03,                  /*         Report Size (3),             */
++	 0x25, 0x05,                  /*         Logical Maximum (5),         */
++	 0x09, 0x51,                  /*         Usage (Contact Identifier),  */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x75, 0x01,                  /*         Report Size (1),             */
++	 0x95, 0x03,                  /*         Report Count (3),            */
++	 0x81, 0x03,                  /*         Input (Constant, Variable),  */
++	 0x05, 0x01,                  /*         Usage Page (Desktop),        */
++	 0x26, 0x44, 0x0A,            /*         Logical Maximum (2628),      */
++	 0x75, 0x10,                  /*         Report Size (16),            */
++	 0x09, 0x30,                  /*         Usage (X),                   */
++	 0x46, 0x1A, 0x04,            /*         Physical Maximum (1050),     */
++	 0x95, 0x01,                  /*         Report Count (1),            */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0x46, 0xBC, 0x02,            /*         Physical Maximum (700),      */
++	 0x26, 0x34, 0x05,            /*         Logical Maximum (1332),      */
++	 0x09, 0x31,                  /*         Usage (Y),                   */
++	 0x81, 0x02,                  /*         Input (Variable),            */
++	 0xC0,                        /*     End Collection,                  */
++	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
++	 0x55, 0x0C,                  /*     Unit Exponent (12),              */
++	 0x66, 0x01, 0x10,            /*     Unit (Seconds),                  */
++	 0x47, 0xFF, 0xFF, 0x00, 0x00,/*     Physical Maximum (65535),        */
++	 0x27, 0xFF, 0xFF, 0x00, 0x00,/*     Logical Maximum (65535),         */
++	 0x75, 0x10,                  /*     Report Size (16),                */
++	 0x95, 0x01,                  /*     Report Count (1),                */
++	 0x09, 0x56,                  /*     Usage (Scan Time),               */
++	 0x81, 0x02,                  /*     Input (Variable),                */
++	 0x09, 0x54,                  /*     Usage (Contact Count),           */
++	 0x25, 0x7F,                  /*     Logical Maximum (127),           */
++	 0x75, 0x08,                  /*     Report Size (8),                 */
++	 0x81, 0x02,                  /*     Input (Variable),                */
++	 0x05, 0x09,                  /*     Usage Page (Button),             */
++	 0x09, 0x01,                  /*     Usage (01h),                     */
++	 0x25, 0x01,                  /*     Logical Maximum (1),             */
++	 0x75, 0x01,                  /*     Report Size (1),                 */
++	 0x95, 0x01,                  /*     Report Count (1),                */
++	 0x81, 0x02,                  /*     Input (Variable),                */
++	 0x95, 0x07,                  /*     Report Count (7),                */
++	 0x81, 0x03,                  /*     Input (Constant, Variable),      */
++	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
++	 0x85, 0x02,                  /*     Report ID (2),                   */
++	 0x09, 0x55,                  /*     Usage (Contact Count Maximum),   */
++	 0x09, 0x59,                  /*     Usage (59h),                     */
++	 0x75, 0x04,                  /*     Report Size (4),                 */
++	 0x95, 0x02,                  /*     Report Count (2),                */
++	 0x25, 0x0F,                  /*     Logical Maximum (15),            */
++	 0xB1, 0x02,                  /*     Feature (Variable),              */
++	 0x05, 0x0D,                  /*     Usage Page (Digitizer),          */
++	 0x85, 0x07,                  /*     Report ID (7),                   */
++	 0x09, 0x60,                  /*     Usage (60h),                     */
++	 0x75, 0x01,                  /*     Report Size (1),                 */
++	 0x95, 0x01,                  /*     Report Count (1),                */
++	 0x25, 0x01,                  /*     Logical Maximum (1),             */
++	 0xB1, 0x02,                  /*     Feature (Variable),              */
++	 0x95, 0x07,                  /*     Report Count (7),                */
++	 0xB1, 0x03,                  /*     Feature (Constant, Variable),    */
++	 0x85, 0x06,                  /*     Report ID (6),                   */
++	 0x06, 0x00, 0xFF,            /*     Usage Page (FF00h),              */
++	 0x09, 0xC5,                  /*     Usage (C5h),                     */
++	 0x26, 0xFF, 0x00,            /*     Logical Maximum (255),           */
++	 0x75, 0x08,                  /*     Report Size (8),                 */
++	 0x96, 0x00, 0x01,            /*     Report Count (256),              */
++	 0xB1, 0x02,                  /*     Feature (Variable),              */
++	 0xC0,                        /* End Collection,                      */
++	 0x06, 0x00, 0xFF,            /* Usage Page (FF00h),                  */
++	 0x09, 0x01,                  /* Usage (01h),                         */
++	 0xA1, 0x01,                  /* Collection (Application),            */
++	 0x85, 0x0D,                  /*     Report ID (13),                  */
++	 0x26, 0xFF, 0x00,            /*     Logical Maximum (255),           */
++	 0x19, 0x01,                  /*     Usage Minimum (01h),             */
++	 0x29, 0x02,                  /*     Usage Maximum (02h),             */
++	 0x75, 0x08,                  /*     Report Size (8),                 */
++	 0x95, 0x02,                  /*     Report Count (2),                */
++	 0xB1, 0x02,                  /*     Feature (Variable),              */
++	 0xC0,                        /* End Collection,                      */
++	 0x05, 0x0D,                  /* Usage Page (Digitizer),              */
++	 0x09, 0x0E,                  /* Usage (Configuration),               */
++	 0xA1, 0x01,                  /* Collection (Application),            */
++	 0x85, 0x03,                  /*     Report ID (3),                   */
++	 0x09, 0x22,                  /*     Usage (Finger),                  */
++	 0xA1, 0x02,                  /*     Collection (Logical),            */
++	 0x09, 0x52,                  /*         Usage (Device Mode),         */
++	 0x25, 0x0A,                  /*         Logical Maximum (10),        */
++	 0x95, 0x01,                  /*         Report Count (1),            */
++	 0xB1, 0x02,                  /*         Feature (Variable),          */
++	 0xC0,                        /*     End Collection,                  */
++	 0x09, 0x22,                  /*     Usage (Finger),                  */
++	 0xA1, 0x00,                  /*     Collection (Physical),           */
++	 0x85, 0x05,                  /*         Report ID (5),               */
++	 0x09, 0x57,                  /*         Usage (57h),                 */
++	 0x09, 0x58,                  /*         Usage (58h),                 */
++	 0x75, 0x01,                  /*         Report Size (1),             */
++	 0x95, 0x02,                  /*         Report Count (2),            */
++	 0x25, 0x01,                  /*         Logical Maximum (1),         */
++	 0xB1, 0x02,                  /*         Feature (Variable),          */
++	 0x95, 0x06,                  /*         Report Count (6),            */
++	 0xB1, 0x03,                  /*         Feature (Constant, Variable),*/
++	 0xC0,                        /*     End Collection,                  */
++	 0xC0                         /* End Collection                       */
++	},
++	.hid_report_desc_size = 475,
++	.i2c_name = "SYNA3602:00"
++};
++
++
++static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
++	{
++		.ident = "Teclast F6 Pro",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F6 Pro"),
++		},
++		.driver_data = (void *)&sipodev_desc
++	},
++	{
++		.ident = "Teclast F7",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F7"),
++		},
++		.driver_data = (void *)&sipodev_desc
++	},
++	{
++		.ident = "Trekstor Primebook C13",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
++		},
++		.driver_data = (void *)&sipodev_desc
++	},
++	{
++		.ident = "Trekstor Primebook C11",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
++		},
++		.driver_data = (void *)&sipodev_desc
++	},
++	{
++		.ident = "Direkt-Tek DTLAPY116-2",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY116-2"),
++		},
++		.driver_data = (void *)&sipodev_desc
++	},
++	{
++		.ident = "Mediacom Flexbook Edge 11",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
++		},
++		.driver_data = (void *)&sipodev_desc
++	}
++};
++
++
++struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
++{
++	struct i2c_hid_desc_override *override;
++	const struct dmi_system_id *system_id;
++
++	system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
++	if (!system_id)
++		return NULL;
++
++	override = system_id->driver_data;
++	if (strcmp(override->i2c_name, i2c_name))
++		return NULL;
++
++	return override->i2c_hid_desc;
++}
++
++char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
++					       unsigned int *size)
++{
++	struct i2c_hid_desc_override *override;
++	const struct dmi_system_id *system_id;
++
++	system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
++	if (!system_id)
++		return NULL;
++
++	override = system_id->driver_data;
++	if (strcmp(override->i2c_name, i2c_name))
++		return NULL;
++
++	*size = override->hid_report_desc_size;
++	return override->hid_report_desc;
++}
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+deleted file mode 100644
+index 88daa388e1f6..000000000000
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ /dev/null
+@@ -1,1328 +0,0 @@
+-/*
+- * HID over I2C protocol implementation
+- *
+- * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+- * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+- * Copyright (c) 2012 Red Hat, Inc
+- *
+- * This code is partly based on "USB HID support for Linux":
+- *
+- *  Copyright (c) 1999 Andreas Gal
+- *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+- *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+- *  Copyright (c) 2007-2008 Oliver Neukum
+- *  Copyright (c) 2006-2010 Jiri Kosina
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License.  See the file COPYING in the main directory of this archive for
+- * more details.
+- */
+-
+-#include <linux/module.h>
+-#include <linux/i2c.h>
+-#include <linux/interrupt.h>
+-#include <linux/input.h>
+-#include <linux/irq.h>
+-#include <linux/delay.h>
+-#include <linux/slab.h>
+-#include <linux/pm.h>
+-#include <linux/pm_runtime.h>
+-#include <linux/device.h>
+-#include <linux/wait.h>
+-#include <linux/err.h>
+-#include <linux/string.h>
+-#include <linux/list.h>
+-#include <linux/jiffies.h>
+-#include <linux/kernel.h>
+-#include <linux/hid.h>
+-#include <linux/mutex.h>
+-#include <linux/acpi.h>
+-#include <linux/of.h>
+-#include <linux/regulator/consumer.h>
+-
+-#include <linux/platform_data/i2c-hid.h>
+-
+-#include "../hid-ids.h"
+-
+-/* quirks to control the device */
+-#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV	BIT(0)
+-#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET	BIT(1)
+-#define I2C_HID_QUIRK_NO_RUNTIME_PM		BIT(2)
+-#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP		BIT(3)
+-
+-/* flags */
+-#define I2C_HID_STARTED		0
+-#define I2C_HID_RESET_PENDING	1
+-#define I2C_HID_READ_PENDING	2
+-
+-#define I2C_HID_PWR_ON		0x00
+-#define I2C_HID_PWR_SLEEP	0x01
+-
+-/* debug option */
+-static bool debug;
+-module_param(debug, bool, 0444);
+-MODULE_PARM_DESC(debug, "print a lot of debug information");
+-
+-#define i2c_hid_dbg(ihid, fmt, arg...)					  \
+-do {									  \
+-	if (debug)							  \
+-		dev_printk(KERN_DEBUG, &(ihid)->client->dev, fmt, ##arg); \
+-} while (0)
+-
+-struct i2c_hid_desc {
+-	__le16 wHIDDescLength;
+-	__le16 bcdVersion;
+-	__le16 wReportDescLength;
+-	__le16 wReportDescRegister;
+-	__le16 wInputRegister;
+-	__le16 wMaxInputLength;
+-	__le16 wOutputRegister;
+-	__le16 wMaxOutputLength;
+-	__le16 wCommandRegister;
+-	__le16 wDataRegister;
+-	__le16 wVendorID;
+-	__le16 wProductID;
+-	__le16 wVersionID;
+-	__le32 reserved;
+-} __packed;
+-
+-struct i2c_hid_cmd {
+-	unsigned int registerIndex;
+-	__u8 opcode;
+-	unsigned int length;
+-	bool wait;
+-};
+-
+-union command {
+-	u8 data[0];
+-	struct cmd {
+-		__le16 reg;
+-		__u8 reportTypeID;
+-		__u8 opcode;
+-	} __packed c;
+-};
+-
+-#define I2C_HID_CMD(opcode_) \
+-	.opcode = opcode_, .length = 4, \
+-	.registerIndex = offsetof(struct i2c_hid_desc, wCommandRegister)
+-
+-/* fetch HID descriptor */
+-static const struct i2c_hid_cmd hid_descr_cmd = { .length = 2 };
+-/* fetch report descriptors */
+-static const struct i2c_hid_cmd hid_report_descr_cmd = {
+-		.registerIndex = offsetof(struct i2c_hid_desc,
+-			wReportDescRegister),
+-		.opcode = 0x00,
+-		.length = 2 };
+-/* commands */
+-static const struct i2c_hid_cmd hid_reset_cmd =		{ I2C_HID_CMD(0x01),
+-							  .wait = true };
+-static const struct i2c_hid_cmd hid_get_report_cmd =	{ I2C_HID_CMD(0x02) };
+-static const struct i2c_hid_cmd hid_set_report_cmd =	{ I2C_HID_CMD(0x03) };
+-static const struct i2c_hid_cmd hid_set_power_cmd =	{ I2C_HID_CMD(0x08) };
+-static const struct i2c_hid_cmd hid_no_cmd =		{ .length = 0 };
+-
+-/*
+- * These definitions are not used here, but are defined by the spec.
+- * Keeping them here for documentation purposes.
+- *
+- * static const struct i2c_hid_cmd hid_get_idle_cmd = { I2C_HID_CMD(0x04) };
+- * static const struct i2c_hid_cmd hid_set_idle_cmd = { I2C_HID_CMD(0x05) };
+- * static const struct i2c_hid_cmd hid_get_protocol_cmd = { I2C_HID_CMD(0x06) };
+- * static const struct i2c_hid_cmd hid_set_protocol_cmd = { I2C_HID_CMD(0x07) };
+- */
+-
+-/* The main device structure */
+-struct i2c_hid {
+-	struct i2c_client	*client;	/* i2c client */
+-	struct hid_device	*hid;	/* pointer to corresponding HID dev */
+-	union {
+-		__u8 hdesc_buffer[sizeof(struct i2c_hid_desc)];
+-		struct i2c_hid_desc hdesc;	/* the HID Descriptor */
+-	};
+-	__le16			wHIDDescRegister; /* location of the i2c
+-						   * register of the HID
+-						   * descriptor. */
+-	unsigned int		bufsize;	/* i2c buffer size */
+-	u8			*inbuf;		/* Input buffer */
+-	u8			*rawbuf;	/* Raw Input buffer */
+-	u8			*cmdbuf;	/* Command buffer */
+-	u8			*argsbuf;	/* Command arguments buffer */
+-
+-	unsigned long		flags;		/* device flags */
+-	unsigned long		quirks;		/* Various quirks */
+-
+-	wait_queue_head_t	wait;		/* For waiting the interrupt */
+-
+-	struct i2c_hid_platform_data pdata;
+-
+-	bool			irq_wake_enabled;
+-	struct mutex		reset_lock;
+-
+-	unsigned long		sleep_delay;
+-};
+-
+-static const struct i2c_hid_quirks {
+-	__u16 idVendor;
+-	__u16 idProduct;
+-	__u32 quirks;
+-} i2c_hid_quirks[] = {
+-	{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
+-		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+-	{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
+-		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+-	{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
+-		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
+-		I2C_HID_QUIRK_NO_RUNTIME_PM },
+-	{ I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
+-		I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
+-	{ 0, 0 }
+-};
+-
+-/*
+- * i2c_hid_lookup_quirk: return any quirks associated with a I2C HID device
+- * @idVendor: the 16-bit vendor ID
+- * @idProduct: the 16-bit product ID
+- *
+- * Returns: a u32 quirks value.
+- */
+-static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
+-{
+-	u32 quirks = 0;
+-	int n;
+-
+-	for (n = 0; i2c_hid_quirks[n].idVendor; n++)
+-		if (i2c_hid_quirks[n].idVendor == idVendor &&
+-		    (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
+-		     i2c_hid_quirks[n].idProduct == idProduct))
+-			quirks = i2c_hid_quirks[n].quirks;
+-
+-	return quirks;
+-}
+-
+-static int __i2c_hid_command(struct i2c_client *client,
+-		const struct i2c_hid_cmd *command, u8 reportID,
+-		u8 reportType, u8 *args, int args_len,
+-		unsigned char *buf_recv, int data_len)
+-{
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	union command *cmd = (union command *)ihid->cmdbuf;
+-	int ret;
+-	struct i2c_msg msg[2];
+-	int msg_num = 1;
+-
+-	int length = command->length;
+-	bool wait = command->wait;
+-	unsigned int registerIndex = command->registerIndex;
+-
+-	/* special case for hid_descr_cmd */
+-	if (command == &hid_descr_cmd) {
+-		cmd->c.reg = ihid->wHIDDescRegister;
+-	} else {
+-		cmd->data[0] = ihid->hdesc_buffer[registerIndex];
+-		cmd->data[1] = ihid->hdesc_buffer[registerIndex + 1];
+-	}
+-
+-	if (length > 2) {
+-		cmd->c.opcode = command->opcode;
+-		cmd->c.reportTypeID = reportID | reportType << 4;
+-	}
+-
+-	memcpy(cmd->data + length, args, args_len);
+-	length += args_len;
+-
+-	i2c_hid_dbg(ihid, "%s: cmd=%*ph\n", __func__, length, cmd->data);
+-
+-	msg[0].addr = client->addr;
+-	msg[0].flags = client->flags & I2C_M_TEN;
+-	msg[0].len = length;
+-	msg[0].buf = cmd->data;
+-	if (data_len > 0) {
+-		msg[1].addr = client->addr;
+-		msg[1].flags = client->flags & I2C_M_TEN;
+-		msg[1].flags |= I2C_M_RD;
+-		msg[1].len = data_len;
+-		msg[1].buf = buf_recv;
+-		msg_num = 2;
+-		set_bit(I2C_HID_READ_PENDING, &ihid->flags);
+-	}
+-
+-	if (wait)
+-		set_bit(I2C_HID_RESET_PENDING, &ihid->flags);
+-
+-	ret = i2c_transfer(client->adapter, msg, msg_num);
+-
+-	if (data_len > 0)
+-		clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
+-
+-	if (ret != msg_num)
+-		return ret < 0 ? ret : -EIO;
+-
+-	ret = 0;
+-
+-	if (wait && (ihid->quirks & I2C_HID_QUIRK_NO_IRQ_AFTER_RESET)) {
+-		msleep(100);
+-	} else if (wait) {
+-		i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
+-		if (!wait_event_timeout(ihid->wait,
+-				!test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
+-				msecs_to_jiffies(5000)))
+-			ret = -ENODATA;
+-		i2c_hid_dbg(ihid, "%s: finished.\n", __func__);
+-	}
+-
+-	return ret;
+-}
+-
+-static int i2c_hid_command(struct i2c_client *client,
+-		const struct i2c_hid_cmd *command,
+-		unsigned char *buf_recv, int data_len)
+-{
+-	return __i2c_hid_command(client, command, 0, 0, NULL, 0,
+-				buf_recv, data_len);
+-}
+-
+-static int i2c_hid_get_report(struct i2c_client *client, u8 reportType,
+-		u8 reportID, unsigned char *buf_recv, int data_len)
+-{
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	u8 args[3];
+-	int ret;
+-	int args_len = 0;
+-	u16 readRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
+-
+-	i2c_hid_dbg(ihid, "%s\n", __func__);
+-
+-	if (reportID >= 0x0F) {
+-		args[args_len++] = reportID;
+-		reportID = 0x0F;
+-	}
+-
+-	args[args_len++] = readRegister & 0xFF;
+-	args[args_len++] = readRegister >> 8;
+-
+-	ret = __i2c_hid_command(client, &hid_get_report_cmd, reportID,
+-		reportType, args, args_len, buf_recv, data_len);
+-	if (ret) {
+-		dev_err(&client->dev,
+-			"failed to retrieve report from device.\n");
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-/**
+- * i2c_hid_set_or_send_report: forward an incoming report to the device
+- * @client: the i2c_client of the device
+- * @reportType: 0x03 for HID_FEATURE_REPORT ; 0x02 for HID_OUTPUT_REPORT
+- * @reportID: the report ID
+- * @buf: the actual data to transfer, without the report ID
+- * @len: size of buf
+- * @use_data: true: use SET_REPORT HID command, false: send plain OUTPUT report
+- */
+-static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
+-		u8 reportID, unsigned char *buf, size_t data_len, bool use_data)
+-{
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	u8 *args = ihid->argsbuf;
+-	const struct i2c_hid_cmd *hidcmd;
+-	int ret;
+-	u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
+-	u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
+-	u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
+-	u16 size;
+-	int args_len;
+-	int index = 0;
+-
+-	i2c_hid_dbg(ihid, "%s\n", __func__);
+-
+-	if (data_len > ihid->bufsize)
+-		return -EINVAL;
+-
+-	size =		2			/* size */ +
+-			(reportID ? 1 : 0)	/* reportID */ +
+-			data_len		/* buf */;
+-	args_len =	(reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+-			2			/* dataRegister */ +
+-			size			/* args */;
+-
+-	if (!use_data && maxOutputLength == 0)
+-		return -ENOSYS;
+-
+-	if (reportID >= 0x0F) {
+-		args[index++] = reportID;
+-		reportID = 0x0F;
+-	}
+-
+-	/*
+-	 * use the data register for feature reports or if the device does not
+-	 * support the output register
+-	 */
+-	if (use_data) {
+-		args[index++] = dataRegister & 0xFF;
+-		args[index++] = dataRegister >> 8;
+-		hidcmd = &hid_set_report_cmd;
+-	} else {
+-		args[index++] = outputRegister & 0xFF;
+-		args[index++] = outputRegister >> 8;
+-		hidcmd = &hid_no_cmd;
+-	}
+-
+-	args[index++] = size & 0xFF;
+-	args[index++] = size >> 8;
+-
+-	if (reportID)
+-		args[index++] = reportID;
+-
+-	memcpy(&args[index], buf, data_len);
+-
+-	ret = __i2c_hid_command(client, hidcmd, reportID,
+-		reportType, args, args_len, NULL, 0);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to set a report to device.\n");
+-		return ret;
+-	}
+-
+-	return data_len;
+-}
+-
+-static int i2c_hid_set_power(struct i2c_client *client, int power_state)
+-{
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	int ret;
+-	unsigned long now, delay;
+-
+-	i2c_hid_dbg(ihid, "%s\n", __func__);
+-
+-	/*
+-	 * Some devices require to send a command to wakeup before power on.
+-	 * The call will get a return value (EREMOTEIO) but device will be
+-	 * triggered and activated. After that, it goes like a normal device.
+-	 */
+-	if (power_state == I2C_HID_PWR_ON &&
+-	    ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
+-		ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
+-
+-		/* Device was already activated */
+-		if (!ret)
+-			goto set_pwr_exit;
+-	}
+-
+-	if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
+-	    power_state == I2C_HID_PWR_ON) {
+-		now = jiffies;
+-		if (time_after(ihid->sleep_delay, now)) {
+-			delay = jiffies_to_usecs(ihid->sleep_delay - now);
+-			usleep_range(delay, delay + 1);
+-		}
+-	}
+-
+-	ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
+-		0, NULL, 0, NULL, 0);
+-
+-	if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
+-	    power_state == I2C_HID_PWR_SLEEP)
+-		ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
+-
+-	if (ret)
+-		dev_err(&client->dev, "failed to change power setting.\n");
+-
+-set_pwr_exit:
+-	return ret;
+-}
+-
+-static int i2c_hid_hwreset(struct i2c_client *client)
+-{
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	int ret;
+-
+-	i2c_hid_dbg(ihid, "%s\n", __func__);
+-
+-	/*
+-	 * This prevents sending feature reports while the device is
+-	 * being reset. Otherwise we may lose the reset complete
+-	 * interrupt.
+-	 */
+-	mutex_lock(&ihid->reset_lock);
+-
+-	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+-	if (ret)
+-		goto out_unlock;
+-
+-	/*
+-	 * The HID over I2C specification states that if a DEVICE needs time
+-	 * after the PWR_ON request, it should utilise CLOCK stretching.
+-	 * However, it has been observered that the Windows driver provides a
+-	 * 1ms sleep between the PWR_ON and RESET requests and that some devices
+-	 * rely on this.
+-	 */
+-	usleep_range(1000, 5000);
+-
+-	i2c_hid_dbg(ihid, "resetting...\n");
+-
+-	ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to reset device.\n");
+-		i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+-	}
+-
+-out_unlock:
+-	mutex_unlock(&ihid->reset_lock);
+-	return ret;
+-}
+-
+-static void i2c_hid_get_input(struct i2c_hid *ihid)
+-{
+-	int ret;
+-	u32 ret_size;
+-	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+-
+-	if (size > ihid->bufsize)
+-		size = ihid->bufsize;
+-
+-	ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
+-	if (ret != size) {
+-		if (ret < 0)
+-			return;
+-
+-		dev_err(&ihid->client->dev, "%s: got %d data instead of %d\n",
+-			__func__, ret, size);
+-		return;
+-	}
+-
+-	ret_size = ihid->inbuf[0] | ihid->inbuf[1] << 8;
+-
+-	if (!ret_size) {
+-		/* host or device initiated RESET completed */
+-		if (test_and_clear_bit(I2C_HID_RESET_PENDING, &ihid->flags))
+-			wake_up(&ihid->wait);
+-		return;
+-	}
+-
+-	if ((ret_size > size) || (ret_size < 2)) {
+-		dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+-			__func__, size, ret_size);
+-		return;
+-	}
+-
+-	i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);
+-
+-	if (test_bit(I2C_HID_STARTED, &ihid->flags))
+-		hid_input_report(ihid->hid, HID_INPUT_REPORT, ihid->inbuf + 2,
+-				ret_size - 2, 1);
+-
+-	return;
+-}
+-
+-static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
+-{
+-	struct i2c_hid *ihid = dev_id;
+-
+-	if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
+-		return IRQ_HANDLED;
+-
+-	i2c_hid_get_input(ihid);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-static int i2c_hid_get_report_length(struct hid_report *report)
+-{
+-	return ((report->size - 1) >> 3) + 1 +
+-		report->device->report_enum[report->type].numbered + 2;
+-}
+-
+-/*
+- * Traverse the supplied list of reports and find the longest
+- */
+-static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
+-		unsigned int *max)
+-{
+-	struct hid_report *report;
+-	unsigned int size;
+-
+-	/* We should not rely on wMaxInputLength, as some devices may set it to
+-	 * a wrong length. */
+-	list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
+-		size = i2c_hid_get_report_length(report);
+-		if (*max < size)
+-			*max = size;
+-	}
+-}
+-
+-static void i2c_hid_free_buffers(struct i2c_hid *ihid)
+-{
+-	kfree(ihid->inbuf);
+-	kfree(ihid->rawbuf);
+-	kfree(ihid->argsbuf);
+-	kfree(ihid->cmdbuf);
+-	ihid->inbuf = NULL;
+-	ihid->rawbuf = NULL;
+-	ihid->cmdbuf = NULL;
+-	ihid->argsbuf = NULL;
+-	ihid->bufsize = 0;
+-}
+-
+-static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
+-{
+-	/* the worst case is computed from the set_report command with a
+-	 * reportID > 15 and the maximum report length */
+-	int args_len = sizeof(__u8) + /* ReportID */
+-		       sizeof(__u8) + /* optional ReportID byte */
+-		       sizeof(__u16) + /* data register */
+-		       sizeof(__u16) + /* size of the report */
+-		       report_size; /* report */
+-
+-	ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
+-	ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
+-	ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
+-	ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
+-
+-	if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
+-		i2c_hid_free_buffers(ihid);
+-		return -ENOMEM;
+-	}
+-
+-	ihid->bufsize = report_size;
+-
+-	return 0;
+-}
+-
+-static int i2c_hid_get_raw_report(struct hid_device *hid,
+-		unsigned char report_number, __u8 *buf, size_t count,
+-		unsigned char report_type)
+-{
+-	struct i2c_client *client = hid->driver_data;
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	size_t ret_count, ask_count;
+-	int ret;
+-
+-	if (report_type == HID_OUTPUT_REPORT)
+-		return -EINVAL;
+-
+-	/* +2 bytes to include the size of the reply in the query buffer */
+-	ask_count = min(count + 2, (size_t)ihid->bufsize);
+-
+-	ret = i2c_hid_get_report(client,
+-			report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
+-			report_number, ihid->rawbuf, ask_count);
+-
+-	if (ret < 0)
+-		return ret;
+-
+-	ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
+-
+-	if (ret_count <= 2)
+-		return 0;
+-
+-	ret_count = min(ret_count, ask_count);
+-
+-	/* The query buffer contains the size, dropping it in the reply */
+-	count = min(count, ret_count - 2);
+-	memcpy(buf, ihid->rawbuf + 2, count);
+-
+-	return count;
+-}
+-
+-static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
+-		size_t count, unsigned char report_type, bool use_data)
+-{
+-	struct i2c_client *client = hid->driver_data;
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	int report_id = buf[0];
+-	int ret;
+-
+-	if (report_type == HID_INPUT_REPORT)
+-		return -EINVAL;
+-
+-	mutex_lock(&ihid->reset_lock);
+-
+-	if (report_id) {
+-		buf++;
+-		count--;
+-	}
+-
+-	ret = i2c_hid_set_or_send_report(client,
+-				report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
+-				report_id, buf, count, use_data);
+-
+-	if (report_id && ret >= 0)
+-		ret++; /* add report_id to the number of transfered bytes */
+-
+-	mutex_unlock(&ihid->reset_lock);
+-
+-	return ret;
+-}
+-
+-static int i2c_hid_output_report(struct hid_device *hid, __u8 *buf,
+-		size_t count)
+-{
+-	return i2c_hid_output_raw_report(hid, buf, count, HID_OUTPUT_REPORT,
+-			false);
+-}
+-
+-static int i2c_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+-			       __u8 *buf, size_t len, unsigned char rtype,
+-			       int reqtype)
+-{
+-	switch (reqtype) {
+-	case HID_REQ_GET_REPORT:
+-		return i2c_hid_get_raw_report(hid, reportnum, buf, len, rtype);
+-	case HID_REQ_SET_REPORT:
+-		if (buf[0] != reportnum)
+-			return -EINVAL;
+-		return i2c_hid_output_raw_report(hid, buf, len, rtype, true);
+-	default:
+-		return -EIO;
+-	}
+-}
+-
+-static int i2c_hid_parse(struct hid_device *hid)
+-{
+-	struct i2c_client *client = hid->driver_data;
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	struct i2c_hid_desc *hdesc = &ihid->hdesc;
+-	unsigned int rsize;
+-	char *rdesc;
+-	int ret;
+-	int tries = 3;
+-
+-	i2c_hid_dbg(ihid, "entering %s\n", __func__);
+-
+-	rsize = le16_to_cpu(hdesc->wReportDescLength);
+-	if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
+-		dbg_hid("weird size of report descriptor (%u)\n", rsize);
+-		return -EINVAL;
+-	}
+-
+-	do {
+-		ret = i2c_hid_hwreset(client);
+-		if (ret)
+-			msleep(1000);
+-	} while (tries-- > 0 && ret);
+-
+-	if (ret)
+-		return ret;
+-
+-	rdesc = kzalloc(rsize, GFP_KERNEL);
+-
+-	if (!rdesc) {
+-		dbg_hid("couldn't allocate rdesc memory\n");
+-		return -ENOMEM;
+-	}
+-
+-	i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+-
+-	ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);
+-	if (ret) {
+-		hid_err(hid, "reading report descriptor failed\n");
+-		kfree(rdesc);
+-		return -EIO;
+-	}
+-
+-	i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
+-
+-	ret = hid_parse_report(hid, rdesc, rsize);
+-	kfree(rdesc);
+-	if (ret) {
+-		dbg_hid("parsing report descriptor failed\n");
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-static int i2c_hid_start(struct hid_device *hid)
+-{
+-	struct i2c_client *client = hid->driver_data;
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	int ret;
+-	unsigned int bufsize = HID_MIN_BUFFER_SIZE;
+-
+-	i2c_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
+-	i2c_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
+-	i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
+-
+-	if (bufsize > ihid->bufsize) {
+-		disable_irq(client->irq);
+-		i2c_hid_free_buffers(ihid);
+-
+-		ret = i2c_hid_alloc_buffers(ihid, bufsize);
+-		enable_irq(client->irq);
+-
+-		if (ret)
+-			return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-static void i2c_hid_stop(struct hid_device *hid)
+-{
+-	hid->claimed = 0;
+-}
+-
+-static int i2c_hid_open(struct hid_device *hid)
+-{
+-	struct i2c_client *client = hid->driver_data;
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	int ret = 0;
+-
+-	ret = pm_runtime_get_sync(&client->dev);
+-	if (ret < 0)
+-		return ret;
+-
+-	set_bit(I2C_HID_STARTED, &ihid->flags);
+-	return 0;
+-}
+-
+-static void i2c_hid_close(struct hid_device *hid)
+-{
+-	struct i2c_client *client = hid->driver_data;
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-
+-	clear_bit(I2C_HID_STARTED, &ihid->flags);
+-
+-	/* Save some power */
+-	pm_runtime_put(&client->dev);
+-}
+-
+-static int i2c_hid_power(struct hid_device *hid, int lvl)
+-{
+-	struct i2c_client *client = hid->driver_data;
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-
+-	i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
+-
+-	switch (lvl) {
+-	case PM_HINT_FULLON:
+-		pm_runtime_get_sync(&client->dev);
+-		break;
+-	case PM_HINT_NORMAL:
+-		pm_runtime_put(&client->dev);
+-		break;
+-	}
+-	return 0;
+-}
+-
+-struct hid_ll_driver i2c_hid_ll_driver = {
+-	.parse = i2c_hid_parse,
+-	.start = i2c_hid_start,
+-	.stop = i2c_hid_stop,
+-	.open = i2c_hid_open,
+-	.close = i2c_hid_close,
+-	.power = i2c_hid_power,
+-	.output_report = i2c_hid_output_report,
+-	.raw_request = i2c_hid_raw_request,
+-};
+-EXPORT_SYMBOL_GPL(i2c_hid_ll_driver);
+-
+-static int i2c_hid_init_irq(struct i2c_client *client)
+-{
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	unsigned long irqflags = 0;
+-	int ret;
+-
+-	dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
+-
+-	if (!irq_get_trigger_type(client->irq))
+-		irqflags = IRQF_TRIGGER_LOW;
+-
+-	ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
+-				   irqflags | IRQF_ONESHOT, client->name, ihid);
+-	if (ret < 0) {
+-		dev_warn(&client->dev,
+-			"Could not register for %s interrupt, irq = %d,"
+-			" ret = %d\n",
+-			client->name, client->irq, ret);
+-
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
+-{
+-	struct i2c_client *client = ihid->client;
+-	struct i2c_hid_desc *hdesc = &ihid->hdesc;
+-	unsigned int dsize;
+-	int ret;
+-
+-	/* i2c hid fetch using a fixed descriptor size (30 bytes) */
+-	i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
+-	ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer,
+-				sizeof(struct i2c_hid_desc));
+-	if (ret) {
+-		dev_err(&client->dev, "hid_descr_cmd failed\n");
+-		return -ENODEV;
+-	}
+-
+-	/* Validate the length of HID descriptor, the 4 first bytes:
+-	 * bytes 0-1 -> length
+-	 * bytes 2-3 -> bcdVersion (has to be 1.00) */
+-	/* check bcdVersion == 1.0 */
+-	if (le16_to_cpu(hdesc->bcdVersion) != 0x0100) {
+-		dev_err(&client->dev,
+-			"unexpected HID descriptor bcdVersion (0x%04hx)\n",
+-			le16_to_cpu(hdesc->bcdVersion));
+-		return -ENODEV;
+-	}
+-
+-	/* Descriptor length should be 30 bytes as per the specification */
+-	dsize = le16_to_cpu(hdesc->wHIDDescLength);
+-	if (dsize != sizeof(struct i2c_hid_desc)) {
+-		dev_err(&client->dev, "weird size of HID descriptor (%u)\n",
+-			dsize);
+-		return -ENODEV;
+-	}
+-	i2c_hid_dbg(ihid, "HID Descriptor: %*ph\n", dsize, ihid->hdesc_buffer);
+-	return 0;
+-}
+-
+-#ifdef CONFIG_ACPI
+-static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
+-	/*
+-	 * The CHPN0001 ACPI device, which is used to describe the Chipone
+-	 * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
+-	 */
+-	{"CHPN0001", 0 },
+-	{ },
+-};
+-
+-static int i2c_hid_acpi_pdata(struct i2c_client *client,
+-		struct i2c_hid_platform_data *pdata)
+-{
+-	static guid_t i2c_hid_guid =
+-		GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
+-			  0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
+-	union acpi_object *obj;
+-	struct acpi_device *adev;
+-	acpi_handle handle;
+-
+-	handle = ACPI_HANDLE(&client->dev);
+-	if (!handle || acpi_bus_get_device(handle, &adev)) {
+-		dev_err(&client->dev, "Error could not get ACPI device\n");
+-		return -ENODEV;
+-	}
+-
+-	if (acpi_match_device_ids(adev, i2c_hid_acpi_blacklist) == 0)
+-		return -ENODEV;
+-
+-	obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
+-				      ACPI_TYPE_INTEGER);
+-	if (!obj) {
+-		dev_err(&client->dev, "Error _DSM call to get HID descriptor address failed\n");
+-		return -ENODEV;
+-	}
+-
+-	pdata->hid_descriptor_address = obj->integer.value;
+-	ACPI_FREE(obj);
+-
+-	return 0;
+-}
+-
+-static void i2c_hid_acpi_fix_up_power(struct device *dev)
+-{
+-	struct acpi_device *adev;
+-
+-	adev = ACPI_COMPANION(dev);
+-	if (adev)
+-		acpi_device_fix_up_power(adev);
+-}
+-
+-static const struct acpi_device_id i2c_hid_acpi_match[] = {
+-	{"ACPI0C50", 0 },
+-	{"PNP0C50", 0 },
+-	{ },
+-};
+-MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
+-#else
+-static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
+-		struct i2c_hid_platform_data *pdata)
+-{
+-	return -ENODEV;
+-}
+-
+-static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {}
+-#endif
+-
+-#ifdef CONFIG_OF
+-static int i2c_hid_of_probe(struct i2c_client *client,
+-		struct i2c_hid_platform_data *pdata)
+-{
+-	struct device *dev = &client->dev;
+-	u32 val;
+-	int ret;
+-
+-	ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
+-	if (ret) {
+-		dev_err(&client->dev, "HID register address not provided\n");
+-		return -ENODEV;
+-	}
+-	if (val >> 16) {
+-		dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
+-			val);
+-		return -EINVAL;
+-	}
+-	pdata->hid_descriptor_address = val;
+-
+-	return 0;
+-}
+-
+-static const struct of_device_id i2c_hid_of_match[] = {
+-	{ .compatible = "hid-over-i2c" },
+-	{},
+-};
+-MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
+-#else
+-static inline int i2c_hid_of_probe(struct i2c_client *client,
+-		struct i2c_hid_platform_data *pdata)
+-{
+-	return -ENODEV;
+-}
+-#endif
+-
+-static void i2c_hid_fwnode_probe(struct i2c_client *client,
+-				 struct i2c_hid_platform_data *pdata)
+-{
+-	u32 val;
+-
+-	if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
+-				      &val))
+-		pdata->post_power_delay_ms = val;
+-}
+-
+-static int i2c_hid_probe(struct i2c_client *client,
+-			 const struct i2c_device_id *dev_id)
+-{
+-	int ret;
+-	struct i2c_hid *ihid;
+-	struct hid_device *hid;
+-	__u16 hidRegister;
+-	struct i2c_hid_platform_data *platform_data = client->dev.platform_data;
+-
+-	dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
+-
+-	if (!client->irq) {
+-		dev_err(&client->dev,
+-			"HID over i2c has not been provided an Int IRQ\n");
+-		return -EINVAL;
+-	}
+-
+-	if (client->irq < 0) {
+-		if (client->irq != -EPROBE_DEFER)
+-			dev_err(&client->dev,
+-				"HID over i2c doesn't have a valid IRQ\n");
+-		return client->irq;
+-	}
+-
+-	ihid = devm_kzalloc(&client->dev, sizeof(*ihid), GFP_KERNEL);
+-	if (!ihid)
+-		return -ENOMEM;
+-
+-	if (client->dev.of_node) {
+-		ret = i2c_hid_of_probe(client, &ihid->pdata);
+-		if (ret)
+-			return ret;
+-	} else if (!platform_data) {
+-		ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
+-		if (ret)
+-			return ret;
+-	} else {
+-		ihid->pdata = *platform_data;
+-	}
+-
+-	/* Parse platform agnostic common properties from ACPI / device tree */
+-	i2c_hid_fwnode_probe(client, &ihid->pdata);
+-
+-	ihid->pdata.supplies[0].supply = "vdd";
+-	ihid->pdata.supplies[1].supply = "vddl";
+-
+-	ret = devm_regulator_bulk_get(&client->dev,
+-				      ARRAY_SIZE(ihid->pdata.supplies),
+-				      ihid->pdata.supplies);
+-	if (ret)
+-		return ret;
+-
+-	ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
+-				    ihid->pdata.supplies);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (ihid->pdata.post_power_delay_ms)
+-		msleep(ihid->pdata.post_power_delay_ms);
+-
+-	i2c_set_clientdata(client, ihid);
+-
+-	ihid->client = client;
+-
+-	hidRegister = ihid->pdata.hid_descriptor_address;
+-	ihid->wHIDDescRegister = cpu_to_le16(hidRegister);
+-
+-	init_waitqueue_head(&ihid->wait);
+-	mutex_init(&ihid->reset_lock);
+-
+-	/* we need to allocate the command buffer without knowing the maximum
+-	 * size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
+-	 * real computation later. */
+-	ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE);
+-	if (ret < 0)
+-		goto err_regulator;
+-
+-	i2c_hid_acpi_fix_up_power(&client->dev);
+-
+-	pm_runtime_get_noresume(&client->dev);
+-	pm_runtime_set_active(&client->dev);
+-	pm_runtime_enable(&client->dev);
+-	device_enable_async_suspend(&client->dev);
+-
+-	/* Make sure there is something at this address */
+-	ret = i2c_smbus_read_byte(client);
+-	if (ret < 0) {
+-		dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
+-		ret = -ENXIO;
+-		goto err_pm;
+-	}
+-
+-	ret = i2c_hid_fetch_hid_descriptor(ihid);
+-	if (ret < 0)
+-		goto err_pm;
+-
+-	ret = i2c_hid_init_irq(client);
+-	if (ret < 0)
+-		goto err_pm;
+-
+-	hid = hid_allocate_device();
+-	if (IS_ERR(hid)) {
+-		ret = PTR_ERR(hid);
+-		goto err_irq;
+-	}
+-
+-	ihid->hid = hid;
+-
+-	hid->driver_data = client;
+-	hid->ll_driver = &i2c_hid_ll_driver;
+-	hid->dev.parent = &client->dev;
+-	hid->bus = BUS_I2C;
+-	hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
+-	hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+-	hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+-
+-	snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
+-		 client->name, hid->vendor, hid->product);
+-	strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+-
+-	ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+-
+-	ret = hid_add_device(hid);
+-	if (ret) {
+-		if (ret != -ENODEV)
+-			hid_err(client, "can't add hid device: %d\n", ret);
+-		goto err_mem_free;
+-	}
+-
+-	if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+-		pm_runtime_put(&client->dev);
+-
+-	return 0;
+-
+-err_mem_free:
+-	hid_destroy_device(hid);
+-
+-err_irq:
+-	free_irq(client->irq, ihid);
+-
+-err_pm:
+-	pm_runtime_put_noidle(&client->dev);
+-	pm_runtime_disable(&client->dev);
+-
+-err_regulator:
+-	regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
+-			       ihid->pdata.supplies);
+-	i2c_hid_free_buffers(ihid);
+-	return ret;
+-}
+-
+-static int i2c_hid_remove(struct i2c_client *client)
+-{
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	struct hid_device *hid;
+-
+-	if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+-		pm_runtime_get_sync(&client->dev);
+-	pm_runtime_disable(&client->dev);
+-	pm_runtime_set_suspended(&client->dev);
+-	pm_runtime_put_noidle(&client->dev);
+-
+-	hid = ihid->hid;
+-	hid_destroy_device(hid);
+-
+-	free_irq(client->irq, ihid);
+-
+-	if (ihid->bufsize)
+-		i2c_hid_free_buffers(ihid);
+-
+-	regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
+-			       ihid->pdata.supplies);
+-
+-	return 0;
+-}
+-
+-static void i2c_hid_shutdown(struct i2c_client *client)
+-{
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-
+-	i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+-	free_irq(client->irq, ihid);
+-}
+-
+-#ifdef CONFIG_PM_SLEEP
+-static int i2c_hid_suspend(struct device *dev)
+-{
+-	struct i2c_client *client = to_i2c_client(dev);
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	struct hid_device *hid = ihid->hid;
+-	int ret;
+-	int wake_status;
+-
+-	if (hid->driver && hid->driver->suspend) {
+-		/*
+-		 * Wake up the device so that IO issues in
+-		 * HID driver's suspend code can succeed.
+-		 */
+-		ret = pm_runtime_resume(dev);
+-		if (ret < 0)
+-			return ret;
+-
+-		ret = hid->driver->suspend(hid, PMSG_SUSPEND);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+-	if (!pm_runtime_suspended(dev)) {
+-		/* Save some power */
+-		i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+-
+-		disable_irq(client->irq);
+-	}
+-
+-	if (device_may_wakeup(&client->dev)) {
+-		wake_status = enable_irq_wake(client->irq);
+-		if (!wake_status)
+-			ihid->irq_wake_enabled = true;
+-		else
+-			hid_warn(hid, "Failed to enable irq wake: %d\n",
+-				wake_status);
+-	} else {
+-		regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
+-				       ihid->pdata.supplies);
+-	}
+-
+-	return 0;
+-}
+-
+-static int i2c_hid_resume(struct device *dev)
+-{
+-	int ret;
+-	struct i2c_client *client = to_i2c_client(dev);
+-	struct i2c_hid *ihid = i2c_get_clientdata(client);
+-	struct hid_device *hid = ihid->hid;
+-	int wake_status;
+-
+-	if (!device_may_wakeup(&client->dev)) {
+-		ret = regulator_bulk_enable(ARRAY_SIZE(ihid->pdata.supplies),
+-					    ihid->pdata.supplies);
+-		if (ret)
+-			hid_warn(hid, "Failed to enable supplies: %d\n", ret);
+-
+-		if (ihid->pdata.post_power_delay_ms)
+-			msleep(ihid->pdata.post_power_delay_ms);
+-	} else if (ihid->irq_wake_enabled) {
+-		wake_status = disable_irq_wake(client->irq);
+-		if (!wake_status)
+-			ihid->irq_wake_enabled = false;
+-		else
+-			hid_warn(hid, "Failed to disable irq wake: %d\n",
+-				wake_status);
+-	}
+-
+-	/* We'll resume to full power */
+-	pm_runtime_disable(dev);
+-	pm_runtime_set_active(dev);
+-	pm_runtime_enable(dev);
+-
+-	enable_irq(client->irq);
+-
+-	/* Instead of resetting device, simply powers the device on. This
+-	 * solves "incomplete reports" on Raydium devices 2386:3118 and
+-	 * 2386:4B33 and fixes various SIS touchscreens no longer sending
+-	 * data after a suspend/resume.
+-	 */
+-	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+-	if (ret)
+-		return ret;
+-
+-	if (hid->driver && hid->driver->reset_resume) {
+-		ret = hid->driver->reset_resume(hid);
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-#endif
+-
+-#ifdef CONFIG_PM
+-static int i2c_hid_runtime_suspend(struct device *dev)
+-{
+-	struct i2c_client *client = to_i2c_client(dev);
+-
+-	i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+-	disable_irq(client->irq);
+-	return 0;
+-}
+-
+-static int i2c_hid_runtime_resume(struct device *dev)
+-{
+-	struct i2c_client *client = to_i2c_client(dev);
+-
+-	enable_irq(client->irq);
+-	i2c_hid_set_power(client, I2C_HID_PWR_ON);
+-	return 0;
+-}
+-#endif
+-
+-static const struct dev_pm_ops i2c_hid_pm = {
+-	SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
+-	SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
+-			   NULL)
+-};
+-
+-static const struct i2c_device_id i2c_hid_id_table[] = {
+-	{ "hid", 0 },
+-	{ "hid-over-i2c", 0 },
+-	{ },
+-};
+-MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
+-
+-
+-static struct i2c_driver i2c_hid_driver = {
+-	.driver = {
+-		.name	= "i2c_hid",
+-		.pm	= &i2c_hid_pm,
+-		.acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
+-		.of_match_table = of_match_ptr(i2c_hid_of_match),
+-	},
+-
+-	.probe		= i2c_hid_probe,
+-	.remove		= i2c_hid_remove,
+-	.shutdown	= i2c_hid_shutdown,
+-	.id_table	= i2c_hid_id_table,
+-};
+-
+-module_i2c_driver(i2c_hid_driver);
+-
+-MODULE_DESCRIPTION("HID over I2C core driver");
+-MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
+new file mode 100644
+index 000000000000..a8c19aef5824
+--- /dev/null
++++ b/drivers/hid/i2c-hid/i2c-hid.h
+@@ -0,0 +1,20 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++
++#ifndef I2C_HID_H
++#define I2C_HID_H
++
++
++#ifdef CONFIG_DMI
++struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
++char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
++					       unsigned int *size);
++#else
++static inline struct i2c_hid_desc
++		   *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
++{ return NULL; }
++static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
++							     unsigned int *size)
++{ return NULL; }
++#endif
++
++#endif
+diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
+index 45b2460f3166..e8819d750938 100644
+--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
++++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
+@@ -668,6 +668,10 @@ static const struct amba_id debug_ids[] = {
+ 		.id	= 0x000bbd08,
+ 		.mask	= 0x000fffff,
+ 	},
++	{       /* Debug for Cortex-A73 */
++		.id	= 0x000bbd09,
++		.mask	= 0x000fffff,
++	},
+ 	{ 0, 0 },
+ };
+ 
+diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
+index 9b1e84a6b1cc..63c5ba66b305 100644
+--- a/drivers/infiniband/hw/hfi1/qp.c
++++ b/drivers/infiniband/hw/hfi1/qp.c
+@@ -784,7 +784,7 @@ void notify_error_qp(struct rvt_qp *qp)
+ 		write_seqlock(lock);
+ 		if (!list_empty(&priv->s_iowait.list) &&
+ 		    !(qp->s_flags & RVT_S_BUSY)) {
+-			qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
++			qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ 			list_del_init(&priv->s_iowait.list);
+ 			priv->s_iowait.lock = NULL;
+ 			rvt_put_qp(qp);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+index a9ea966877f2..dda8e79d4b27 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
+@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
+ 
+ 		rcu_read_lock();
+ 		in = __in_dev_get_rcu(upper_dev);
+-		local_ipaddr = ntohl(in->ifa_list->ifa_address);
++
++		if (!in->ifa_list)
++			local_ipaddr = 0;
++		else
++			local_ipaddr = ntohl(in->ifa_list->ifa_address);
++
+ 		rcu_read_unlock();
+ 	} else {
+ 		local_ipaddr = ntohl(ifa->ifa_address);
+@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
+ 	case NETDEV_UP:
+ 		/* Fall through */
+ 	case NETDEV_CHANGEADDR:
++
++		/* Just skip if no need to handle ARP cache */
++		if (!local_ipaddr)
++			break;
++
+ 		i40iw_manage_arp_cache(iwdev,
+ 				       netdev->dev_addr,
+ 				       &local_ipaddr,
+diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
+index 155b4dfc0ae8..baab9afa9174 100644
+--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
++++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
+@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
+ 	unsigned long flags;
+ 
+ 	for (i = 0 ; i < dev->num_ports; i++) {
+-		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
+ 		det = &sriov->alias_guid.ports_guid[i];
++		cancel_delayed_work_sync(&det->alias_guid_work);
+ 		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
+ 		while (!list_empty(&det->cb_list)) {
+ 			cb_ctx = list_entry(det->cb_list.next,
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index d9c748b6f9e4..7f9824b0609e 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
+ 		for (tmp = dev; tmp; tmp = tmp->bus->self)
+ 			level++;
+ 
+-	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
++	size = sizeof(*info) + level * sizeof(info->path[0]);
+ 	if (size <= sizeof(dmar_pci_notify_info_buf)) {
+ 		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
+ 	} else {
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 2b8f5ebae821..603bf5233a99 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1624,6 +1624,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
+ 	u32 pmen;
+ 	unsigned long flags;
+ 
++	if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
++		return;
++
+ 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ 	pmen = readl(iommu->reg + DMAR_PMEN_REG);
+ 	pmen &= ~DMA_PMEN_EPM;
+diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
+index 567b29c47608..98b6e1d4b1a6 100644
+--- a/drivers/irqchip/irq-mbigen.c
++++ b/drivers/irqchip/irq-mbigen.c
+@@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
+ 	void __iomem *base = d->chip_data;
+ 	u32 val;
+ 
++	if (!msg->address_lo && !msg->address_hi)
++		return;
++ 
+ 	base += get_mbigen_vec_reg(d->hwirq);
+ 	val = readl_relaxed(base);
+ 
+diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
+index 0a2088e12d96..97b27f338c30 100644
+--- a/drivers/irqchip/irq-stm32-exti.c
++++ b/drivers/irqchip/irq-stm32-exti.c
+@@ -650,11 +650,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
+ 	 */
+ 	writel_relaxed(0, base + stm32_bank->imr_ofst);
+ 	writel_relaxed(0, base + stm32_bank->emr_ofst);
+-	writel_relaxed(0, base + stm32_bank->rtsr_ofst);
+-	writel_relaxed(0, base + stm32_bank->ftsr_ofst);
+-	writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
+-	if (stm32_bank->fpr_ofst != UNDEF_REG)
+-		writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
+ 
+ 	pr_info("%s: bank%d, External IRQs available:%#x\n",
+ 		node->full_name, bank_idx, irqs_mask);
+diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
+index cd363a2100d4..257ae0d8cfe2 100644
+--- a/drivers/media/usb/au0828/au0828-core.c
++++ b/drivers/media/usb/au0828/au0828-core.c
+@@ -629,7 +629,6 @@ static int au0828_usb_probe(struct usb_interface *interface,
+ 		pr_err("%s() au0282_dev_register failed to register on V4L2\n",
+ 			__func__);
+ 		mutex_unlock(&dev->lock);
+-		kfree(dev);
+ 		goto done;
+ 	}
+ 
+diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
+index 2154d1bfd18b..07caaa2cfe1e 100644
+--- a/drivers/misc/lkdtm/core.c
++++ b/drivers/misc/lkdtm/core.c
+@@ -152,7 +152,9 @@ static const struct crashtype crashtypes[] = {
+ 	CRASHTYPE(EXEC_VMALLOC),
+ 	CRASHTYPE(EXEC_RODATA),
+ 	CRASHTYPE(EXEC_USERSPACE),
++	CRASHTYPE(EXEC_NULL),
+ 	CRASHTYPE(ACCESS_USERSPACE),
++	CRASHTYPE(ACCESS_NULL),
+ 	CRASHTYPE(WRITE_RO),
+ 	CRASHTYPE(WRITE_RO_AFTER_INIT),
+ 	CRASHTYPE(WRITE_KERN),
+diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
+index 9e513dcfd809..8c3f2e6af256 100644
+--- a/drivers/misc/lkdtm/lkdtm.h
++++ b/drivers/misc/lkdtm/lkdtm.h
+@@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void);
+ void lkdtm_EXEC_VMALLOC(void);
+ void lkdtm_EXEC_RODATA(void);
+ void lkdtm_EXEC_USERSPACE(void);
++void lkdtm_EXEC_NULL(void);
+ void lkdtm_ACCESS_USERSPACE(void);
++void lkdtm_ACCESS_NULL(void);
+ 
+ /* lkdtm_refcount.c */
+ void lkdtm_REFCOUNT_INC_OVERFLOW(void);
+diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
+index 53b85c9d16b8..62f76d506f04 100644
+--- a/drivers/misc/lkdtm/perms.c
++++ b/drivers/misc/lkdtm/perms.c
+@@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write)
+ {
+ 	void (*func)(void) = dst;
+ 
+-	pr_info("attempting ok execution at %p\n", do_nothing);
++	pr_info("attempting ok execution at %px\n", do_nothing);
+ 	do_nothing();
+ 
+ 	if (write == CODE_WRITE) {
+@@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write)
+ 		flush_icache_range((unsigned long)dst,
+ 				   (unsigned long)dst + EXEC_SIZE);
+ 	}
+-	pr_info("attempting bad execution at %p\n", func);
++	pr_info("attempting bad execution at %px\n", func);
+ 	func();
+ }
+ 
+@@ -66,14 +66,14 @@ static void execute_user_location(void *dst)
+ 	/* Intentionally crossing kernel/user memory boundary. */
+ 	void (*func)(void) = dst;
+ 
+-	pr_info("attempting ok execution at %p\n", do_nothing);
++	pr_info("attempting ok execution at %px\n", do_nothing);
+ 	do_nothing();
+ 
+ 	copied = access_process_vm(current, (unsigned long)dst, do_nothing,
+ 				   EXEC_SIZE, FOLL_WRITE);
+ 	if (copied < EXEC_SIZE)
+ 		return;
+-	pr_info("attempting bad execution at %p\n", func);
++	pr_info("attempting bad execution at %px\n", func);
+ 	func();
+ }
+ 
+@@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void)
+ 	/* Explicitly cast away "const" for the test. */
+ 	unsigned long *ptr = (unsigned long *)&rodata;
+ 
+-	pr_info("attempting bad rodata write at %p\n", ptr);
++	pr_info("attempting bad rodata write at %px\n", ptr);
+ 	*ptr ^= 0xabcd1234;
+ }
+ 
+@@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
+ 		return;
+ 	}
+ 
+-	pr_info("attempting bad ro_after_init write at %p\n", ptr);
++	pr_info("attempting bad ro_after_init write at %px\n", ptr);
+ 	*ptr ^= 0xabcd1234;
+ }
+ 
+@@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void)
+ 	size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
+ 	ptr = (unsigned char *)do_overwritten;
+ 
+-	pr_info("attempting bad %zu byte write at %p\n", size, ptr);
++	pr_info("attempting bad %zu byte write at %px\n", size, ptr);
+ 	memcpy(ptr, (unsigned char *)do_nothing, size);
+ 	flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+ 
+@@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void)
+ 	vm_munmap(user_addr, PAGE_SIZE);
+ }
+ 
++void lkdtm_EXEC_NULL(void)
++{
++	execute_location(NULL, CODE_AS_IS);
++}
++
+ void lkdtm_ACCESS_USERSPACE(void)
+ {
+ 	unsigned long user_addr, tmp = 0;
+@@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void)
+ 
+ 	ptr = (unsigned long *)user_addr;
+ 
+-	pr_info("attempting bad read at %p\n", ptr);
++	pr_info("attempting bad read at %px\n", ptr);
+ 	tmp = *ptr;
+ 	tmp += 0xc0dec0de;
+ 
+-	pr_info("attempting bad write at %p\n", ptr);
++	pr_info("attempting bad write at %px\n", ptr);
+ 	*ptr = tmp;
+ 
+ 	vm_munmap(user_addr, PAGE_SIZE);
+ }
+ 
++void lkdtm_ACCESS_NULL(void)
++{
++	unsigned long tmp;
++	unsigned long *ptr = (unsigned long *)NULL;
++
++	pr_info("attempting bad read at %px\n", ptr);
++	tmp = *ptr;
++	tmp += 0xc0dec0de;
++
++	pr_info("attempting bad write at %px\n", ptr);
++	*ptr = tmp;
++}
++
+ void __init lkdtm_perms_init(void)
+ {
+ 	/* Make sure we can write to __ro_after_init values during __init */
+diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
+index 9e68c3645e22..e6f14257a7d0 100644
+--- a/drivers/mmc/host/davinci_mmc.c
++++ b/drivers/mmc/host/davinci_mmc.c
+@@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
+ {
+ }
+ #endif
+-static void __init init_mmcsd_host(struct mmc_davinci_host *host)
++static void init_mmcsd_host(struct mmc_davinci_host *host)
+ {
+ 
+ 	mmc_davinci_reset_ctrl(host, 1);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+index afed0f0f4027..c0c75c111abb 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
+ 
+ 		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+ 		stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
+-				STMMAC_RING_MODE, 0, false, skb->len);
++				STMMAC_RING_MODE, 1, false, skb->len);
+ 		tx_q->tx_skbuff[entry] = NULL;
+ 		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ 
+@@ -91,7 +91,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
+ 		tx_q->tx_skbuff_dma[entry].is_jumbo = true;
+ 		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+ 		stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
+-				STMMAC_RING_MODE, 0, true, skb->len);
++				STMMAC_RING_MODE, 1, true, skb->len);
+ 	}
+ 
+ 	tx_q->cur_tx = entry;
+diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
+index d9ff3b8be86e..60f1f286b030 100644
+--- a/drivers/net/wireless/rsi/rsi_common.h
++++ b/drivers/net/wireless/rsi/rsi_common.h
+@@ -75,7 +75,6 @@ static inline int rsi_kill_thread(struct rsi_thread *handle)
+ 	atomic_inc(&handle->thread_done);
+ 	rsi_set_event(&handle->event);
+ 
+-	wait_for_completion(&handle->completion);
+ 	return kthread_stop(handle->task);
+ }
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 7eb1549cea81..30649addc625 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -2489,6 +2489,25 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
+ 		pm_runtime_put_sync(parent);
+ }
+ 
++static const struct dmi_system_id bridge_d3_blacklist[] = {
++#ifdef CONFIG_X86
++	{
++		/*
++		 * Gigabyte X299 root port is not marked as hotplug capable
++		 * which allows Linux to power manage it.  However, this
++		 * confuses the BIOS SMI handler so don't power manage root
++		 * ports on that system.
++		 */
++		.ident = "X299 DESIGNARE EX-CF",
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
++			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
++		},
++	},
++#endif
++	{ }
++};
++
+ /**
+  * pci_bridge_d3_possible - Is it possible to put the bridge into D3
+  * @bridge: Bridge to check
+@@ -2530,6 +2549,9 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
+ 		if (bridge->is_hotplug_bridge)
+ 			return false;
+ 
++		if (dmi_check_system(bridge_d3_blacklist))
++			return false;
++
+ 		/*
+ 		 * It should be safe to put PCIe ports from 2015 or newer
+ 		 * to D3.
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index a3dd777e3ce8..c6ff4d5fa482 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -627,7 +627,7 @@ static int pinctrl_generic_group_name_to_selector(struct pinctrl_dev *pctldev,
+ 	while (selector < ngroups) {
+ 		const char *gname = ops->get_group_name(pctldev, selector);
+ 
+-		if (!strcmp(function, gname))
++		if (gname && !strcmp(function, gname))
+ 			return selector;
+ 
+ 		selector++;
+@@ -743,7 +743,7 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
+ 	while (group_selector < ngroups) {
+ 		const char *gname = pctlops->get_group_name(pctldev,
+ 							    group_selector);
+-		if (!strcmp(gname, pin_group)) {
++		if (gname && !strcmp(gname, pin_group)) {
+ 			dev_dbg(pctldev->dev,
+ 				"found group selector %u for %s\n",
+ 				group_selector,
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 7563c07e14e4..1e2524de6a63 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -1231,6 +1231,18 @@ config I2C_MULTI_INSTANTIATE
+ 	  To compile this driver as a module, choose M here: the module
+ 	  will be called i2c-multi-instantiate.
+ 
++config INTEL_ATOMISP2_PM
++	tristate "Intel AtomISP2 dummy / power-management driver"
++	depends on PCI && IOSF_MBI && PM
++	help
++	  Power-management driver for Intel's Image Signal Processor found on
++	  Bay and Cherry Trail devices. This dummy driver's sole purpose is to
++	  turn the ISP off (put it in D3) to save power and to allow entering
++	  of S0ix modes.
++
++	  To compile this driver as a module, choose M here: the module
++	  will be called intel_atomisp2_pm.
++
+ endif # X86_PLATFORM_DEVICES
+ 
+ config PMC_ATOM
+diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
+index e6d1becf81ce..dc29af4d8e2f 100644
+--- a/drivers/platform/x86/Makefile
++++ b/drivers/platform/x86/Makefile
+@@ -92,3 +92,4 @@ obj-$(CONFIG_MLX_PLATFORM)	+= mlx-platform.o
+ obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
+ obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN)	+= intel_chtdc_ti_pwrbtn.o
+ obj-$(CONFIG_I2C_MULTI_INSTANTIATE)	+= i2c-multi-instantiate.o
++obj-$(CONFIG_INTEL_ATOMISP2_PM)	+= intel_atomisp2_pm.o
+diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c
+new file mode 100644
+index 000000000000..9371603a0ac9
+--- /dev/null
++++ b/drivers/platform/x86/intel_atomisp2_pm.c
+@@ -0,0 +1,119 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry
++ * Trail devices. The sole purpose of this driver is to allow the ISP to
++ * be put in D3.
++ *
++ * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com>
++ *
++ * Based on various non upstream patches for ISP support:
++ * Copyright (C) 2010-2017 Intel Corporation. All rights reserved.
++ * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
++ */
++
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/pci.h>
++#include <linux/pm_runtime.h>
++#include <asm/iosf_mbi.h>
++
++/* PCI configuration regs */
++#define PCI_INTERRUPT_CTRL		0x9c
++
++#define PCI_CSI_CONTROL			0xe8
++#define PCI_CSI_CONTROL_PORTS_OFF_MASK	0x7
++
++/* IOSF BT_MBI_UNIT_PMC regs */
++#define ISPSSPM0			0x39
++#define ISPSSPM0_ISPSSC_OFFSET		0
++#define ISPSSPM0_ISPSSC_MASK		0x00000003
++#define ISPSSPM0_ISPSSS_OFFSET		24
++#define ISPSSPM0_ISPSSS_MASK		0x03000000
++#define ISPSSPM0_IUNIT_POWER_ON		0x0
++#define ISPSSPM0_IUNIT_POWER_OFF	0x3
++
++static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
++{
++	unsigned long timeout;
++	u32 val;
++
++	pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, 0);
++
++	/*
++	 * The MRFLD IUNIT DPHY is located in an always-power-on island;
++	 * the MRFLD HW design needs all CSI ports to be disabled before
++	 * powering down the IUNIT.
++	 */
++	pci_read_config_dword(dev, PCI_CSI_CONTROL, &val);
++	val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
++	pci_write_config_dword(dev, PCI_CSI_CONTROL, val);
++
++	/* Write 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */
++	iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
++			ISPSSPM0_IUNIT_POWER_OFF, ISPSSPM0_ISPSSC_MASK);
++
++	/*
++	 * There should be no IUNIT access while power-down is
++	 * in progress (HW sighting: 4567865).
++	 * Wait up to 50 ms for the IUNIT to shut down.
++	 */
++	timeout = jiffies + msecs_to_jiffies(50);
++	while (1) {
++		/* Wait until ISPSSPM0 bit[25:24] shows 0x3 */
++		iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &val);
++		val = (val & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
++		if (val == ISPSSPM0_IUNIT_POWER_OFF)
++			break;
++
++		if (time_after(jiffies, timeout)) {
++			dev_err(&dev->dev, "IUNIT power-off timeout.\n");
++			return -EBUSY;
++		}
++		usleep_range(1000, 2000);
++	}
++
++	pm_runtime_allow(&dev->dev);
++	pm_runtime_put_sync_suspend(&dev->dev);
++
++	return 0;
++}
++
++static void isp_remove(struct pci_dev *dev)
++{
++	pm_runtime_get_sync(&dev->dev);
++	pm_runtime_forbid(&dev->dev);
++}
++
++static int isp_pci_suspend(struct device *dev)
++{
++	return 0;
++}
++
++static int isp_pci_resume(struct device *dev)
++{
++	return 0;
++}
++
++static UNIVERSAL_DEV_PM_OPS(isp_pm_ops, isp_pci_suspend,
++			    isp_pci_resume, NULL);
++
++static const struct pci_device_id isp_id_table[] = {
++	{ PCI_VDEVICE(INTEL, 0x22b8), },
++	{ 0, }
++};
++MODULE_DEVICE_TABLE(pci, isp_id_table);
++
++static struct pci_driver isp_pci_driver = {
++	.name = "intel_atomisp2_pm",
++	.id_table = isp_id_table,
++	.probe = isp_probe,
++	.remove = isp_remove,
++	.driver.pm = &isp_pm_ops,
++};
++
++module_pci_driver(isp_pci_driver);
++
++MODULE_DESCRIPTION("Intel AtomISP2 dummy / power-management drv (for suspend)");
++MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 18e4289baf99..655790f30434 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -3095,7 +3095,6 @@ void scsi_device_resume(struct scsi_device *sdev)
+ 	 * device deleted during suspend)
+ 	 */
+ 	mutex_lock(&sdev->state_mutex);
+-	WARN_ON_ONCE(!sdev->quiesced_by);
+ 	sdev->quiesced_by = NULL;
+ 	blk_clear_preempt_only(sdev->request_queue);
+ 	if (sdev->sdev_state == SDEV_QUIESCE)
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 6fd2fe210fc3..4d0fc6b01fa0 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -2185,6 +2185,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
+ 	scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
+ 	/* flush running scans then delete devices */
+ 	flush_work(&session->scan_work);
++	/* flush running unbind operations */
++	flush_work(&session->unbind_work);
+ 	__iscsi_unbind_session(&session->unbind_work);
+ 
+ 	/* hw iscsi may not have removed all connections from session */
+diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
+index ed71a4c9c8b2..4b452f36f054 100644
+--- a/drivers/soc/tegra/pmc.c
++++ b/drivers/soc/tegra/pmc.c
+@@ -524,16 +524,10 @@ EXPORT_SYMBOL(tegra_powergate_power_off);
+  */
+ int tegra_powergate_is_powered(unsigned int id)
+ {
+-	int status;
+-
+ 	if (!tegra_powergate_is_valid(id))
+ 		return -EINVAL;
+ 
+-	mutex_lock(&pmc->powergates_lock);
+-	status = tegra_powergate_state(id);
+-	mutex_unlock(&pmc->powergates_lock);
+-
+-	return status;
++	return tegra_powergate_state(id);
+ }
+ 
+ /**
+diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
+index 24b006a95142..8646fb7425f2 100644
+--- a/drivers/thermal/broadcom/bcm2835_thermal.c
++++ b/drivers/thermal/broadcom/bcm2835_thermal.c
+@@ -128,8 +128,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
+ 
+ static void bcm2835_thermal_debugfs(struct platform_device *pdev)
+ {
+-	struct thermal_zone_device *tz = platform_get_drvdata(pdev);
+-	struct bcm2835_thermal_data *data = tz->devdata;
++	struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
+ 	struct debugfs_regset32 *regset;
+ 
+ 	data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
+@@ -275,7 +274,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
+ 
+ 	data->tz = tz;
+ 
+-	platform_set_drvdata(pdev, tz);
++	platform_set_drvdata(pdev, data);
+ 
+ 	/*
+ 	 * Thermal_zone doesn't enable hwmon as default,
+@@ -299,8 +298,8 @@ err_clk:
+ 
+ static int bcm2835_thermal_remove(struct platform_device *pdev)
+ {
+-	struct thermal_zone_device *tz = platform_get_drvdata(pdev);
+-	struct bcm2835_thermal_data *data = tz->devdata;
++	struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
++	struct thermal_zone_device *tz = data->tz;
+ 
+ 	debugfs_remove_recursive(data->debugfsdir);
+ 	thermal_zone_of_sensor_unregister(&pdev->dev, tz);
+diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
+index e26b01c05e82..e9d58de8b5da 100644
+--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
++++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
+@@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
+ 	INT3400_THERMAL_PASSIVE_1,
+ 	INT3400_THERMAL_ACTIVE,
+ 	INT3400_THERMAL_CRITICAL,
++	INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
++	INT3400_THERMAL_EMERGENCY_CALL_MODE,
++	INT3400_THERMAL_PASSIVE_2,
++	INT3400_THERMAL_POWER_BOSS,
++	INT3400_THERMAL_VIRTUAL_SENSOR,
++	INT3400_THERMAL_COOLING_MODE,
++	INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
+ 	INT3400_THERMAL_MAXIMUM_UUID,
+ };
+ 
+@@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
+ 	"42A441D6-AE6A-462b-A84B-4A8CE79027D3",
+ 	"3A95C389-E4B8-4629-A526-C52C88626BAE",
+ 	"97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
++	"63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
++	"5349962F-71E6-431D-9AE8-0A635B710AEE",
++	"9E04115A-AE87-4D1C-9500-0F3E340BFE75",
++	"F5A35014-C209-46A4-993A-EB56DE7530A1",
++	"6ED722A7-9240-48A5-B479-31EEF723D7CF",
++	"16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
++	"BE84BABF-C4D4-403D-B495-3128FD44dAC1",
+ };
+ 
+ struct int3400_thermal_priv {
+@@ -302,10 +316,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, priv);
+ 
+-	if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
+-		int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
+-		int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
+-	}
++	int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
++	int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
++
+ 	priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
+ 						priv, &int3400_thermal_ops,
+ 						&int3400_thermal_params, 0, 0);
+diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
+index cde891c54cde..8e8328347c0e 100644
+--- a/drivers/thermal/intel_powerclamp.c
++++ b/drivers/thermal/intel_powerclamp.c
+@@ -101,7 +101,7 @@ struct powerclamp_worker_data {
+ 	bool clamping;
+ };
+ 
+-static struct powerclamp_worker_data * __percpu worker_data;
++static struct powerclamp_worker_data __percpu *worker_data;
+ static struct thermal_cooling_device *cooling_dev;
+ static unsigned long *cpu_clamping_mask;  /* bit map for tracking per cpu
+ 					   * clamping kthread worker
+@@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
+ 	struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
+ 	struct kthread_worker *worker;
+ 
+-	worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
++	worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
+ 	if (IS_ERR(worker))
+ 		return;
+ 
+diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
+index 48eef552cba4..fc9399d9c082 100644
+--- a/drivers/thermal/samsung/exynos_tmu.c
++++ b/drivers/thermal/samsung/exynos_tmu.c
+@@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp)
+ 	struct exynos_tmu_data *data = p;
+ 	int value, ret = 0;
+ 
+-	if (!data || !data->tmu_read || !data->enabled)
++	if (!data || !data->tmu_read)
+ 		return -EINVAL;
+ 	else if (!data->enabled)
+ 		/*
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index 0e3627289047..77efa0a43fe7 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -1223,7 +1223,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
+  *
+  * Return: 0 on success, negative errno otherwise.
+  */
+-static int __init cdns_uart_console_setup(struct console *co, char *options)
++static int cdns_uart_console_setup(struct console *co, char *options)
+ {
+ 	struct uart_port *port = console_port;
+ 
+diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
+index 89bac3d2f05b..619128b55837 100644
+--- a/fs/9p/v9fs.c
++++ b/fs/9p/v9fs.c
+@@ -61,6 +61,8 @@ enum {
+ 	Opt_cache_loose, Opt_fscache, Opt_mmap,
+ 	/* Access options */
+ 	Opt_access, Opt_posixacl,
++	/* Lock timeout option */
++	Opt_locktimeout,
+ 	/* Error token */
+ 	Opt_err
+ };
+@@ -80,6 +82,7 @@ static const match_table_t tokens = {
+ 	{Opt_cachetag, "cachetag=%s"},
+ 	{Opt_access, "access=%s"},
+ 	{Opt_posixacl, "posixacl"},
++	{Opt_locktimeout, "locktimeout=%u"},
+ 	{Opt_err, NULL}
+ };
+ 
+@@ -187,6 +190,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
+ #ifdef CONFIG_9P_FSCACHE
+ 	v9ses->cachetag = NULL;
+ #endif
++	v9ses->session_lock_timeout = P9_LOCK_TIMEOUT;
+ 
+ 	if (!opts)
+ 		return 0;
+@@ -359,6 +363,23 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
+ #endif
+ 			break;
+ 
++		case Opt_locktimeout:
++			r = match_int(&args[0], &option);
++			if (r < 0) {
++				p9_debug(P9_DEBUG_ERROR,
++					 "integer field, but no integer?\n");
++				ret = r;
++				continue;
++			}
++			if (option < 1) {
++				p9_debug(P9_DEBUG_ERROR,
++					 "locktimeout must be an integer greater than zero.\n");
++				ret = -EINVAL;
++				continue;
++			}
++			v9ses->session_lock_timeout = (long)option * HZ;
++			break;
++
+ 		default:
+ 			continue;
+ 		}
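
As a usage note (an illustration, not part of the patch): the new option takes a retry interval in seconds for blocked byte-range locks, so a mount line along the lines of "mount -t 9p -o trans=virtio,locktimeout=30 hostshare /mnt/9p" would retry a blocked lock every 30 seconds. The parsed value is multiplied by HZ before being stored in session_lock_timeout, values below 1 are rejected as shown above, and the mount tag and mountpoint in the example are made up.
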
+diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
+index 982e017acadb..129e5243a6bf 100644
+--- a/fs/9p/v9fs.h
++++ b/fs/9p/v9fs.h
+@@ -116,6 +116,7 @@ struct v9fs_session_info {
+ 	struct p9_client *clnt;	/* 9p client */
+ 	struct list_head slist; /* list of sessions registered with v9fs */
+ 	struct rw_semaphore rename_sem;
++	long session_lock_timeout; /* retry interval for blocking locks */
+ };
+ 
+ /* cache_validity flags */
+diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
+index 48db9a9f13f9..cb6c4031af55 100644
+--- a/fs/9p/vfs_dir.c
++++ b/fs/9p/vfs_dir.c
+@@ -105,7 +105,6 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
+ 	int err = 0;
+ 	struct p9_fid *fid;
+ 	int buflen;
+-	int reclen = 0;
+ 	struct p9_rdir *rdir;
+ 	struct kvec kvec;
+ 
+@@ -138,11 +137,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
+ 		while (rdir->head < rdir->tail) {
+ 			err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
+ 					  rdir->tail - rdir->head, &st);
+-			if (err) {
++			if (err <= 0) {
+ 				p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
+ 				return -EIO;
+ 			}
+-			reclen = st.size+2;
+ 
+ 			over = !dir_emit(ctx, st.name, strlen(st.name),
+ 					 v9fs_qid2ino(&st.qid), dt_type(&st));
+@@ -150,8 +148,8 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
+ 			if (over)
+ 				return 0;
+ 
+-			rdir->head += reclen;
+-			ctx->pos += reclen;
++			rdir->head += err;
++			ctx->pos += err;
+ 		}
+ 	}
+ }
+diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
+index c87e6d6ec069..05454a7e22dc 100644
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -154,6 +154,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
+ 	uint8_t status = P9_LOCK_ERROR;
+ 	int res = 0;
+ 	unsigned char fl_type;
++	struct v9fs_session_info *v9ses;
+ 
+ 	fid = filp->private_data;
+ 	BUG_ON(fid == NULL);
+@@ -189,6 +190,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
+ 	if (IS_SETLKW(cmd))
+ 		flock.flags = P9_LOCK_FLAGS_BLOCK;
+ 
++	v9ses = v9fs_inode2v9ses(file_inode(filp));
++
+ 	/*
+ 	 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
+ 	 * for lock request, keep on trying
+@@ -202,7 +205,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
+ 			break;
+ 		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
+ 			break;
+-		if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
++		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
++				!= 0)
+ 			break;
+ 		/*
+ 		 * p9_client_lock_dotl overwrites flock.client_id with the
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 020f49c15b30..b59ebed4f615 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -780,43 +780,50 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
+ 	} else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
+ 		   (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
+ 		      == 0)) {
+-			/*
+-			 * For SMB2 and later the backup intent flag is already
+-			 * sent if needed on open and there is no path based
+-			 * FindFirst operation to use to retry with
+-			 */
++		/*
++		 * For SMB2 and later the backup intent flag is already
++		 * sent if needed on open and there is no path based
++		 * FindFirst operation to use to retry with
++		 */
+ 
+-			srchinf = kzalloc(sizeof(struct cifs_search_info),
+-						GFP_KERNEL);
+-			if (srchinf == NULL) {
+-				rc = -ENOMEM;
+-				goto cgii_exit;
+-			}
++		srchinf = kzalloc(sizeof(struct cifs_search_info),
++					GFP_KERNEL);
++		if (srchinf == NULL) {
++			rc = -ENOMEM;
++			goto cgii_exit;
++		}
+ 
+-			srchinf->endOfSearch = false;
++		srchinf->endOfSearch = false;
++		if (tcon->unix_ext)
++			srchinf->info_level = SMB_FIND_FILE_UNIX;
++		else if ((tcon->ses->capabilities &
++			 tcon->ses->server->vals->cap_nt_find) == 0)
++			srchinf->info_level = SMB_FIND_FILE_INFO_STANDARD;
++		else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ 			srchinf->info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
++		else /* no srvino useful for fallback to some netapp */
++			srchinf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+ 
+-			srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
+-					CIFS_SEARCH_CLOSE_AT_END |
+-					CIFS_SEARCH_BACKUP_SEARCH;
++		srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
++				CIFS_SEARCH_CLOSE_AT_END |
++				CIFS_SEARCH_BACKUP_SEARCH;
+ 
+-			rc = CIFSFindFirst(xid, tcon, full_path,
+-				cifs_sb, NULL, srchflgs, srchinf, false);
+-			if (!rc) {
+-				data =
+-				(FILE_ALL_INFO *)srchinf->srch_entries_start;
++		rc = CIFSFindFirst(xid, tcon, full_path,
++			cifs_sb, NULL, srchflgs, srchinf, false);
++		if (!rc) {
++			data = (FILE_ALL_INFO *)srchinf->srch_entries_start;
+ 
+-				cifs_dir_info_to_fattr(&fattr,
+-				(FILE_DIRECTORY_INFO *)data, cifs_sb);
+-				fattr.cf_uniqueid = le64_to_cpu(
+-				((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
+-				validinum = true;
++			cifs_dir_info_to_fattr(&fattr,
++			(FILE_DIRECTORY_INFO *)data, cifs_sb);
++			fattr.cf_uniqueid = le64_to_cpu(
++			((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
++			validinum = true;
+ 
+-				cifs_buf_release(srchinf->ntwrk_buf_start);
+-			}
+-			kfree(srchinf);
+-			if (rc)
+-				goto cgii_exit;
++			cifs_buf_release(srchinf->ntwrk_buf_start);
++		}
++		kfree(srchinf);
++		if (rc)
++			goto cgii_exit;
+ 	} else
+ 		goto cgii_exit;
+ 
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+index c3ae8c1d6089..18814f1d67d9 100644
+--- a/fs/cifs/smb2maperror.c
++++ b/fs/cifs/smb2maperror.c
+@@ -1036,7 +1036,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ 	{STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
+ 	"STATUS_UNFINISHED_CONTEXT_DELETED"},
+ 	{STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
+-	{STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
++	/* Note that ENOATTR and ENODATA are the same errno */
++	{STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
+ 	{STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
+ 	{STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
+ 	"STATUS_WRONG_CREDENTIAL_HANDLE"},
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 2e76fb55d94a..5f24fdc140ad 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -999,6 +999,13 @@ resizefs_out:
+ 		if (!blk_queue_discard(q))
+ 			return -EOPNOTSUPP;
+ 
++		/*
++		 * We haven't replayed the journal, so we cannot use our
++		 * block-bitmap-guided storage zapping commands.
++		 */
++		if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
++			return -EROFS;
++
+ 		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
+ 		    sizeof(range)))
+ 			return -EFAULT;
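
In practice this means a filesystem mounted along the lines of "mount -o ro,noload /dev/sdX /mnt" (a hypothetical device name) that still carries a journal feature will now see FITRIM — for example a plain "fstrim /mnt" — fail with EROFS, instead of trimming blocks that the unreplayed journal may still reference.
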
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 3d9b18505c0c..e7ae26e36c9c 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -932,11 +932,18 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+ 	memcpy(n_group_desc, o_group_desc,
+ 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+ 	n_group_desc[gdb_num] = gdb_bh;
++
++	BUFFER_TRACE(gdb_bh, "get_write_access");
++	err = ext4_journal_get_write_access(handle, gdb_bh);
++	if (err) {
++		kvfree(n_group_desc);
++		brelse(gdb_bh);
++		return err;
++	}
++
+ 	EXT4_SB(sb)->s_group_desc = n_group_desc;
+ 	EXT4_SB(sb)->s_gdb_count++;
+ 	kvfree(o_group_desc);
+-	BUFFER_TRACE(gdb_bh, "get_write_access");
+-	err = ext4_journal_get_write_access(handle, gdb_bh);
+ 	return err;
+ }
+ 
+@@ -2073,6 +2080,10 @@ out:
+ 		free_flex_gd(flex_gd);
+ 	if (resize_inode != NULL)
+ 		iput(resize_inode);
+-	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count);
++	if (err)
++		ext4_warning(sb, "error (%d) occurred during "
++			     "file system resize", err);
++	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
++		 ext4_blocks_count(es));
+ 	return err;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a1cf7d68b4f0..abba7ece78e9 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -430,6 +430,12 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
+ 	spin_unlock(&sbi->s_md_lock);
+ }
+ 
++static bool system_going_down(void)
++{
++	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
++		|| system_state == SYSTEM_RESTART;
++}
++
+ /* Deal with the reporting of failure conditions on a filesystem such as
+  * inconsistencies detected or read IO failures.
+  *
+@@ -460,7 +466,12 @@ static void ext4_handle_error(struct super_block *sb)
+ 		if (journal)
+ 			jbd2_journal_abort(journal, -EIO);
+ 	}
+-	if (test_opt(sb, ERRORS_RO)) {
++	/*
++	 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
++	 * could panic during 'reboot -f' as the underlying device got already
++	 * disabled.
++	 */
++	if (test_opt(sb, ERRORS_RO) || system_going_down()) {
+ 		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
+ 		/*
+ 		 * Make sure updated value of ->s_mount_flags will be visible
+@@ -468,8 +479,7 @@ static void ext4_handle_error(struct super_block *sb)
+ 		 */
+ 		smp_wmb();
+ 		sb->s_flags |= SB_RDONLY;
+-	}
+-	if (test_opt(sb, ERRORS_PANIC)) {
++	} else if (test_opt(sb, ERRORS_PANIC)) {
+ 		if (EXT4_SB(sb)->s_journal &&
+ 		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
+ 			return;
+diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
+index 214a968962a1..ebe649d9793c 100644
+--- a/fs/f2fs/debug.c
++++ b/fs/f2fs/debug.c
+@@ -190,8 +190,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
+ 	si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
+ 	si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
+ 	si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+-	if (f2fs_discard_en(sbi))
+-		si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
++	si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ 	si->base_mem += SIT_VBLOCK_MAP_SIZE;
+ 	if (sbi->segs_per_sec > 1)
+ 		si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index a3ba20e5946f..1f5d5f62bb77 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3409,11 +3409,20 @@ static inline int get_blkz_type(struct f2fs_sb_info *sbi,
+ }
+ #endif
+ 
+-static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
++static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
+ {
+-	struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
++	return f2fs_sb_has_blkzoned(sbi->sb);
++}
+ 
+-	return blk_queue_discard(q) || f2fs_sb_has_blkzoned(sbi->sb);
++static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
++{
++	return blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev));
++}
++
++static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
++{
++	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
++					f2fs_hw_should_discard(sbi);
+ }
+ 
+ static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 81c1dd635a8d..b3f46e3bec17 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -770,7 +770,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
+ {
+ 	struct inode *inode = d_inode(dentry);
+ 	int err;
+-	bool size_changed = false;
+ 
+ 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
+ 		return -EIO;
+@@ -830,8 +829,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
+ 		down_write(&F2FS_I(inode)->i_sem);
+ 		F2FS_I(inode)->last_disk_size = i_size_read(inode);
+ 		up_write(&F2FS_I(inode)->i_sem);
+-
+-		size_changed = true;
+ 	}
+ 
+ 	__setattr_copy(inode, attr);
+@@ -845,7 +842,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	}
+ 
+ 	/* file size may changed here */
+-	f2fs_mark_inode_dirty_sync(inode, size_changed);
++	f2fs_mark_inode_dirty_sync(inode, true);
+ 
+ 	/* inode change will produce dirty node pages flushed by checkpoint */
+ 	f2fs_balance_fs(F2FS_I_SB(inode), true);
+@@ -1983,7 +1980,7 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
+-	if (!blk_queue_discard(q))
++	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
+ 		return -EOPNOTSUPP;
+ 
+ 	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index 9a8579fb3a30..ae0e5f2e67b4 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -99,8 +99,12 @@ err_out:
+ 	return ERR_PTR(err);
+ }
+ 
+-static void del_fsync_inode(struct fsync_inode_entry *entry)
++static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
+ {
++	if (drop) {
++		/* inode should not be recovered, drop it */
++		f2fs_inode_synced(entry->inode);
++	}
+ 	iput(entry->inode);
+ 	list_del(&entry->list);
+ 	kmem_cache_free(fsync_entry_slab, entry);
+@@ -321,12 +325,12 @@ next:
+ 	return err;
+ }
+ 
+-static void destroy_fsync_dnodes(struct list_head *head)
++static void destroy_fsync_dnodes(struct list_head *head, int drop)
+ {
+ 	struct fsync_inode_entry *entry, *tmp;
+ 
+ 	list_for_each_entry_safe(entry, tmp, head, list)
+-		del_fsync_inode(entry);
++		del_fsync_inode(entry, drop);
+ }
+ 
+ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+@@ -561,7 +565,7 @@ out:
+ }
+ 
+ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
+-						struct list_head *dir_list)
++		struct list_head *tmp_inode_list, struct list_head *dir_list)
+ {
+ 	struct curseg_info *curseg;
+ 	struct page *page = NULL;
+@@ -615,7 +619,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
+ 		}
+ 
+ 		if (entry->blkaddr == blkaddr)
+-			del_fsync_inode(entry);
++			list_move_tail(&entry->list, tmp_inode_list);
+ next:
+ 		/* check next segment */
+ 		blkaddr = next_blkaddr_of_node(page);
+@@ -628,7 +632,7 @@ next:
+ 
+ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
+ {
+-	struct list_head inode_list;
++	struct list_head inode_list, tmp_inode_list;
+ 	struct list_head dir_list;
+ 	int err;
+ 	int ret = 0;
+@@ -659,6 +663,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
+ 	}
+ 
+ 	INIT_LIST_HEAD(&inode_list);
++	INIT_LIST_HEAD(&tmp_inode_list);
+ 	INIT_LIST_HEAD(&dir_list);
+ 
+ 	/* prevent checkpoint */
+@@ -677,11 +682,16 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
+ 	need_writecp = true;
+ 
+ 	/* step #2: recover data */
+-	err = recover_data(sbi, &inode_list, &dir_list);
++	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
+ 	if (!err)
+ 		f2fs_bug_on(sbi, !list_empty(&inode_list));
++	else {
++		/* restore s_flags to let iput() trash data */
++		sbi->sb->s_flags = s_flags;
++	}
+ skip:
+-	destroy_fsync_dnodes(&inode_list);
++	destroy_fsync_dnodes(&inode_list, err);
++	destroy_fsync_dnodes(&tmp_inode_list, err);
+ 
+ 	/* truncate meta pages to be used by the recovery */
+ 	truncate_inode_pages_range(META_MAPPING(sbi),
+@@ -690,13 +700,13 @@ skip:
+ 	if (err) {
+ 		truncate_inode_pages_final(NODE_MAPPING(sbi));
+ 		truncate_inode_pages_final(META_MAPPING(sbi));
++	} else {
++		clear_sbi_flag(sbi, SBI_POR_DOING);
+ 	}
+-
+-	clear_sbi_flag(sbi, SBI_POR_DOING);
+ 	mutex_unlock(&sbi->cp_mutex);
+ 
+ 	/* let's drop all the directory inodes for clean checkpoint */
+-	destroy_fsync_dnodes(&dir_list);
++	destroy_fsync_dnodes(&dir_list, err);
+ 
+ 	if (need_writecp) {
+ 		set_sbi_flag(sbi, SBI_IS_RECOVERED);
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 1fa6f8185766..ac038563273d 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -1744,11 +1744,11 @@ static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
+ 	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
+ 	int i;
+ 
+-	if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
++	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
+ 		return false;
+ 
+ 	if (!force) {
+-		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
++		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
+ 			SM_I(sbi)->dcc_info->nr_discards >=
+ 				SM_I(sbi)->dcc_info->max_discards)
+ 			return false;
+@@ -1854,7 +1854,7 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
+ 				dirty_i->nr_dirty[PRE]--;
+ 		}
+ 
+-		if (!test_opt(sbi, DISCARD))
++		if (!f2fs_realtime_discard_enable(sbi))
+ 			continue;
+ 
+ 		if (force && start >= cpc->trim_start &&
+@@ -2044,8 +2044,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+ 			del = 0;
+ 		}
+ 
+-		if (f2fs_discard_en(sbi) &&
+-			!f2fs_test_and_set_bit(offset, se->discard_map))
++		if (!f2fs_test_and_set_bit(offset, se->discard_map))
+ 			sbi->discard_blks--;
+ 
+ 		/* don't overwrite by SSR to keep node chain */
+@@ -2073,8 +2072,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+ 			del = 0;
+ 		}
+ 
+-		if (f2fs_discard_en(sbi) &&
+-			f2fs_test_and_clear_bit(offset, se->discard_map))
++		if (f2fs_test_and_clear_bit(offset, se->discard_map))
+ 			sbi->discard_blks++;
+ 	}
+ 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
+@@ -2690,7 +2688,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
+ 	 * discard option. User configuration looks like using runtime discard
+ 	 * or periodic fstrim instead of it.
+ 	 */
+-	if (test_opt(sbi, DISCARD))
++	if (f2fs_realtime_discard_enable(sbi))
+ 		goto out;
+ 
+ 	start_block = START_BLOCK(sbi, start_segno);
+@@ -3781,13 +3779,11 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
+ 			return -ENOMEM;
+ #endif
+ 
+-		if (f2fs_discard_en(sbi)) {
+-			sit_i->sentries[start].discard_map
+-				= f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
+-								GFP_KERNEL);
+-			if (!sit_i->sentries[start].discard_map)
+-				return -ENOMEM;
+-		}
++		sit_i->sentries[start].discard_map
++			= f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
++							GFP_KERNEL);
++		if (!sit_i->sentries[start].discard_map)
++			return -ENOMEM;
+ 	}
+ 
+ 	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+@@ -3935,18 +3931,16 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
+ 				total_node_blocks += se->valid_blocks;
+ 
+ 			/* build discard map only one time */
+-			if (f2fs_discard_en(sbi)) {
+-				if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+-					memset(se->discard_map, 0xff,
+-						SIT_VBLOCK_MAP_SIZE);
+-				} else {
+-					memcpy(se->discard_map,
+-						se->cur_valid_map,
+-						SIT_VBLOCK_MAP_SIZE);
+-					sbi->discard_blks +=
+-						sbi->blocks_per_seg -
+-						se->valid_blocks;
+-				}
++			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
++				memset(se->discard_map, 0xff,
++					SIT_VBLOCK_MAP_SIZE);
++			} else {
++				memcpy(se->discard_map,
++					se->cur_valid_map,
++					SIT_VBLOCK_MAP_SIZE);
++				sbi->discard_blks +=
++					sbi->blocks_per_seg -
++					se->valid_blocks;
+ 			}
+ 
+ 			if (sbi->segs_per_sec > 1)
+@@ -3984,16 +3978,13 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
+ 		if (IS_NODESEG(se->type))
+ 			total_node_blocks += se->valid_blocks;
+ 
+-		if (f2fs_discard_en(sbi)) {
+-			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+-				memset(se->discard_map, 0xff,
+-							SIT_VBLOCK_MAP_SIZE);
+-			} else {
+-				memcpy(se->discard_map, se->cur_valid_map,
+-							SIT_VBLOCK_MAP_SIZE);
+-				sbi->discard_blks += old_valid_blocks;
+-				sbi->discard_blks -= se->valid_blocks;
+-			}
++		if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
++			memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
++		} else {
++			memcpy(se->discard_map, se->cur_valid_map,
++						SIT_VBLOCK_MAP_SIZE);
++			sbi->discard_blks += old_valid_blocks;
++			sbi->discard_blks -= se->valid_blocks;
+ 		}
+ 
+ 		if (sbi->segs_per_sec > 1) {
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 79370b7fa9d2..2264f27fd26d 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -360,7 +360,6 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
+ static int parse_options(struct super_block *sb, char *options)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
+-	struct request_queue *q;
+ 	substring_t args[MAX_OPT_ARGS];
+ 	char *p, *name;
+ 	int arg = 0;
+@@ -415,14 +414,7 @@ static int parse_options(struct super_block *sb, char *options)
+ 				return -EINVAL;
+ 			break;
+ 		case Opt_discard:
+-			q = bdev_get_queue(sb->s_bdev);
+-			if (blk_queue_discard(q)) {
+-				set_opt(sbi, DISCARD);
+-			} else if (!f2fs_sb_has_blkzoned(sb)) {
+-				f2fs_msg(sb, KERN_WARNING,
+-					"mounting with \"discard\" option, but "
+-					"the device does not support discard");
+-			}
++			set_opt(sbi, DISCARD);
+ 			break;
+ 		case Opt_nodiscard:
+ 			if (f2fs_sb_has_blkzoned(sb)) {
+@@ -1033,7 +1025,8 @@ static void f2fs_put_super(struct super_block *sb)
+ 	/* be sure to wait for any on-going discard commands */
+ 	dropped = f2fs_wait_discard_bios(sbi);
+ 
+-	if (f2fs_discard_en(sbi) && !sbi->discard_blks && !dropped) {
++	if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
++					!sbi->discard_blks && !dropped) {
+ 		struct cp_control cpc = {
+ 			.reason = CP_UMOUNT | CP_TRIMMED,
+ 		};
+@@ -1403,8 +1396,7 @@ static void default_options(struct f2fs_sb_info *sbi)
+ 	set_opt(sbi, NOHEAP);
+ 	sbi->sb->s_flags |= SB_LAZYTIME;
+ 	set_opt(sbi, FLUSH_MERGE);
+-	if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
+-		set_opt(sbi, DISCARD);
++	set_opt(sbi, DISCARD);
+ 	if (f2fs_sb_has_blkzoned(sbi->sb))
+ 		set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ 	else
+@@ -1893,6 +1885,19 @@ void f2fs_quota_off_umount(struct super_block *sb)
+ 	}
+ }
+ 
++static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
++{
++	struct quota_info *dqopt = sb_dqopt(sb);
++	int type;
++
++	for (type = 0; type < MAXQUOTAS; type++) {
++		if (!dqopt->files[type])
++			continue;
++		f2fs_inode_synced(dqopt->files[type]);
++	}
++}
++
++
+ static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
+ {
+ 	*projid = F2FS_I(inode)->i_projid;
+@@ -2337,7 +2342,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ 	unsigned int segment_count_main;
+ 	unsigned int cp_pack_start_sum, cp_payload;
+ 	block_t user_block_count;
+-	int i;
++	int i, j;
+ 
+ 	total = le32_to_cpu(raw_super->segment_count);
+ 	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
+@@ -2378,11 +2383,43 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ 		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
+ 			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
+ 			return 1;
++		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
++			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
++				le32_to_cpu(ckpt->cur_node_segno[j])) {
++				f2fs_msg(sbi->sb, KERN_ERR,
++					"Node segment (%u, %u) has the same "
++					"segno: %u", i, j,
++					le32_to_cpu(ckpt->cur_node_segno[i]));
++				return 1;
++			}
++		}
+ 	}
+ 	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
+ 		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
+ 			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
+ 			return 1;
++		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
++			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
++				le32_to_cpu(ckpt->cur_data_segno[j])) {
++				f2fs_msg(sbi->sb, KERN_ERR,
++					"Data segment (%u, %u) has the same "
++					"segno: %u", i, j,
++					le32_to_cpu(ckpt->cur_data_segno[i]));
++				return 1;
++			}
++		}
++	}
++	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
++		for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
++			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
++				le32_to_cpu(ckpt->cur_data_segno[j])) {
++				f2fs_msg(sbi->sb, KERN_ERR,
++					"Node segment (%u) and Data segment (%u)"
++					" has the same segno: %u", i, j,
++					le32_to_cpu(ckpt->cur_node_segno[i]));
++				return 1;
++			}
++		}
+ 	}
+ 
+ 	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
+@@ -3107,10 +3144,10 @@ skip_recovery:
+ 
+ free_meta:
+ #ifdef CONFIG_QUOTA
++	f2fs_truncate_quota_inode_pages(sb);
+ 	if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb))
+ 		f2fs_quota_off_umount(sbi->sb);
+ #endif
+-	f2fs_sync_inode_meta(sbi);
+ 	/*
+ 	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
+ 	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 780bba695453..97a51690338e 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -519,8 +519,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
+ 	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
+ 	if (!fsn_mark)
+ 		return -ENOENT;
+-	else if (create)
+-		return -EEXIST;
++	else if (create) {
++		ret = -EEXIST;
++		goto out;
++	}
+ 
+ 	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
+ 
+@@ -548,6 +550,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
+ 	/* return the wd */
+ 	ret = i_mark->wd;
+ 
++out:
+ 	/* match the get from fsnotify_find_mark() */
+ 	fsnotify_put_mark(fsn_mark);
+ 
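
Before this change the early "return -EEXIST" skipped the fsnotify_put_mark() at the end of the function, leaking the reference taken by fsnotify_find_mark() whenever an existing watch was re-added with IN_MASK_CREATE. A small userspace illustration of the path being exercised (an assumed example, not from the patch; it needs a libc that defines IN_MASK_CREATE):

/* Re-adding a watch with IN_MASK_CREATE hits the -EEXIST branch above. */
#include <errno.h>
#include <stdio.h>
#include <sys/inotify.h>

int main(void)
{
	int fd = inotify_init1(IN_CLOEXEC);
	int wd1, wd2;

	if (fd < 0)
		return 1;

	wd1 = inotify_add_watch(fd, "/tmp", IN_CREATE);
	wd2 = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_MASK_CREATE);

	if (wd2 < 0 && errno == EEXIST)
		printf("second watch rejected, first watch is wd %d\n", wd1);
	return 0;
}
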
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index d297fe4472a9..d0137e3e585e 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head);
+ static DECLARE_RWSEM(kclist_lock);
+ static int kcore_need_update = 1;
+ 
++/*
++ * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
++ * Same as oldmem_pfn_is_ram in vmcore
++ */
++static int (*mem_pfn_is_ram)(unsigned long pfn);
++
++int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
++{
++	if (mem_pfn_is_ram)
++		return -EBUSY;
++	mem_pfn_is_ram = fn;
++	return 0;
++}
++
++static int pfn_is_ram(unsigned long pfn)
++{
++	if (mem_pfn_is_ram)
++		return mem_pfn_is_ram(pfn);
++	else
++		return 1;
++}
++
+ /* This doesn't grab kclist_lock, so it should only be used at init time. */
+ void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
+ 		       int type)
+@@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
+ 				goto out;
+ 			}
+ 			m = NULL;	/* skip the list anchor */
++		} else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
++			if (clear_user(buffer, tsz)) {
++				ret = -EFAULT;
++				goto out;
++			}
+ 		} else if (m->type == KCORE_VMALLOC) {
+ 			vread(buf, (char *)start, tsz);
+ 			/* we have to zero-fill user buffer even if no read */
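
A minimal sketch of how the new hook might be consumed — everything except register_mem_pfn_is_ram() itself (which this hunk adds, and which is __init, so registration has to happen from init code) is an assumption, including the callback and the platform helper it calls:

/* Hypothetical early-boot user of the /proc/kcore RAM-vs-non-RAM hook. */
#include <linux/init.h>
#include <linux/kcore.h>
#include <linux/types.h>

/* Placeholder: a platform-specific test assumed to be provided elsewhere. */
extern bool my_platform_pfn_is_ram(unsigned long pfn);

static int example_pfn_is_ram(unsigned long pfn)
{
	/* > 0 for RAM, 0 for non-RAM, < 0 on error — the hook's contract. */
	return my_platform_pfn_is_ram(pfn) ? 1 : 0;
}

static int __init example_register(void)
{
	/* Only one callback is accepted; a second registration returns -EBUSY. */
	return register_mem_pfn_is_ram(example_pfn_is_ram);
}
early_initcall(example_register);
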
+diff --git a/include/linux/atalk.h b/include/linux/atalk.h
+index 23f805562f4e..d5cfc0b15b76 100644
+--- a/include/linux/atalk.h
++++ b/include/linux/atalk.h
+@@ -158,19 +158,29 @@ extern int sysctl_aarp_retransmit_limit;
+ extern int sysctl_aarp_resolve_time;
+ 
+ #ifdef CONFIG_SYSCTL
+-extern void atalk_register_sysctl(void);
++extern int atalk_register_sysctl(void);
+ extern void atalk_unregister_sysctl(void);
+ #else
+-#define atalk_register_sysctl()		do { } while(0)
+-#define atalk_unregister_sysctl()	do { } while(0)
++static inline int atalk_register_sysctl(void)
++{
++	return 0;
++}
++static inline void atalk_unregister_sysctl(void)
++{
++}
+ #endif
+ 
+ #ifdef CONFIG_PROC_FS
+ extern int atalk_proc_init(void);
+ extern void atalk_proc_exit(void);
+ #else
+-#define atalk_proc_init()	({ 0; })
+-#define atalk_proc_exit()	do { } while(0)
++static inline int atalk_proc_init(void)
++{
++	return 0;
++}
++static inline void atalk_proc_exit(void)
++{
++}
+ #endif /* CONFIG_PROC_FS */
+ 
+ #endif /* __LINUX_ATALK_H__ */
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 269d376f5a11..81c2238b884c 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -124,7 +124,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ # define ASM_UNREACHABLE
+ #endif
+ #ifndef unreachable
+-# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
++# define unreachable() do {		\
++	annotate_unreachable();		\
++	__builtin_unreachable();	\
++} while (0)
+ #endif
+ 
+ /*
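
For context, a generic illustration (not from this patch) of where unreachable() sits in code: it tells the compiler a point cannot be reached, which the new definition expresses directly with __builtin_unreachable() rather than an empty infinite loop:

/* Illustrative only; not part of the patch. */
static int quadrant_sign(unsigned int quadrant)
{
	switch (quadrant & 0x3) {
	case 0: return  1;
	case 1: return  1;
	case 2: return -1;
	case 3: return -1;
	}
	/* All four masked values are handled above; this point is unreachable. */
	unreachable();
}
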
+diff --git a/include/linux/kcore.h b/include/linux/kcore.h
+index 8c3f8c14eeaa..c843f4a9c512 100644
+--- a/include/linux/kcore.h
++++ b/include/linux/kcore.h
+@@ -44,6 +44,8 @@ void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
+ 	m->vaddr = (unsigned long)vaddr;
+ 	kclist_add(m, addr, sz, KCORE_REMAP);
+ }
++
++extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
+ #else
+ static inline
+ void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 77221c16733a..7bd0a6f2ac2b 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -155,9 +155,9 @@ struct swap_extent {
+ /*
+  * Max bad pages in the new format..
+  */
+-#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
+ #define MAX_SWAP_BADPAGES \
+-	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
++	((offsetof(union swap_header, magic.magic) - \
++	  offsetof(union swap_header, info.badpages)) / sizeof(int))
+ 
+ enum {
+ 	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
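
Background on the replaced idiom, with a stand-alone illustration (ordinary userspace C, not kernel code): the old __swapoffset() macro computed a member offset by taking a member address through a null union swap_header pointer, which is formally undefined behaviour; offsetof() from <stddef.h> yields the same number without the null-pointer trick, and MAX_SWAP_BADPAGES is simply the count of ints that fit between the two members. The struct below is a made-up stand-in for that layout:

#include <stddef.h>
#include <stdio.h>

/* A made-up layout standing in for union swap_header's info/magic members. */
struct header {
	char info[128];
	char magic[16];
};

int main(void)
{
	size_t info_off  = offsetof(struct header, info);
	size_t magic_off = offsetof(struct header, magic);

	/* Number of ints that fit between the two members, as in MAX_SWAP_BADPAGES. */
	printf("%zu ints fit between info and magic\n",
	       (magic_off - info_off) / sizeof(int));
	return 0;
}
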
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 0db1b9b428b7..1dfb75057580 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -259,6 +259,8 @@ struct hci_dev {
+ 	__u16		le_max_tx_time;
+ 	__u16		le_max_rx_len;
+ 	__u16		le_max_rx_time;
++	__u8		le_max_key_size;
++	__u8		le_min_key_size;
+ 	__u16		discov_interleaved_timeout;
+ 	__u16		conn_info_min_age;
+ 	__u16		conn_info_max_age;
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index da588def3c61..5e3daf53b3d1 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -850,7 +850,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
+ 		xfrm_pol_put(pols[i]);
+ }
+ 
+-void __xfrm_state_destroy(struct xfrm_state *);
++void __xfrm_state_destroy(struct xfrm_state *, bool);
+ 
+ static inline void __xfrm_state_put(struct xfrm_state *x)
+ {
+@@ -860,7 +860,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x)
+ static inline void xfrm_state_put(struct xfrm_state *x)
+ {
+ 	if (refcount_dec_and_test(&x->refcnt))
+-		__xfrm_state_destroy(x);
++		__xfrm_state_destroy(x, false);
++}
++
++static inline void xfrm_state_put_sync(struct xfrm_state *x)
++{
++	if (refcount_dec_and_test(&x->refcnt))
++		__xfrm_state_destroy(x, true);
+ }
+ 
+ static inline void xfrm_state_hold(struct xfrm_state *x)
+@@ -1616,7 +1622,7 @@ struct xfrmk_spdinfo {
+ 
+ struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+ int xfrm_state_delete(struct xfrm_state *x);
+-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
++int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
+ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
+ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index 573d5b901fb1..6d182746afab 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -76,6 +76,7 @@ enum rxrpc_client_trace {
+ 	rxrpc_client_chan_disconnect,
+ 	rxrpc_client_chan_pass,
+ 	rxrpc_client_chan_unstarted,
++	rxrpc_client_chan_wait_failed,
+ 	rxrpc_client_cleanup,
+ 	rxrpc_client_count,
+ 	rxrpc_client_discard,
+@@ -275,6 +276,7 @@ enum rxrpc_tx_point {
+ 	EM(rxrpc_client_chan_disconnect,	"ChDisc") \
+ 	EM(rxrpc_client_chan_pass,		"ChPass") \
+ 	EM(rxrpc_client_chan_unstarted,		"ChUnst") \
++	EM(rxrpc_client_chan_wait_failed,	"ChWtFl") \
+ 	EM(rxrpc_client_cleanup,		"Clean ") \
+ 	EM(rxrpc_client_count,			"Count ") \
+ 	EM(rxrpc_client_discard,		"Discar") \
+diff --git a/include/uapi/linux/netfilter/xt_cgroup.h b/include/uapi/linux/netfilter/xt_cgroup.h
+index e96dfa1b34f7..b74e370d6133 100644
+--- a/include/uapi/linux/netfilter/xt_cgroup.h
++++ b/include/uapi/linux/netfilter/xt_cgroup.h
+@@ -22,4 +22,20 @@ struct xt_cgroup_info_v1 {
+ 	void		*priv __attribute__((aligned(8)));
+ };
+ 
++#define XT_CGROUP_PATH_MAX	512
++
++struct xt_cgroup_info_v2 {
++	__u8		has_path;
++	__u8		has_classid;
++	__u8		invert_path;
++	__u8		invert_classid;
++	union {
++		char	path[XT_CGROUP_PATH_MAX];
++		__u32	classid;
++	};
++
++	/* kernel internal data */
++	void		*priv __attribute__((aligned(8)));
++};
++
+ #endif /* _UAPI_XT_CGROUP_H */
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index 2ada5e21dfa6..4a8f390a2b82 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
+ }
+ EXPORT_SYMBOL(bpf_prog_get_type_path);
+ 
+-static void bpf_evict_inode(struct inode *inode)
+-{
+-	enum bpf_type type;
+-
+-	truncate_inode_pages_final(&inode->i_data);
+-	clear_inode(inode);
+-
+-	if (S_ISLNK(inode->i_mode))
+-		kfree(inode->i_link);
+-	if (!bpf_inode_type(inode, &type))
+-		bpf_any_put(inode->i_private, type);
+-}
+-
+ /*
+  * Display the mount options in /proc/mounts.
+  */
+@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
+ 	return 0;
+ }
+ 
++static void bpf_destroy_inode_deferred(struct rcu_head *head)
++{
++	struct inode *inode = container_of(head, struct inode, i_rcu);
++	enum bpf_type type;
++
++	if (S_ISLNK(inode->i_mode))
++		kfree(inode->i_link);
++	if (!bpf_inode_type(inode, &type))
++		bpf_any_put(inode->i_private, type);
++	free_inode_nonrcu(inode);
++}
++
++static void bpf_destroy_inode(struct inode *inode)
++{
++	call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
++}
++
+ static const struct super_operations bpf_super_ops = {
+ 	.statfs		= simple_statfs,
+ 	.drop_inode	= generic_delete_inode,
+ 	.show_options	= bpf_show_options,
+-	.evict_inode	= bpf_evict_inode,
++	.destroy_inode	= bpf_destroy_inode,
+ };
+ 
+ enum {
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index aa996a0854b9..87bd96399d1c 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7178,6 +7178,7 @@ static void perf_event_mmap_output(struct perf_event *event,
+ 	struct perf_output_handle handle;
+ 	struct perf_sample_data sample;
+ 	int size = mmap_event->event_id.header.size;
++	u32 type = mmap_event->event_id.header.type;
+ 	int ret;
+ 
+ 	if (!perf_event_mmap_match(event, data))
+@@ -7221,6 +7222,7 @@ static void perf_event_mmap_output(struct perf_event *event,
+ 	perf_output_end(&handle);
+ out:
+ 	mmap_event->event_id.header.size = size;
++	mmap_event->event_id.header.type = type;
+ }
+ 
+ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+diff --git a/kernel/hung_task.c b/kernel/hung_task.c
+index 9eca2371f189..4a9191617076 100644
+--- a/kernel/hung_task.c
++++ b/kernel/hung_task.c
+@@ -15,6 +15,7 @@
+ #include <linux/lockdep.h>
+ #include <linux/export.h>
+ #include <linux/sysctl.h>
++#include <linux/suspend.h>
+ #include <linux/utsname.h>
+ #include <linux/sched/signal.h>
+ #include <linux/sched/debug.h>
+@@ -240,6 +241,28 @@ void reset_hung_task_detector(void)
+ }
+ EXPORT_SYMBOL_GPL(reset_hung_task_detector);
+ 
++static bool hung_detector_suspended;
++
++static int hungtask_pm_notify(struct notifier_block *self,
++			      unsigned long action, void *hcpu)
++{
++	switch (action) {
++	case PM_SUSPEND_PREPARE:
++	case PM_HIBERNATION_PREPARE:
++	case PM_RESTORE_PREPARE:
++		hung_detector_suspended = true;
++		break;
++	case PM_POST_SUSPEND:
++	case PM_POST_HIBERNATION:
++	case PM_POST_RESTORE:
++		hung_detector_suspended = false;
++		break;
++	default:
++		break;
++	}
++	return NOTIFY_OK;
++}
++
+ /*
+  * kthread which checks for tasks stuck in D state
+  */
+@@ -259,7 +282,8 @@ static int watchdog(void *dummy)
+ 		interval = min_t(unsigned long, interval, timeout);
+ 		t = hung_timeout_jiffies(hung_last_checked, interval);
+ 		if (t <= 0) {
+-			if (!atomic_xchg(&reset_hung_task, 0))
++			if (!atomic_xchg(&reset_hung_task, 0) &&
++			    !hung_detector_suspended)
+ 				check_hung_uninterruptible_tasks(timeout);
+ 			hung_last_checked = jiffies;
+ 			continue;
+@@ -273,6 +297,10 @@ static int watchdog(void *dummy)
+ static int __init hung_task_init(void)
+ {
+ 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
++
++	/* Disable hung task detector on suspend */
++	pm_notifier(hungtask_pm_notify, 0);
++
+ 	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");
+ 
+ 	return 0;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 9a4f57d7e931..d7f409866cdf 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6930,7 +6930,7 @@ static int __maybe_unused cpu_period_quota_parse(char *buf,
+ {
+ 	char tok[21];	/* U64_MAX */
+ 
+-	if (!sscanf(buf, "%s %llu", tok, periodp))
++	if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
+ 		return -EINVAL;
+ 
+ 	*periodp *= NSEC_PER_USEC;
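
The width in "%20s" matters because tok is only 21 bytes: an unbounded "%s" would let a long token written to the cgroup file overrun the buffer, and checking for fewer than one successful conversion also covers sscanf() returning EOF. A self-contained illustration in plain userspace C (an assumed example mirroring the "cpu.max"-style input this helper parses, not kernel code):

#include <stdio.h>

int main(void)
{
	char tok[21];			/* 20 characters + terminating NUL */
	unsigned long long period = 0;

	/* "%20s" stops after 20 characters, so tok cannot be overrun;
	 * a bare "%s" places no limit on what gets copied into tok. */
	if (sscanf("max 100000", "%20s %llu", tok, &period) < 1)
		return 1;

	printf("tok=%s period=%llu\n", tok, period);
	return 0;
}
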
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 3fffad3bc8a8..217f81ecae17 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -50,10 +50,10 @@ struct sugov_cpu {
+ 
+ 	bool			iowait_boost_pending;
+ 	unsigned int		iowait_boost;
+-	unsigned int		iowait_boost_max;
+ 	u64			last_update;
+ 
+ 	unsigned long		bw_dl;
++	unsigned long		min;
+ 	unsigned long		max;
+ 
+ 	/* The field below is for single-CPU policies only: */
+@@ -283,8 +283,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
+ 	if (delta_ns <= TICK_NSEC)
+ 		return false;
+ 
+-	sg_cpu->iowait_boost = set_iowait_boost
+-		? sg_cpu->sg_policy->policy->min : 0;
++	sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
+ 	sg_cpu->iowait_boost_pending = set_iowait_boost;
+ 
+ 	return true;
+@@ -324,14 +323,13 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
+ 
+ 	/* Double the boost at each request */
+ 	if (sg_cpu->iowait_boost) {
+-		sg_cpu->iowait_boost <<= 1;
+-		if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
+-			sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
++		sg_cpu->iowait_boost =
++			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
+ 		return;
+ 	}
+ 
+ 	/* First wakeup after IO: start with minimum boost */
+-	sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
++	sg_cpu->iowait_boost = sg_cpu->min;
+ }
+ 
+ /**
+@@ -353,47 +351,38 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
+  * This mechanism is designed to boost high frequently IO waiting tasks, while
+  * being more conservative on tasks which does sporadic IO operations.
+  */
+-static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
+-			       unsigned long *util, unsigned long *max)
++static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
++					unsigned long util, unsigned long max)
+ {
+-	unsigned int boost_util, boost_max;
++	unsigned long boost;
+ 
+ 	/* No boost currently required */
+ 	if (!sg_cpu->iowait_boost)
+-		return;
++		return util;
+ 
+ 	/* Reset boost if the CPU appears to have been idle enough */
+ 	if (sugov_iowait_reset(sg_cpu, time, false))
+-		return;
++		return util;
+ 
+-	/*
+-	 * An IO waiting task has just woken up:
+-	 * allow to further double the boost value
+-	 */
+-	if (sg_cpu->iowait_boost_pending) {
+-		sg_cpu->iowait_boost_pending = false;
+-	} else {
++	if (!sg_cpu->iowait_boost_pending) {
+ 		/*
+-		 * Otherwise: reduce the boost value and disable it when we
+-		 * reach the minimum.
++		 * No boost pending; reduce the boost value.
+ 		 */
+ 		sg_cpu->iowait_boost >>= 1;
+-		if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
++		if (sg_cpu->iowait_boost < sg_cpu->min) {
+ 			sg_cpu->iowait_boost = 0;
+-			return;
++			return util;
+ 		}
+ 	}
+ 
++	sg_cpu->iowait_boost_pending = false;
++
+ 	/*
+-	 * Apply the current boost value: a CPU is boosted only if its current
+-	 * utilization is smaller then the current IO boost level.
++	 * @util is already in capacity scale; convert iowait_boost
++	 * into the same scale so we can compare.
+ 	 */
+-	boost_util = sg_cpu->iowait_boost;
+-	boost_max = sg_cpu->iowait_boost_max;
+-	if (*util * boost_max < *max * boost_util) {
+-		*util = boost_util;
+-		*max = boost_max;
+-	}
++	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
++	return max(boost, util);
+ }
+ 
+ #ifdef CONFIG_NO_HZ_COMMON
+@@ -440,7 +429,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
+ 
+ 	util = sugov_get_util(sg_cpu);
+ 	max = sg_cpu->max;
+-	sugov_iowait_apply(sg_cpu, time, &util, &max);
++	util = sugov_iowait_apply(sg_cpu, time, util, max);
+ 	next_f = get_next_freq(sg_policy, util, max);
+ 	/*
+ 	 * Do not reduce the frequency if the CPU has not been idle
+@@ -480,7 +469,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
+ 
+ 		j_util = sugov_get_util(j_sg_cpu);
+ 		j_max = j_sg_cpu->max;
+-		sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
++		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
+ 
+ 		if (j_util * max > j_max * util) {
+ 			util = j_util;
+@@ -817,7 +806,9 @@ static int sugov_start(struct cpufreq_policy *policy)
+ 		memset(sg_cpu, 0, sizeof(*sg_cpu));
+ 		sg_cpu->cpu			= cpu;
+ 		sg_cpu->sg_policy		= sg_policy;
+-		sg_cpu->iowait_boost_max	= policy->cpuinfo.max_freq;
++		sg_cpu->min			=
++			(SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
++			policy->cpuinfo.max_freq;
+ 	}
+ 
+ 	for_each_cpu(cpu, policy->cpus) {
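
A worked example with made-up numbers may help here (SCHED_CAPACITY_SCALE = 1024 and SCHED_CAPACITY_SHIFT = 10 are the kernel's actual values; the frequencies are assumptions). With cpuinfo.min_freq = 800000 kHz and cpuinfo.max_freq = 2000000 kHz, sg_cpu->min becomes 1024 * 800000 / 2000000 = 409. Each back-to-back I/O wakeup doubles the boost, 409 -> 818 -> 1024, where it is clamped at SCHED_CAPACITY_SCALE instead of the old iowait_boost_max frequency. Applied on a CPU whose capacity (sg_cpu->max) is 1024, a boost of 818 maps to (818 * 1024) >> 10 = 818 in utilization units, and sugov_iowait_apply() returns max(818, util), so the boost only lifts the frequency request while the measured utilization is below the boosted value.
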
+diff --git a/lib/div64.c b/lib/div64.c
+index 01c8602bb6ff..ee146bb4c558 100644
+--- a/lib/div64.c
++++ b/lib/div64.c
+@@ -109,7 +109,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+ 		quot = div_u64_rem(dividend, divisor, &rem32);
+ 		*remainder = rem32;
+ 	} else {
+-		int n = 1 + fls(high);
++		int n = fls(high);
+ 		quot = div_u64(dividend >> n, divisor >> n);
+ 
+ 		if (quot != 0)
+@@ -147,7 +147,7 @@ u64 div64_u64(u64 dividend, u64 divisor)
+ 	if (high == 0) {
+ 		quot = div_u64(dividend, divisor);
+ 	} else {
+-		int n = 1 + fls(high);
++		int n = fls(high);
+ 		quot = div_u64(dividend >> n, divisor >> n);
+ 
+ 		if (quot != 0)
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 7878da76abf2..2878dc4e9af6 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1547,6 +1547,10 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
+ 	if (is_zone_first_populated(pgdat, zone)) {
+ 		seq_printf(m, "\n  per-node stats");
+ 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
++			/* Skip hidden vmstat items. */
++			if (*vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
++					 NR_VM_NUMA_STAT_ITEMS] == '\0')
++				continue;
+ 			seq_printf(m, "\n      %-12s %lu",
+ 				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
+ 				NR_VM_NUMA_STAT_ITEMS],
+diff --git a/net/9p/protocol.c b/net/9p/protocol.c
+index ee32bbf12675..b4d80c533f89 100644
+--- a/net/9p/protocol.c
++++ b/net/9p/protocol.c
+@@ -571,9 +571,10 @@ int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
+ 	if (ret) {
+ 		p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
+ 		trace_9p_protocol_dump(clnt, &fake_pdu);
++		return ret;
+ 	}
+ 
+-	return ret;
++	return fake_pdu.offset;
+ }
+ EXPORT_SYMBOL(p9stat_read);
+ 
+diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
+index 8006295f8bd7..dda73991bb54 100644
+--- a/net/appletalk/atalk_proc.c
++++ b/net/appletalk/atalk_proc.c
+@@ -255,7 +255,7 @@ out_interface:
+ 	goto out;
+ }
+ 
+-void __exit atalk_proc_exit(void)
++void atalk_proc_exit(void)
+ {
+ 	remove_proc_entry("interface", atalk_proc_dir);
+ 	remove_proc_entry("route", atalk_proc_dir);
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index 9b6bc5abe946..795fbc6c06aa 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1910,12 +1910,16 @@ static const char atalk_err_snap[] __initconst =
+ /* Called by proto.c on kernel start up */
+ static int __init atalk_init(void)
+ {
+-	int rc = proto_register(&ddp_proto, 0);
++	int rc;
+ 
+-	if (rc != 0)
++	rc = proto_register(&ddp_proto, 0);
++	if (rc)
+ 		goto out;
+ 
+-	(void)sock_register(&atalk_family_ops);
++	rc = sock_register(&atalk_family_ops);
++	if (rc)
++		goto out_proto;
++
+ 	ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
+ 	if (!ddp_dl)
+ 		printk(atalk_err_snap);
+@@ -1923,12 +1927,33 @@ static int __init atalk_init(void)
+ 	dev_add_pack(&ltalk_packet_type);
+ 	dev_add_pack(&ppptalk_packet_type);
+ 
+-	register_netdevice_notifier(&ddp_notifier);
++	rc = register_netdevice_notifier(&ddp_notifier);
++	if (rc)
++		goto out_sock;
++
+ 	aarp_proto_init();
+-	atalk_proc_init();
+-	atalk_register_sysctl();
++	rc = atalk_proc_init();
++	if (rc)
++		goto out_aarp;
++
++	rc = atalk_register_sysctl();
++	if (rc)
++		goto out_proc;
+ out:
+ 	return rc;
++out_proc:
++	atalk_proc_exit();
++out_aarp:
++	aarp_cleanup_module();
++	unregister_netdevice_notifier(&ddp_notifier);
++out_sock:
++	dev_remove_pack(&ppptalk_packet_type);
++	dev_remove_pack(&ltalk_packet_type);
++	unregister_snap_client(ddp_dl);
++	sock_unregister(PF_APPLETALK);
++out_proto:
++	proto_unregister(&ddp_proto);
++	goto out;
+ }
+ module_init(atalk_init);
+ 
+diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
+index c744a853fa5f..d945b7c0176d 100644
+--- a/net/appletalk/sysctl_net_atalk.c
++++ b/net/appletalk/sysctl_net_atalk.c
+@@ -45,9 +45,12 @@ static struct ctl_table atalk_table[] = {
+ 
+ static struct ctl_table_header *atalk_table_header;
+ 
+-void atalk_register_sysctl(void)
++int __init atalk_register_sysctl(void)
+ {
+ 	atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
++	if (!atalk_table_header)
++		return -ENOMEM;
++	return 0;
+ }
+ 
+ void atalk_unregister_sysctl(void)
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 74b29c7d841c..a06f03047717 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -3084,6 +3084,8 @@ struct hci_dev *hci_alloc_dev(void)
+ 	hdev->le_max_tx_time = 0x0148;
+ 	hdev->le_max_rx_len = 0x001b;
+ 	hdev->le_max_rx_time = 0x0148;
++	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
++	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
+ 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
+ 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
+ 
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 73f7211d0431..a1c1b7e8a45c 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -88,9 +88,6 @@ struct smp_dev {
+ 	u8			local_rand[16];
+ 	bool			debug_key;
+ 
+-	u8			min_key_size;
+-	u8			max_key_size;
+-
+ 	struct crypto_cipher	*tfm_aes;
+ 	struct crypto_shash	*tfm_cmac;
+ 	struct crypto_kpp	*tfm_ecdh;
+@@ -720,7 +717,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
+ 	if (rsp == NULL) {
+ 		req->io_capability = conn->hcon->io_capability;
+ 		req->oob_flag = oob_flag;
+-		req->max_key_size = SMP_DEV(hdev)->max_key_size;
++		req->max_key_size = hdev->le_max_key_size;
+ 		req->init_key_dist = local_dist;
+ 		req->resp_key_dist = remote_dist;
+ 		req->auth_req = (authreq & AUTH_REQ_MASK(hdev));
+@@ -731,7 +728,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
+ 
+ 	rsp->io_capability = conn->hcon->io_capability;
+ 	rsp->oob_flag = oob_flag;
+-	rsp->max_key_size = SMP_DEV(hdev)->max_key_size;
++	rsp->max_key_size = hdev->le_max_key_size;
+ 	rsp->init_key_dist = req->init_key_dist & remote_dist;
+ 	rsp->resp_key_dist = req->resp_key_dist & local_dist;
+ 	rsp->auth_req = (authreq & AUTH_REQ_MASK(hdev));
+@@ -745,7 +742,7 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
+ 	struct hci_dev *hdev = conn->hcon->hdev;
+ 	struct smp_chan *smp = chan->data;
+ 
+-	if (max_key_size > SMP_DEV(hdev)->max_key_size ||
++	if (max_key_size > hdev->le_max_key_size ||
+ 	    max_key_size < SMP_MIN_ENC_KEY_SIZE)
+ 		return SMP_ENC_KEY_SIZE;
+ 
+@@ -3264,8 +3261,6 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
+ 	smp->tfm_aes = tfm_aes;
+ 	smp->tfm_cmac = tfm_cmac;
+ 	smp->tfm_ecdh = tfm_ecdh;
+-	smp->min_key_size = SMP_MIN_ENC_KEY_SIZE;
+-	smp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+ 
+ create_chan:
+ 	chan = l2cap_chan_create();
+@@ -3391,7 +3386,7 @@ static ssize_t le_min_key_size_read(struct file *file,
+ 	struct hci_dev *hdev = file->private_data;
+ 	char buf[4];
+ 
+-	snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->min_key_size);
++	snprintf(buf, sizeof(buf), "%2u\n", hdev->le_min_key_size);
+ 
+ 	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+ }
+@@ -3412,11 +3407,11 @@ static ssize_t le_min_key_size_write(struct file *file,
+ 
+ 	sscanf(buf, "%hhu", &key_size);
+ 
+-	if (key_size > SMP_DEV(hdev)->max_key_size ||
++	if (key_size > hdev->le_max_key_size ||
+ 	    key_size < SMP_MIN_ENC_KEY_SIZE)
+ 		return -EINVAL;
+ 
+-	SMP_DEV(hdev)->min_key_size = key_size;
++	hdev->le_min_key_size = key_size;
+ 
+ 	return count;
+ }
+@@ -3435,7 +3430,7 @@ static ssize_t le_max_key_size_read(struct file *file,
+ 	struct hci_dev *hdev = file->private_data;
+ 	char buf[4];
+ 
+-	snprintf(buf, sizeof(buf), "%2u\n", SMP_DEV(hdev)->max_key_size);
++	snprintf(buf, sizeof(buf), "%2u\n", hdev->le_max_key_size);
+ 
+ 	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+ }
+@@ -3457,10 +3452,10 @@ static ssize_t le_max_key_size_write(struct file *file,
+ 	sscanf(buf, "%hhu", &key_size);
+ 
+ 	if (key_size > SMP_MAX_ENC_KEY_SIZE ||
+-	    key_size < SMP_DEV(hdev)->min_key_size)
++	    key_size < hdev->le_min_key_size)
+ 		return -EINVAL;
+ 
+-	SMP_DEV(hdev)->max_key_size = key_size;
++	hdev->le_max_key_size = key_size;
+ 
+ 	return count;
+ }
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index c4a7db62658e..01ecd510014f 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1743,6 +1743,9 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
+ static void ip6erspan_set_version(struct nlattr *data[],
+ 				  struct __ip6_tnl_parm *parms)
+ {
++	if (!data)
++		return;
++
+ 	parms->erspan_ver = 1;
+ 	if (data[IFLA_GRE_ERSPAN_VER])
+ 		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
+index f5b4febeaa25..bc65db782bfb 100644
+--- a/net/ipv6/xfrm6_tunnel.c
++++ b/net/ipv6/xfrm6_tunnel.c
+@@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
+ 	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
+ 	unsigned int i;
+ 
+-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
+ 	xfrm_flush_gc();
++	xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+ 
+ 	for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
+ 		WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 7da629d59717..7d4bed955060 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1773,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
+ 	if (proto == 0)
+ 		return -EINVAL;
+ 
+-	err = xfrm_state_flush(net, proto, true);
++	err = xfrm_state_flush(net, proto, true, false);
+ 	err2 = unicast_flush_resp(sk, hdr);
+ 	if (err || err2) {
+ 		if (err == -ESRCH) /* empty table - go quietly */
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 7d424fd27025..c06393fc716d 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7203,9 +7203,6 @@ static void __nft_release_tables(struct net *net)
+ 
+ 		list_for_each_entry(chain, &table->chains, list)
+ 			nf_tables_unregister_hook(net, table, chain);
+-		list_for_each_entry(flowtable, &table->flowtables, list)
+-			nf_unregister_net_hooks(net, flowtable->ops,
+-						flowtable->ops_len);
+ 		/* No packets are walking on these chains anymore. */
+ 		ctx.table = table;
+ 		list_for_each_entry(chain, &table->chains, list) {
+diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
+index 5d92e1781980..5cb1ecb29ea4 100644
+--- a/net/netfilter/xt_cgroup.c
++++ b/net/netfilter/xt_cgroup.c
+@@ -68,6 +68,38 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
+ 	return 0;
+ }
+ 
++static int cgroup_mt_check_v2(const struct xt_mtchk_param *par)
++{
++	struct xt_cgroup_info_v2 *info = par->matchinfo;
++	struct cgroup *cgrp;
++
++	if ((info->invert_path & ~1) || (info->invert_classid & ~1))
++		return -EINVAL;
++
++	if (!info->has_path && !info->has_classid) {
++		pr_info("xt_cgroup: no path or classid specified\n");
++		return -EINVAL;
++	}
++
++	if (info->has_path && info->has_classid) {
++		pr_info_ratelimited("path and classid specified\n");
++		return -EINVAL;
++	}
++
++	info->priv = NULL;
++	if (info->has_path) {
++		cgrp = cgroup_get_from_path(info->path);
++		if (IS_ERR(cgrp)) {
++			pr_info_ratelimited("invalid path, errno=%ld\n",
++					    PTR_ERR(cgrp));
++			return -EINVAL;
++		}
++		info->priv = cgrp;
++	}
++
++	return 0;
++}
++
+ static bool
+ cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
+ {
+@@ -99,6 +131,24 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
+ 			info->invert_classid;
+ }
+ 
++static bool cgroup_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
++{
++	const struct xt_cgroup_info_v2 *info = par->matchinfo;
++	struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
++	struct cgroup *ancestor = info->priv;
++	struct sock *sk = skb->sk;
++
++	if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
++		return false;
++
++	if (ancestor)
++		return cgroup_is_descendant(sock_cgroup_ptr(skcd), ancestor) ^
++			info->invert_path;
++	else
++		return (info->classid == sock_cgroup_classid(skcd)) ^
++			info->invert_classid;
++}
++
+ static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
+ {
+ 	struct xt_cgroup_info_v1 *info = par->matchinfo;
+@@ -107,6 +157,14 @@ static void cgroup_mt_destroy_v1(const struct xt_mtdtor_param *par)
+ 		cgroup_put(info->priv);
+ }
+ 
++static void cgroup_mt_destroy_v2(const struct xt_mtdtor_param *par)
++{
++	struct xt_cgroup_info_v2 *info = par->matchinfo;
++
++	if (info->priv)
++		cgroup_put(info->priv);
++}
++
+ static struct xt_match cgroup_mt_reg[] __read_mostly = {
+ 	{
+ 		.name		= "cgroup",
+@@ -134,6 +192,20 @@ static struct xt_match cgroup_mt_reg[] __read_mostly = {
+ 				  (1 << NF_INET_POST_ROUTING) |
+ 				  (1 << NF_INET_LOCAL_IN),
+ 	},
++	{
++		.name		= "cgroup",
++		.revision	= 2,
++		.family		= NFPROTO_UNSPEC,
++		.checkentry	= cgroup_mt_check_v2,
++		.match		= cgroup_mt_v2,
++		.matchsize	= sizeof(struct xt_cgroup_info_v2),
++		.usersize	= offsetof(struct xt_cgroup_info_v2, priv),
++		.destroy	= cgroup_mt_destroy_v2,
++		.me		= THIS_MODULE,
++		.hooks		= (1 << NF_INET_LOCAL_OUT) |
++				  (1 << NF_INET_POST_ROUTING) |
++				  (1 << NF_INET_LOCAL_IN),
++	},
+ };
+ 
+ static int __init cgroup_mt_init(void)
+diff --git a/net/rds/rdma.c b/net/rds/rdma.c
+index 98237feb607a..e1965d9cbcf8 100644
+--- a/net/rds/rdma.c
++++ b/net/rds/rdma.c
+@@ -517,9 +517,10 @@ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
+ 	return tot_pages;
+ }
+ 
+-int rds_rdma_extra_size(struct rds_rdma_args *args)
++int rds_rdma_extra_size(struct rds_rdma_args *args,
++			struct rds_iov_vector *iov)
+ {
+-	struct rds_iovec vec;
++	struct rds_iovec *vec;
+ 	struct rds_iovec __user *local_vec;
+ 	int tot_pages = 0;
+ 	unsigned int nr_pages;
+@@ -530,13 +531,23 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
+ 	if (args->nr_local == 0)
+ 		return -EINVAL;
+ 
++	iov->iov = kcalloc(args->nr_local,
++			   sizeof(struct rds_iovec),
++			   GFP_KERNEL);
++	if (!iov->iov)
++		return -ENOMEM;
++
++	vec = &iov->iov[0];
++
++	if (copy_from_user(vec, local_vec, args->nr_local *
++			   sizeof(struct rds_iovec)))
++		return -EFAULT;
++	iov->len = args->nr_local;
++
+ 	/* figure out the number of pages in the vector */
+-	for (i = 0; i < args->nr_local; i++) {
+-		if (copy_from_user(&vec, &local_vec[i],
+-				   sizeof(struct rds_iovec)))
+-			return -EFAULT;
++	for (i = 0; i < args->nr_local; i++, vec++) {
+ 
+-		nr_pages = rds_pages_in_vec(&vec);
++		nr_pages = rds_pages_in_vec(vec);
+ 		if (nr_pages == 0)
+ 			return -EINVAL;
+ 
+@@ -558,15 +569,15 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
+  * Extract all arguments and set up the rdma_op
+  */
+ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+-			  struct cmsghdr *cmsg)
++		       struct cmsghdr *cmsg,
++		       struct rds_iov_vector *vec)
+ {
+ 	struct rds_rdma_args *args;
+ 	struct rm_rdma_op *op = &rm->rdma;
+ 	int nr_pages;
+ 	unsigned int nr_bytes;
+ 	struct page **pages = NULL;
+-	struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
+-	int iov_size;
++	struct rds_iovec *iovs;
+ 	unsigned int i, j;
+ 	int ret = 0;
+ 
+@@ -586,31 +597,23 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ 		goto out_ret;
+ 	}
+ 
+-	/* Check whether to allocate the iovec area */
+-	iov_size = args->nr_local * sizeof(struct rds_iovec);
+-	if (args->nr_local > UIO_FASTIOV) {
+-		iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
+-		if (!iovs) {
+-			ret = -ENOMEM;
+-			goto out_ret;
+-		}
++	if (vec->len != args->nr_local) {
++		ret = -EINVAL;
++		goto out_ret;
+ 	}
+ 
+-	if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
+-		ret = -EFAULT;
+-		goto out;
+-	}
++	iovs = vec->iov;
+ 
+ 	nr_pages = rds_rdma_pages(iovs, args->nr_local);
+ 	if (nr_pages < 0) {
+ 		ret = -EINVAL;
+-		goto out;
++		goto out_ret;
+ 	}
+ 
+ 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ 	if (!pages) {
+ 		ret = -ENOMEM;
+-		goto out;
++		goto out_ret;
+ 	}
+ 
+ 	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
+@@ -623,7 +626,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ 	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
+ 	if (!op->op_sg) {
+ 		ret = -ENOMEM;
+-		goto out;
++		goto out_pages;
+ 	}
+ 
+ 	if (op->op_notify || op->op_recverr) {
+@@ -635,7 +638,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ 		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
+ 		if (!op->op_notifier) {
+ 			ret = -ENOMEM;
+-			goto out;
++			goto out_pages;
+ 		}
+ 		op->op_notifier->n_user_token = args->user_token;
+ 		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
+@@ -681,7 +684,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ 		 */
+ 		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
+ 		if (ret < 0)
+-			goto out;
++			goto out_pages;
+ 		else
+ 			ret = 0;
+ 
+@@ -714,13 +717,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ 				nr_bytes,
+ 				(unsigned int) args->remote_vec.bytes);
+ 		ret = -EINVAL;
+-		goto out;
++		goto out_pages;
+ 	}
+ 	op->op_bytes = nr_bytes;
+ 
+-out:
+-	if (iovs != iovstack)
+-		sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
++out_pages:
+ 	kfree(pages);
+ out_ret:
+ 	if (ret)
+diff --git a/net/rds/rds.h b/net/rds/rds.h
+index c4dcf654d8fe..4234ab81b5af 100644
+--- a/net/rds/rds.h
++++ b/net/rds/rds.h
+@@ -386,6 +386,18 @@ static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
+ 	INIT_LIST_HEAD(&q->zcookie_head);
+ }
+ 
++struct rds_iov_vector {
++	struct rds_iovec *iov;
++	int               len;
++};
++
++struct rds_iov_vector_arr {
++	struct rds_iov_vector *vec;
++	int                    len;
++	int                    indx;
++	int                    incr;
++};
++
+ struct rds_message {
+ 	refcount_t		m_refcount;
+ 	struct list_head	m_sock_item;
+@@ -904,13 +916,13 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
+ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
+ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
+ void rds_rdma_drop_keys(struct rds_sock *rs);
+-int rds_rdma_extra_size(struct rds_rdma_args *args);
+-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+-			  struct cmsghdr *cmsg);
++int rds_rdma_extra_size(struct rds_rdma_args *args,
++			struct rds_iov_vector *iov);
+ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
+ 			  struct cmsghdr *cmsg);
+ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+-			  struct cmsghdr *cmsg);
++			  struct cmsghdr *cmsg,
++			  struct rds_iov_vector *vec);
+ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
+ 			  struct cmsghdr *cmsg);
+ void rds_rdma_free_op(struct rm_rdma_op *ro);
+diff --git a/net/rds/send.c b/net/rds/send.c
+index fe785ee819dd..ec2267cbf85f 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -876,13 +876,15 @@ out:
+  * rds_message is getting to be quite complicated, and we'd like to allocate
+  * it all in one go. This figures out how big it needs to be up front.
+  */
+-static int rds_rm_size(struct msghdr *msg, int num_sgs)
++static int rds_rm_size(struct msghdr *msg, int num_sgs,
++		       struct rds_iov_vector_arr *vct)
+ {
+ 	struct cmsghdr *cmsg;
+ 	int size = 0;
+ 	int cmsg_groups = 0;
+ 	int retval;
+ 	bool zcopy_cookie = false;
++	struct rds_iov_vector *iov, *tmp_iov;
+ 
+ 	for_each_cmsghdr(cmsg, msg) {
+ 		if (!CMSG_OK(msg, cmsg))
+@@ -893,8 +895,24 @@ static int rds_rm_size(struct msghdr *msg, int num_sgs)
+ 
+ 		switch (cmsg->cmsg_type) {
+ 		case RDS_CMSG_RDMA_ARGS:
++			if (vct->indx >= vct->len) {
++				vct->len += vct->incr;
++				tmp_iov =
++					krealloc(vct->vec,
++						 vct->len *
++						 sizeof(struct rds_iov_vector),
++						 GFP_KERNEL);
++				if (!tmp_iov) {
++					vct->len -= vct->incr;
++					return -ENOMEM;
++				}
++				vct->vec = tmp_iov;
++			}
++			iov = &vct->vec[vct->indx];
++			memset(iov, 0, sizeof(struct rds_iov_vector));
++			vct->indx++;
+ 			cmsg_groups |= 1;
+-			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
++			retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
+ 			if (retval < 0)
+ 				return retval;
+ 			size += retval;
+@@ -951,10 +969,11 @@ static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
+ }
+ 
+ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
+-			 struct msghdr *msg, int *allocated_mr)
++			 struct msghdr *msg, int *allocated_mr,
++			 struct rds_iov_vector_arr *vct)
+ {
+ 	struct cmsghdr *cmsg;
+-	int ret = 0;
++	int ret = 0, ind = 0;
+ 
+ 	for_each_cmsghdr(cmsg, msg) {
+ 		if (!CMSG_OK(msg, cmsg))
+@@ -968,7 +987,10 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
+ 		 */
+ 		switch (cmsg->cmsg_type) {
+ 		case RDS_CMSG_RDMA_ARGS:
+-			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
++			if (ind >= vct->indx)
++				return -ENOMEM;
++			ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
++			ind++;
+ 			break;
+ 
+ 		case RDS_CMSG_RDMA_DEST:
+@@ -1084,6 +1106,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
+ 		      sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
+ 	int num_sgs = ceil(payload_len, PAGE_SIZE);
+ 	int namelen;
++	struct rds_iov_vector_arr vct = {0};
++	int ind;
++
++	/* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
++	vct.incr = 1;
+ 
+ 	/* Mirror Linux UDP mirror of BSD error message compatibility */
+ 	/* XXX: Perhaps MSG_MORE someday */
+@@ -1220,7 +1247,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
+ 		num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
+ 	}
+ 	/* size of rm including all sgs */
+-	ret = rds_rm_size(msg, num_sgs);
++	ret = rds_rm_size(msg, num_sgs, &vct);
+ 	if (ret < 0)
+ 		goto out;
+ 
+@@ -1270,7 +1297,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
+ 	rm->m_conn_path = cpath;
+ 
+ 	/* Parse any control messages the user may have included. */
+-	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
++	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
+ 	if (ret) {
+ 		/* Trigger connection so that its ready for the next retry */
+ 		if (ret ==  -EAGAIN)
+@@ -1348,9 +1375,18 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
+ 	if (ret)
+ 		goto out;
+ 	rds_message_put(rm);
++
++	for (ind = 0; ind < vct.indx; ind++)
++		kfree(vct.vec[ind].iov);
++	kfree(vct.vec);
++
+ 	return payload_len;
+ 
+ out:
++	for (ind = 0; ind < vct.indx; ind++)
++		kfree(vct.vec[ind].iov);
++	kfree(vct.vec);
++
+ 	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
+ 	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
+ 	 * or in any other way, we need to destroy the MR again */
+diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
+index 6e419b15a9f8..c979a56faaef 100644
+--- a/net/rxrpc/conn_client.c
++++ b/net/rxrpc/conn_client.c
+@@ -707,6 +707,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
+ 
+ 	ret = rxrpc_wait_for_channel(call, gfp);
+ 	if (ret < 0) {
++		trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
+ 		rxrpc_disconnect_client_call(call);
+ 		goto out;
+ 	}
+@@ -777,16 +778,22 @@ static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
+  */
+ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
+ {
+-	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
+ 	struct rxrpc_connection *conn = call->conn;
+-	struct rxrpc_channel *chan = &conn->channels[channel];
++	struct rxrpc_channel *chan = NULL;
+ 	struct rxrpc_net *rxnet = conn->params.local->rxnet;
++	unsigned int channel = -1;
++	u32 cid;
+ 
++	spin_lock(&conn->channel_lock);
++
++	cid = call->cid;
++	if (cid) {
++		channel = cid & RXRPC_CHANNELMASK;
++		chan = &conn->channels[channel];
++	}
+ 	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
+ 	call->conn = NULL;
+ 
+-	spin_lock(&conn->channel_lock);
+-
+ 	/* Calls that have never actually been assigned a channel can simply be
+ 	 * discarded.  If the conn didn't get used either, it will follow
+ 	 * immediately unless someone else grabs it in the meantime.
+@@ -810,7 +817,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
+ 		goto out;
+ 	}
+ 
+-	ASSERTCMP(rcu_access_pointer(chan->call), ==, call);
++	if (rcu_access_pointer(chan->call) != call) {
++		spin_unlock(&conn->channel_lock);
++		BUG();
++	}
+ 
+ 	/* If a client call was exposed to the world, we save the result for
+ 	 * retransmission.
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index cc0203efb584..3f729cd512af 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x)
+ }
+ EXPORT_SYMBOL(xfrm_state_free);
+ 
+-static void xfrm_state_gc_destroy(struct xfrm_state *x)
++static void ___xfrm_state_destroy(struct xfrm_state *x)
+ {
+ 	tasklet_hrtimer_cancel(&x->mtimer);
+ 	del_timer_sync(&x->rtimer);
+@@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
+ 	synchronize_rcu();
+ 
+ 	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
+-		xfrm_state_gc_destroy(x);
++		___xfrm_state_destroy(x);
+ }
+ 
+ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
+@@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
+ }
+ EXPORT_SYMBOL(xfrm_state_alloc);
+ 
+-void __xfrm_state_destroy(struct xfrm_state *x)
++void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
+ {
+ 	WARN_ON(x->km.state != XFRM_STATE_DEAD);
+ 
+-	spin_lock_bh(&xfrm_state_gc_lock);
+-	hlist_add_head(&x->gclist, &xfrm_state_gc_list);
+-	spin_unlock_bh(&xfrm_state_gc_lock);
+-	schedule_work(&xfrm_state_gc_work);
++	if (sync) {
++		synchronize_rcu();
++		___xfrm_state_destroy(x);
++	} else {
++		spin_lock_bh(&xfrm_state_gc_lock);
++		hlist_add_head(&x->gclist, &xfrm_state_gc_list);
++		spin_unlock_bh(&xfrm_state_gc_lock);
++		schedule_work(&xfrm_state_gc_work);
++	}
+ }
+ EXPORT_SYMBOL(__xfrm_state_destroy);
+ 
+@@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
+ }
+ #endif
+ 
+-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
++int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
+ {
+ 	int i, err = 0, cnt = 0;
+ 
+@@ -730,7 +735,10 @@ restart:
+ 				err = xfrm_state_delete(x);
+ 				xfrm_audit_state_delete(x, err ? 0 : 1,
+ 							task_valid);
+-				xfrm_state_put(x);
++				if (sync)
++					xfrm_state_put_sync(x);
++				else
++					xfrm_state_put(x);
+ 				if (!err)
+ 					cnt++;
+ 
+@@ -2217,7 +2225,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
+ 		if (atomic_read(&t->tunnel_users) == 2)
+ 			xfrm_state_delete(t);
+ 		atomic_dec(&t->tunnel_users);
+-		xfrm_state_put(t);
++		xfrm_state_put_sync(t);
+ 		x->tunnel = NULL;
+ 	}
+ }
+@@ -2377,8 +2385,8 @@ void xfrm_state_fini(struct net *net)
+ 	unsigned int sz;
+ 
+ 	flush_work(&net->xfrm.state_hash_work);
+-	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
+ 	flush_work(&xfrm_state_gc_work);
++	xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
+ 
+ 	WARN_ON(!list_empty(&net->xfrm.state_all));
+ 
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index ab557827aac0..7e4904b93004 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1932,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
+ 	int err;
+ 
+-	err = xfrm_state_flush(net, p->proto, true);
++	err = xfrm_state_flush(net, p->proto, true, false);
+ 	if (err) {
+ 		if (err == -ESRCH) /* empty table */
+ 			return 0;
+diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
+index 5b02bd49fde4..4e4ecc21760b 100644
+--- a/sound/drivers/opl3/opl3_voice.h
++++ b/sound/drivers/opl3/opl3_voice.h
+@@ -41,7 +41,7 @@ void snd_opl3_timer_func(struct timer_list *t);
+ 
+ /* Prototypes for opl3_drums.c */
+ void snd_opl3_load_drums(struct snd_opl3 *opl3);
+-void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
++void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);
+ 
+ /* Prototypes for opl3_oss.c */
+ #if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
+diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
+index d77dcba276b5..1eb8b61a185b 100644
+--- a/sound/isa/sb/sb8.c
++++ b/sound/isa/sb/sb8.c
+@@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
+ 
+ 	/* block the 0x388 port to avoid PnP conflicts */
+ 	acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
++	if (!acard->fm_res) {
++		err = -EBUSY;
++		goto _err;
++	}
+ 
+ 	if (port[dev] != SNDRV_AUTO_PORT) {
+ 		if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
+diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
+index 907cf1a46712..3ef2b27ebbe8 100644
+--- a/sound/pci/echoaudio/echoaudio.c
++++ b/sound/pci/echoaudio/echoaudio.c
+@@ -1954,6 +1954,11 @@ static int snd_echo_create(struct snd_card *card,
+ 	}
+ 	chip->dsp_registers = (volatile u32 __iomem *)
+ 		ioremap_nocache(chip->dsp_registers_phys, sz);
++	if (!chip->dsp_registers) {
++		dev_err(chip->card->dev, "ioremap failed\n");
++		snd_echo_free(chip);
++		return -ENOMEM;
++	}
+ 
+ 	if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
+ 			KBUILD_MODNAME, chip)) {
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b9d832bde23e..bd60eb7168fa 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5604,6 +5604,7 @@ enum {
+ 	ALC298_FIXUP_TPT470_DOCK,
+ 	ALC255_FIXUP_DUMMY_LINEOUT_VERB,
+ 	ALC255_FIXUP_DELL_HEADSET_MIC,
++	ALC256_FIXUP_HUAWEI_MBXP_PINS,
+ 	ALC295_FIXUP_HP_X360,
+ 	ALC221_FIXUP_HP_HEADSET_MIC,
+ 	ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+@@ -5892,6 +5893,22 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_HEADSET_MIC
+ 	},
++	[ALC256_FIXUP_HUAWEI_MBXP_PINS] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{0x12, 0x90a60130},
++			{0x13, 0x40000000},
++			{0x14, 0x90170110},
++			{0x18, 0x411111f0},
++			{0x19, 0x04a11040},
++			{0x1a, 0x411111f0},
++			{0x1b, 0x90170112},
++			{0x1d, 0x40759a05},
++			{0x1e, 0x411111f0},
++			{0x21, 0x04211020},
++			{ }
++		},
++	},
+ 	[ALC269_FIXUP_ASUS_X101_FUNC] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc269_fixup_x101_headset_mic,
+@@ -6885,6 +6902,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
++	SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MBXP", ALC256_FIXUP_HUAWEI_MBXP_PINS),
+ 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ 
+ #if 0
+diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
+index 592efb370c44..f4dc3d445aae 100644
+--- a/sound/soc/soc-ops.c
++++ b/sound/soc/soc-ops.c
+@@ -373,7 +373,7 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
+ 	unsigned int rshift = mc->rshift;
+ 	int max = mc->max;
+ 	int min = mc->min;
+-	unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
++	unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
+ 	unsigned int val;
+ 	int ret;
+ 
+@@ -418,7 +418,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
+ 	unsigned int rshift = mc->rshift;
+ 	int max = mc->max;
+ 	int min = mc->min;
+-	unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
++	unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
+ 	int err = 0;
+ 	unsigned int val, val_mask, val2 = 0;
+ 
+diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
+index 32f4a898e3f2..05c10eb56a0c 100644
+--- a/tools/perf/Documentation/perf-config.txt
++++ b/tools/perf/Documentation/perf-config.txt
+@@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this:
+ 
+ 	[report]
+ 		# Defaults
+-		sort-order = comm,dso,symbol
++		sort_order = comm,dso,symbol
+ 		percent-limit = 0
+ 		queue-size = 0
+ 		children = true
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index d21d8751e749..33eefc33e0ea 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -1491,8 +1491,9 @@ int cmd_top(int argc, const char **argv)
+ 	annotation_config__init();
+ 
+ 	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
+-	if (symbol__init(NULL) < 0)
+-		return -1;
++	status = symbol__init(NULL);
++	if (status < 0)
++		goto out_delete_evlist;
+ 
+ 	sort__setup_elide(stdout);
+ 
+diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
+index d0406116c905..926a8e1b5e94 100644
+--- a/tools/perf/tests/evsel-tp-sched.c
++++ b/tools/perf/tests/evsel-tp-sched.c
+@@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
+ 	if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
+ 		ret = -1;
+ 
++	perf_evsel__delete(evsel);
+ 	return ret;
+ }
+diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
+index 01f0706995a9..9acc1e80b936 100644
+--- a/tools/perf/tests/expr.c
++++ b/tools/perf/tests/expr.c
+@@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
+ 	const char *p;
+ 	const char **other;
+ 	double val;
+-	int ret;
++	int i, ret;
+ 	struct parse_ctx ctx;
+ 	int num_other;
+ 
+@@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
+ 	TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
+ 	TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
+ 	TEST_ASSERT_VAL("find other", other[3] == NULL);
++
++	for (i = 0; i < num_other; i++)
++		free((void *)other[i]);
+ 	free((void *)other);
+ 
+ 	return 0;
+diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
+index c531e6deb104..493ecb611540 100644
+--- a/tools/perf/tests/openat-syscall-all-cpus.c
++++ b/tools/perf/tests/openat-syscall-all-cpus.c
+@@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
+ 	if (IS_ERR(evsel)) {
+ 		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
+ 		pr_debug("%s\n", errbuf);
+-		goto out_thread_map_delete;
++		goto out_cpu_map_delete;
+ 	}
+ 
+ 	if (perf_evsel__open(evsel, cpus, threads) < 0) {
+@@ -119,6 +119,8 @@ out_close_fd:
+ 	perf_evsel__close_fd(evsel);
+ out_evsel_delete:
+ 	perf_evsel__delete(evsel);
++out_cpu_map_delete:
++	cpu_map__put(cpus);
+ out_thread_map_delete:
+ 	thread_map__put(threads);
+ 	return err;
+diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
+index 04b1d53e4bf9..1d352621bd48 100644
+--- a/tools/perf/util/build-id.c
++++ b/tools/perf/util/build-id.c
+@@ -183,6 +183,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
+ 	return bf;
+ }
+ 
++/* The caller is responsible to free the returned buffer. */
+ char *build_id_cache__origname(const char *sbuild_id)
+ {
+ 	char *linkname;
+diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
+index 5ac157056cdf..9bb742accfa5 100644
+--- a/tools/perf/util/config.c
++++ b/tools/perf/util/config.c
+@@ -628,11 +628,10 @@ static int collect_config(const char *var, const char *value,
+ 	}
+ 
+ 	ret = set_value(item, value);
+-	return ret;
+ 
+ out_free:
+ 	free(key);
+-	return -1;
++	return ret;
+ }
+ 
+ int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index e7dbdcc8d465..b65ad5a273eb 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -1274,6 +1274,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
+ {
+ 	assert(list_empty(&evsel->node));
+ 	assert(evsel->evlist == NULL);
++	perf_evsel__free_counts(evsel);
+ 	perf_evsel__free_fd(evsel);
+ 	perf_evsel__free_id(evsel);
+ 	perf_evsel__free_config_terms(evsel);
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 828cb9794c76..e1e94b44d588 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -1048,8 +1048,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
+ 
+ 	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
+ 					iter->evsel, al, max_stack_depth);
+-	if (err)
++	if (err) {
++		map__put(alm);
+ 		return err;
++	}
+ 
+ 	err = iter->ops->prepare_entry(iter, al);
+ 	if (err)
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index ebb18a9bc460..1a7c76d2baa8 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -2263,6 +2263,7 @@ static bool is_event_supported(u8 type, unsigned config)
+ 		perf_evsel__delete(evsel);
+ 	}
+ 
++	thread_map__put(tmap);
+ 	return ret;
+ }
+ 
+@@ -2333,6 +2334,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
+ 				printf("  %-50s [%s]\n", buf, "SDT event");
+ 				free(buf);
+ 			}
++			free(path);
+ 		} else
+ 			printf("  %-50s [%s]\n", nd->s, "SDT event");
+ 		if (nd2) {
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 980bd9d20646..83964f796edb 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -5054,6 +5054,9 @@ int fork_it(char **argv)
+ 		signal(SIGQUIT, SIG_IGN);
+ 		if (waitpid(child_pid, &status, 0) == -1)
+ 			err(status, "waitpid");
++
++		if (WIFEXITED(status))
++			status = WEXITSTATUS(status);
+ 	}
+ 	/*
+ 	 * n.b. fork_it() does not check for errors from for_all_cpus()
+diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
+index 4204359c9fee..8159fd98680b 100644
+--- a/tools/usb/usbip/libsrc/vhci_driver.c
++++ b/tools/usb/usbip/libsrc/vhci_driver.c
+@@ -150,7 +150,7 @@ static int get_nports(struct udev_device *hc_device)
+ 
+ static int vhci_hcd_filter(const struct dirent *dirent)
+ {
+-	return strcmp(dirent->d_name, "vhci_hcd") >= 0;
++	return !strncmp(dirent->d_name, "vhci_hcd.", 9);
+ }
+ 
+ static int get_ncontrollers(void)


Thread overview: 332+ messages
2019-04-20 11:09 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2024-04-18  3:06 [gentoo-commits] proj/linux-patches:4.19 commit in: / Alice Ferrazzi
2023-09-02  9:59 Mike Pagano
2023-08-30 15:00 Mike Pagano
2023-08-16 16:59 Mike Pagano
2023-08-11 11:58 Mike Pagano
2023-08-08 18:43 Mike Pagano
2023-07-24 20:30 Mike Pagano
2023-06-28 10:29 Mike Pagano
2023-06-21 14:55 Alice Ferrazzi
2023-06-14 10:21 Mike Pagano
2023-06-09 11:32 Mike Pagano
2023-05-30 12:57 Mike Pagano
2023-05-17 11:14 Mike Pagano
2023-05-17 11:01 Mike Pagano
2023-05-10 17:59 Mike Pagano
2023-04-26  9:35 Alice Ferrazzi
2023-04-20 11:17 Alice Ferrazzi
2023-04-05 11:41 Mike Pagano
2023-03-22 14:16 Alice Ferrazzi
2023-03-17 10:46 Mike Pagano
2023-03-13 11:35 Alice Ferrazzi
2023-03-11 16:01 Mike Pagano
2023-03-03 12:31 Mike Pagano
2023-02-25 11:41 Mike Pagano
2023-02-24  3:19 Alice Ferrazzi
2023-02-24  3:15 Alice Ferrazzi
2023-02-22 14:51 Alice Ferrazzi
2023-02-06 12:49 Mike Pagano
2023-01-24  7:16 Alice Ferrazzi
2023-01-18 11:11 Mike Pagano
2022-12-14 12:15 Mike Pagano
2022-12-08 12:14 Alice Ferrazzi
2022-11-25 17:04 Mike Pagano
2022-11-23  9:39 Alice Ferrazzi
2022-11-10 17:58 Mike Pagano
2022-11-03 15:11 Mike Pagano
2022-11-01 19:48 Mike Pagano
2022-10-26 11:41 Mike Pagano
2022-10-05 11:59 Mike Pagano
2022-09-28  9:18 Mike Pagano
2022-09-20 12:03 Mike Pagano
2022-09-15 11:09 Mike Pagano
2022-09-05 12:06 Mike Pagano
2022-08-25 10:35 Mike Pagano
2022-08-11 12:36 Mike Pagano
2022-07-29 15:28 Mike Pagano
2022-07-21 20:12 Mike Pagano
2022-07-12 16:01 Mike Pagano
2022-07-07 16:18 Mike Pagano
2022-07-02 16:07 Mike Pagano
2022-06-25 10:22 Mike Pagano
2022-06-16 11:40 Mike Pagano
2022-06-14 16:02 Mike Pagano
2022-06-06 11:05 Mike Pagano
2022-05-27 12:24 Mike Pagano
2022-05-25 11:55 Mike Pagano
2022-05-18  9:50 Mike Pagano
2022-05-15 22:12 Mike Pagano
2022-05-12 11:30 Mike Pagano
2022-05-01 17:04 Mike Pagano
2022-04-27 12:03 Mike Pagano
2022-04-20 12:09 Mike Pagano
2022-04-15 13:11 Mike Pagano
2022-04-12 19:24 Mike Pagano
2022-03-28 10:59 Mike Pagano
2022-03-23 11:57 Mike Pagano
2022-03-16 13:27 Mike Pagano
2022-03-11 10:56 Mike Pagano
2022-03-08 18:30 Mike Pagano
2022-03-02 13:08 Mike Pagano
2022-02-26 21:14 Mike Pagano
2022-02-23 12:39 Mike Pagano
2022-02-16 12:47 Mike Pagano
2022-02-11 12:53 Mike Pagano
2022-02-11 12:46 Mike Pagano
2022-02-11 12:45 Mike Pagano
2022-02-11 12:37 Mike Pagano
2022-02-08 17:56 Mike Pagano
2022-01-29 17:45 Mike Pagano
2022-01-27 11:39 Mike Pagano
2022-01-11 13:14 Mike Pagano
2022-01-05 12:55 Mike Pagano
2021-12-29 13:11 Mike Pagano
2021-12-22 14:07 Mike Pagano
2021-12-14 10:36 Mike Pagano
2021-12-08 12:55 Mike Pagano
2021-12-01 12:51 Mike Pagano
2021-11-26 11:59 Mike Pagano
2021-11-12 14:16 Mike Pagano
2021-11-06 13:26 Mike Pagano
2021-11-02 19:32 Mike Pagano
2021-10-27 11:59 Mike Pagano
2021-10-20 13:26 Mike Pagano
2021-10-17 13:12 Mike Pagano
2021-10-13 15:00 Alice Ferrazzi
2021-10-09 21:33 Mike Pagano
2021-10-06 14:06 Mike Pagano
2021-09-26 14:13 Mike Pagano
2021-09-22 11:40 Mike Pagano
2021-09-20 22:05 Mike Pagano
2021-09-03 11:22 Mike Pagano
2021-09-03 10:08 Alice Ferrazzi
2021-08-26 14:06 Mike Pagano
2021-08-25 22:45 Mike Pagano
2021-08-25 20:41 Mike Pagano
2021-08-15 20:07 Mike Pagano
2021-08-12 11:51 Mike Pagano
2021-08-08 13:39 Mike Pagano
2021-08-04 11:54 Mike Pagano
2021-08-03 12:26 Mike Pagano
2021-07-31 10:34 Alice Ferrazzi
2021-07-28 12:37 Mike Pagano
2021-07-20 15:35 Alice Ferrazzi
2021-07-13 12:38 Mike Pagano
2021-07-11 14:45 Mike Pagano
2021-06-30 14:25 Mike Pagano
2021-06-16 12:22 Mike Pagano
2021-06-10 11:46 Mike Pagano
2021-06-03 10:32 Alice Ferrazzi
2021-05-26 12:05 Mike Pagano
2021-05-22 10:03 Mike Pagano
2021-05-07 11:40 Alice Ferrazzi
2021-04-30 19:02 Mike Pagano
2021-04-28 18:31 Mike Pagano
2021-04-28 11:44 Alice Ferrazzi
2021-04-16 11:15 Alice Ferrazzi
2021-04-14 11:22 Alice Ferrazzi
2021-04-10 13:24 Mike Pagano
2021-04-07 12:21 Mike Pagano
2021-03-30 14:17 Mike Pagano
2021-03-24 12:08 Mike Pagano
2021-03-22 15:50 Mike Pagano
2021-03-20 14:26 Mike Pagano
2021-03-17 16:21 Mike Pagano
2021-03-11 14:05 Mike Pagano
2021-03-07 15:15 Mike Pagano
2021-03-04 12:08 Mike Pagano
2021-02-23 14:31 Alice Ferrazzi
2021-02-13 15:28 Alice Ferrazzi
2021-02-10 10:03 Alice Ferrazzi
2021-02-07 14:40 Alice Ferrazzi
2021-02-03 23:43 Mike Pagano
2021-01-30 13:34 Alice Ferrazzi
2021-01-27 11:15 Mike Pagano
2021-01-23 16:36 Mike Pagano
2021-01-19 20:34 Mike Pagano
2021-01-17 16:20 Mike Pagano
2021-01-12 20:06 Mike Pagano
2021-01-09 12:57 Mike Pagano
2021-01-06 14:15 Mike Pagano
2020-12-30 12:52 Mike Pagano
2020-12-11 12:56 Mike Pagano
2020-12-08 12:06 Mike Pagano
2020-12-02 12:49 Mike Pagano
2020-11-24 14:40 Mike Pagano
2020-11-22 19:26 Mike Pagano
2020-11-18 19:56 Mike Pagano
2020-11-11 15:43 Mike Pagano
2020-11-10 13:56 Mike Pagano
2020-11-05 12:35 Mike Pagano
2020-11-01 20:29 Mike Pagano
2020-10-29 11:18 Mike Pagano
2020-10-17 10:17 Mike Pagano
2020-10-14 20:36 Mike Pagano
2020-10-07 12:50 Mike Pagano
2020-10-01 12:45 Mike Pagano
2020-09-26 22:07 Mike Pagano
2020-09-26 22:00 Mike Pagano
2020-09-24 15:58 Mike Pagano
2020-09-23 12:07 Mike Pagano
2020-09-17 15:01 Mike Pagano
2020-09-17 14:55 Mike Pagano
2020-09-12 17:59 Mike Pagano
2020-09-09 17:59 Mike Pagano
2020-09-03 11:37 Mike Pagano
2020-08-26 11:15 Mike Pagano
2020-08-21 10:49 Alice Ferrazzi
2020-08-19  9:36 Alice Ferrazzi
2020-08-12 23:36 Alice Ferrazzi
2020-08-07 19:16 Mike Pagano
2020-08-05 14:51 Thomas Deutschmann
2020-07-31 18:00 Mike Pagano
2020-07-29 12:33 Mike Pagano
2020-07-22 12:42 Mike Pagano
2020-07-16 11:17 Mike Pagano
2020-07-09 12:12 Mike Pagano
2020-07-01 12:14 Mike Pagano
2020-06-29 17:41 Mike Pagano
2020-06-25 15:07 Mike Pagano
2020-06-22 14:47 Mike Pagano
2020-06-10 21:27 Mike Pagano
2020-06-07 21:52 Mike Pagano
2020-06-03 11:41 Mike Pagano
2020-05-27 16:25 Mike Pagano
2020-05-20 11:30 Mike Pagano
2020-05-20 11:27 Mike Pagano
2020-05-14 11:30 Mike Pagano
2020-05-13 12:33 Mike Pagano
2020-05-11 22:50 Mike Pagano
2020-05-09 22:20 Mike Pagano
2020-05-06 11:46 Mike Pagano
2020-05-02 19:24 Mike Pagano
2020-04-29 17:57 Mike Pagano
2020-04-23 11:44 Mike Pagano
2020-04-21 11:15 Mike Pagano
2020-04-17 11:45 Mike Pagano
2020-04-15 17:09 Mike Pagano
2020-04-13 11:34 Mike Pagano
2020-04-02 15:24 Mike Pagano
2020-03-25 14:58 Mike Pagano
2020-03-20 11:57 Mike Pagano
2020-03-18 14:21 Mike Pagano
2020-03-16 12:23 Mike Pagano
2020-03-11 17:20 Mike Pagano
2020-03-05 16:23 Mike Pagano
2020-02-28 16:38 Mike Pagano
2020-02-24 11:06 Mike Pagano
2020-02-19 23:45 Mike Pagano
2020-02-14 23:52 Mike Pagano
2020-02-11 16:20 Mike Pagano
2020-02-05 17:05 Mike Pagano
2020-02-01 10:37 Mike Pagano
2020-02-01 10:30 Mike Pagano
2020-01-29 16:16 Mike Pagano
2020-01-27 14:25 Mike Pagano
2020-01-23 11:07 Mike Pagano
2020-01-17 19:56 Mike Pagano
2020-01-14 22:30 Mike Pagano
2020-01-12 15:00 Mike Pagano
2020-01-09 11:15 Mike Pagano
2020-01-04 19:50 Mike Pagano
2019-12-31 17:46 Mike Pagano
2019-12-21 15:03 Mike Pagano
2019-12-17 21:56 Mike Pagano
2019-12-13 12:35 Mike Pagano
2019-12-05 12:03 Alice Ferrazzi
2019-12-01 14:06 Thomas Deutschmann
2019-11-24 15:44 Mike Pagano
2019-11-20 19:36 Mike Pagano
2019-11-12 21:00 Mike Pagano
2019-11-10 16:20 Mike Pagano
2019-11-06 14:26 Mike Pagano
2019-10-29 12:04 Mike Pagano
2019-10-17 22:27 Mike Pagano
2019-10-11 17:04 Mike Pagano
2019-10-07 17:42 Mike Pagano
2019-10-05 11:42 Mike Pagano
2019-10-01 10:10 Mike Pagano
2019-09-21 17:11 Mike Pagano
2019-09-19 12:34 Mike Pagano
2019-09-19 10:04 Mike Pagano
2019-09-16 12:26 Mike Pagano
2019-09-10 11:12 Mike Pagano
2019-09-06 17:25 Mike Pagano
2019-08-29 14:15 Mike Pagano
2019-08-25 17:37 Mike Pagano
2019-08-23 22:18 Mike Pagano
2019-08-16 12:26 Mike Pagano
2019-08-16 12:13 Mike Pagano
2019-08-09 17:45 Mike Pagano
2019-08-06 19:19 Mike Pagano
2019-08-04 16:15 Mike Pagano
2019-07-31 15:09 Mike Pagano
2019-07-31 10:22 Mike Pagano
2019-07-28 16:27 Mike Pagano
2019-07-26 11:35 Mike Pagano
2019-07-21 14:41 Mike Pagano
2019-07-14 15:44 Mike Pagano
2019-07-10 11:05 Mike Pagano
2019-07-03 11:34 Mike Pagano
2019-06-25 10:53 Mike Pagano
2019-06-22 19:06 Mike Pagano
2019-06-19 17:17 Thomas Deutschmann
2019-06-17 19:22 Mike Pagano
2019-06-15 15:07 Mike Pagano
2019-06-11 12:42 Mike Pagano
2019-06-10 19:43 Mike Pagano
2019-06-09 16:19 Mike Pagano
2019-06-04 11:11 Mike Pagano
2019-05-31 15:02 Mike Pagano
2019-05-26 17:10 Mike Pagano
2019-05-22 11:02 Mike Pagano
2019-05-16 23:03 Mike Pagano
2019-05-14 21:00 Mike Pagano
2019-05-10 19:40 Mike Pagano
2019-05-08 10:06 Mike Pagano
2019-05-05 13:42 Mike Pagano
2019-05-04 18:28 Mike Pagano
2019-05-02 10:13 Mike Pagano
2019-04-27 17:36 Mike Pagano
2019-04-19 19:51 Mike Pagano
2019-04-05 21:46 Mike Pagano
2019-04-03 10:59 Mike Pagano
2019-03-27 10:22 Mike Pagano
2019-03-23 20:23 Mike Pagano
2019-03-19 16:58 Mike Pagano
2019-03-13 22:08 Mike Pagano
2019-03-10 14:15 Mike Pagano
2019-03-06 19:06 Mike Pagano
2019-03-05 18:04 Mike Pagano
2019-02-27 11:23 Mike Pagano
2019-02-23 11:35 Mike Pagano
2019-02-23  0:46 Mike Pagano
2019-02-20 11:19 Mike Pagano
2019-02-16  0:42 Mike Pagano
2019-02-15 12:39 Mike Pagano
2019-02-12 20:53 Mike Pagano
2019-02-06 17:08 Mike Pagano
2019-01-31 11:28 Mike Pagano
2019-01-26 15:09 Mike Pagano
2019-01-22 23:06 Mike Pagano
2019-01-16 23:32 Mike Pagano
2019-01-13 19:29 Mike Pagano
2019-01-09 17:54 Mike Pagano
2018-12-29 18:55 Mike Pagano
2018-12-29  1:08 Mike Pagano
2018-12-21 14:58 Mike Pagano
2018-12-19 19:09 Mike Pagano
2018-12-17 11:42 Mike Pagano
2018-12-13 11:40 Mike Pagano
2018-12-08 13:17 Mike Pagano
2018-12-08 13:17 Mike Pagano
2018-12-05 20:16 Mike Pagano
2018-12-01 15:08 Mike Pagano
2018-11-27 16:16 Mike Pagano
2018-11-23 12:42 Mike Pagano
2018-11-21 12:30 Mike Pagano
2018-11-14  0:47 Mike Pagano
2018-11-14  0:47 Mike Pagano
2018-11-13 20:44 Mike Pagano
2018-11-04 16:22 Alice Ferrazzi
