From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 21 Nov 2018 12:18:11 +0000 (UTC)
Message-ID: <1542802669.1cc2d593aa5e4aa0a4a17970b4866a24eb4284b5.mpagano@gentoo>

commit:     1cc2d593aa5e4aa0a4a17970b4866a24eb4284b5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov 21 12:17:49 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 21 12:17:49 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1cc2d593

Linux patch 4.4.164

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1163_linux-4.4.164.patch | 4219 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4223 insertions(+)

diff --git a/0000_README b/0000_README
index 3fa80ea..aeea8d7 100644
--- a/0000_README
+++ b/0000_README
@@ -695,6 +695,10 @@ Patch:  1162_linux-4.4.163.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.163
 
+Patch:  1163_linux-4.4.164.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.164
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1163_linux-4.4.164.patch b/1163_linux-4.4.164.patch
new file mode 100644
index 0000000..2157e5a
--- /dev/null
+++ b/1163_linux-4.4.164.patch
@@ -0,0 +1,4219 @@
+diff --git a/Makefile b/Makefile
+index 4e3179768eea..9382e7e4e750 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 163
++SUBLEVEL = 164
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h
+index 7fde0f88da88..51ed90be770a 100644
+--- a/arch/alpha/include/asm/termios.h
++++ b/arch/alpha/include/asm/termios.h
+@@ -72,9 +72,15 @@
+ })
+ 
+ #define user_termios_to_kernel_termios(k, u) \
+-	copy_from_user(k, u, sizeof(struct termios))
++	copy_from_user(k, u, sizeof(struct termios2))
+ 
+ #define kernel_termios_to_user_termios(u, k) \
++	copy_to_user(u, k, sizeof(struct termios2))
++
++#define user_termios_to_kernel_termios_1(k, u) \
++	copy_from_user(k, u, sizeof(struct termios))
++
++#define kernel_termios_to_user_termios_1(u, k) \
+ 	copy_to_user(u, k, sizeof(struct termios))
+ 
+ #endif	/* _ALPHA_TERMIOS_H */
+diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
+index f30c94ae1bdb..7ee8ab577e11 100644
+--- a/arch/alpha/include/uapi/asm/ioctls.h
++++ b/arch/alpha/include/uapi/asm/ioctls.h
+@@ -31,6 +31,11 @@
+ #define TCXONC		_IO('t', 30)
+ #define TCFLSH		_IO('t', 31)
+ 
++#define TCGETS2		_IOR('T', 42, struct termios2)
++#define TCSETS2		_IOW('T', 43, struct termios2)
++#define TCSETSW2	_IOW('T', 44, struct termios2)
++#define TCSETSF2	_IOW('T', 45, struct termios2)
++
+ #define TIOCSWINSZ	_IOW('t', 103, struct winsize)
+ #define TIOCGWINSZ	_IOR('t', 104, struct winsize)
+ #define	TIOCSTART	_IO('t', 110)		/* start output, like ^Q */
+diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h
+index 879dd3589921..483c7ec2a879 100644
+--- a/arch/alpha/include/uapi/asm/termbits.h
++++ b/arch/alpha/include/uapi/asm/termbits.h
+@@ -25,6 +25,19 @@ struct termios {
+ 	speed_t c_ospeed;		/* output speed */
+ };
+ 
++/* Alpha has identical termios and termios2 */
++
++struct termios2 {
++	tcflag_t c_iflag;		/* input mode flags */
++	tcflag_t c_oflag;		/* output mode flags */
++	tcflag_t c_cflag;		/* control mode flags */
++	tcflag_t c_lflag;		/* local mode flags */
++	cc_t c_cc[NCCS];		/* control characters */
++	cc_t c_line;			/* line discipline (== c_cc[19]) */
++	speed_t c_ispeed;		/* input speed */
++	speed_t c_ospeed;		/* output speed */
++};
++
+ /* Alpha has matching termios and ktermios */
+ 
+ struct ktermios {
+@@ -147,6 +160,7 @@ struct ktermios {
+ #define B3000000  00034
+ #define B3500000  00035
+ #define B4000000  00036
++#define BOTHER    00037
+ 
+ #define CSIZE	00001400
+ #define   CS5	00000000
+@@ -164,6 +178,9 @@ struct ktermios {
+ #define CMSPAR	  010000000000		/* mark or space (stick) parity */
+ #define CRTSCTS	  020000000000		/* flow control */
+ 
++#define CIBAUD	07600000
++#define IBSHIFT	16
++
+ /* c_lflag bits */
+ #define ISIG	0x00000080
+ #define ICANON	0x00000100
+diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+index 445aa678f914..6a37101344aa 100644
+--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+@@ -249,7 +249,7 @@
+ 
+ 		sysmgr: sysmgr@ffd12000 {
+ 			compatible = "altr,sys-mgr", "syscon";
+-			reg = <0xffd12000 0x1000>;
++			reg = <0xffd12000 0x228>;
+ 		};
+ 
+ 		/* Local timer */
+diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+index 376701f41cc2..692bbc1c5b79 100644
+--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
++++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
+@@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port,
+ void (*cvmx_override_ipd_port_setup) (int ipd_port);
+ 
+ /* Port count per interface */
+-static int interface_port_count[5];
++static int interface_port_count[9];
+ 
+ /* Port last configured link info index by IPD/PKO port */
+ static cvmx_helper_link_info_t
+diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
+index d18c45c7c394..19ff9ce46c02 100644
+--- a/arch/mips/include/asm/mach-loongson64/irq.h
++++ b/arch/mips/include/asm/mach-loongson64/irq.h
+@@ -9,7 +9,7 @@
+ #define MIPS_CPU_IRQ_BASE 56
+ 
+ #define LOONGSON_UART_IRQ   (MIPS_CPU_IRQ_BASE + 2) /* UART */
+-#define LOONGSON_HT1_IRQ    (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
++#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */
+ #define LOONGSON_TIMER_IRQ  (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
+ 
+ #define LOONGSON_HT1_CFG_BASE		loongson_sysconf.ht_control_base
+diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
+index 610f0f3bdb34..93c46c9cebb7 100644
+--- a/arch/mips/kernel/crash.c
++++ b/arch/mips/kernel/crash.c
+@@ -34,6 +34,9 @@ static void crash_shutdown_secondary(void *passed_regs)
+ 	if (!cpu_online(cpu))
+ 		return;
+ 
++	/* We won't be sent IPIs any more. */
++	set_cpu_online(cpu, false);
++
+ 	local_irq_disable();
+ 	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
+ 		crash_save_cpu(regs, cpu);
+diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
+index 50980bf3983e..92bc066e47a3 100644
+--- a/arch/mips/kernel/machine_kexec.c
++++ b/arch/mips/kernel/machine_kexec.c
+@@ -95,6 +95,9 @@ machine_kexec(struct kimage *image)
+ 			*ptr = (unsigned long) phys_to_virt(*ptr);
+ 	}
+ 
++	/* Mark offline BEFORE disabling local irq. */
++	set_cpu_online(smp_processor_id(), false);
++
+ 	/*
+ 	 * we do not want to be bothered.
+ 	 */
+diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c
+index 0f75b6b3d218..241cb88f9c03 100644
+--- a/arch/mips/loongson64/loongson-3/irq.c
++++ b/arch/mips/loongson64/loongson-3/irq.c
+@@ -42,51 +42,8 @@ void mach_irq_dispatch(unsigned int pending)
+ 	}
+ }
+ 
+-static struct irqaction cascade_irqaction = {
+-	.handler = no_action,
+-	.flags = IRQF_NO_SUSPEND,
+-	.name = "cascade",
+-};
+-
+-static inline void mask_loongson_irq(struct irq_data *d)
+-{
+-	clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
+-	irq_disable_hazard();
+-
+-	/* Workaround: UART IRQ may deliver to any core */
+-	if (d->irq == LOONGSON_UART_IRQ) {
+-		int cpu = smp_processor_id();
+-		int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
+-		int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
+-		u64 intenclr_addr = smp_group[node_id] |
+-			(u64)(&LOONGSON_INT_ROUTER_INTENCLR);
+-		u64 introuter_lpc_addr = smp_group[node_id] |
+-			(u64)(&LOONGSON_INT_ROUTER_LPC);
+-
+-		*(volatile u32 *)intenclr_addr = 1 << 10;
+-		*(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
+-	}
+-}
+-
+-static inline void unmask_loongson_irq(struct irq_data *d)
+-{
+-	/* Workaround: UART IRQ may deliver to any core */
+-	if (d->irq == LOONGSON_UART_IRQ) {
+-		int cpu = smp_processor_id();
+-		int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
+-		int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
+-		u64 intenset_addr = smp_group[node_id] |
+-			(u64)(&LOONGSON_INT_ROUTER_INTENSET);
+-		u64 introuter_lpc_addr = smp_group[node_id] |
+-			(u64)(&LOONGSON_INT_ROUTER_LPC);
+-
+-		*(volatile u32 *)intenset_addr = 1 << 10;
+-		*(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
+-	}
+-
+-	set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
+-	irq_enable_hazard();
+-}
++static inline void mask_loongson_irq(struct irq_data *d) { }
++static inline void unmask_loongson_irq(struct irq_data *d) { }
+ 
+  /* For MIPS IRQs which shared by all cores */
+ static struct irq_chip loongson_irq_chip = {
+@@ -124,12 +81,11 @@ void __init mach_init_irq(void)
+ 	mips_cpu_irq_init();
+ 	init_i8259_irqs();
+ 	irq_set_chip_and_handler(LOONGSON_UART_IRQ,
+-			&loongson_irq_chip, handle_level_irq);
+-
+-	/* setup HT1 irq */
+-	setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction);
++			&loongson_irq_chip, handle_percpu_irq);
++	irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ,
++			&loongson_irq_chip, handle_percpu_irq);
+ 
+-	set_c0_status(STATUSF_IP2 | STATUSF_IP6);
++	set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6);
+ }
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index 13cb2461fef5..3b7b022384a0 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -185,7 +185,7 @@
+ 	bv,n	0(%r3)
+ 	nop
+ 	.word	0		/* checksum (will be patched) */
+-	.word	PA(os_hpmc)	/* address of handler */
++	.word	0		/* address of handler */
+ 	.word	0		/* length of handler */
+ 	.endm
+ 
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 77e2262c97f6..6f61a17e2485 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -829,7 +829,8 @@ void __init initialize_ivt(const void *iva)
+ 	for (i = 0; i < 8; i++)
+ 	    *ivap++ = 0;
+ 
+-	/* Compute Checksum for HPMC handler */
++	/* Setup IVA and compute checksum for HPMC handler */
++	ivap[6] = (u32)__pa(os_hpmc);
+ 	length = os_hpmc_size;
+ 	ivap[7] = length;
+ 
+diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
+index 1b366c477687..63741f2e8d01 100644
+--- a/arch/parisc/mm/init.c
++++ b/arch/parisc/mm/init.c
+@@ -491,12 +491,8 @@ static void __init map_pages(unsigned long start_vaddr,
+ 						pte = pte_mkhuge(pte);
+ 				}
+ 
+-				if (address >= end_paddr) {
+-					if (force)
+-						break;
+-					else
+-						pte_val(pte) = 0;
+-				}
++				if (address >= end_paddr)
++					break;
+ 
+ 				set_pte(pg_table, pte);
+ 
+diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
+index 12866ccb5694..5c2199857aa8 100644
+--- a/arch/powerpc/boot/crt0.S
++++ b/arch/powerpc/boot/crt0.S
+@@ -47,8 +47,10 @@ p_end:		.long	_end
+ p_pstack:	.long	_platform_stack_top
+ #endif
+ 
+-	.weak	_zimage_start
+ 	.globl	_zimage_start
++	/* Clang appears to require the .weak directive to be after the symbol
++	 * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921  */
++	.weak	_zimage_start
+ _zimage_start:
+ 	.globl	_zimage_start_lib
+ _zimage_start_lib:
+diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
+index 98697611e7b3..705f4dc5073b 100644
+--- a/arch/powerpc/include/asm/mpic.h
++++ b/arch/powerpc/include/asm/mpic.h
+@@ -392,7 +392,14 @@ extern struct bus_type mpic_subsys;
+ #define	MPIC_REGSET_TSI108		MPIC_REGSET(1)	/* Tsi108/109 PIC */
+ 
+ /* Get the version of primary MPIC */
++#ifdef CONFIG_MPIC
+ extern u32 fsl_mpic_primary_get_version(void);
++#else
++static inline u32 fsl_mpic_primary_get_version(void)
++{
++	return 0;
++}
++#endif
+ 
+ /* Allocate the controller structure and setup the linux irq descs
+  * for the range if interrupts passed in. No HW initialization is
+diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
+index bb04e4df3100..1b784b8fd8b4 100644
+--- a/arch/powerpc/mm/tlb_nohash.c
++++ b/arch/powerpc/mm/tlb_nohash.c
+@@ -487,6 +487,9 @@ static void setup_page_sizes(void)
+ 		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+ 			struct mmu_psize_def *def = &mmu_psize_defs[psize];
+ 
++			if (!def->shift)
++				continue;
++
+ 			if (tlb1ps & (1U << (def->shift - 10))) {
+ 				def->flags |= MMU_PAGE_SIZE_DIRECT;
+ 
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index 6596f66ce112..a5d0c2f08110 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -926,6 +926,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
+ 			sparc_perf_event_update(cp, &cp->hw,
+ 						cpuc->current_idx[i]);
+ 			cpuc->current_idx[i] = PIC_NO_INDEX;
++			if (cp->hw.state & PERF_HES_STOPPED)
++				cp->hw.state |= PERF_HES_ARCH;
+ 		}
+ 	}
+ }
+@@ -958,10 +960,12 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
+ 
+ 		enc = perf_event_get_enc(cpuc->events[i]);
+ 		cpuc->pcr[0] &= ~mask_for_index(idx);
+-		if (hwc->state & PERF_HES_STOPPED)
++		if (hwc->state & PERF_HES_ARCH) {
+ 			cpuc->pcr[0] |= nop_for_index(idx);
+-		else
++		} else {
+ 			cpuc->pcr[0] |= event_encoding(enc, idx);
++			hwc->state = 0;
++		}
+ 	}
+ out:
+ 	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
+@@ -987,6 +991,9 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+ 
+ 		cpuc->current_idx[i] = idx;
+ 
++		if (cp->hw.state & PERF_HES_ARCH)
++			continue;
++
+ 		sparc_pmu_start(cp, PERF_EF_RELOAD);
+ 	}
+ out:
+@@ -1078,6 +1085,8 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
+ 	event->hw.state = 0;
+ 
+ 	sparc_pmu_enable_event(cpuc, &event->hw, idx);
++
++	perf_event_update_userpage(event);
+ }
+ 
+ static void sparc_pmu_stop(struct perf_event *event, int flags)
+@@ -1370,9 +1379,9 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
+ 	cpuc->events[n0] = event->hw.event_base;
+ 	cpuc->current_idx[n0] = PIC_NO_INDEX;
+ 
+-	event->hw.state = PERF_HES_UPTODATE;
++	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ 	if (!(ef_flags & PERF_EF_START))
+-		event->hw.state |= PERF_HES_STOPPED;
++		event->hw.state |= PERF_HES_ARCH;
+ 
+ 	/*
+ 	 * If group events scheduling transaction was started,
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index d9afe6d40550..9beee7f364ad 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -41,7 +41,6 @@ config X86
+ 	select ARCH_USE_BUILTIN_BSWAP
+ 	select ARCH_USE_CMPXCHG_LOCKREF		if X86_64
+ 	select ARCH_USE_QUEUED_RWLOCKS
+-	select ARCH_USE_QUEUED_SPINLOCKS
+ 	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ 	select ARCH_WANT_FRAME_POINTERS
+diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
+index a7661c430cd9..523db6ce88dd 100644
+--- a/arch/x86/boot/tools/build.c
++++ b/arch/x86/boot/tools/build.c
+@@ -391,6 +391,13 @@ int main(int argc, char ** argv)
+ 		die("Unable to mmap '%s': %m", argv[2]);
+ 	/* Number of 16-byte paragraphs, including space for a 4-byte CRC */
+ 	sys_size = (sz + 15 + 4) / 16;
++#ifdef CONFIG_EFI_STUB
++	/*
++	 * COFF requires minimum 32-byte alignment of sections, and
++	 * adding a signature is problematic without that alignment.
++	 */
++	sys_size = (sys_size + 1) & ~1;
++#endif
+ 
+ 	/* Patch the setup code with the appropriate size parameters */
+ 	buf[0x1f1] = setup_sectors-1;
+diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
+index 145863d4d343..a8b215865636 100644
+--- a/arch/x86/kernel/check.c
++++ b/arch/x86/kernel/check.c
+@@ -30,6 +30,11 @@ static __init int set_corruption_check(char *arg)
+ 	ssize_t ret;
+ 	unsigned long val;
+ 
++	if (!arg) {
++		pr_err("memory_corruption_check config string not provided\n");
++		return -EINVAL;
++	}
++
+ 	ret = kstrtoul(arg, 10, &val);
+ 	if (ret)
+ 		return ret;
+@@ -44,6 +49,11 @@ static __init int set_corruption_check_period(char *arg)
+ 	ssize_t ret;
+ 	unsigned long val;
+ 
++	if (!arg) {
++		pr_err("memory_corruption_check_period config string not provided\n");
++		return -EINVAL;
++	}
++
+ 	ret = kstrtoul(arg, 10, &val);
+ 	if (ret)
+ 		return ret;
+@@ -58,6 +68,11 @@ static __init int set_corruption_check_size(char *arg)
+ 	char *end;
+ 	unsigned size;
+ 
++	if (!arg) {
++		pr_err("memory_corruption_check_size config string not provided\n");
++		return -EINVAL;
++	}
++
+ 	size = memparse(arg, &end);
+ 
+ 	if (*end == '\0')
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 9be3e79eb629..31fad2cbd734 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -294,7 +294,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ 		 * thread's fpu state, reconstruct fxstate from the fsave
+ 		 * header. Sanitize the copied state etc.
+ 		 */
+-		struct fpu *fpu = &tsk->thread.fpu;
+ 		struct user_i387_ia32_struct env;
+ 		int err = 0;
+ 
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index f42e78de1e10..85872a08994a 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -8,6 +8,7 @@
+ #include <linux/log2.h>
+ #include <linux/gfp.h>
+ #include <linux/slab.h>
++#include <linux/atomic.h>
+ 
+ #include <asm/paravirt.h>
+ 
+@@ -19,6 +20,7 @@
+ 
+ static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+ static DEFINE_PER_CPU(char *, irq_name);
++static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
+ static bool xen_pvspin = true;
+ 
+ #ifdef CONFIG_QUEUED_SPINLOCKS
+@@ -42,33 +44,24 @@ static void xen_qlock_kick(int cpu)
+ static void xen_qlock_wait(u8 *byte, u8 val)
+ {
+ 	int irq = __this_cpu_read(lock_kicker_irq);
++	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
+ 
+ 	/* If kicker interrupts not initialized yet, just spin */
+-	if (irq == -1)
++	if (irq == -1 || in_nmi())
+ 		return;
+ 
+-	/* clear pending */
+-	xen_clear_irq_pending(irq);
+-	barrier();
+-
+-	/*
+-	 * We check the byte value after clearing pending IRQ to make sure
+-	 * that we won't miss a wakeup event because of the clearing.
+-	 *
+-	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
+-	 * So it is effectively a memory barrier for x86.
+-	 */
+-	if (READ_ONCE(*byte) != val)
+-		return;
++	/* Detect reentry. */
++	atomic_inc(nest_cnt);
+ 
+-	/*
+-	 * If an interrupt happens here, it will leave the wakeup irq
+-	 * pending, which will cause xen_poll_irq() to return
+-	 * immediately.
+-	 */
++	/* If irq pending already and no nested call clear it. */
++	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
++		xen_clear_irq_pending(irq);
++	} else if (READ_ONCE(*byte) == val) {
++		/* Block until irq becomes pending (or a spurious wakeup) */
++		xen_poll_irq(irq);
++	}
+ 
+-	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
+-	xen_poll_irq(irq);
++	atomic_dec(nest_cnt);
+ }
+ 
+ #else /* CONFIG_QUEUED_SPINLOCKS */
+diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
+index ca20a892021b..6c6877d628ef 100644
+--- a/arch/xtensa/boot/Makefile
++++ b/arch/xtensa/boot/Makefile
+@@ -31,7 +31,7 @@ $(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
+ 	      $(addprefix $(obj)/,$(host-progs))
+ 	$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
+ 
+-OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
++OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary
+ 
+ vmlinux.bin: vmlinux FORCE
+ 	$(call if_changed,objcopy)
+diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
+index 83e2e4bc01ba..d3ac00fcb15c 100644
+--- a/arch/xtensa/include/asm/processor.h
++++ b/arch/xtensa/include/asm/processor.h
+@@ -24,7 +24,11 @@
+ # error Linux requires the Xtensa Windowed Registers Option.
+ #endif
+ 
+-#define ARCH_SLAB_MINALIGN	XCHAL_DATA_WIDTH
++/* Xtensa ABI requires stack alignment to be at least 16 */
++
++#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
++
++#define ARCH_SLAB_MINALIGN STACK_ALIGN
+ 
+ /*
+  * User space process size: 1 GB.
+diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
+index 05e1df943856..c7b3bedbfffe 100644
+--- a/arch/xtensa/kernel/head.S
++++ b/arch/xtensa/kernel/head.S
+@@ -88,9 +88,12 @@ _SetupMMU:
+ 	initialize_mmu
+ #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ 	rsr	a2, excsave1
+-	movi	a3, 0x08000000
++	movi	a3, XCHAL_KSEG_PADDR
++	bltu	a2, a3, 1f
++	sub	a2, a2, a3
++	movi	a3, XCHAL_KSEG_SIZE
+ 	bgeu	a2, a3, 1f
+-	movi	a3, 0xd0000000
++	movi	a3, XCHAL_KSEG_CACHED_VADDR
+ 	add	a2, a2, a3
+ 	wsr	a2, excsave1
+ 1:
+diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
+index c417cbe4ec87..bdfeda5a913c 100644
+--- a/arch/xtensa/kernel/vmlinux.lds.S
++++ b/arch/xtensa/kernel/vmlinux.lds.S
+@@ -110,6 +110,7 @@ SECTIONS
+   .fixup   : { *(.fixup) }
+ 
+   EXCEPTION_TABLE(16)
++  NOTES
+   /* Data section */
+ 
+   _sdata = .;
+diff --git a/crypto/lrw.c b/crypto/lrw.c
+index 6f9908a7ebcb..d38a382b09eb 100644
+--- a/crypto/lrw.c
++++ b/crypto/lrw.c
+@@ -132,7 +132,12 @@ static inline int get_index128(be128 *block)
+ 		return x + ffz(val);
+ 	}
+ 
+-	return x;
++	/*
++	 * If we get here, then x == 128 and we are incrementing the counter
++	 * from all ones to all zeros. This means we must return index 127, i.e.
++	 * the one corresponding to key2*{ 1,...,1 }.
++	 */
++	return 127;
+ }
+ 
+ static int crypt(struct blkcipher_desc *d,
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 8a0f77fb5181..572755e557d6 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -235,9 +235,11 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
+ 	{ "INT33FC", },
+ 
+ 	/* Braswell LPSS devices */
++	{ "80862286", LPSS_ADDR(lpss_dma_desc) },
+ 	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
+ 	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
+ 	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
++	{ "808622C0", LPSS_ADDR(lpss_dma_desc) },
+ 	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
+ 
+ 	/* Broadwell LPSS devices */
+diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
+index 2104b1b4ccda..9ab759bcebd5 100644
+--- a/drivers/block/ataflop.c
++++ b/drivers/block/ataflop.c
+@@ -1933,6 +1933,11 @@ static int __init atari_floppy_init (void)
+ 		unit[i].disk = alloc_disk(1);
+ 		if (!unit[i].disk)
+ 			goto Enomem;
++
++		unit[i].disk->queue = blk_init_queue(do_fd_request,
++						     &ataflop_lock);
++		if (!unit[i].disk->queue)
++			goto Enomem;
+ 	}
+ 
+ 	if (UseTrackbuffer < 0)
+@@ -1964,10 +1969,6 @@ static int __init atari_floppy_init (void)
+ 		sprintf(unit[i].disk->disk_name, "fd%d", i);
+ 		unit[i].disk->fops = &floppy_fops;
+ 		unit[i].disk->private_data = &unit[i];
+-		unit[i].disk->queue = blk_init_queue(do_fd_request,
+-					&ataflop_lock);
+-		if (!unit[i].disk->queue)
+-			goto Enomem;
+ 		set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
+ 		add_disk(unit[i].disk);
+ 	}
+@@ -1982,13 +1983,17 @@ static int __init atari_floppy_init (void)
+ 
+ 	return 0;
+ Enomem:
+-	while (i--) {
+-		struct request_queue *q = unit[i].disk->queue;
++	do {
++		struct gendisk *disk = unit[i].disk;
+ 
+-		put_disk(unit[i].disk);
+-		if (q)
+-			blk_cleanup_queue(q);
+-	}
++		if (disk) {
++			if (disk->queue) {
++				blk_cleanup_queue(disk->queue);
++				disk->queue = NULL;
++			}
++			put_disk(unit[i].disk);
++		}
++	} while (i--);
+ 
+ 	unregister_blkdev(FLOPPY_MAJOR, "fd");
+ 	return -ENOMEM;
+diff --git a/drivers/block/swim.c b/drivers/block/swim.c
+index b5afd495d482..eec6e393c124 100644
+--- a/drivers/block/swim.c
++++ b/drivers/block/swim.c
+@@ -868,8 +868,17 @@ static int swim_floppy_init(struct swim_priv *swd)
+ 
+ exit_put_disks:
+ 	unregister_blkdev(FLOPPY_MAJOR, "fd");
+-	while (drive--)
+-		put_disk(swd->unit[drive].disk);
++	do {
++		struct gendisk *disk = swd->unit[drive].disk;
++
++		if (disk) {
++			if (disk->queue) {
++				blk_cleanup_queue(disk->queue);
++				disk->queue = NULL;
++			}
++			put_disk(disk);
++		}
++	} while (drive--);
+ 	return err;
+ }
+ 
+diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
+index 0b697946e9bc..a08c6529271e 100644
+--- a/drivers/bluetooth/btbcm.c
++++ b/drivers/bluetooth/btbcm.c
+@@ -270,6 +270,7 @@ static const struct {
+ 	{ 0x4103, "BCM4330B1"	},	/* 002.001.003 */
+ 	{ 0x410e, "BCM43341B0"	},	/* 002.001.014 */
+ 	{ 0x4406, "BCM4324B3"	},	/* 002.004.006 */
++	{ 0x6109, "BCM4335C0"	},	/* 003.001.009 */
+ 	{ 0x610c, "BCM4354"	},	/* 003.001.012 */
+ 	{ }
+ };
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 1012b2cb6a16..d203940203b6 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2425,7 +2425,7 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
+ 		return -ENOSYS;
+ 
+ 	if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
+-		if ((int)arg >= cdi->capacity)
++		if (arg >= cdi->capacity)
+ 			return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index d6d166fe49a3..7a2e23d6bfdd 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -613,8 +613,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 			flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ 			ssif_info->waiting_alert = true;
+ 			ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+-			mod_timer(&ssif_info->retry_timer,
+-				  jiffies + SSIF_MSG_JIFFIES);
++			if (!ssif_info->stopping)
++				mod_timer(&ssif_info->retry_timer,
++					  jiffies + SSIF_MSG_JIFFIES);
+ 			ipmi_ssif_unlock_cond(ssif_info, flags);
+ 			return;
+ 		}
+@@ -951,8 +952,9 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ 			ssif_info->waiting_alert = true;
+ 			ssif_info->retries_left = SSIF_RECV_RETRIES;
+ 			ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+-			mod_timer(&ssif_info->retry_timer,
+-				  jiffies + SSIF_MSG_PART_JIFFIES);
++			if (!ssif_info->stopping)
++				mod_timer(&ssif_info->retry_timer,
++					  jiffies + SSIF_MSG_PART_JIFFIES);
+ 			ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		}
+ 	}
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 95a40ec854ad..e3f2915ca4be 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -415,7 +415,8 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd,
+ 	header = cmd;
+ 
+ 	err = be32_to_cpu(header->return_code);
+-	if (err != 0 && desc)
++	if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
++	    && desc)
+ 		dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
+ 			desc);
+ 
+diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
+index 849f2e29c243..0fb18765f982 100644
+--- a/drivers/char/tpm/xen-tpmfront.c
++++ b/drivers/char/tpm/xen-tpmfront.c
+@@ -201,7 +201,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
+ 		return -ENOMEM;
+ 	}
+ 
+-	rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref);
++	rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
+ 	if (rv < 0)
+ 		return rv;
+ 
+diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
+index d266299dfdb1..785864893f9a 100644
+--- a/drivers/clk/clk-s2mps11.c
++++ b/drivers/clk/clk-s2mps11.c
+@@ -297,6 +297,36 @@ static const struct platform_device_id s2mps11_clk_id[] = {
+ };
+ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
+ 
++#ifdef CONFIG_OF
++/*
++ * Device is instantiated through parent MFD device and device matching is done
++ * through platform_device_id.
++ *
++ * However if device's DT node contains proper clock compatible and driver is
++ * built as a module, then the *module* matching will be done trough DT aliases.
++ * This requires of_device_id table.  In the same time this will not change the
++ * actual *device* matching so do not add .of_match_table.
++ */
++static const struct of_device_id s2mps11_dt_match[] = {
++	{
++		.compatible = "samsung,s2mps11-clk",
++		.data = (void *)S2MPS11X,
++	}, {
++		.compatible = "samsung,s2mps13-clk",
++		.data = (void *)S2MPS13X,
++	}, {
++		.compatible = "samsung,s2mps14-clk",
++		.data = (void *)S2MPS14X,
++	}, {
++		.compatible = "samsung,s5m8767-clk",
++		.data = (void *)S5M8767X,
++	}, {
++		/* Sentinel */
++	},
++};
++MODULE_DEVICE_TABLE(of, s2mps11_dt_match);
++#endif
++
+ static struct platform_driver s2mps11_clk_driver = {
+ 	.driver = {
+ 		.name  = "s2mps11-clk",
+diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
+index 0efd36e483ab..60c8a9bd562d 100644
+--- a/drivers/clocksource/i8253.c
++++ b/drivers/clocksource/i8253.c
+@@ -19,6 +19,13 @@
+ DEFINE_RAW_SPINLOCK(i8253_lock);
+ EXPORT_SYMBOL(i8253_lock);
+ 
++/*
++ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
++ * restarts the PIT, negating the shutdown. On platforms with the quirk,
++ * platform specific code can set this to false.
++ */
++bool i8253_clear_counter_on_shutdown = true;
++
+ #ifdef CONFIG_CLKSRC_I8253
+ /*
+  * Since the PIT overflows every tick, its not very useful
+@@ -108,8 +115,11 @@ static int pit_shutdown(struct clock_event_device *evt)
+ 	raw_spin_lock(&i8253_lock);
+ 
+ 	outb_p(0x30, PIT_MODE);
+-	outb_p(0, PIT_CH0);
+-	outb_p(0, PIT_CH0);
++
++	if (i8253_clear_counter_on_shutdown) {
++		outb_p(0, PIT_CH0);
++		outb_p(0, PIT_CH0);
++	}
+ 
+ 	raw_spin_unlock(&i8253_lock);
+ 	return 0;
+diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
+index dade7c47ff18..8344b7c91fe3 100644
+--- a/drivers/dma/dma-jz4780.c
++++ b/drivers/dma/dma-jz4780.c
+@@ -750,6 +750,11 @@ static int jz4780_dma_probe(struct platform_device *pdev)
+ 	struct resource *res;
+ 	int i, ret;
+ 
++	if (!dev->of_node) {
++		dev_err(dev, "This driver must be probed from devicetree\n");
++		return -EINVAL;
++	}
++
+ 	jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
+ 	if (!jzdma)
+ 		return -ENOMEM;
+diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
+index ac8c28968422..106fa9b327d9 100644
+--- a/drivers/dma/ioat/init.c
++++ b/drivers/dma/ioat/init.c
+@@ -1210,8 +1210,15 @@ static void ioat_shutdown(struct pci_dev *pdev)
+ 
+ 		spin_lock_bh(&ioat_chan->prep_lock);
+ 		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+-		del_timer_sync(&ioat_chan->timer);
+ 		spin_unlock_bh(&ioat_chan->prep_lock);
++		/*
++		 * Synchronization rule for del_timer_sync():
++		 *  - The caller must not hold locks which would prevent
++		 *    completion of the timer's handler.
++		 * So prep_lock cannot be held before calling it.
++		 */
++		del_timer_sync(&ioat_chan->timer);
++
+ 		/* this should quiesce then reset */
+ 		ioat_reset_hw(ioat_chan);
+ 	}
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 273e05a3c933..5a1bafb5ecbb 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1225,6 +1225,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
+ 	mutex_lock(&mgr->lock);
+ 	mstb = mgr->mst_primary;
+ 
++	if (!mstb)
++		goto out;
++
+ 	for (i = 0; i < lct - 1; i++) {
+ 		int shift = (i % 2) ? 0 : 4;
+ 		int port_num = (rad[i / 2] >> shift) & 0xf;
+diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
+index 4dccd9b003a1..0d738d7870fd 100644
+--- a/drivers/gpu/drm/i915/intel_audio.c
++++ b/drivers/gpu/drm/i915/intel_audio.c
+@@ -76,6 +76,9 @@ static const struct {
+ /* HDMI N/CTS table */
+ #define TMDS_297M 297000
+ #define TMDS_296M 296703
++#define TMDS_594M 594000
++#define TMDS_593M 593407
++
+ static const struct {
+ 	int sample_rate;
+ 	int clock;
+@@ -96,6 +99,20 @@ static const struct {
+ 	{ 176400, TMDS_297M, 18816, 247500 },
+ 	{ 192000, TMDS_296M, 23296, 281250 },
+ 	{ 192000, TMDS_297M, 20480, 247500 },
++	{ 44100, TMDS_593M, 8918, 937500 },
++	{ 44100, TMDS_594M, 9408, 990000 },
++	{ 48000, TMDS_593M, 5824, 562500 },
++	{ 48000, TMDS_594M, 6144, 594000 },
++	{ 32000, TMDS_593M, 5824, 843750 },
++	{ 32000, TMDS_594M, 3072, 445500 },
++	{ 88200, TMDS_593M, 17836, 937500 },
++	{ 88200, TMDS_594M, 18816, 990000 },
++	{ 96000, TMDS_593M, 11648, 562500 },
++	{ 96000, TMDS_594M, 12288, 594000 },
++	{ 176400, TMDS_593M, 35672, 937500 },
++	{ 176400, TMDS_594M, 37632, 990000 },
++	{ 192000, TMDS_593M, 23296, 562500 },
++	{ 192000, TMDS_594M, 24576, 594000 },
+ };
+ 
+ /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+index 083db3f5181f..8282ae0c4fc3 100644
+--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+@@ -262,6 +262,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
+ 	}
+ 
+ 	txn->last_pat->next_pa = 0;
++	/* ensure that the written descriptors are visible to DMM */
++	wmb();
++
++	/*
++	 * NOTE: the wmb() above should be enough, but there seems to be a bug
++	 * in OMAP's memory barrier implementation, which in some rare cases may
++	 * cause the writes not to be observable after wmb().
++	 */
++
++	/* read back to ensure the data is in RAM */
++	readl(&txn->last_pat->next_pa);
+ 
+ 	/* write to PAT_DESCR to clear out any pending transaction */
+ 	writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+index f22e1e1ee64a..d1f3be78c649 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+@@ -547,6 +547,11 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
++{
++	rockchip_drm_platform_remove(pdev);
++}
++
+ static const struct of_device_id rockchip_drm_dt_ids[] = {
+ 	{ .compatible = "rockchip,display-subsystem", },
+ 	{ /* sentinel */ },
+@@ -556,6 +561,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
+ static struct platform_driver rockchip_drm_platform_driver = {
+ 	.probe = rockchip_drm_platform_probe,
+ 	.remove = rockchip_drm_platform_remove,
++	.shutdown = rockchip_drm_platform_shutdown,
+ 	.driver = {
+ 		.name = "rockchip-drm",
+ 		.of_match_table = rockchip_drm_dt_ids,
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index b59b15d4caa9..308d8432fea3 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -521,14 +521,24 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
+ 			if (cmd == HIDIOCGCOLLECTIONINDEX) {
+ 				if (uref->usage_index >= field->maxusage)
+ 					goto inval;
++				uref->usage_index =
++					array_index_nospec(uref->usage_index,
++							   field->maxusage);
+ 			} else if (uref->usage_index >= field->report_count)
+ 				goto inval;
+ 		}
+ 
+-		if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+-		    (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+-		     uref->usage_index + uref_multi->num_values > field->report_count))
+-			goto inval;
++		if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
++			if (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
++			    uref->usage_index + uref_multi->num_values >
++			    field->report_count)
++				goto inval;
++
++			uref->usage_index =
++				array_index_nospec(uref->usage_index,
++						   field->report_count -
++						   uref_multi->num_values);
++		}
+ 
+ 		switch (cmd) {
+ 		case HIDIOCGUSAGE:
+diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
+index 0a74991a60f0..1b2b79f6ea3a 100644
+--- a/drivers/hwmon/pmbus/pmbus.c
++++ b/drivers/hwmon/pmbus/pmbus.c
+@@ -117,6 +117,8 @@ static int pmbus_identify(struct i2c_client *client,
+ 		} else {
+ 			info->pages = 1;
+ 		}
++
++		pmbus_clear_faults(client);
+ 	}
+ 
+ 	if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index d013acf3f83a..c00bad02761a 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -1759,7 +1759,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
+ 	if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
+ 		client->flags |= I2C_CLIENT_PEC;
+ 
+-	pmbus_clear_faults(client);
++	if (data->info->pages)
++		pmbus_clear_faults(client);
++	else
++		pmbus_clear_fault_page(client, -1);
+ 
+ 	if (info->identify) {
+ 		ret = (*info->identify)(client, info);
+diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
+index 93986f0590ef..d83e5b75a37b 100644
+--- a/drivers/iio/adc/at91_adc.c
++++ b/drivers/iio/adc/at91_adc.c
+@@ -245,12 +245,14 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
+ 	struct iio_poll_func *pf = p;
+ 	struct iio_dev *idev = pf->indio_dev;
+ 	struct at91_adc_state *st = iio_priv(idev);
++	struct iio_chan_spec const *chan;
+ 	int i, j = 0;
+ 
+ 	for (i = 0; i < idev->masklength; i++) {
+ 		if (!test_bit(i, idev->active_scan_mask))
+ 			continue;
+-		st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, i));
++		chan = idev->channels + i;
++		st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, chan->channel));
+ 		j++;
+ 	}
+ 
+@@ -276,6 +278,8 @@ static void handle_adc_eoc_trigger(int irq, struct iio_dev *idev)
+ 		iio_trigger_poll(idev->trig);
+ 	} else {
+ 		st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb));
++		/* Needed to ACK the DRDY interruption */
++		at91_adc_readl(st, AT91_ADC_LCDR);
+ 		st->done = true;
+ 		wake_up_interruptible(&st->wq_data_avail);
+ 	}
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 4ed621ad27e4..05aa3ac1381b 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -2372,7 +2372,7 @@ static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
+ 	struct keybuf *buf = refill->buf;
+ 	int ret = MAP_CONTINUE;
+ 
+-	if (bkey_cmp(k, refill->end) >= 0) {
++	if (bkey_cmp(k, refill->end) > 0) {
+ 		ret = MAP_DONE;
+ 		goto out;
+ 	}
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 6865b186f749..9371194677dc 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1685,8 +1685,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
+ }
+ 
+ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
+-		       int ioctl_flags,
+-		       struct dm_ioctl **param, int *param_flags)
++		       int ioctl_flags, struct dm_ioctl **param, int *param_flags)
+ {
+ 	struct dm_ioctl *dmi;
+ 	int secure_data;
+@@ -1734,18 +1733,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (copy_from_user(dmi, user, param_kernel->data_size))
+-		goto bad;
++	/* Copy from param_kernel (which was already copied from user) */
++	memcpy(dmi, param_kernel, minimum_data_size);
+ 
+-data_copied:
+-	/*
+-	 * Abort if something changed the ioctl data while it was being copied.
+-	 */
+-	if (dmi->data_size != param_kernel->data_size) {
+-		DMERR("rejecting ioctl: data size modified while processing parameters");
++	if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
++			   param_kernel->data_size - minimum_data_size))
+ 		goto bad;
+-	}
+-
++data_copied:
+ 	/* Wipe the user buffer so we do not return it to userspace */
+ 	if (secure_data && clear_user(user, param_kernel->data_size))
+ 		goto bad;
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 89dcbf2fa846..82e284d2b202 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1605,6 +1605,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
+ 	 */
+ 	if (rdev->saved_raid_disk >= 0 &&
+ 	    rdev->saved_raid_disk >= first &&
++	    rdev->saved_raid_disk < conf->raid_disks &&
+ 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
+ 		first = last = rdev->saved_raid_disk;
+ 
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 89111d455b71..8d613652d0e2 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1737,6 +1737,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
+ 		first = last = rdev->raid_disk;
+ 
+ 	if (rdev->saved_raid_disk >= first &&
++	    rdev->saved_raid_disk < conf->geo.raid_disks &&
+ 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
+ 		mirror = rdev->saved_raid_disk;
+ 	else
+diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
+index 3c5fb2509c47..118277d57c30 100644
+--- a/drivers/media/i2c/tvp5150.c
++++ b/drivers/media/i2c/tvp5150.c
+@@ -870,9 +870,6 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+ 
+ 	/* tvp5150 has some special limits */
+ 	rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
+-	rect.width = clamp_t(unsigned int, rect.width,
+-			     TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
+-			     TVP5150_H_MAX - rect.left);
+ 	rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP);
+ 
+ 	/* Calculate height based on current standard */
+@@ -886,9 +883,16 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
+ 	else
+ 		hmax = TVP5150_V_MAX_OTHERS;
+ 
+-	rect.height = clamp_t(unsigned int, rect.height,
++	/*
++	 * alignments:
++	 *  - width = 2 due to UYVY colorspace
++	 *  - height, image = no special alignment
++	 */
++	v4l_bound_align_image(&rect.width,
++			      TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
++			      TVP5150_H_MAX - rect.left, 1, &rect.height,
+ 			      hmax - TVP5150_MAX_CROP_TOP - rect.top,
+-			      hmax - rect.top);
++			      hmax - rect.top, 0, 0);
+ 
+ 	tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top);
+ 	tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP,
+diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
+index aaf4e46ff3e9..a0c1ff97f905 100644
+--- a/drivers/media/pci/cx23885/altera-ci.c
++++ b/drivers/media/pci/cx23885/altera-ci.c
+@@ -660,6 +660,10 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
+ 		}
+ 
+ 		temp_int = append_internal(inter);
++		if (!temp_int) {
++			ret = -ENOMEM;
++			goto err;
++		}
+ 		inter->filts_used = 1;
+ 		inter->dev = config->dev;
+ 		inter->fpga_rw = config->fpga_rw;
+@@ -694,6 +698,7 @@ err:
+ 		     __func__, ret);
+ 
+ 	kfree(pid_filt);
++	kfree(inter);
+ 
+ 	return ret;
+ }
+@@ -728,6 +733,10 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr)
+ 		}
+ 
+ 		temp_int = append_internal(inter);
++		if (!temp_int) {
++			ret = -ENOMEM;
++			goto err;
++		}
+ 		inter->cis_used = 1;
+ 		inter->dev = config->dev;
+ 		inter->fpga_rw = config->fpga_rw;
+@@ -796,6 +805,7 @@ err:
+ 	ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret);
+ 
+ 	kfree(state);
++	kfree(inter);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
+index 394004607059..7c7dfaed9d15 100644
+--- a/drivers/media/usb/em28xx/em28xx-cards.c
++++ b/drivers/media/usb/em28xx/em28xx-cards.c
+@@ -2021,13 +2021,13 @@ struct em28xx_board em28xx_boards[] = {
+ 		.input           = { {
+ 			.type     = EM28XX_VMUX_COMPOSITE1,
+ 			.vmux     = TVP5150_COMPOSITE1,
+-			.amux     = EM28XX_AUDIO_SRC_LINE,
++			.amux     = EM28XX_AMUX_LINE_IN,
+ 			.gpio     = terratec_av350_unmute_gpio,
+ 
+ 		}, {
+ 			.type     = EM28XX_VMUX_SVIDEO,
+ 			.vmux     = TVP5150_SVIDEO,
+-			.amux     = EM28XX_AUDIO_SRC_LINE,
++			.amux     = EM28XX_AMUX_LINE_IN,
+ 			.gpio     = terratec_av350_unmute_gpio,
+ 		} },
+ 	},
+diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
+index 6a3cf342e087..6cfcdcea27e0 100644
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -1149,6 +1149,8 @@ static void em28xx_ctrl_notify(struct v4l2_ctrl *ctrl, void *priv)
+ {
+ 	struct em28xx *dev = priv;
+ 
++	dev->v4l2->field_count = 0;
++
+ 	/*
+ 	 * In the case of non-AC97 volume controls, we still need
+ 	 * to do some setups at em28xx, in order to mute/unmute
+@@ -1288,9 +1290,9 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ 
+ 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ 	if (!fmt) {
+-		em28xx_videodbg("Fourcc format (%08x) invalid.\n",
+-				f->fmt.pix.pixelformat);
+-		return -EINVAL;
++		fmt = &format[0];
++		em28xx_videodbg("Fourcc format (%08x) invalid. Using default (%08x).\n",
++				f->fmt.pix.pixelformat, fmt->fourcc);
+ 	}
+ 
+ 	if (dev->board.is_em2800) {
+diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
+index cb851c14ca4b..159f35b2bd11 100644
+--- a/drivers/misc/genwqe/card_base.h
++++ b/drivers/misc/genwqe/card_base.h
+@@ -404,7 +404,7 @@ struct genwqe_file {
+ 	struct file *filp;
+ 
+ 	struct fasync_struct *async_queue;
+-	struct task_struct *owner;
++	struct pid *opener;
+ 	struct list_head list;		/* entry in list of open files */
+ 
+ 	spinlock_t map_lock;		/* lock for dma_mappings */
+diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
+index 7f1b282d7d96..c0012ca4229e 100644
+--- a/drivers/misc/genwqe/card_dev.c
++++ b/drivers/misc/genwqe/card_dev.c
+@@ -52,7 +52,7 @@ static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
+ {
+ 	unsigned long flags;
+ 
+-	cfile->owner = current;
++	cfile->opener = get_pid(task_tgid(current));
+ 	spin_lock_irqsave(&cd->file_lock, flags);
+ 	list_add(&cfile->list, &cd->file_list);
+ 	spin_unlock_irqrestore(&cd->file_lock, flags);
+@@ -65,6 +65,7 @@ static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
+ 	spin_lock_irqsave(&cd->file_lock, flags);
+ 	list_del(&cfile->list);
+ 	spin_unlock_irqrestore(&cd->file_lock, flags);
++	put_pid(cfile->opener);
+ 
+ 	return 0;
+ }
+@@ -275,7 +276,7 @@ static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
+ 	return files;
+ }
+ 
+-static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
++static int genwqe_terminate(struct genwqe_dev *cd)
+ {
+ 	unsigned int files = 0;
+ 	unsigned long flags;
+@@ -283,7 +284,7 @@ static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
+ 
+ 	spin_lock_irqsave(&cd->file_lock, flags);
+ 	list_for_each_entry(cfile, &cd->file_list, list) {
+-		force_sig(sig, cfile->owner);
++		kill_pid(cfile->opener, SIGKILL, 1);
+ 		files++;
+ 	}
+ 	spin_unlock_irqrestore(&cd->file_lock, flags);
+@@ -1356,7 +1357,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
+ 		dev_warn(&pci_dev->dev,
+ 			 "[%s] send SIGKILL and wait ...\n", __func__);
+ 
+-		rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
++		rc = genwqe_terminate(cd);
+ 		if (rc) {
+ 			/* Give kill_timout more seconds to end processes */
+ 			for (i = 0; (i < genwqe_kill_timeout) &&
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index d48f03104b5b..e417e4274d66 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -334,6 +334,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+ 		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ 		break;
+ 	case PCI_DEVICE_ID_O2_SEABIRD0:
++		if (chip->pdev->revision == 0x01)
++			chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
++		/* fall through */
+ 	case PCI_DEVICE_ID_O2_SEABIRD1:
+ 		/* UnLock WP */
+ 		ret = pci_read_config_byte(chip->pdev,
+diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
+index f73c41697a00..5ab9a46daf06 100644
+--- a/drivers/mtd/devices/Kconfig
++++ b/drivers/mtd/devices/Kconfig
+@@ -208,7 +208,7 @@ comment "Disk-On-Chip Device Drivers"
+ config MTD_DOCG3
+ 	tristate "M-Systems Disk-On-Chip G3"
+ 	select BCH
+-	select BCH_CONST_PARAMS
++	select BCH_CONST_PARAMS if !MTD_NAND_BCH
+ 	select BITREVERSE
+ 	---help---
+ 	  This provides an MTD device driver for the M-Systems DiskOnChip
+diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+index 0e4fdc3dd729..18672ad773fb 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
++++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+@@ -556,8 +556,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
+ 		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
+ 			BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
+ 				   ETH_GSTRING_LEN));
+-			memcpy(string, bnad_net_stats_strings[i],
+-			       ETH_GSTRING_LEN);
++			strncpy(string, bnad_net_stats_strings[i],
++				ETH_GSTRING_LEN);
+ 			string += ETH_GSTRING_LEN;
+ 		}
+ 		bmap = bna_tx_rid_mask(&bnad->bna);
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+index 5ae8874bbf72..d70b2e5d5222 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+@@ -1826,11 +1826,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
+ {
+ 	struct e1000_adapter *adapter = netdev_priv(netdev);
+ 	int i;
+-	char *p = NULL;
+ 	const struct e1000_stats *stat = e1000_gstrings_stats;
+ 
+ 	e1000_update_stats(adapter);
+-	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
++	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
++		char *p;
++
+ 		switch (stat->type) {
+ 		case NETDEV_STATS:
+ 			p = (char *)netdev + stat->stat_offset;
+@@ -1841,15 +1842,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
+ 		default:
+ 			WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
+ 				  stat->type, i);
+-			break;
++			continue;
+ 		}
+ 
+ 		if (stat->sizeof_stat == sizeof(u64))
+ 			data[i] = *(u64 *)p;
+ 		else
+ 			data[i] = *(u32 *)p;
+-
+-		stat++;
+ 	}
+ /* BUG_ON(i != E1000_STATS_LEN); */
+ }
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 2a1d4a9d3c19..1f84f2fa459f 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -521,8 +521,6 @@ void e1000_down(struct e1000_adapter *adapter)
+ 	struct net_device *netdev = adapter->netdev;
+ 	u32 rctl, tctl;
+ 
+-	netif_carrier_off(netdev);
+-
+ 	/* disable receives in the hardware */
+ 	rctl = er32(RCTL);
+ 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
+@@ -538,6 +536,15 @@ void e1000_down(struct e1000_adapter *adapter)
+ 	E1000_WRITE_FLUSH();
+ 	msleep(10);
+ 
++	/* Set the carrier off after transmits have been disabled in the
++	 * hardware, to avoid race conditions with e1000_watchdog() (which
++	 * may be running concurrently to us, checking for the carrier
++	 * bit to decide whether it should enable transmits again). Such
++	 * a race condition would result into transmission being disabled
++	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
++	 */
++	netif_carrier_off(netdev);
++
+ 	napi_disable(&adapter->napi);
+ 
+ 	e1000_irq_disable(adapter);
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index b09a6b80d107..355c5fb802cd 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
+ 
+ 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
+ 	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+-	ql_write_nvram_reg(qdev, spir,
+-			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
+ }
+ 
+ /*
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 50bfded6d7ef..5ac0b850d6b1 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1475,6 +1475,8 @@ static void tun_setup(struct net_device *dev)
+  */
+ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
+ {
++	if (!data)
++		return 0;
+ 	return -EINVAL;
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 5bb1be478954..f201e50447d8 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -1749,6 +1749,12 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
+ 	if (ret)
+ 		dev_kfree_skb_any(skb);
+ 
++	if (ret == -EAGAIN) {
++		ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
++			    cmd_id);
++		queue_work(ar->workqueue, &ar->restart_work);
++	}
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index a71187c783b7..273c7ecf4879 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -158,6 +158,8 @@ static void nd_async_device_register(void *d, async_cookie_t cookie)
+ 		put_device(dev);
+ 	}
+ 	put_device(dev);
++	if (dev->parent)
++		put_device(dev->parent);
+ }
+ 
+ static void nd_async_device_unregister(void *d, async_cookie_t cookie)
+@@ -175,6 +177,8 @@ static void nd_async_device_unregister(void *d, async_cookie_t cookie)
+ void __nd_device_register(struct device *dev)
+ {
+ 	dev->bus = &nvdimm_bus_type;
++	if (dev->parent)
++		get_device(dev->parent);
+ 	get_device(dev);
+ 	async_schedule_domain(nd_async_device_register, dev,
+ 			&nd_async_domain);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 5697b32819cb..84d501f5ff4e 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3061,7 +3061,11 @@ static void disable_igfx_irq(struct pci_dev *dev)
+ 
+ 	pci_iounmap(dev, regs);
+ }
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
+ 
+diff --git a/drivers/pcmcia/ricoh.h b/drivers/pcmcia/ricoh.h
+index 01098c841f87..8ac7b138c094 100644
+--- a/drivers/pcmcia/ricoh.h
++++ b/drivers/pcmcia/ricoh.h
+@@ -119,6 +119,10 @@
+ #define  RL5C4XX_MISC_CONTROL           0x2F /* 8 bit */
+ #define  RL5C4XX_ZV_ENABLE              0x08
+ 
++/* Misc Control 3 Register */
++#define RL5C4XX_MISC3			0x00A2 /* 16 bit */
++#define  RL5C47X_MISC3_CB_CLKRUN_DIS	BIT(1)
++
+ #ifdef __YENTA_H
+ 
+ #define rl_misc(socket)		((socket)->private[0])
+@@ -156,6 +160,35 @@ static void ricoh_set_zv(struct yenta_socket *socket)
+         }
+ }
+ 
++static void ricoh_set_clkrun(struct yenta_socket *socket, bool quiet)
++{
++	u16 misc3;
++
++	/*
++	 * RL5C475II likely has this setting, too, however no datasheet
++	 * is publicly available for this chip
++	 */
++	if (socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C476 &&
++	    socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C478)
++		return;
++
++	if (socket->dev->revision < 0x80)
++		return;
++
++	misc3 = config_readw(socket, RL5C4XX_MISC3);
++	if (misc3 & RL5C47X_MISC3_CB_CLKRUN_DIS) {
++		if (!quiet)
++			dev_dbg(&socket->dev->dev,
++				"CLKRUN feature already disabled\n");
++	} else if (disable_clkrun) {
++		if (!quiet)
++			dev_info(&socket->dev->dev,
++				 "Disabling CLKRUN feature\n");
++		misc3 |= RL5C47X_MISC3_CB_CLKRUN_DIS;
++		config_writew(socket, RL5C4XX_MISC3, misc3);
++	}
++}
++
+ static void ricoh_save_state(struct yenta_socket *socket)
+ {
+ 	rl_misc(socket) = config_readw(socket, RL5C4XX_MISC);
+@@ -172,6 +205,7 @@ static void ricoh_restore_state(struct yenta_socket *socket)
+ 	config_writew(socket, RL5C4XX_16BIT_IO_0, rl_io(socket));
+ 	config_writew(socket, RL5C4XX_16BIT_MEM_0, rl_mem(socket));
+ 	config_writew(socket, RL5C4XX_CONFIG, rl_config(socket));
++	ricoh_set_clkrun(socket, true);
+ }
+ 
+ 
+@@ -197,6 +231,7 @@ static int ricoh_override(struct yenta_socket *socket)
+ 	config_writew(socket, RL5C4XX_CONFIG, config);
+ 
+ 	ricoh_set_zv(socket);
++	ricoh_set_clkrun(socket, false);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
+index 5d6d9b1549bc..5034422a1d96 100644
+--- a/drivers/pcmcia/yenta_socket.c
++++ b/drivers/pcmcia/yenta_socket.c
+@@ -26,7 +26,8 @@
+ 
+ static bool disable_clkrun;
+ module_param(disable_clkrun, bool, 0444);
+-MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option");
++MODULE_PARM_DESC(disable_clkrun,
++		 "If PC card doesn't function properly, please try this option (TI and Ricoh bridges only)");
+ 
+ static bool isa_probe = 1;
+ module_param(isa_probe, bool, 0444);
+diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+index 9ce0e30e33e8..5cd9a81a6060 100644
+--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
++++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+@@ -321,6 +321,8 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
+ 	pad->function = function;
+ 
+ 	ret = pmic_mpp_write_mode_ctl(state, pad);
++	if (ret < 0)
++		return ret;
+ 
+ 	val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
+ 
+@@ -345,13 +347,12 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
+ 
+ 	switch (param) {
+ 	case PIN_CONFIG_BIAS_DISABLE:
+-		arg = pad->pullup == PMIC_MPP_PULL_UP_OPEN;
++		if (pad->pullup != PMIC_MPP_PULL_UP_OPEN)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	case PIN_CONFIG_BIAS_PULL_UP:
+ 		switch (pad->pullup) {
+-		case PMIC_MPP_PULL_UP_OPEN:
+-			arg = 0;
+-			break;
+ 		case PMIC_MPP_PULL_UP_0P6KOHM:
+ 			arg = 600;
+ 			break;
+@@ -366,13 +367,17 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
+ 		}
+ 		break;
+ 	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+-		arg = !pad->is_enabled;
++		if (pad->is_enabled)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	case PIN_CONFIG_POWER_SOURCE:
+ 		arg = pad->power_source;
+ 		break;
+ 	case PIN_CONFIG_INPUT_ENABLE:
+-		arg = pad->input_enabled;
++		if (!pad->input_enabled)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	case PIN_CONFIG_OUTPUT:
+ 		arg = pad->out_value;
+@@ -384,7 +389,9 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
+ 		arg = pad->amux_input;
+ 		break;
+ 	case PMIC_MPP_CONF_PAIRED:
+-		arg = pad->paired;
++		if (!pad->paired)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	case PIN_CONFIG_DRIVE_STRENGTH:
+ 		arg = pad->drive_strength;
+@@ -457,7 +464,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 			pad->dtest = arg;
+ 			break;
+ 		case PIN_CONFIG_DRIVE_STRENGTH:
+-			arg = pad->drive_strength;
++			pad->drive_strength = arg;
+ 			break;
+ 		case PMIC_MPP_CONF_AMUX_ROUTE:
+ 			if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
+@@ -501,6 +508,10 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL, pad->drive_strength);
++	if (ret < 0)
++		return ret;
++
+ 	val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
+ 
+ 	return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
+diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+index 19a3c3bc2f1f..b1e8a2d905ff 100644
+--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
++++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+@@ -259,22 +259,32 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
+ 
+ 	switch (param) {
+ 	case PIN_CONFIG_BIAS_DISABLE:
+-		arg = pin->bias == PM8XXX_GPIO_BIAS_NP;
++		if (pin->bias != PM8XXX_GPIO_BIAS_NP)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	case PIN_CONFIG_BIAS_PULL_DOWN:
+-		arg = pin->bias == PM8XXX_GPIO_BIAS_PD;
++		if (pin->bias != PM8XXX_GPIO_BIAS_PD)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	case PIN_CONFIG_BIAS_PULL_UP:
+-		arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30;
++		if (pin->bias > PM8XXX_GPIO_BIAS_PU_1P5_30)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	case PM8XXX_QCOM_PULL_UP_STRENGTH:
+ 		arg = pin->pull_up_strength;
+ 		break;
+ 	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+-		arg = pin->disable;
++		if (!pin->disable)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	case PIN_CONFIG_INPUT_ENABLE:
+-		arg = pin->mode == PM8XXX_GPIO_MODE_INPUT;
++		if (pin->mode != PM8XXX_GPIO_MODE_INPUT)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	case PIN_CONFIG_OUTPUT:
+ 		if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
+@@ -289,10 +299,14 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
+ 		arg = pin->output_strength;
+ 		break;
+ 	case PIN_CONFIG_DRIVE_PUSH_PULL:
+-		arg = !pin->open_drain;
++		if (pin->open_drain)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+-		arg = pin->open_drain;
++		if (!pin->open_drain)
++			return -EINVAL;
++		arg = 1;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
+index e79f2a181ad2..b9ec4a16db1f 100644
+--- a/drivers/rtc/hctosys.c
++++ b/drivers/rtc/hctosys.c
+@@ -50,8 +50,10 @@ static int __init rtc_hctosys(void)
+ 	tv64.tv_sec = rtc_tm_to_time64(&tm);
+ 
+ #if BITS_PER_LONG == 32
+-	if (tv64.tv_sec > INT_MAX)
++	if (tv64.tv_sec > INT_MAX) {
++		err = -ERANGE;
+ 		goto err_read;
++	}
+ #endif
+ 
+ 	err = do_settimeofday64(&tv64);
+diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
+index 71cb05b1c3eb..60be0742e2c8 100644
+--- a/drivers/scsi/esp_scsi.c
++++ b/drivers/scsi/esp_scsi.c
+@@ -1349,6 +1349,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
+ 
+ 	bytes_sent = esp->data_dma_len;
+ 	bytes_sent -= ecount;
++	bytes_sent -= esp->send_cmd_residual;
+ 
+ 	/*
+ 	 * The am53c974 has a DMA 'pecularity'. The doc states:
+diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
+index 84dcbe4a6268..55be43fe7667 100644
+--- a/drivers/scsi/esp_scsi.h
++++ b/drivers/scsi/esp_scsi.h
+@@ -540,6 +540,8 @@ struct esp {
+ 
+ 	void			*dma;
+ 	int			dmarev;
++
++	u32			send_cmd_residual;
+ };
+ 
+ /* A front-end driver for the ESP chip should do the following in
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 3406586b9201..ad4f16ab7f7a 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -3485,6 +3485,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
+ 	struct hbq_dmabuf *dmabuf;
+ 	struct lpfc_cq_event *cq_event;
+ 	unsigned long iflag;
++	int count = 0;
+ 
+ 	spin_lock_irqsave(&phba->hbalock, iflag);
+ 	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+@@ -3506,16 +3507,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
+ 			if (irspiocbq)
+ 				lpfc_sli_sp_handle_rspiocb(phba, pring,
+ 							   irspiocbq);
++			count++;
+ 			break;
+ 		case CQE_CODE_RECEIVE:
+ 		case CQE_CODE_RECEIVE_V1:
+ 			dmabuf = container_of(cq_event, struct hbq_dmabuf,
+ 					      cq_event);
+ 			lpfc_sli4_handle_received_buffer(phba, dmabuf);
++			count++;
+ 			break;
+ 		default:
+ 			break;
+ 		}
++
++		/* Limit the number of events to 64 to avoid soft lockups */
++		if (count == 64)
++			break;
+ 	}
+ }
+ 
+diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
+index 26c67c42985c..1002124bd8bf 100644
+--- a/drivers/scsi/mac_esp.c
++++ b/drivers/scsi/mac_esp.c
+@@ -426,6 +426,8 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ 			scsi_esp_cmd(esp, ESP_CMD_TI);
+ 		}
+ 	}
++
++	esp->send_cmd_residual = esp_count;
+ }
+ 
+ static int mac_esp_irq_pending(struct esp *esp)
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 6835bae33ec4..ac7acd257c99 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -6510,6 +6510,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+ 		get_user(user_sense_off, &cioc->sense_off))
+ 		return -EFAULT;
+ 
++	if (local_sense_off != user_sense_off)
++		return -EINVAL;
++
+ 	if (local_sense_len) {
+ 		void __user **sense_ioc_ptr =
+ 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index cb11e04be568..87059a6786f4 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -3315,10 +3315,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+ 	mcp->mb[0] = MBC_PORT_PARAMS;
+ 	mcp->mb[1] = loop_id;
+ 	mcp->mb[2] = BIT_0;
+-	if (IS_CNA_CAPABLE(vha->hw))
+-		mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
+-	else
+-		mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
++	mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
+ 	mcp->mb[9] = vha->vp_idx;
+ 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+ 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
+diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
+index bc34cf7482fb..a4753644f4cf 100644
+--- a/drivers/soc/tegra/pmc.c
++++ b/drivers/soc/tegra/pmc.c
+@@ -738,7 +738,7 @@ void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
+ 	if (!pmc->soc->has_tsense_reset)
+ 		return;
+ 
+-	np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip");
++	np = of_get_child_by_name(pmc->dev->of_node, "i2c-thermtrip");
+ 	if (!np) {
+ 		dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled);
+ 		return;
+diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
+index 3be9519654e5..cf3fad2cb871 100644
+--- a/drivers/tc/tc.c
++++ b/drivers/tc/tc.c
+@@ -2,7 +2,7 @@
+  *	TURBOchannel bus services.
+  *
+  *	Copyright (c) Harald Koerfgen, 1998
+- *	Copyright (c) 2001, 2003, 2005, 2006  Maciej W. Rozycki
++ *	Copyright (c) 2001, 2003, 2005, 2006, 2018  Maciej W. Rozycki
+  *	Copyright (c) 2005  James Simmons
+  *
+  *	This file is subject to the terms and conditions of the GNU
+@@ -10,6 +10,7 @@
+  *	directory of this archive for more details.
+  */
+ #include <linux/compiler.h>
++#include <linux/dma-mapping.h>
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/ioport.h>
+@@ -92,6 +93,11 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
+ 		tdev->dev.bus = &tc_bus_type;
+ 		tdev->slot = slot;
+ 
++		/* TURBOchannel has 34-bit DMA addressing (16GiB space). */
++		tdev->dma_mask = DMA_BIT_MASK(34);
++		tdev->dev.dma_mask = &tdev->dma_mask;
++		tdev->dev.coherent_dma_mask = DMA_BIT_MASK(34);
++
+ 		for (i = 0; i < 8; i++) {
+ 			tdev->firmware[i] =
+ 				readb(module + offset + TC_FIRM_VER + 4 * i);
+diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
+index a260cde743e2..2db68dfe497d 100644
+--- a/drivers/tty/serial/kgdboc.c
++++ b/drivers/tty/serial/kgdboc.c
+@@ -133,6 +133,11 @@ static void kgdboc_unregister_kbd(void)
+ 
+ static int kgdboc_option_setup(char *opt)
+ {
++	if (!opt) {
++		pr_err("kgdboc: config string not provided\n");
++		return -EINVAL;
++	}
++
+ 	if (strlen(opt) >= MAX_CONFIG_LEN) {
+ 		printk(KERN_ERR "kgdboc: config string too long\n");
+ 		return -ENOSPC;
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 7d5ee8a13ac6..17a22073d226 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -648,7 +648,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
+ 		uart_write_wakeup(port);
+ }
+ 
+-static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
++static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+ {
+ 	struct uart_port *port = &s->p[portno].port;
+ 
+@@ -657,7 +657,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+ 
+ 		iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
+ 		if (iir & SC16IS7XX_IIR_NO_INT_BIT)
+-			break;
++			return false;
+ 
+ 		iir &= SC16IS7XX_IIR_ID_MASK;
+ 
+@@ -685,16 +685,23 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+ 					    port->line, iir);
+ 			break;
+ 		}
+-	} while (1);
++	} while (0);
++	return true;
+ }
+ 
+ static void sc16is7xx_ist(struct kthread_work *ws)
+ {
+ 	struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
+-	int i;
+ 
+-	for (i = 0; i < s->devtype->nr_uart; ++i)
+-		sc16is7xx_port_irq(s, i);
++	while (1) {
++		bool keep_polling = false;
++		int i;
++
++		for (i = 0; i < s->devtype->nr_uart; ++i)
++			keep_polling |= sc16is7xx_port_irq(s, i);
++		if (!keep_polling)
++			break;
++	}
+ }
+ 
+ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 198451fa9e5d..c1cff2b455ae 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -357,7 +357,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
+ 	mutex_lock(&tty_mutex);
+ 	/* Search through the tty devices to look for a match */
+ 	list_for_each_entry(p, &tty_drivers, tty_drivers) {
+-		if (strncmp(name, p->name, len) != 0)
++		if (!len || strncmp(name, p->name, len) != 0)
+ 			continue;
+ 		stp = str;
+ 		if (*stp == ',')
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 1445dd39aa62..bece7e39f512 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -330,7 +330,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
+ 		else
+ 			cbaud += 15;
+ 	}
+-	return baud_table[cbaud];
++	return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
+ }
+ EXPORT_SYMBOL(tty_termios_baud_rate);
+ 
+@@ -366,7 +366,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
+ 		else
+ 			cbaud += 15;
+ 	}
+-	return baud_table[cbaud];
++	return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
+ #else
+ 	return tty_termios_baud_rate(termios);
+ #endif
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index b9823eb9c195..0ab15d833d1b 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -249,6 +249,8 @@ static struct class uio_class = {
+ 	.dev_groups = uio_groups,
+ };
+ 
++bool uio_class_registered;
++
+ /*
+  * device functions
+  */
+@@ -772,6 +774,9 @@ static int init_uio_class(void)
+ 		printk(KERN_ERR "class_register failed for uio\n");
+ 		goto err_class_register;
+ 	}
++
++	uio_class_registered = true;
++
+ 	return 0;
+ 
+ err_class_register:
+@@ -782,6 +787,7 @@ exit:
+ 
+ static void release_uio_class(void)
+ {
++	uio_class_registered = false;
+ 	class_unregister(&uio_class);
+ 	uio_major_cleanup();
+ }
+@@ -801,6 +807,9 @@ int __uio_register_device(struct module *owner,
+ 	struct uio_device *idev;
+ 	int ret = 0;
+ 
++	if (!uio_class_registered)
++		return -EPROBE_DEFER;
++
+ 	if (!parent || !info || !info->name || !info->version)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
+index 9ecb598e48f0..a5557c70034a 100644
+--- a/drivers/usb/chipidea/otg.h
++++ b/drivers/usb/chipidea/otg.h
+@@ -20,7 +20,8 @@ void ci_handle_vbus_change(struct ci_hdrc *ci);
+ static inline void ci_otg_queue_work(struct ci_hdrc *ci)
+ {
+ 	disable_irq_nosync(ci->irq);
+-	queue_work(ci->wq, &ci->work);
++	if (queue_work(ci->wq, &ci->work) == false)
++		enable_irq(ci->irq);
+ }
+ 
+ #endif /* __DRIVERS_USB_CHIPIDEA_OTG_H */
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index da6cc25baaef..8fc62a03637a 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -1009,7 +1009,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
+ 			}
+ 			/*
+-			 * Set prot_iter to data_iter, and advance past any
++			 * Set prot_iter to data_iter and truncate it to
++			 * prot_bytes, and advance data_iter past any
+ 			 * preceeding prot_bytes that may be present.
+ 			 *
+ 			 * Also fix up the exp_data_len to reflect only the
+@@ -1018,6 +1019,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 			if (prot_bytes) {
+ 				exp_data_len -= prot_bytes;
+ 				prot_iter = data_iter;
++				iov_iter_truncate(&prot_iter, prot_bytes);
+ 				iov_iter_advance(&data_iter, prot_bytes);
+ 			}
+ 			tag = vhost64_to_cpu(vq, v_req_pi.tag);
+diff --git a/drivers/video/fbdev/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c
+index 182bd680141f..e9dfe0e40b8b 100644
+--- a/drivers/video/fbdev/aty/mach64_accel.c
++++ b/drivers/video/fbdev/aty/mach64_accel.c
+@@ -126,7 +126,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
+ 
+ 	/* set host attributes */
+ 	wait_for_fifo(13, par);
+-	aty_st_le32(HOST_CNTL, 0, par);
++	aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
+ 
+ 	/* set pattern attributes */
+ 	aty_st_le32(PAT_REG0, 0, par);
+@@ -232,7 +232,8 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+ 		rotation = rotation24bpp(dx, direction);
+ 	}
+ 
+-	wait_for_fifo(4, par);
++	wait_for_fifo(5, par);
++	aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
+ 	aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par);
+ 	aty_st_le32(SRC_Y_X, (sx << 16) | sy, par);
+ 	aty_st_le32(SRC_HEIGHT1_WIDTH1, (width << 16) | area->height, par);
+@@ -268,7 +269,8 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+ 		rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT);
+ 	}
+ 
+-	wait_for_fifo(3, par);
++	wait_for_fifo(4, par);
++	aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
+ 	aty_st_le32(DP_FRGD_CLR, color, par);
+ 	aty_st_le32(DP_SRC,
+ 		    BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR | MONO_SRC_ONE,
+@@ -283,7 +285,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
+ {
+ 	struct atyfb_par *par = (struct atyfb_par *) info->par;
+ 	u32 src_bytes, dx = image->dx, dy = image->dy, width = image->width;
+-	u32 pix_width_save, pix_width, host_cntl, rotation = 0, src, mix;
++	u32 pix_width, rotation = 0, src, mix;
+ 
+ 	if (par->asleep)
+ 		return;
+@@ -295,8 +297,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
+ 		return;
+ 	}
+ 
+-	pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par);
+-	host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN;
++	pix_width = par->crtc.dp_pix_width;
+ 
+ 	switch (image->depth) {
+ 	case 1:
+@@ -344,7 +345,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
+ 		 * since Rage 3D IIc we have DP_HOST_TRIPLE_EN bit
+ 		 * this hwaccelerated triple has an issue with not aligned data
+ 		 */
+-		if (M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
++		if (image->depth == 1 && M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
+ 			pix_width |= DP_HOST_TRIPLE_EN;
+ 	}
+ 
+@@ -369,19 +370,18 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
+ 		mix = FRGD_MIX_D_XOR_S | BKGD_MIX_D;
+ 	}
+ 
+-	wait_for_fifo(6, par);
+-	aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par);
++	wait_for_fifo(5, par);
+ 	aty_st_le32(DP_PIX_WIDTH, pix_width, par);
+ 	aty_st_le32(DP_MIX, mix, par);
+ 	aty_st_le32(DP_SRC, src, par);
+-	aty_st_le32(HOST_CNTL, host_cntl, par);
++	aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
+ 	aty_st_le32(DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par);
+ 
+ 	draw_rect(dx, dy, width, image->height, par);
+ 	src_bytes = (((image->width * image->depth) + 7) / 8) * image->height;
+ 
+ 	/* manual triple each pixel */
+-	if (info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
++	if (image->depth == 1 && info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
+ 		int inbit, outbit, mult24, byte_id_in_dword, width;
+ 		u8 *pbitmapin = (u8*)image->data, *pbitmapout;
+ 		u32 hostdword;
+@@ -414,7 +414,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
+ 				}
+ 			}
+ 			wait_for_fifo(1, par);
+-			aty_st_le32(HOST_DATA0, hostdword, par);
++			aty_st_le32(HOST_DATA0, le32_to_cpu(hostdword), par);
+ 		}
+ 	} else {
+ 		u32 *pbitmap, dwords = (src_bytes + 3) / 4;
+@@ -423,8 +423,4 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
+ 			aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par);
+ 		}
+ 	}
+-
+-	/* restore pix_width */
+-	wait_for_fifo(1, par);
+-	aty_st_le32(DP_PIX_WIDTH, pix_width_save, par);
+ }
+diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
+index 0c427d6a12d1..4c5c6550809d 100644
+--- a/drivers/w1/masters/omap_hdq.c
++++ b/drivers/w1/masters/omap_hdq.c
+@@ -785,6 +785,8 @@ static int omap_hdq_remove(struct platform_device *pdev)
+ 	/* remove module dependency */
+ 	pm_runtime_disable(&pdev->dev);
+ 
++	w1_remove_master_device(&omap_w1_master);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 1889e928a0da..a8a388382347 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -310,6 +310,9 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ 	*/
+ 	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
+ 
++	/* Convert the size to actually allocated. */
++	size = 1UL << (order + XEN_PAGE_SHIFT);
++
+ 	/* On ARM this function returns an ioremap'ped virtual address for
+ 	 * which virt_to_phys doesn't return the corresponding physical
+ 	 * address. In fact on ARM virt_to_phys only works for kernel direct
+@@ -359,6 +362,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+ 	 * physical address */
+ 	phys = xen_bus_to_phys(dev_addr);
+ 
++	/* Convert the size to actually allocated. */
++	size = 1UL << (order + XEN_PAGE_SHIFT);
++
+ 	if (((dev_addr + size - 1 <= dma_mask)) ||
+ 	    range_straddles_page_boundary(phys, size))
+ 		xen_destroy_contiguous_region(phys, order);
+diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
+index 12ceaf52dae6..e7b3d2c4472d 100644
+--- a/fs/9p/vfs_file.c
++++ b/fs/9p/vfs_file.c
+@@ -204,6 +204,14 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
+ 			break;
+ 		if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
+ 			break;
++		/*
++		 * p9_client_lock_dotl overwrites flock.client_id with the
++		 * server message, free and reuse the client name
++		 */
++		if (flock.client_id != fid->clnt->name) {
++			kfree(flock.client_id);
++			flock.client_id = fid->clnt->name;
++		}
+ 	}
+ 
+ 	/* map 9p status to VFS status */
+@@ -235,6 +243,8 @@ out_unlock:
+ 		locks_lock_file_wait(filp, fl);
+ 		fl->fl_type = fl_type;
+ 	}
++	if (flock.client_id != fid->clnt->name)
++		kfree(flock.client_id);
+ out:
+ 	return res;
+ }
+@@ -269,7 +279,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
+ 
+ 	res = p9_client_getlock_dotl(fid, &glock);
+ 	if (res < 0)
+-		return res;
++		goto out;
+ 	/* map 9p lock type to os lock type */
+ 	switch (glock.type) {
+ 	case P9_LOCK_TYPE_RDLCK:
+@@ -290,7 +300,9 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
+ 			fl->fl_end = glock.start + glock.length - 1;
+ 		fl->fl_pid = glock.proc_id;
+ 	}
+-	kfree(glock.client_id);
++out:
++	if (glock.client_id != fid->clnt->name)
++		kfree(glock.client_id);
+ 	return res;
+ }
+ 
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 62bc72001fce..f010d6c8dd14 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -604,28 +604,30 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ 			 * Do the same thing for the memory mapping - between
+ 			 * elf_bss and last_bss is the bss section.
+ 			 */
+-			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
++			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
+ 			if (k > last_bss)
+ 				last_bss = k;
+ 		}
+ 	}
+ 
++	/*
++	 * Now fill out the bss section: first pad the last page from
++	 * the file up to the page boundary, and zero it from elf_bss
++	 * up to the end of the page.
++	 */
++	if (padzero(elf_bss)) {
++		error = -EFAULT;
++		goto out;
++	}
++	/*
++	 * Next, align both the file and mem bss up to the page size,
++	 * since this is where elf_bss was just zeroed up to, and where
++	 * last_bss will end after the vm_brk() below.
++	 */
++	elf_bss = ELF_PAGEALIGN(elf_bss);
++	last_bss = ELF_PAGEALIGN(last_bss);
++	/* Finally, if there is still more bss to allocate, do it. */
+ 	if (last_bss > elf_bss) {
+-		/*
+-		 * Now fill out the bss section.  First pad the last page up
+-		 * to the page boundary, and then perform a mmap to make sure
+-		 * that there are zero-mapped pages up to and including the
+-		 * last bss page.
+-		 */
+-		if (padzero(elf_bss)) {
+-			error = -EFAULT;
+-			goto out;
+-		}
+-
+-		/* What we have mapped so far */
+-		elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
+-
+-		/* Map the last of the bss segment */
+ 		error = vm_brk(elf_bss, last_bss - elf_bss);
+ 		if (BAD_ADDR(error))
+ 			goto out;
+@@ -1212,11 +1214,13 @@ static int load_elf_library(struct file *file)
+ 		goto out_free_ph;
+ 	}
+ 
+-	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
+-			    ELF_MIN_ALIGN - 1);
+-	bss = eppnt->p_memsz + eppnt->p_vaddr;
+-	if (bss > len)
+-		vm_brk(len, bss - len);
++	len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
++	bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
++	if (bss > len) {
++		error = vm_brk(len, bss - len);
++		if (BAD_ADDR(error))
++			goto out_free_ph;
++	}
+ 	error = 0;
+ 
+ out_free_ph:
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index a72f941ca750..80cd28456f08 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -7835,6 +7835,20 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 	buf = btrfs_find_create_tree_block(root, bytenr);
+ 	if (!buf)
+ 		return ERR_PTR(-ENOMEM);
++
++	/*
++	 * Extra safety check in case the extent tree is corrupted and extent
++	 * allocator chooses to use a tree block which is already used and
++	 * locked.
++	 */
++	if (buf->lock_owner == current->pid) {
++		btrfs_err_rl(root->fs_info,
++"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
++			buf->start, btrfs_header_owner(buf), current->pid);
++		free_extent_buffer(buf);
++		return ERR_PTR(-EUCLEAN);
++	}
++
+ 	btrfs_set_header_generation(buf, trans->transid);
+ 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
+ 	btrfs_tree_lock(buf);
+@@ -8704,15 +8718,14 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
+ 	if (eb == root->node) {
+ 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ 			parent = eb->start;
+-		else
+-			BUG_ON(root->root_key.objectid !=
+-			       btrfs_header_owner(eb));
++		else if (root->root_key.objectid != btrfs_header_owner(eb))
++			goto owner_mismatch;
+ 	} else {
+ 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
+ 			parent = path->nodes[level + 1]->start;
+-		else
+-			BUG_ON(root->root_key.objectid !=
+-			       btrfs_header_owner(path->nodes[level + 1]));
++		else if (root->root_key.objectid !=
++			 btrfs_header_owner(path->nodes[level + 1]))
++			goto owner_mismatch;
+ 	}
+ 
+ 	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
+@@ -8720,6 +8733,11 @@ out:
+ 	wc->refs[level] = 0;
+ 	wc->flags[level] = 0;
+ 	return 0;
++
++owner_mismatch:
++	btrfs_err_rl(root->fs_info, "unexpected tree owner, have %llu expect %llu",
++		     btrfs_header_owner(eb), root->root_key.objectid);
++	return -EUCLEAN;
+ }
+ 
+ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
+@@ -8773,6 +8791,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
+ 			ret = walk_up_proc(trans, root, path, wc);
+ 			if (ret > 0)
+ 				return 0;
++			if (ret < 0)
++				return ret;
+ 
+ 			if (path->locks[level]) {
+ 				btrfs_tree_unlock_rw(path->nodes[level],
+@@ -9501,6 +9521,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
+ 
+ 		block_group = btrfs_lookup_first_block_group(info, last);
+ 		while (block_group) {
++			wait_block_group_cache_done(block_group);
+ 			spin_lock(&block_group->lock);
+ 			if (block_group->iref)
+ 				break;
+@@ -9891,7 +9912,7 @@ error:
+ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
+ 				       struct btrfs_root *root)
+ {
+-	struct btrfs_block_group_cache *block_group, *tmp;
++	struct btrfs_block_group_cache *block_group;
+ 	struct btrfs_root *extent_root = root->fs_info->extent_root;
+ 	struct btrfs_block_group_item item;
+ 	struct btrfs_key key;
+@@ -9899,7 +9920,10 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
+ 	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
+ 
+ 	trans->can_flush_pending_bgs = false;
+-	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
++	while (!list_empty(&trans->new_bgs)) {
++		block_group = list_first_entry(&trans->new_bgs,
++					       struct btrfs_block_group_cache,
++					       bg_list);
+ 		if (ret)
+ 			goto next;
+ 
+@@ -10609,6 +10633,10 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
+ 
+ 	*trimmed = 0;
+ 
++	/* Discard not supported = nothing to do. */
++	if (!blk_queue_discard(bdev_get_queue(device->bdev)))
++		return 0;
++
+ 	/* Not writeable = nothing to do. */
+ 	if (!device->writeable)
+ 		return 0;
+@@ -10731,8 +10759,8 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
+ 	}
+ 
+ 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+-	devices = &root->fs_info->fs_devices->alloc_list;
+-	list_for_each_entry(device, devices, dev_alloc_list) {
++	devices = &root->fs_info->fs_devices->devices;
++	list_for_each_entry(device, devices, dev_list) {
+ 		ret = btrfs_trim_free_extents(device, range->minlen,
+ 					      &group_trimmed);
+ 		if (ret)
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 45934deacfd7..1aa897dd9ce3 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -1699,6 +1699,8 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+ 	bitmap_clear(info->bitmap, start, count);
+ 
+ 	info->bytes -= bytes;
++	if (info->max_extent_size > ctl->unit)
++		info->max_extent_size = 0;
+ }
+ 
+ static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+@@ -1782,6 +1784,13 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
+ 	return -1;
+ }
+ 
++static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
++{
++	if (entry->bitmap)
++		return entry->max_extent_size;
++	return entry->bytes;
++}
++
+ /* Cache the size of the max extent in bytes */
+ static struct btrfs_free_space *
+ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+@@ -1803,8 +1812,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+ 	for (node = &entry->offset_index; node; node = rb_next(node)) {
+ 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ 		if (entry->bytes < *bytes) {
+-			if (entry->bytes > *max_extent_size)
+-				*max_extent_size = entry->bytes;
++			*max_extent_size = max(get_max_extent_size(entry),
++					       *max_extent_size);
+ 			continue;
+ 		}
+ 
+@@ -1822,8 +1831,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+ 		}
+ 
+ 		if (entry->bytes < *bytes + align_off) {
+-			if (entry->bytes > *max_extent_size)
+-				*max_extent_size = entry->bytes;
++			*max_extent_size = max(get_max_extent_size(entry),
++					       *max_extent_size);
+ 			continue;
+ 		}
+ 
+@@ -1835,8 +1844,10 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+ 				*offset = tmp;
+ 				*bytes = size;
+ 				return entry;
+-			} else if (size > *max_extent_size) {
+-				*max_extent_size = size;
++			} else {
++				*max_extent_size =
++					max(get_max_extent_size(entry),
++					    *max_extent_size);
+ 			}
+ 			continue;
+ 		}
+@@ -2694,8 +2705,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
+ 
+ 	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
+ 	if (err) {
+-		if (search_bytes > *max_extent_size)
+-			*max_extent_size = search_bytes;
++		*max_extent_size = max(get_max_extent_size(entry),
++				       *max_extent_size);
+ 		return 0;
+ 	}
+ 
+@@ -2732,8 +2743,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
+ 
+ 	entry = rb_entry(node, struct btrfs_free_space, offset_index);
+ 	while (1) {
+-		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
+-			*max_extent_size = entry->bytes;
++		if (entry->bytes < bytes)
++			*max_extent_size = max(get_max_extent_size(entry),
++					       *max_extent_size);
+ 
+ 		if (entry->bytes < bytes ||
+ 		    (!entry->bitmap && entry->offset < min_start)) {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index b895be3d4311..383717ccecc7 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -481,6 +481,7 @@ again:
+ 		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
+ 		if (!pages) {
+ 			/* just bail out to the uncompressed code */
++			nr_pages = 0;
+ 			goto cont;
+ 		}
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 6caeb946fc1d..150d3c891815 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3950,9 +3950,17 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ 		goto out_unlock;
+ 	if (len == 0)
+ 		olen = len = src->i_size - off;
+-	/* if we extend to eof, continue to block boundary */
+-	if (off + len == src->i_size)
++	/*
++	 * If we extend to eof, continue to block boundary if and only if the
++	 * destination end offset matches the destination file's size, otherwise
++	 * we would be corrupting data by placing the eof block into the middle
++	 * of a file.
++	 */
++	if (off + len == src->i_size) {
++		if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
++			goto out_unlock;
+ 		len = ALIGN(src->i_size, bs) - off;
++	}
+ 
+ 	if (len == 0) {
+ 		ret = 0;
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index a751937dded5..90e29d40aa82 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2446,6 +2446,7 @@ qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
+ 		qgroup->rfer_cmpr = 0;
+ 		qgroup->excl = 0;
+ 		qgroup->excl_cmpr = 0;
++		qgroup_dirty(fs_info, qgroup);
+ 	}
+ 	spin_unlock(&fs_info->qgroup_lock);
+ }
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index cfe913d2d3df..d6ccfb31aef0 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1318,7 +1318,7 @@ static void __del_reloc_root(struct btrfs_root *root)
+ 	struct mapping_node *node = NULL;
+ 	struct reloc_control *rc = root->fs_info->reloc_ctl;
+ 
+-	if (rc) {
++	if (rc && root->node) {
+ 		spin_lock(&rc->reloc_root_tree.lock);
+ 		rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+ 				      root->node->start);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 2c7f9a5f8717..63f59f17c97e 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -5240,9 +5240,33 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ 
+ 			dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
+ 					       root, NULL);
+-			/* If parent inode was deleted, skip it. */
+-			if (IS_ERR(dir_inode))
+-				continue;
++			/*
++			 * If the parent inode was deleted, return an error to
++			 * fallback to a transaction commit. This is to prevent
++			 * getting an inode that was moved from one parent A to
++			 * a parent B, got its former parent A deleted and then
++			 * it got fsync'ed, from existing at both parents after
++			 * a log replay (and the old parent still existing).
++			 * Example:
++			 *
++			 * mkdir /mnt/A
++			 * mkdir /mnt/B
++			 * touch /mnt/B/bar
++			 * sync
++			 * mv /mnt/B/bar /mnt/A/bar
++			 * mv -T /mnt/A /mnt/B
++			 * fsync /mnt/B/bar
++			 * <power fail>
++			 *
++			 * If we ignore the old parent B which got deleted,
++			 * after a log replay we would have file bar linked
++			 * at both parents and the old parent B would still
++			 * exist.
++			 */
++			if (IS_ERR(dir_inode)) {
++				ret = PTR_ERR(dir_inode);
++				goto out;
++			}
+ 
+ 			ret = btrfs_log_inode(trans, root, dir_inode,
+ 					      LOG_INODE_ALL, 0, LLONG_MAX, ctx);
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 0e72a14228f8..7bc6d27d47a4 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -285,6 +285,9 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ 		atomic_set(&totBufAllocCount, 0);
+ 		atomic_set(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
++		atomic_set(&tcpSesReconnectCount, 0);
++		atomic_set(&tconInfoReconnectCount, 0);
++
+ 		spin_lock(&GlobalMid_Lock);
+ 		GlobalMaxActiveXid = 0;
+ 		GlobalCurrentXid = 0;
+diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
+index 6908080e9b6d..e3f2b7370bd8 100644
+--- a/fs/cifs/cifs_spnego.c
++++ b/fs/cifs/cifs_spnego.c
+@@ -143,8 +143,10 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
+ 		sprintf(dp, ";sec=krb5");
+ 	else if (server->sec_mskerberos)
+ 		sprintf(dp, ";sec=mskrb5");
+-	else
+-		goto out;
++	else {
++		cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
++		sprintf(dp, ";sec=krb5");
++	}
+ 
+ 	dp = description + strlen(description);
+ 	sprintf(dp, ";uid=0x%x",
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 36c8594bb147..5c3187df9ab9 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -756,7 +756,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
+ 	} else if (rc == -EREMOTE) {
+ 		cifs_create_dfs_fattr(&fattr, sb);
+ 		rc = 0;
+-	} else if (rc == -EACCES && backup_cred(cifs_sb)) {
++	} else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
++		   (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
++		      == 0)) {
++			/*
++			 * For SMB2 and later the backup intent flag is already
++			 * sent if needed on open and there is no path based
++			 * FindFirst operation to use to retry with
++			 */
++
+ 			srchinf = kzalloc(sizeof(struct cifs_search_info),
+ 						GFP_KERNEL);
+ 			if (srchinf == NULL) {
+diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
+index 0525ebc3aea2..66e8c5d58b21 100644
+--- a/fs/configfs/symlink.c
++++ b/fs/configfs/symlink.c
+@@ -64,7 +64,7 @@ static void fill_item_path(struct config_item * item, char * buffer, int length)
+ 
+ 		/* back up enough to print this bus id with '/' */
+ 		length -= cur;
+-		strncpy(buffer + length,config_item_name(p),cur);
++		memcpy(buffer + length, config_item_name(p), cur);
+ 		*(buffer + --length) = '/';
+ 	}
+ }
+diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
+index 355c522f3585..a6c9c2d66af1 100644
+--- a/fs/cramfs/inode.c
++++ b/fs/cramfs/inode.c
+@@ -185,7 +185,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
+ 			continue;
+ 		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
+ 		blk_offset += offset;
+-		if (blk_offset + len > BUFFER_SIZE)
++		if (blk_offset > BUFFER_SIZE ||
++		    blk_offset + len > BUFFER_SIZE)
+ 			continue;
+ 		return read_buffers[i] + blk_offset;
+ 	}
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index f5d9f82b173a..b6e25d771eea 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -3039,9 +3039,6 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
+ extern int ext4_inline_data_fiemap(struct inode *inode,
+ 				   struct fiemap_extent_info *fieinfo,
+ 				   int *has_inline, __u64 start, __u64 len);
+-extern int ext4_try_to_evict_inline_data(handle_t *handle,
+-					 struct inode *inode,
+-					 int needed);
+ extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
+ 
+ extern int ext4_convert_inline_data(struct inode *inode);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 1e7a9774119c..1aec46733ef8 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -859,7 +859,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
+ 	handle_t *handle;
+ 	struct page *page;
+ 	struct ext4_iloc iloc;
+-	int retries;
++	int retries = 0;
+ 
+ 	ret = ext4_get_inode_loc(inode, &iloc);
+ 	if (ret)
+@@ -888,11 +888,11 @@ retry_journal:
+ 	flags |= AOP_FLAG_NOFS;
+ 
+ 	if (ret == -ENOSPC) {
++		ext4_journal_stop(handle);
+ 		ret = ext4_da_convert_inline_data_to_extent(mapping,
+ 							    inode,
+ 							    flags,
+ 							    fsdata);
+-		ext4_journal_stop(handle);
+ 		if (ret == -ENOSPC &&
+ 		    ext4_should_retry_alloc(inode->i_sb, &retries))
+ 			goto retry_journal;
+@@ -1867,42 +1867,6 @@ out:
+ 	return (error < 0 ? error : 0);
+ }
+ 
+-/*
+- * Called during xattr set, and if we can sparse space 'needed',
+- * just create the extent tree evict the data to the outer block.
+- *
+- * We use jbd2 instead of page cache to move data to the 1st block
+- * so that the whole transaction can be committed as a whole and
+- * the data isn't lost because of the delayed page cache write.
+- */
+-int ext4_try_to_evict_inline_data(handle_t *handle,
+-				  struct inode *inode,
+-				  int needed)
+-{
+-	int error;
+-	struct ext4_xattr_entry *entry;
+-	struct ext4_inode *raw_inode;
+-	struct ext4_iloc iloc;
+-
+-	error = ext4_get_inode_loc(inode, &iloc);
+-	if (error)
+-		return error;
+-
+-	raw_inode = ext4_raw_inode(&iloc);
+-	entry = (struct ext4_xattr_entry *)((void *)raw_inode +
+-					    EXT4_I(inode)->i_inline_off);
+-	if (EXT4_XATTR_LEN(entry->e_name_len) +
+-	    EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
+-		error = -ENOSPC;
+-		goto out;
+-	}
+-
+-	error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
+-out:
+-	brelse(iloc.bh);
+-	return error;
+-}
+-
+ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
+ {
+ 	handle_t *handle;
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 05048fcfd602..6b5e2eddd8d7 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -526,9 +526,13 @@ mext_check_arguments(struct inode *orig_inode,
+ 			orig_inode->i_ino, donor_inode->i_ino);
+ 		return -EINVAL;
+ 	}
+-	if (orig_eof < orig_start + *len - 1)
++	if (orig_eof <= orig_start)
++		*len = 0;
++	else if (orig_eof < orig_start + *len - 1)
+ 		*len = orig_eof - orig_start;
+-	if (donor_eof < donor_start + *len - 1)
++	if (donor_eof <= donor_start)
++		*len = 0;
++	else if (donor_eof < donor_start + *len - 1)
+ 		*len = donor_eof - donor_start;
+ 	if (!*len) {
+ 		ext4_debug("ext4 move extent: len should not be 0 "
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index a1f1e53d0e25..aa08e129149d 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -124,6 +124,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
+ 	if (!is_dx_block && type == INDEX) {
+ 		ext4_error_inode(inode, func, line, block,
+ 		       "directory leaf block found instead of index block");
++		brelse(bh);
+ 		return ERR_PTR(-EFSCORRUPTED);
+ 	}
+ 	if (!ext4_has_metadata_csum(inode->i_sb) ||
+@@ -2830,7 +2831,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
+ 			list_del_init(&EXT4_I(inode)->i_orphan);
+ 			mutex_unlock(&sbi->s_orphan_lock);
+ 		}
+-	}
++	} else
++		brelse(iloc.bh);
++
+ 	jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
+ 	jbd_debug(4, "orphan inode %lu will point to %d\n",
+ 			inode->i_ino, NEXT_ORPHAN(inode));
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 783280ebc2fe..bad13f049fb0 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -442,16 +442,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
+ 
+ 		BUFFER_TRACE(bh, "get_write_access");
+ 		err = ext4_journal_get_write_access(handle, bh);
+-		if (err)
++		if (err) {
++			brelse(bh);
+ 			return err;
++		}
+ 		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
+ 			   block - start, count2);
+ 		ext4_set_bits(bh->b_data, block - start, count2);
+ 
+ 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
++		brelse(bh);
+ 		if (unlikely(err))
+ 			return err;
+-		brelse(bh);
+ 	}
+ 
+ 	return 0;
+@@ -588,7 +590,6 @@ handle_bb:
+ 		bh = bclean(handle, sb, block);
+ 		if (IS_ERR(bh)) {
+ 			err = PTR_ERR(bh);
+-			bh = NULL;
+ 			goto out;
+ 		}
+ 		overhead = ext4_group_overhead_blocks(sb, group);
+@@ -600,9 +601,9 @@ handle_bb:
+ 		ext4_mark_bitmap_end(group_data[i].blocks_count,
+ 				     sb->s_blocksize * 8, bh->b_data);
+ 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
++		brelse(bh);
+ 		if (err)
+ 			goto out;
+-		brelse(bh);
+ 
+ handle_ib:
+ 		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
+@@ -617,18 +618,16 @@ handle_ib:
+ 		bh = bclean(handle, sb, block);
+ 		if (IS_ERR(bh)) {
+ 			err = PTR_ERR(bh);
+-			bh = NULL;
+ 			goto out;
+ 		}
+ 
+ 		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
+ 				     sb->s_blocksize * 8, bh->b_data);
+ 		err = ext4_handle_dirty_metadata(handle, NULL, bh);
++		brelse(bh);
+ 		if (err)
+ 			goto out;
+-		brelse(bh);
+ 	}
+-	bh = NULL;
+ 
+ 	/* Mark group tables in block bitmap */
+ 	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
+@@ -659,7 +658,6 @@ handle_ib:
+ 	}
+ 
+ out:
+-	brelse(bh);
+ 	err2 = ext4_journal_stop(handle);
+ 	if (err2 && !err)
+ 		err = err2;
+@@ -846,6 +844,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
+ 	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
+ 	if (unlikely(err)) {
+ 		ext4_std_error(sb, err);
++		iloc.bh = NULL;
+ 		goto exit_inode;
+ 	}
+ 	brelse(dind);
+@@ -897,6 +896,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+ 				     sizeof(struct buffer_head *),
+ 				     GFP_NOFS);
+ 	if (!n_group_desc) {
++		brelse(gdb_bh);
+ 		err = -ENOMEM;
+ 		ext4_warning(sb, "not enough memory for %lu groups",
+ 			     gdb_num + 1);
+@@ -912,8 +912,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+ 	kvfree(o_group_desc);
+ 	BUFFER_TRACE(gdb_bh, "get_write_access");
+ 	err = ext4_journal_get_write_access(handle, gdb_bh);
+-	if (unlikely(err))
+-		brelse(gdb_bh);
+ 	return err;
+ }
+ 
+@@ -1095,8 +1093,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
+ 			   backup_block, backup_block -
+ 			   ext4_group_first_block_no(sb, group));
+ 		BUFFER_TRACE(bh, "get_write_access");
+-		if ((err = ext4_journal_get_write_access(handle, bh)))
++		if ((err = ext4_journal_get_write_access(handle, bh))) {
++			brelse(bh);
+ 			break;
++		}
+ 		lock_buffer(bh);
+ 		memcpy(bh->b_data, data, size);
+ 		if (rest)
+@@ -1991,7 +1991,7 @@ retry:
+ 
+ 	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
+ 	if (err)
+-		return err;
++		goto out;
+ 
+ 	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
+ 	if (err)
+@@ -2027,6 +2027,10 @@ retry:
+ 		n_blocks_count_retry = 0;
+ 		free_flex_gd(flex_gd);
+ 		flex_gd = NULL;
++		if (resize_inode) {
++			iput(resize_inode);
++			resize_inode = NULL;
++		}
+ 		goto retry;
+ 	}
+ 
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a3d905abbaa9..cd9cd581fd92 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3731,6 +3731,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	sbi->s_groups_count = blocks_count;
+ 	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
+ 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
++	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
++	    le32_to_cpu(es->s_inodes_count)) {
++		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
++			 le32_to_cpu(es->s_inodes_count),
++			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
++		ret = -EINVAL;
++		goto failed_mount;
++	}
+ 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+ 		   EXT4_DESC_PER_BLOCK(sb);
+ 	if (ext4_has_feature_meta_bg(sb)) {
+@@ -3750,14 +3758,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 		ret = -ENOMEM;
+ 		goto failed_mount;
+ 	}
+-	if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+-	    le32_to_cpu(es->s_inodes_count)) {
+-		ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+-			 le32_to_cpu(es->s_inodes_count),
+-			 ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+-		ret = -EINVAL;
+-		goto failed_mount;
+-	}
+ 
+ 	bgl_lock_init(sbi->s_blockgroup_lock);
+ 
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index d6bae37489af..53679716baca 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1044,22 +1044,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
+ 	if (EXT4_I(inode)->i_extra_isize == 0)
+ 		return -ENOSPC;
+ 	error = ext4_xattr_set_entry(i, s, inode);
+-	if (error) {
+-		if (error == -ENOSPC &&
+-		    ext4_has_inline_data(inode)) {
+-			error = ext4_try_to_evict_inline_data(handle, inode,
+-					EXT4_XATTR_LEN(strlen(i->name) +
+-					EXT4_XATTR_SIZE(i->value_len)));
+-			if (error)
+-				return error;
+-			error = ext4_xattr_ibody_find(inode, i, is);
+-			if (error)
+-				return error;
+-			error = ext4_xattr_set_entry(i, s, inode);
+-		}
+-		if (error)
+-			return error;
+-	}
++	if (error)
++		return error;
+ 	header = IHDR(inode, ext4_raw_inode(&is->iloc));
+ 	if (!IS_LAST_ENTRY(s->first)) {
+ 		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
+@@ -1175,6 +1161,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
+ 			error = ext4_xattr_block_set(handle, inode, &i, &bs);
+ 		} else if (error == -ENOSPC) {
+ 			if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
++				brelse(bs.bh);
++				bs.bh = NULL;
+ 				error = ext4_xattr_block_find(inode, &i, &bs);
+ 				if (error)
+ 					goto cleanup;
+@@ -1502,6 +1490,8 @@ cleanup:
+ 	kfree(buffer);
+ 	if (is)
+ 		brelse(is->iloc.bh);
++	if (bs)
++		brelse(bs->bh);
+ 	kfree(is);
+ 	kfree(bs);
+ 	brelse(bh);
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 2671e922c720..e566652ac922 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -402,12 +402,19 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
+ 	if (test_bit(FR_BACKGROUND, &req->flags)) {
+ 		spin_lock(&fc->lock);
+ 		clear_bit(FR_BACKGROUND, &req->flags);
+-		if (fc->num_background == fc->max_background)
++		if (fc->num_background == fc->max_background) {
+ 			fc->blocked = 0;
+-
+-		/* Wake up next waiter, if any */
+-		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
+ 			wake_up(&fc->blocked_waitq);
++		} else if (!fc->blocked) {
++			/*
++			 * Wake up next waiter, if any.  It's okay to use
++			 * waitqueue_active(), as we've already synced up
++			 * fc->blocked with waiters with the wake_up() call
++			 * above.
++			 */
++			if (waitqueue_active(&fc->blocked_waitq))
++				wake_up(&fc->blocked_waitq);
++		}
+ 
+ 		if (fc->num_background == fc->congestion_threshold &&
+ 		    fc->connected && fc->bdi_initialized) {
+@@ -1328,12 +1335,14 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
+ 		goto out_end;
+ 	}
+ 	list_move_tail(&req->list, &fpq->processing);
+-	spin_unlock(&fpq->lock);
++	__fuse_get_request(req);
+ 	set_bit(FR_SENT, &req->flags);
++	spin_unlock(&fpq->lock);
+ 	/* matches barrier in request_wait_answer() */
+ 	smp_mb__after_atomic();
+ 	if (test_bit(FR_INTERRUPTED, &req->flags))
+ 		queue_interrupt(fiq, req);
++	fuse_put_request(fc, req);
+ 
+ 	return reqsize;
+ 
+@@ -1762,8 +1771,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
+ 	req->in.args[1].size = total_len;
+ 
+ 	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
+-	if (err)
++	if (err) {
+ 		fuse_retrieve_end(fc, req);
++		fuse_put_request(fc, req);
++	}
+ 
+ 	return err;
+ }
+@@ -1922,16 +1933,20 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
+ 
+ 	/* Is it an interrupt reply? */
+ 	if (req->intr_unique == oh.unique) {
++		__fuse_get_request(req);
+ 		spin_unlock(&fpq->lock);
+ 
+ 		err = -EINVAL;
+-		if (nbytes != sizeof(struct fuse_out_header))
++		if (nbytes != sizeof(struct fuse_out_header)) {
++			fuse_put_request(fc, req);
+ 			goto err_finish;
++		}
+ 
+ 		if (oh.error == -ENOSYS)
+ 			fc->no_interrupt = 1;
+ 		else if (oh.error == -EAGAIN)
+ 			queue_interrupt(&fc->iq, req);
++		fuse_put_request(fc, req);
+ 
+ 		fuse_copy_finish(cs);
+ 		return nbytes;
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index baab99b69d8a..d9178388cf48 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -1353,6 +1353,9 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
+ 	struct path path;
+ 	int error;
+ 
++	if (!dev_name || !*dev_name)
++		return ERR_PTR(-EINVAL);
++
+ 	error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
+ 	if (error) {
+ 		pr_warn("path_lookup on %s returned error %d\n",
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 684996c8a3a4..4d5a5a4cc017 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -254,8 +254,8 @@ restart:
+ 		bh = jh2bh(jh);
+ 
+ 		if (buffer_locked(bh)) {
+-			spin_unlock(&journal->j_list_lock);
+ 			get_bh(bh);
++			spin_unlock(&journal->j_list_lock);
+ 			wait_on_buffer(bh);
+ 			/* the journal_head may have gone by now */
+ 			BUFFER_TRACE(bh, "brelse");
+@@ -336,8 +336,8 @@ restart2:
+ 		jh = transaction->t_checkpoint_io_list;
+ 		bh = jh2bh(jh);
+ 		if (buffer_locked(bh)) {
+-			spin_unlock(&journal->j_list_lock);
+ 			get_bh(bh);
++			spin_unlock(&journal->j_list_lock);
+ 			wait_on_buffer(bh);
+ 			/* the journal_head may have gone by now */
+ 			BUFFER_TRACE(bh, "brelse");
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index 600da1a4df29..1544f530ccd0 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -285,10 +285,8 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
+ 	sb->s_fs_info = c;
+ 
+ 	ret = jffs2_parse_options(c, data);
+-	if (ret) {
+-		kfree(c);
++	if (ret)
+ 		return -EINVAL;
+-	}
+ 
+ 	/* Initialize JFFS2 superblock locks, the further initialization will
+ 	 * be done later */
+diff --git a/fs/lockd/host.c b/fs/lockd/host.c
+index d716c9993a26..c7eb47f2fb6c 100644
+--- a/fs/lockd/host.c
++++ b/fs/lockd/host.c
+@@ -340,7 +340,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
+ 	};
+ 	struct lockd_net *ln = net_generic(net, lockd_net_id);
+ 
+-	dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
++	dprintk("lockd: %s(host='%.*s', vers=%u, proto=%s)\n", __func__,
+ 			(int)hostname_len, hostname, rqstp->rq_vers,
+ 			(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
+ 
+diff --git a/fs/namespace.c b/fs/namespace.c
+index b56b50e3da11..88c5d5bddf74 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1584,8 +1584,13 @@ static int do_umount(struct mount *mnt, int flags)
+ 
+ 	namespace_lock();
+ 	lock_mount_hash();
+-	event++;
+ 
++	/* Recheck MNT_LOCKED with the locks held */
++	retval = -EINVAL;
++	if (mnt->mnt.mnt_flags & MNT_LOCKED)
++		goto out;
++
++	event++;
+ 	if (flags & MNT_DETACH) {
+ 		if (!list_empty(&mnt->mnt_list))
+ 			umount_tree(mnt, UMOUNT_PROPAGATE);
+@@ -1599,6 +1604,7 @@ static int do_umount(struct mount *mnt, int flags)
+ 			retval = 0;
+ 		}
+ 	}
++out:
+ 	unlock_mount_hash();
+ 	namespace_unlock();
+ 	return retval;
+@@ -1681,7 +1687,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
+ 		goto dput_and_out;
+ 	if (!check_mnt(mnt))
+ 		goto dput_and_out;
+-	if (mnt->mnt.mnt_flags & MNT_LOCKED)
++	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
+ 		goto dput_and_out;
+ 	retval = -EPERM;
+ 	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
+@@ -1759,8 +1765,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
+ 		for (s = r; s; s = next_mnt(s, r)) {
+ 			if (!(flag & CL_COPY_UNBINDABLE) &&
+ 			    IS_MNT_UNBINDABLE(s)) {
+-				s = skip_mnt_tree(s);
+-				continue;
++				if (s->mnt.mnt_flags & MNT_LOCKED) {
++					/* Both unbindable and locked. */
++					q = ERR_PTR(-EPERM);
++					goto out;
++				} else {
++					s = skip_mnt_tree(s);
++					continue;
++				}
+ 			}
+ 			if (!(flag & CL_COPY_MNT_NS_FILE) &&
+ 			    is_mnt_ns_file(s->mnt.mnt_root)) {
+@@ -1813,7 +1825,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
+ {
+ 	namespace_lock();
+ 	lock_mount_hash();
+-	umount_tree(real_mount(mnt), UMOUNT_SYNC);
++	umount_tree(real_mount(mnt), 0);
+ 	unlock_mount_hash();
+ 	namespace_unlock();
+ }
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 63498e1a542a..ae91d1e450be 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -879,10 +879,10 @@ EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
+ 
+ /*
+  * Session has been established, and the client marked ready.
+- * Set the mount rsize and wsize with negotiated fore channel
+- * attributes which will be bound checked in nfs_server_set_fsinfo.
++ * Limit the mount rsize, wsize and dtsize using negotiated fore
++ * channel attributes.
+  */
+-static void nfs4_session_set_rwsize(struct nfs_server *server)
++static void nfs4_session_limit_rwsize(struct nfs_server *server)
+ {
+ #ifdef CONFIG_NFS_V4_1
+ 	struct nfs4_session *sess;
+@@ -895,9 +895,11 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
+ 	server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
+ 	server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
+ 
+-	if (!server->rsize || server->rsize > server_resp_sz)
++	if (server->dtsize > server_resp_sz)
++		server->dtsize = server_resp_sz;
++	if (server->rsize > server_resp_sz)
+ 		server->rsize = server_resp_sz;
+-	if (!server->wsize || server->wsize > server_rqst_sz)
++	if (server->wsize > server_rqst_sz)
+ 		server->wsize = server_rqst_sz;
+ #endif /* CONFIG_NFS_V4_1 */
+ }
+@@ -944,12 +946,12 @@ static int nfs4_server_common_setup(struct nfs_server *server,
+ 			(unsigned long long) server->fsid.minor);
+ 	nfs_display_fhandle(mntfh, "Pseudo-fs root FH");
+ 
+-	nfs4_session_set_rwsize(server);
+-
+ 	error = nfs_probe_fsinfo(server, mntfh, fattr);
+ 	if (error < 0)
+ 		goto out;
+ 
++	nfs4_session_limit_rwsize(server);
++
+ 	if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ 		server->namelen = NFS4_MAXNAMLEN;
+ 
+diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
+index ffecf89c8c1c..49af618e410d 100644
+--- a/fs/ocfs2/dir.c
++++ b/fs/ocfs2/dir.c
+@@ -1896,8 +1896,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
+ 				/* On error, skip the f_pos to the
+ 				   next block. */
+ 				ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
+-				brelse(bh);
+-				continue;
++				break;
+ 			}
+ 			if (le64_to_cpu(de->inode)) {
+ 				unsigned char d_type = DT_UNKNOWN;
+diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
+index 3e3799cdc6e6..9b9fe0588008 100644
+--- a/include/linux/ceph/libceph.h
++++ b/include/linux/ceph/libceph.h
+@@ -72,7 +72,13 @@ struct ceph_options {
+ 
+ #define CEPH_MSG_MAX_FRONT_LEN	(16*1024*1024)
+ #define CEPH_MSG_MAX_MIDDLE_LEN	(16*1024*1024)
+-#define CEPH_MSG_MAX_DATA_LEN	(16*1024*1024)
++
++/*
++ * Handle the largest possible rbd object in one message.
++ * There is no limit on the size of cephfs objects, but it has to obey
++ * rsize and wsize mount options anyway.
++ */
++#define CEPH_MSG_MAX_DATA_LEN	(32*1024*1024)
+ 
+ #define CEPH_AUTH_NAME_DEFAULT   "guest"
+ 
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 685c262e0be8..3957d99e66ea 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -110,6 +110,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
+ 			unsigned long addr, unsigned long sz);
+ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
+ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
++void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
++				unsigned long *start, unsigned long *end);
+ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ 			      int write);
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+@@ -132,6 +134,18 @@ static inline unsigned long hugetlb_total_pages(void)
+ 	return 0;
+ }
+ 
++static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
++						pte_t *ptep)
++{
++	return 0;
++}
++
++static inline void adjust_range_if_pmd_sharing_possible(
++				struct vm_area_struct *vma,
++				unsigned long *start, unsigned long *end)
++{
++}
++
+ #define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
+ #define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
+ #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
+diff --git a/include/linux/i8253.h b/include/linux/i8253.h
+index e6bb36a97519..8336b2f6f834 100644
+--- a/include/linux/i8253.h
++++ b/include/linux/i8253.h
+@@ -21,6 +21,7 @@
+ #define PIT_LATCH	((PIT_TICK_RATE + HZ/2) / HZ)
+ 
+ extern raw_spinlock_t i8253_lock;
++extern bool i8253_clear_counter_on_shutdown;
+ extern struct clock_event_device i8253_clockevent;
+ extern void clockevent_i8253_init(bool oneshot);
+ 
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 1f4366567e7d..d4e8077fca96 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2058,6 +2058,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+ 	return vma;
+ }
+ 
++static inline bool range_in_vma(struct vm_area_struct *vma,
++				unsigned long start, unsigned long end)
++{
++	return (vma && vma->vm_start <= start && end <= vma->vm_end);
++}
++
+ #ifdef CONFIG_MMU
+ pgprot_t vm_get_page_prot(unsigned long vm_flags);
+ void vma_set_page_prot(struct vm_area_struct *vma);
+diff --git a/include/linux/tc.h b/include/linux/tc.h
+index f92511e57cdb..a60639f37963 100644
+--- a/include/linux/tc.h
++++ b/include/linux/tc.h
+@@ -84,6 +84,7 @@ struct tc_dev {
+ 					   device. */
+ 	struct device	dev;		/* Generic device interface. */
+ 	struct resource	resource;	/* Address space of this device. */
++	u64		dma_mask;	/* DMA addressable range. */
+ 	char		vendor[9];
+ 	char		name[9];
+ 	char		firmware[9];
+diff --git a/kernel/bounds.c b/kernel/bounds.c
+index e1d1d1952bfa..c37f68d758db 100644
+--- a/kernel/bounds.c
++++ b/kernel/bounds.c
+@@ -12,7 +12,7 @@
+ #include <linux/log2.h>
+ #include <linux/spinlock_types.h>
+ 
+-void foo(void)
++int main(void)
+ {
+ 	/* The enum constants to put into include/generated/bounds.h */
+ 	DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
+@@ -22,4 +22,6 @@ void foo(void)
+ #endif
+ 	DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
+ 	/* End of constants */
++
++	return 0;
+ }
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 0df2b44dac7c..83cea913983c 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -864,6 +864,9 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+ 
+ 	local_bh_disable();
+ 	ret = action->thread_fn(action->irq, action->dev_id);
++	if (ret == IRQ_HANDLED)
++		atomic_inc(&desc->threads_handled);
++
+ 	irq_finalize_oneshot(desc, action);
+ 	local_bh_enable();
+ 	return ret;
+@@ -880,6 +883,9 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+ 	irqreturn_t ret;
+ 
+ 	ret = action->thread_fn(action->irq, action->dev_id);
++	if (ret == IRQ_HANDLED)
++		atomic_inc(&desc->threads_handled);
++
+ 	irq_finalize_oneshot(desc, action);
+ 	return ret;
+ }
+@@ -957,8 +963,6 @@ static int irq_thread(void *data)
+ 		irq_thread_check_affinity(desc, action);
+ 
+ 		action_ret = handler_fn(desc, action);
+-		if (action_ret == IRQ_HANDLED)
+-			atomic_inc(&desc->threads_handled);
+ 		if (action_ret == IRQ_WAKE_THREAD)
+ 			irq_wake_secondary(desc, action);
+ 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 388bcace62f8..d8daf6c55d2b 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -665,9 +665,10 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
+ }
+ 
+ /* Cancel unoptimizing for reusing */
+-static void reuse_unused_kprobe(struct kprobe *ap)
++static int reuse_unused_kprobe(struct kprobe *ap)
+ {
+ 	struct optimized_kprobe *op;
++	int ret;
+ 
+ 	BUG_ON(!kprobe_unused(ap));
+ 	/*
+@@ -681,8 +682,12 @@ static void reuse_unused_kprobe(struct kprobe *ap)
+ 	/* Enable the probe again */
+ 	ap->flags &= ~KPROBE_FLAG_DISABLED;
+ 	/* Optimize it again (remove from op->list) */
+-	BUG_ON(!kprobe_optready(ap));
++	ret = kprobe_optready(ap);
++	if (ret)
++		return ret;
++
+ 	optimize_kprobe(ap);
++	return 0;
+ }
+ 
+ /* Remove optimized instructions */
+@@ -894,11 +899,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
+ #define kprobe_disarmed(p)			kprobe_disabled(p)
+ #define wait_for_kprobe_optimizer()		do {} while (0)
+ 
+-/* There should be no unused kprobes can be reused without optimization */
+-static void reuse_unused_kprobe(struct kprobe *ap)
++static int reuse_unused_kprobe(struct kprobe *ap)
+ {
++	/*
++	 * If the optimized kprobe is NOT supported, the aggr kprobe is
++	 * released at the same time that the last aggregated kprobe is
++	 * unregistered.
++	 * Thus there should be no chance to reuse unused kprobe.
++	 */
+ 	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
+-	BUG_ON(kprobe_unused(ap));
++	return -EINVAL;
+ }
+ 
+ static void free_aggr_kprobe(struct kprobe *p)
+@@ -1276,9 +1286,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
+ 			goto out;
+ 		}
+ 		init_aggr_kprobe(ap, orig_p);
+-	} else if (kprobe_unused(ap))
++	} else if (kprobe_unused(ap)) {
+ 		/* This probe is going to die. Rescue it */
+-		reuse_unused_kprobe(ap);
++		ret = reuse_unused_kprobe(ap);
++		if (ret)
++			goto out;
++	}
+ 
+ 	if (kprobe_gone(ap)) {
+ 		/*
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 6e171b547a80..774ab79d3ec7 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3826,7 +3826,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
+ {
+ 	unsigned long flags;
+ 
+-	if (unlikely(!lock_stat))
++	if (unlikely(!lock_stat || !debug_locks))
+ 		return;
+ 
+ 	if (unlikely(current->lockdep_recursion))
+@@ -3846,7 +3846,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
+ {
+ 	unsigned long flags;
+ 
+-	if (unlikely(!lock_stat))
++	if (unlikely(!lock_stat || !debug_locks))
+ 		return;
+ 
+ 	if (unlikely(current->lockdep_recursion))
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 0b5613554769..dd689ab22806 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -881,7 +881,12 @@ static void __init log_buf_len_update(unsigned size)
+ /* save requested log_buf_len since it's too early to process it */
+ static int __init log_buf_len_setup(char *str)
+ {
+-	unsigned size = memparse(str, &str);
++	unsigned int size;
++
++	if (!str)
++		return -EINVAL;
++
++	size = memparse(str, &str);
+ 
+ 	log_buf_len_update(size);
+ 
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 8bfbc47f0a23..5b1313309356 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -991,7 +991,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
+ 
+ 	result = TRACE_SIGNAL_IGNORED;
+ 	if (!prepare_signal(sig, t,
+-			from_ancestor_ns || (info == SEND_SIG_FORCED)))
++			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
+ 		goto ret;
+ 
+ 	pending = group ? &t->signal->shared_pending : &t->pending;
+diff --git a/lib/debug_locks.c b/lib/debug_locks.c
+index 96c4c633d95e..124fdf238b3d 100644
+--- a/lib/debug_locks.c
++++ b/lib/debug_locks.c
+@@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
+  */
+ int debug_locks_off(void)
+ {
+-	if (__debug_locks_off()) {
++	if (debug_locks && __debug_locks_off()) {
+ 		if (!debug_locks_silent) {
+ 			console_verbose();
+ 			return 1;
+diff --git a/mm/gup.c b/mm/gup.c
+index b599526db9f7..018144c4b9ec 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -940,8 +940,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+ 	int locked = 0;
+ 	long ret = 0;
+ 
+-	VM_BUG_ON(start & ~PAGE_MASK);
+-	VM_BUG_ON(len != PAGE_ALIGN(len));
+ 	end = start + len;
+ 
+ 	for (nstart = start; nstart < end; nstart = nend) {
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index a813b03021b7..6f99a0f906bb 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3103,7 +3103,7 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ 			    struct vm_area_struct *vma)
+ {
+-	pte_t *src_pte, *dst_pte, entry;
++	pte_t *src_pte, *dst_pte, entry, dst_entry;
+ 	struct page *ptepage;
+ 	unsigned long addr;
+ 	int cow;
+@@ -3131,15 +3131,30 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ 			break;
+ 		}
+ 
+-		/* If the pagetables are shared don't copy or take references */
+-		if (dst_pte == src_pte)
++		/*
++		 * If the pagetables are shared don't copy or take references.
++		 * dst_pte == src_pte is the common case of src/dest sharing.
++		 *
++		 * However, src could have 'unshared' and dst shares with
++		 * another vma.  If dst_pte !none, this implies sharing.
++		 * Check here before taking page table lock, and once again
++		 * after taking the lock below.
++		 */
++		dst_entry = huge_ptep_get(dst_pte);
++		if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
+ 			continue;
+ 
+ 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
+ 		src_ptl = huge_pte_lockptr(h, src, src_pte);
+ 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+ 		entry = huge_ptep_get(src_pte);
+-		if (huge_pte_none(entry)) { /* skip none entry */
++		dst_entry = huge_ptep_get(dst_pte);
++		if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
++			/*
++			 * Skip if src entry none.  Also, skip in the
++			 * unlikely case dst entry !none as this implies
++			 * sharing with another vma.
++			 */
+ 			;
+ 		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
+ 				    is_hugetlb_entry_hwpoisoned(entry))) {
+@@ -3537,6 +3552,12 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+ 		return err;
+ 	ClearPagePrivate(page);
+ 
++	/*
++	 * set page dirty so that it will not be removed from cache/file
++	 * by non-hugetlbfs specific code paths.
++	 */
++	set_page_dirty(page);
++
+ 	spin_lock(&inode->i_lock);
+ 	inode->i_blocks += blocks_per_huge_page(h);
+ 	spin_unlock(&inode->i_lock);
+@@ -4195,12 +4216,40 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ 	/*
+ 	 * check on proper vm_flags and page table alignment
+ 	 */
+-	if (vma->vm_flags & VM_MAYSHARE &&
+-	    vma->vm_start <= base && end <= vma->vm_end)
++	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
+ 		return true;
+ 	return false;
+ }
+ 
++/*
++ * Determine if start,end range within vma could be mapped by shared pmd.
++ * If yes, adjust start and end to cover range associated with possible
++ * shared pmd mappings.
++ */
++void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
++				unsigned long *start, unsigned long *end)
++{
++	unsigned long check_addr = *start;
++
++	if (!(vma->vm_flags & VM_MAYSHARE))
++		return;
++
++	for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
++		unsigned long a_start = check_addr & PUD_MASK;
++		unsigned long a_end = a_start + PUD_SIZE;
++
++		/*
++		 * If sharing is possible, adjust start/end if necessary.
++		 */
++		if (range_in_vma(vma, a_start, a_end)) {
++			if (a_start < *start)
++				*start = a_start;
++			if (a_end > *end)
++				*end = a_end;
++		}
++	}
++}
++
+ /*
+  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+  * and returns the corresponding pte. While this is not necessary for the
+@@ -4297,6 +4346,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ {
+ 	return 0;
+ }
++
++void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
++				unsigned long *start, unsigned long *end)
++{
++}
+ #define want_pmd_share()	(0)
+ #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+ 
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index b777590c3e13..be9840bf11d1 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2010,8 +2010,36 @@ retry_cpuset:
+ 		nmask = policy_nodemask(gfp, pol);
+ 		if (!nmask || node_isset(hpage_node, *nmask)) {
+ 			mpol_cond_put(pol);
+-			page = __alloc_pages_node(hpage_node,
+-						gfp | __GFP_THISNODE, order);
++			/*
++			 * We cannot invoke reclaim if __GFP_THISNODE
++			 * is set. Invoking reclaim with
++			 * __GFP_THISNODE set, would cause THP
++			 * allocations to trigger heavy swapping
++			 * despite there may be tons of free memory
++			 * (including potentially plenty of THP
++			 * already available in the buddy) on all the
++			 * other NUMA nodes.
++			 *
++			 * At most we could invoke compaction when
++			 * __GFP_THISNODE is set (but we would need to
++			 * refrain from invoking reclaim even if
++			 * compaction returned COMPACT_SKIPPED because
++			 * there wasn't not enough memory to succeed
++			 * compaction). For now just avoid
++			 * __GFP_THISNODE instead of limiting the
++			 * allocation path to a strict and single
++			 * compaction invocation.
++			 *
++			 * Supposedly if direct reclaim was enabled by
++			 * the caller, the app prefers THP regardless
++			 * of the node it comes from so this would be
++			 * more desiderable behavior than only
++			 * providing THP originated from the local
++			 * node in such case.
++			 */
++			if (!(gfp & __GFP_DIRECT_RECLAIM))
++				gfp |= __GFP_THISNODE;
++			page = __alloc_pages_node(hpage_node, gfp, order);
+ 			goto out;
+ 		}
+ 	}
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 39f5fbd07486..3074dbcd9621 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2817,10 +2817,6 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+ 	pgoff_t pgoff = addr >> PAGE_SHIFT;
+ 	int error;
+ 
+-	len = PAGE_ALIGN(len);
+-	if (!len)
+-		return addr;
+-
+ 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+ 
+ 	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+@@ -2888,12 +2884,19 @@ out:
+ 	return addr;
+ }
+ 
+-unsigned long vm_brk(unsigned long addr, unsigned long len)
++unsigned long vm_brk(unsigned long addr, unsigned long request)
+ {
+ 	struct mm_struct *mm = current->mm;
++	unsigned long len;
+ 	unsigned long ret;
+ 	bool populate;
+ 
++	len = PAGE_ALIGN(request);
++	if (len < request)
++		return -ENOMEM;
++	if (!len)
++		return addr;
++
+ 	down_write(&mm->mmap_sem);
+ 	ret = do_brk(addr, len);
+ 	populate = ((mm->def_flags & VM_LOCKED) != 0);
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 1bceb49aa214..488dda209431 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1324,12 +1324,41 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ 	pte_t pteval;
+ 	spinlock_t *ptl;
+ 	int ret = SWAP_AGAIN;
++	unsigned long sh_address;
++	bool pmd_sharing_possible = false;
++	unsigned long spmd_start, spmd_end;
+ 	enum ttu_flags flags = (enum ttu_flags)arg;
+ 
+ 	/* munlock has nothing to gain from examining un-locked vmas */
+ 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
+ 		goto out;
+ 
++	/*
++	 * Only use the range_start/end mmu notifiers if huge pmd sharing
++	 * is possible.  In the normal case, mmu_notifier_invalidate_page
++	 * is sufficient as we only unmap a page.  However, if we unshare
++	 * a pmd, we will unmap a PUD_SIZE range.
++	 */
++	if (PageHuge(page)) {
++		spmd_start = address;
++		spmd_end = spmd_start + vma_mmu_pagesize(vma);
++
++		/*
++		 * Check if pmd sharing is possible.  If possible, we could
++		 * unmap a PUD_SIZE range.  spmd_start/spmd_end will be
++		 * modified if sharing is possible.
++		 */
++		adjust_range_if_pmd_sharing_possible(vma, &spmd_start,
++								&spmd_end);
++		if (spmd_end - spmd_start != vma_mmu_pagesize(vma)) {
++			sh_address = address;
++
++			pmd_sharing_possible = true;
++			mmu_notifier_invalidate_range_start(vma->vm_mm,
++							spmd_start, spmd_end);
++		}
++	}
++
+ 	pte = page_check_address(page, mm, address, &ptl, 0);
+ 	if (!pte)
+ 		goto out;
+@@ -1356,6 +1385,30 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ 		}
+   	}
+ 
++	/*
++	 * Call huge_pmd_unshare to potentially unshare a huge pmd.  Pass
++	 * sh_address as it will be modified if unsharing is successful.
++	 */
++	if (PageHuge(page) && huge_pmd_unshare(mm, &sh_address, pte)) {
++		/*
++		 * huge_pmd_unshare unmapped an entire PMD page.  There is
++		 * no way of knowing exactly which PMDs may be cached for
++		 * this mm, so flush them all.  spmd_start/spmd_end cover
++		 * this PUD_SIZE range.
++		 */
++		flush_cache_range(vma, spmd_start, spmd_end);
++		flush_tlb_range(vma, spmd_start, spmd_end);
++
++		/*
++		 * The ref count of the PMD page was dropped which is part
++		 * of the way map counting is done for shared PMDs.  When
++		 * there is no other sharing, huge_pmd_unshare returns false
++		 * and we will unmap the actual page and drop map count
++		 * to zero.
++		 */
++		goto out_unmap;
++	}
++
+ 	/* Nuke the page table entry. */
+ 	flush_cache_page(vma, address, page_to_pfn(page));
+ 	if (should_defer_flush(mm, flags)) {
+@@ -1450,6 +1503,9 @@ out_unmap:
+ 	if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK))
+ 		mmu_notifier_invalidate_page(mm, address);
+ out:
++	if (pmd_sharing_possible)
++		mmu_notifier_invalidate_range_end(vma->vm_mm,
++							spmd_start, spmd_end);
+ 	return ret;
+ }
+ 
+diff --git a/net/9p/protocol.c b/net/9p/protocol.c
+index 16d287565987..145f80518064 100644
+--- a/net/9p/protocol.c
++++ b/net/9p/protocol.c
+@@ -46,10 +46,15 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
+ void p9stat_free(struct p9_wstat *stbuf)
+ {
+ 	kfree(stbuf->name);
++	stbuf->name = NULL;
+ 	kfree(stbuf->uid);
++	stbuf->uid = NULL;
+ 	kfree(stbuf->gid);
++	stbuf->gid = NULL;
+ 	kfree(stbuf->muid);
++	stbuf->muid = NULL;
+ 	kfree(stbuf->extension);
++	stbuf->extension = NULL;
+ }
+ EXPORT_SYMBOL(p9stat_free);
+ 
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 5169b9b36b6a..cfaacaa023e6 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1582,7 +1582,7 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
+  *
+  * Description:
+  * Parse the packet's IP header looking for a CIPSO option.  Returns a pointer
+- * to the start of the CIPSO option on success, NULL if one if not found.
++ * to the start of the CIPSO option on success, NULL if one is not found.
+  *
+  */
+ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
+@@ -1592,10 +1592,8 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
+ 	int optlen;
+ 	int taglen;
+ 
+-	for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
++	for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) {
+ 		switch (optptr[0]) {
+-		case IPOPT_CIPSO:
+-			return optptr;
+ 		case IPOPT_END:
+ 			return NULL;
+ 		case IPOPT_NOOP:
+@@ -1604,6 +1602,11 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
+ 		default:
+ 			taglen = optptr[1];
+ 		}
++		if (!taglen || taglen > optlen)
++			return NULL;
++		if (optptr[0] == IPOPT_CIPSO)
++			return optptr;
++
+ 		optlen -= taglen;
+ 		optptr += taglen;
+ 	}
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index a6cbb2104667..71f15da72f02 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -945,7 +945,7 @@ static void call_xpt_users(struct svc_xprt *xprt)
+ 	spin_lock(&xprt->xpt_lock);
+ 	while (!list_empty(&xprt->xpt_users)) {
+ 		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
+-		list_del(&u->list);
++		list_del_init(&u->list);
+ 		u->callback(u);
+ 	}
+ 	spin_unlock(&xprt->xpt_lock);
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index 4439ac4c1b53..9b8d855e4a87 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -639,11 +639,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
+ 		WARN_ON_ONCE(xdr->iov);
+ 		return;
+ 	}
+-	if (fraglen) {
++	if (fraglen)
+ 		xdr->end = head->iov_base + head->iov_len;
+-		xdr->page_ptr--;
+-	}
+ 	/* (otherwise assume xdr->end is already set) */
++	xdr->page_ptr--;
+ 	head->iov_len = len;
+ 	buf->len = len;
+ 	xdr->p = head->iov_base + head->iov_len;
+diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
+index 816d175da79a..30aced99bc55 100644
+--- a/security/integrity/ima/ima_fs.c
++++ b/security/integrity/ima/ima_fs.c
+@@ -26,14 +26,14 @@
+ #include "ima.h"
+ 
+ static int valid_policy = 1;
+-#define TMPBUFLEN 12
++
+ static ssize_t ima_show_htable_value(char __user *buf, size_t count,
+ 				     loff_t *ppos, atomic_long_t *val)
+ {
+-	char tmpbuf[TMPBUFLEN];
++	char tmpbuf[32];	/* greater than largest 'long' string value */
+ 	ssize_t len;
+ 
+-	len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
++	len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n", atomic_long_read(val));
+ 	return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
+ }
+ 
+diff --git a/sound/pci/ca0106/ca0106.h b/sound/pci/ca0106/ca0106.h
+index 04402c14cb23..9847b669cf3c 100644
+--- a/sound/pci/ca0106/ca0106.h
++++ b/sound/pci/ca0106/ca0106.h
+@@ -582,7 +582,7 @@
+ #define SPI_PL_BIT_R_R		(2<<7)	/* right channel = right */
+ #define SPI_PL_BIT_R_C		(3<<7)	/* right channel = (L+R)/2 */
+ #define SPI_IZD_REG		2
+-#define SPI_IZD_BIT		(1<<4)	/* infinite zero detect */
++#define SPI_IZD_BIT		(0<<4)	/* infinite zero detect */
+ 
+ #define SPI_FMT_REG		3
+ #define SPI_FMT_BIT_RJ		(0<<0)	/* right justified mode */
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index b17539537b2e..55ec4470f6b6 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -151,6 +151,7 @@ struct azx {
+ 	unsigned int msi:1;
+ 	unsigned int probing:1; /* codec probing phase */
+ 	unsigned int snoop:1;
++	unsigned int uc_buffer:1; /* non-cached pages for stream buffers */
+ 	unsigned int align_buffer_size:1;
+ 	unsigned int region_requested:1;
+ 	unsigned int disabled:1; /* disabled by vga_switcheroo */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 95a82e428f37..ecb07fb036af 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -401,7 +401,7 @@ static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool
+ #ifdef CONFIG_SND_DMA_SGBUF
+ 	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
+ 		struct snd_sg_buf *sgbuf = dmab->private_data;
+-		if (chip->driver_type == AZX_DRIVER_CMEDIA)
++		if (!chip->uc_buffer)
+ 			return; /* deal with only CORB/RIRB buffers */
+ 		if (on)
+ 			set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
+@@ -1538,6 +1538,7 @@ static void azx_check_snoop_available(struct azx *chip)
+ 		dev_info(chip->card->dev, "Force to %s mode by module option\n",
+ 			 snoop ? "snoop" : "non-snoop");
+ 		chip->snoop = snoop;
++		chip->uc_buffer = !snoop;
+ 		return;
+ 	}
+ 
+@@ -1558,8 +1559,12 @@ static void azx_check_snoop_available(struct azx *chip)
+ 		snoop = false;
+ 
+ 	chip->snoop = snoop;
+-	if (!snoop)
++	if (!snoop) {
+ 		dev_info(chip->card->dev, "Force to non-snoop mode\n");
++		/* C-Media requires non-cached pages only for CORB/RIRB */
++		if (chip->driver_type != AZX_DRIVER_CMEDIA)
++			chip->uc_buffer = true;
++	}
+ }
+ 
+ static void azx_probe_work(struct work_struct *work)
+@@ -1958,7 +1963,7 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
+ #ifdef CONFIG_X86
+ 	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
+ 	struct azx *chip = apcm->chip;
+-	if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA)
++	if (chip->uc_buffer)
+ 		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ #endif
+ }
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index a1a3ce8c3f56..aea3cc2abe3a 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -867,6 +867,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
++	SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
+diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
+index d995743cb673..58ce62088a39 100644
+--- a/tools/perf/util/trace-event-info.c
++++ b/tools/perf/util/trace-event-info.c
+@@ -507,12 +507,14 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
+ 			 "/tmp/perf-XXXXXX");
+ 		if (!mkstemp(tdata->temp_file)) {
+ 			pr_debug("Can't make temp file");
++			free(tdata);
+ 			return NULL;
+ 		}
+ 
+ 		temp_fd = open(tdata->temp_file, O_RDWR);
+ 		if (temp_fd < 0) {
+ 			pr_debug("Can't read '%s'", tdata->temp_file);
++			free(tdata);
+ 			return NULL;
+ 		}
+ 
+diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
+index b67a0ccf5ab9..23baee7b786a 100644
+--- a/tools/perf/util/trace-event-read.c
++++ b/tools/perf/util/trace-event-read.c
+@@ -334,9 +334,12 @@ static int read_event_files(struct pevent *pevent)
+ 		for (x=0; x < count; x++) {
+ 			size = read8(pevent);
+ 			ret = read_event_file(pevent, sys, size);
+-			if (ret)
++			if (ret) {
++				free(sys);
+ 				return ret;
++			}
+ 		}
++		free(sys);
+ 	}
+ 	return 0;
+ }
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
+new file mode 100644
+index 000000000000..88e6c3f43006
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
+@@ -0,0 +1,80 @@
++#!/bin/sh
++# SPDX-License-Identifier: GPL-2.0
++# description: event trigger - test synthetic_events syntax parser
++
++do_reset() {
++    reset_trigger
++    echo > set_event
++    clear_trace
++}
++
++fail() { #msg
++    do_reset
++    echo $1
++    exit_fail
++}
++
++if [ ! -f set_event ]; then
++    echo "event tracing is not supported"
++    exit_unsupported
++fi
++
++if [ ! -f synthetic_events ]; then
++    echo "synthetic event is not supported"
++    exit_unsupported
++fi
++
++reset_tracer
++do_reset
++
++echo "Test synthetic_events syntax parser"
++
++echo > synthetic_events
++
++# synthetic event must have a field
++! echo "myevent" >> synthetic_events
++echo "myevent u64 var1" >> synthetic_events
++
++# synthetic event must be found in synthetic_events
++grep "myevent[[:space:]]u64 var1" synthetic_events
++
++# it is not possible to add same name event
++! echo "myevent u64 var2" >> synthetic_events
++
++# Non-append open will cleanup all events and add new one
++echo "myevent u64 var2" > synthetic_events
++
++# multiple fields with different spaces
++echo "myevent u64 var1; u64 var2;" > synthetic_events
++grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
++echo "myevent u64 var1 ; u64 var2 ;" > synthetic_events
++grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
++echo "myevent u64 var1 ;u64 var2" > synthetic_events
++grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
++
++# test field types
++echo "myevent u32 var" > synthetic_events
++echo "myevent u16 var" > synthetic_events
++echo "myevent u8 var" > synthetic_events
++echo "myevent s64 var" > synthetic_events
++echo "myevent s32 var" > synthetic_events
++echo "myevent s16 var" > synthetic_events
++echo "myevent s8 var" > synthetic_events
++
++echo "myevent char var" > synthetic_events
++echo "myevent int var" > synthetic_events
++echo "myevent long var" > synthetic_events
++echo "myevent pid_t var" > synthetic_events
++
++echo "myevent unsigned char var" > synthetic_events
++echo "myevent unsigned int var" > synthetic_events
++echo "myevent unsigned long var" > synthetic_events
++grep "myevent[[:space:]]unsigned long var" synthetic_events
++
++# test string type
++echo "myevent char var[10]" > synthetic_events
++grep "myevent[[:space:]]char\[10\] var" synthetic_events
++
++do_reset
++
++exit 0


Thread overview: 355+ messages
2018-11-21 12:18 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2022-02-03 11:46 [gentoo-commits] proj/linux-patches:4.4 commit in: / Mike Pagano
2022-01-29 17:47 Mike Pagano
2022-01-27 11:42 Mike Pagano
2022-01-11 12:57 Mike Pagano
2022-01-05 12:57 Mike Pagano
2021-12-29 13:13 Mike Pagano
2021-12-22 14:09 Mike Pagano
2021-12-14 10:38 Mike Pagano
2021-12-08 12:58 Mike Pagano
2021-11-26 12:02 Mike Pagano
2021-11-12 13:39 Mike Pagano
2021-11-02 17:07 Mike Pagano
2021-10-27 12:01 Mike Pagano
2021-10-17 13:15 Mike Pagano
2021-10-09 21:36 Mike Pagano
2021-10-07 10:37 Mike Pagano
2021-10-06 11:33 Mike Pagano
2021-09-26 14:16 Mike Pagano
2021-09-22 11:43 Mike Pagano
2021-09-20 22:07 Mike Pagano
2021-09-03 11:26 Mike Pagano
2021-08-26 14:02 Mike Pagano
2021-08-25 23:20 Mike Pagano
2021-08-15 20:12 Mike Pagano
2021-08-10 16:22 Mike Pagano
2021-08-08 13:47 Mike Pagano
2021-08-04 11:56 Mike Pagano
2021-08-03 12:51 Mike Pagano
2021-07-28 12:39 Mike Pagano
2021-07-20 15:17 Alice Ferrazzi
2021-07-11 14:48 Mike Pagano
2021-06-30 14:29 Mike Pagano
2021-06-17 11:05 Alice Ferrazzi
2021-06-10 11:09 Mike Pagano
2021-06-03 10:43 Alice Ferrazzi
2021-05-26 11:59 Mike Pagano
2021-05-22 10:00 Mike Pagano
2021-04-28 11:08 Alice Ferrazzi
2021-04-16 11:20 Alice Ferrazzi
2021-04-10 13:21 Mike Pagano
2021-04-07 12:10 Mike Pagano
2021-03-30 14:13 Mike Pagano
2021-03-24 12:06 Mike Pagano
2021-03-17 15:39 Mike Pagano
2021-03-11 13:34 Mike Pagano
2021-03-07 15:12 Mike Pagano
2021-03-03 16:34 Alice Ferrazzi
2021-02-23 13:46 Mike Pagano
2021-02-10 10:17 Alice Ferrazzi
2021-02-05 14:57 Alice Ferrazzi
2021-02-03 23:23 Mike Pagano
2021-01-30 13:11 Alice Ferrazzi
2021-01-23 16:33 Mike Pagano
2021-01-17 16:23 Mike Pagano
2021-01-12 20:08 Mike Pagano
2021-01-09 12:53 Mike Pagano
2020-12-29 14:16 Mike Pagano
2020-12-11 12:54 Mike Pagano
2020-12-02 12:17 Mike Pagano
2020-11-24 13:29 Mike Pagano
2020-11-22 19:08 Mike Pagano
2020-11-18 19:21 Mike Pagano
2020-11-11 15:27 Mike Pagano
2020-11-10 13:53 Mike Pagano
2020-10-29 11:14 Mike Pagano
2020-10-17 10:13 Mike Pagano
2020-10-14 20:30 Mike Pagano
2020-10-01 11:41 Mike Pagano
2020-10-01 11:24 Mike Pagano
2020-09-24 16:04 Mike Pagano
2020-09-23 11:51 Mike Pagano
2020-09-23 11:50 Mike Pagano
2020-09-12 17:08 Mike Pagano
2020-09-03 11:32 Mike Pagano
2020-08-26 11:12 Mike Pagano
2020-08-21 11:11 Alice Ferrazzi
2020-07-31 16:10 Mike Pagano
2020-07-22 12:24 Mike Pagano
2020-07-09 12:05 Mike Pagano
2020-07-01 12:09 Mike Pagano
2020-06-22 14:43 Mike Pagano
2020-06-11 11:25 Mike Pagano
2020-06-03 11:35 Mike Pagano
2020-05-27 15:26 Mike Pagano
2020-05-20 11:20 Mike Pagano
2020-05-13 13:01 Mike Pagano
2020-05-11 22:52 Mike Pagano
2020-05-05 17:37 Mike Pagano
2020-05-02 19:20 Mike Pagano
2020-04-24 11:59 Mike Pagano
2020-04-15 18:24 Mike Pagano
2020-04-13 11:14 Mike Pagano
2020-04-02 18:55 Mike Pagano
2020-03-20 11:53 Mike Pagano
2020-03-20 11:51 Mike Pagano
2020-03-20 11:49 Mike Pagano
2020-03-11 10:14 Mike Pagano
2020-02-28 15:24 Mike Pagano
2020-02-14 23:34 Mike Pagano
2020-02-05 14:47 Mike Pagano
2020-01-29 12:36 Mike Pagano
2020-01-23 11:00 Mike Pagano
2020-01-14 22:24 Mike Pagano
2020-01-12 14:48 Mike Pagano
2020-01-04 16:46 Mike Pagano
2019-12-21 14:51 Mike Pagano
2019-12-05 14:47 Alice Ferrazzi
2019-11-29 21:41 Thomas Deutschmann
2019-11-28 23:49 Mike Pagano
2019-11-25 16:25 Mike Pagano
2019-11-16 10:54 Mike Pagano
2019-11-12 20:57 Mike Pagano
2019-11-10 16:13 Mike Pagano
2019-11-06 14:22 Mike Pagano
2019-10-29 10:08 Mike Pagano
2019-10-17 22:18 Mike Pagano
2019-10-07 21:03 Mike Pagano
2019-10-05 20:43 Mike Pagano
2019-09-21 15:56 Mike Pagano
2019-09-20 15:50 Mike Pagano
2019-09-16 12:21 Mike Pagano
2019-09-10 11:10 Mike Pagano
2019-09-06 17:17 Mike Pagano
2019-08-25 17:33 Mike Pagano
2019-08-11 10:58 Mike Pagano
2019-08-06 19:14 Mike Pagano
2019-08-04 16:03 Mike Pagano
2019-07-21 14:36 Mike Pagano
2019-07-10 11:01 Mike Pagano
2019-06-27 11:11 Mike Pagano
2019-06-22 19:01 Mike Pagano
2019-06-17 19:18 Mike Pagano
2019-06-11 17:30 Mike Pagano
2019-06-11 12:38 Mike Pagano
2019-05-16 23:01 Mike Pagano
2019-04-27 17:28 Mike Pagano
2019-04-03 10:49 Mike Pagano
2019-04-03 10:49 Mike Pagano
2019-03-23 14:17 Mike Pagano
2019-02-23 14:40 Mike Pagano
2019-02-20 11:14 Mike Pagano
2019-02-15 23:38 Mike Pagano
2019-02-15 23:35 Mike Pagano
2019-02-08 15:21 Mike Pagano
2019-02-06 20:51 Mike Pagano
2019-02-06  0:05 Mike Pagano
2019-01-26 14:59 Mike Pagano
2019-01-16 23:27 Mike Pagano
2019-01-13 19:46 Mike Pagano
2019-01-13 19:24 Mike Pagano
2018-12-29 22:56 Mike Pagano
2018-12-21 14:40 Mike Pagano
2018-12-17 21:56 Mike Pagano
2018-12-13 11:35 Mike Pagano
2018-12-01 18:35 Mike Pagano
2018-12-01 15:02 Mike Pagano
2018-11-27 16:59 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-21 15:02 Mike Pagano
2018-11-10 21:27 Mike Pagano
2018-10-20 12:33 Mike Pagano
2018-10-13 16:35 Mike Pagano
2018-10-10 11:20 Mike Pagano
2018-09-29 13:32 Mike Pagano
2018-09-26 10:44 Mike Pagano
2018-09-19 22:37 Mike Pagano
2018-09-15 10:09 Mike Pagano
2018-09-09 23:26 Mike Pagano
2018-09-05 15:21 Mike Pagano
2018-08-28 22:32 Mike Pagano
2018-08-24 11:41 Mike Pagano
2018-08-22 10:08 Alice Ferrazzi
2018-08-18 18:06 Mike Pagano
2018-08-17 19:24 Mike Pagano
2018-08-15 16:44 Mike Pagano
2018-08-09 10:49 Mike Pagano
2018-08-07 18:14 Mike Pagano
2018-07-28 10:37 Mike Pagano
2018-07-22 15:15 Mike Pagano
2018-07-19 15:27 Mike Pagano
2018-07-17 10:24 Mike Pagano
2018-07-12 16:21 Alice Ferrazzi
2018-07-04 14:26 Mike Pagano
2018-06-16 15:41 Mike Pagano
2018-06-13 14:54 Mike Pagano
2018-06-06 18:00 Mike Pagano
2018-05-30 22:35 Mike Pagano
2018-05-30 11:38 Mike Pagano
2018-05-26 13:43 Mike Pagano
2018-05-16 10:22 Mike Pagano
2018-05-02 16:11 Mike Pagano
2018-04-29 11:48 Mike Pagano
2018-04-24 11:28 Mike Pagano
2018-04-13 22:20 Mike Pagano
2018-04-08 14:25 Mike Pagano
2018-03-31 23:00 Mike Pagano
2018-03-31 22:16 Mike Pagano
2018-03-25 13:42 Mike Pagano
2018-03-22 12:54 Mike Pagano
2018-03-11 18:25 Mike Pagano
2018-03-05  2:52 Alice Ferrazzi
2018-02-28 15:05 Alice Ferrazzi
2018-02-25 15:46 Mike Pagano
2018-02-22 23:20 Mike Pagano
2018-02-17 15:10 Alice Ferrazzi
2018-02-03 21:23 Mike Pagano
2018-01-31 13:36 Alice Ferrazzi
2018-01-23 21:15 Mike Pagano
2018-01-17 10:20 Alice Ferrazzi
2018-01-17  9:18 Alice Ferrazzi
2018-01-15 15:01 Alice Ferrazzi
2018-01-10 11:56 Mike Pagano
2018-01-10 11:48 Mike Pagano
2018-01-05 15:59 Alice Ferrazzi
2018-01-05 15:05 Alice Ferrazzi
2018-01-02 20:12 Mike Pagano
2017-12-25 14:41 Alice Ferrazzi
2017-12-20 12:45 Mike Pagano
2017-12-16 11:46 Alice Ferrazzi
2017-12-09 18:50 Alice Ferrazzi
2017-12-05 11:39 Mike Pagano
2017-11-30 12:25 Alice Ferrazzi
2017-11-24 10:49 Alice Ferrazzi
2017-11-24  9:46 Alice Ferrazzi
2017-11-21  8:40 Alice Ferrazzi
2017-11-18 18:12 Mike Pagano
2017-11-15 16:44 Alice Ferrazzi
2017-11-08 13:50 Mike Pagano
2017-11-02 10:02 Mike Pagano
2017-10-27 10:33 Mike Pagano
2017-10-21 20:13 Mike Pagano
2017-10-18 13:44 Mike Pagano
2017-10-12 12:22 Mike Pagano
2017-10-08 14:25 Mike Pagano
2017-10-05 11:39 Mike Pagano
2017-09-27 10:38 Mike Pagano
2017-09-14 13:37 Mike Pagano
2017-09-13 22:26 Mike Pagano
2017-09-13 14:33 Mike Pagano
2017-09-07 22:42 Mike Pagano
2017-09-02 17:14 Mike Pagano
2017-08-30 10:08 Mike Pagano
2017-08-25 10:53 Mike Pagano
2017-08-16 22:30 Mike Pagano
2017-08-13 16:52 Mike Pagano
2017-08-11 17:44 Mike Pagano
2017-08-07 10:25 Mike Pagano
2017-05-14 13:32 Mike Pagano
2017-05-08 10:40 Mike Pagano
2017-05-03 17:41 Mike Pagano
2017-04-30 18:08 Mike Pagano
2017-04-30 17:59 Mike Pagano
2017-04-27  8:18 Alice Ferrazzi
2017-04-22 17:00 Mike Pagano
2017-04-18 10:21 Mike Pagano
2017-04-12 17:59 Mike Pagano
2017-04-08 13:56 Mike Pagano
2017-03-31 10:43 Mike Pagano
2017-03-30 18:16 Mike Pagano
2017-03-26 11:53 Mike Pagano
2017-03-22 12:28 Mike Pagano
2017-03-18 14:32 Mike Pagano
2017-03-15 14:39 Mike Pagano
2017-03-12 12:17 Mike Pagano
2017-03-02 16:29 Mike Pagano
2017-03-02 16:29 Mike Pagano
2017-02-26 20:45 Mike Pagano
2017-02-24  0:38 Mike Pagano
2017-02-23 20:12 Mike Pagano
2017-02-18 16:27 Alice Ferrazzi
2017-02-15 16:22 Alice Ferrazzi
2017-02-09  8:05 Alice Ferrazzi
2017-02-04 13:47 Alice Ferrazzi
2017-02-01 12:59 Alice Ferrazzi
2017-01-26  8:24 Alice Ferrazzi
2017-01-20 12:45 Alice Ferrazzi
2017-01-15 22:57 Mike Pagano
2017-01-14 14:46 Mike Pagano
2017-01-12 12:11 Mike Pagano
2017-01-09 12:46 Mike Pagano
2017-01-06 23:13 Mike Pagano
2016-12-15 23:41 Mike Pagano
2016-12-11 15:02 Alice Ferrazzi
2016-12-09 13:57 Alice Ferrazzi
2016-12-08  0:03 Mike Pagano
2016-12-02 16:21 Mike Pagano
2016-11-26 18:51 Mike Pagano
2016-11-26 18:40 Mike Pagano
2016-11-22  0:14 Mike Pagano
2016-11-19 11:03 Mike Pagano
2016-11-15 10:05 Alice Ferrazzi
2016-11-10 18:13 Alice Ferrazzi
2016-11-01  3:14 Alice Ferrazzi
2016-10-31 14:09 Alice Ferrazzi
2016-10-28 18:27 Alice Ferrazzi
2016-10-22 13:05 Mike Pagano
2016-10-21 11:10 Mike Pagano
2016-10-16 19:25 Mike Pagano
2016-10-08 19:55 Mike Pagano
2016-09-30 19:07 Mike Pagano
2016-09-24 10:51 Mike Pagano
2016-09-16 19:10 Mike Pagano
2016-09-15 13:58 Mike Pagano
2016-09-09 19:20 Mike Pagano
2016-08-20 16:31 Mike Pagano
2016-08-17 11:48 Mike Pagano
2016-08-10 12:56 Mike Pagano
2016-07-27 19:19 Mike Pagano
2016-07-11 19:59 Mike Pagano
2016-07-02 15:30 Mike Pagano
2016-07-01  0:55 Mike Pagano
2016-06-24 20:40 Mike Pagano
2016-06-08 13:38 Mike Pagano
2016-06-02 18:24 Mike Pagano
2016-05-19 13:00 Mike Pagano
2016-05-12  0:14 Mike Pagano
2016-05-04 23:51 Mike Pagano
2016-04-20 11:27 Mike Pagano
2016-04-12 18:59 Mike Pagano
2016-03-22 22:47 Mike Pagano
2016-03-16 19:43 Mike Pagano
2016-03-10  0:51 Mike Pagano
2016-03-04 11:15 Mike Pagano
2016-02-26  0:02 Mike Pagano
2016-02-19 23:33 Mike Pagano
2016-02-18  0:20 Mike Pagano
2016-02-01  0:19 Mike Pagano
2016-02-01  0:13 Mike Pagano
2016-01-31 23:33 Mike Pagano
2016-01-20 12:38 Mike Pagano
2016-01-10 17:19 Mike Pagano
