From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri,  4 Mar 2016 11:15:39 +0000 (UTC)
Message-ID: <1457090131.47a90382973671498d5d2d5e308bf5985467506d.mpagano@gentoo>

commit:     47a90382973671498d5d2d5e308bf5985467506d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  4 11:15:31 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar  4 11:15:31 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=47a90382

Linux patch 4.4.4

 0000_README            |     4 +
 1003_linux-4.4.4.patch | 13326 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 13330 insertions(+)

diff --git a/0000_README b/0000_README
index 91631f2..08bdc40 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-4.4.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.3
 
+Patch:  1003_linux-4.4.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-4.4.4.patch b/1003_linux-4.4.4.patch
new file mode 100644
index 0000000..62b4415
--- /dev/null
+++ b/1003_linux-4.4.4.patch
@@ -0,0 +1,13326 @@
+diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
+index c477af086e65..686a64bba775 100644
+--- a/Documentation/filesystems/efivarfs.txt
++++ b/Documentation/filesystems/efivarfs.txt
+@@ -14,3 +14,10 @@ filesystem.
+ efivarfs is typically mounted like this,
+ 
+ 	mount -t efivarfs none /sys/firmware/efi/efivars
++
++Due to the presence of numerous firmware bugs where removing non-standard
++UEFI variables causes the system firmware to fail to POST, efivarfs
++files that are not well-known standardized variables are created
++as immutable files.  This doesn't prevent removal - "chattr -i" will work -
++but it does prevent this kind of failure from being accomplished
++accidentally.
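
Not part of the patch itself, just an illustrative sketch of the "chattr -i" escape hatch described above: userspace clears FS_IMMUTABLE_FL through the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls and can then unlink the variable. The variable path below is a made-up example.

	/* Sketch: clear the immutable flag on an efivarfs file, then delete it
	 * (roughly what "chattr -i" followed by "rm" does).  Needs root; the
	 * variable name is hypothetical. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>	/* FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_IMMUTABLE_FL */

	int main(void)
	{
		const char *var =
			"/sys/firmware/efi/efivars/Example-12345678-1234-1234-1234-123456789abc";
		int fd = open(var, O_RDONLY);
		int attr;

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, FS_IOC_GETFLAGS, &attr) == 0) {
			attr &= ~FS_IMMUTABLE_FL;	/* same effect as "chattr -i" */
			if (ioctl(fd, FS_IOC_SETFLAGS, &attr))
				perror("FS_IOC_SETFLAGS");
		}
		close(fd);
		if (unlink(var))			/* removal is now permitted */
			perror("unlink");
		return 0;
	}
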
+diff --git a/Makefile b/Makefile
+index 802be10c40c5..344bc6f27ea1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
+index 258b0e5ad332..68b6092349d7 100644
+--- a/arch/arc/include/asm/irqflags-arcv2.h
++++ b/arch/arc/include/asm/irqflags-arcv2.h
+@@ -22,6 +22,7 @@
+ #define AUX_IRQ_CTRL		0x00E
+ #define AUX_IRQ_ACT		0x043	/* Active Intr across all levels */
+ #define AUX_IRQ_LVL_PEND	0x200	/* Pending Intr across all levels */
++#define AUX_IRQ_HINT		0x201	/* For generating Soft Interrupts */
+ #define AUX_IRQ_PRIORITY	0x206
+ #define ICAUSE			0x40a
+ #define AUX_IRQ_SELECT		0x40b
+@@ -112,6 +113,16 @@ static inline int arch_irqs_disabled(void)
+ 	return arch_irqs_disabled_flags(arch_local_save_flags());
+ }
+ 
++static inline void arc_softirq_trigger(int irq)
++{
++	write_aux_reg(AUX_IRQ_HINT, irq);
++}
++
++static inline void arc_softirq_clear(int irq)
++{
++	write_aux_reg(AUX_IRQ_HINT, 0);
++}
++
+ #else
+ 
+ .macro IRQ_DISABLE  scratch
+diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
+index cbfec79137bf..c1264607bbff 100644
+--- a/arch/arc/kernel/entry-arcv2.S
++++ b/arch/arc/kernel/entry-arcv2.S
+@@ -45,11 +45,12 @@ VECTOR	reserved		; Reserved slots
+ VECTOR	handle_interrupt	; (16) Timer0
+ VECTOR	handle_interrupt	; unused (Timer1)
+ VECTOR	handle_interrupt	; unused (WDT)
+-VECTOR	handle_interrupt	; (19) ICI (inter core interrupt)
+-VECTOR	handle_interrupt
+-VECTOR	handle_interrupt
+-VECTOR	handle_interrupt
+-VECTOR	handle_interrupt	; (23) End of fixed IRQs
++VECTOR	handle_interrupt	; (19) Inter core Interrupt (IPI)
++VECTOR	handle_interrupt	; (20) perf Interrupt
++VECTOR	handle_interrupt	; (21) Software Triggered Intr (Self IPI)
++VECTOR	handle_interrupt	; unused
++VECTOR	handle_interrupt	; (23) unused
++# End of fixed IRQs
+ 
+ .rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
+ 	VECTOR	handle_interrupt
+@@ -211,7 +212,11 @@ debug_marker_syscall:
+ ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
+ ; entry was via Exception in DS which got preempted in kernel).
+ ;
+-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
++; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
++;
++; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
++; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
++
+ .Lintr_ret_to_delay_slot:
+ debug_marker_ds:
+ 
+@@ -222,18 +227,23 @@ debug_marker_ds:
+ 	ld	r2, [sp, PT_ret]
+ 	ld	r3, [sp, PT_status32]
+ 
++	; STAT32 for Int return created from scratch
++	; (No delay slot, disable further intr in trampoline)
++
+ 	bic  	r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
+ 	st	r0, [sp, PT_status32]
+ 
+ 	mov	r1, .Lintr_ret_to_delay_slot_2
+ 	st	r1, [sp, PT_ret]
+ 
++	; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
+ 	st	r2, [sp, 0]
+ 	st	r3, [sp, 4]
+ 
+ 	b	.Lisr_ret_fast_path
+ 
+ .Lintr_ret_to_delay_slot_2:
++	; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
+ 	sub	sp, sp, SZ_PT_REGS
+ 	st	r9, [sp, -4]
+ 
+@@ -243,11 +253,19 @@ debug_marker_ds:
+ 	ld	r9, [sp, 4]
+ 	sr	r9, [erstatus]
+ 
++	; restore AUX_USER_SP if returning to U mode
++	bbit0	r9, STATUS_U_BIT, 1f
++	ld	r9, [sp, PT_sp]
++	sr	r9, [AUX_USER_SP]
++
++1:
+ 	ld	r9, [sp, 8]
+ 	sr	r9, [erbta]
+ 
+ 	ld	r9, [sp, -4]
+ 	add	sp, sp, SZ_PT_REGS
++
++	; return from pure kernel mode to delay slot
+ 	rtie
+ 
+ END(ret_from_exception)
+diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
+index bd237acdf4f2..30d806ce0c78 100644
+--- a/arch/arc/kernel/mcip.c
++++ b/arch/arc/kernel/mcip.c
+@@ -11,9 +11,12 @@
+ #include <linux/smp.h>
+ #include <linux/irq.h>
+ #include <linux/spinlock.h>
++#include <asm/irqflags-arcv2.h>
+ #include <asm/mcip.h>
+ #include <asm/setup.h>
+ 
++#define SOFTIRQ_IRQ	21
++
+ static char smp_cpuinfo_buf[128];
+ static int idu_detected;
+ 
+@@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
+ static void mcip_setup_per_cpu(int cpu)
+ {
+ 	smp_ipi_irq_setup(cpu, IPI_IRQ);
++	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
+ }
+ 
+ static void mcip_ipi_send(int cpu)
+@@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu)
+ 	unsigned long flags;
+ 	int ipi_was_pending;
+ 
++	/* ARConnect can only send IPI to others */
++	if (unlikely(cpu == raw_smp_processor_id())) {
++		arc_softirq_trigger(SOFTIRQ_IRQ);
++		return;
++	}
++
+ 	/*
+ 	 * NOTE: We must spin here if the other cpu hasn't yet
+ 	 * serviced a previous message. This can burn lots
+@@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq)
+ 	unsigned long flags;
+ 	unsigned int __maybe_unused copy;
+ 
++	if (unlikely(irq == SOFTIRQ_IRQ)) {
++		arc_softirq_clear(irq);
++		return;
++	}
++
+ 	raw_spin_lock_irqsave(&mcip_lock, flags);
+ 
+ 	/* Who sent the IPI */
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index 259c0ca9c99a..ddbb361267d8 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -162,10 +162,9 @@ choice
+ 		  mobile SoCs in the Kona family of chips (e.g. bcm28155,
+ 		  bcm11351, etc...)
+ 
+-	config DEBUG_BCM63XX
++	config DEBUG_BCM63XX_UART
+ 		bool "Kernel low-level debugging on BCM63XX UART"
+ 		depends on ARCH_BCM_63XX
+-		select DEBUG_UART_BCM63XX
+ 
+ 	config DEBUG_BERLIN_UART
+ 		bool "Marvell Berlin SoC Debug UART"
+@@ -1348,7 +1347,7 @@ config DEBUG_LL_INCLUDE
+ 	default "debug/vf.S" if DEBUG_VF_UART
+ 	default "debug/vt8500.S" if DEBUG_VT8500_UART0
+ 	default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
+-	default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
++	default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
+ 	default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
+ 	default "mach/debug-macro.S"
+ 
+@@ -1364,10 +1363,6 @@ config DEBUG_UART_8250
+ 		ARCH_IOP33X || ARCH_IXP4XX || \
+ 		ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
+ 
+-# Compatibility options for BCM63xx
+-config DEBUG_UART_BCM63XX
+-	def_bool ARCH_BCM_63XX
+-
+ config DEBUG_UART_PHYS
+ 	hex "Physical base address of debug UART"
+ 	default 0x00100a00 if DEBUG_NETX_UART
+@@ -1462,7 +1457,7 @@ config DEBUG_UART_PHYS
+ 	default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
+ 	default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
+ 	default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
+-	default 0xfffe8600 if DEBUG_UART_BCM63XX
++	default 0xfffe8600 if DEBUG_BCM63XX_UART
+ 	default 0xfffff700 if ARCH_IOP33X
+ 	depends on ARCH_EP93XX || \
+ 	        DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+@@ -1474,7 +1469,7 @@ config DEBUG_UART_PHYS
+ 		DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
+ 		DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
+ 		DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
+-		DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++		DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ 		DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
+ 		DEBUG_AT91_UART
+ 
+@@ -1515,7 +1510,7 @@ config DEBUG_UART_VIRT
+ 	default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
+ 	default 0xfc40ab00 if DEBUG_BRCMSTB_UART
+ 	default 0xfc705000 if DEBUG_ZTE_ZX
+-	default 0xfcfe8600 if DEBUG_UART_BCM63XX
++	default 0xfcfe8600 if DEBUG_BCM63XX_UART
+ 	default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
+ 	default 0xfd000000 if ARCH_SPEAR13XX
+ 	default 0xfd012000 if ARCH_MV78XX0
+@@ -1566,7 +1561,7 @@ config DEBUG_UART_VIRT
+ 		DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
+ 		DEBUG_NETX_UART || \
+ 		DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
+-		DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++		DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ 		DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
+ 
+ config DEBUG_UART_8250_SHIFT
+diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
+index 1afe24629d1f..b0c912feaa2f 100644
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -90,7 +90,7 @@
+ #define PIN_PA14__I2SC1_MCK		PINMUX_PIN(PIN_PA14, 4, 2)
+ #define PIN_PA14__FLEXCOM3_IO2		PINMUX_PIN(PIN_PA14, 5, 1)
+ #define PIN_PA14__D9			PINMUX_PIN(PIN_PA14, 6, 2)
+-#define PIN_PA15			14
++#define PIN_PA15			15
+ #define PIN_PA15__GPIO			PINMUX_PIN(PIN_PA15, 0, 0)
+ #define PIN_PA15__SPI0_MOSI		PINMUX_PIN(PIN_PA15, 1, 1)
+ #define PIN_PA15__TF1			PINMUX_PIN(PIN_PA15, 2, 1)
+diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
+index 68ee3ce17b82..b4c6d99364f1 100644
+--- a/arch/arm/include/asm/psci.h
++++ b/arch/arm/include/asm/psci.h
+@@ -16,7 +16,7 @@
+ 
+ extern struct smp_operations psci_smp_ops;
+ 
+-#ifdef CONFIG_ARM_PSCI
++#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
+ bool psci_smp_available(void);
+ #else
+ static inline bool psci_smp_available(void) { return false; }
+diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
+index 0375c8caa061..9408a994cc91 100644
+--- a/arch/arm/include/asm/xen/page-coherent.h
++++ b/arch/arm/include/asm/xen/page-coherent.h
+@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ 	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+ 	     enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
+-	bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
++	unsigned long page_pfn = page_to_xen_pfn(page);
++	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
++	unsigned long compound_pages =
++		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
++	bool local = (page_pfn <= dev_pfn) &&
++		(dev_pfn - page_pfn < compound_pages);
++
+ 	/*
+-	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
+-	 * multiple Xen page, it's not possible to have a mix of local and
+-	 * foreign Xen page. So if the first xen_pfn == mfn the page is local
+-	 * otherwise it's a foreign page grant-mapped in dom0. If the page is
+-	 * local we can safely call the native dma_ops function, otherwise we
+-	 * call the xen specific function.
++	 * Dom0 is mapped 1:1, while the Linux page can span across
++	 * multiple Xen pages, it's not possible for it to contain a
++	 * mix of local and foreign Xen pages. So if the first xen_pfn
++	 * == mfn the page is local otherwise it's a foreign page
++	 * grant-mapped in dom0. If the page is local we can safely
++	 * call the native dma_ops function, otherwise we call the xen
++	 * specific function.
+ 	 */
+ 	if (local)
+ 		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
+index 7b76ce01c21d..8633c703546a 100644
+--- a/arch/arm/mach-omap2/gpmc-onenand.c
++++ b/arch/arm/mach-omap2/gpmc-onenand.c
+@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
+ 
+ static void set_onenand_cfg(void __iomem *onenand_base)
+ {
+-	u32 reg;
++	u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
+ 
+-	reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
+-	reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
+ 	reg |=	(latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
+ 		ONENAND_SYS_CFG1_BL_16;
+ 	if (onenand_flags & ONENAND_FLAG_SYNCREAD)
+@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
+ 		reg |= ONENAND_SYS_CFG1_VHF;
+ 	else
+ 		reg &= ~ONENAND_SYS_CFG1_VHF;
++
+ 	writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
+ }
+ 
+@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
+ 		}
+ 	}
+ 
++	onenand_async.sync_write = true;
+ 	omap2_onenand_calc_async_timings(&t);
+ 
+ 	ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index cd822d8454c0..b6c90e5006e4 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -27,6 +27,7 @@ $(warning LSE atomics not supported by binutils)
+ endif
+ 
+ KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
++KBUILD_CFLAGS	+= $(call cc-option, -mpc-relative-literal-loads)
+ KBUILD_AFLAGS	+= $(lseinstr)
+ 
+ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index 2046c0230224..21ed7150fec3 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -33,7 +33,7 @@
+ #define PAGE_SHIFT	16
+ #endif
+ #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
+-#define PAGE_MASK	(~(PAGE_SIZE - 1))
++#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
+ 
+ /*
+  * This is used for calculating the real page sizes
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 8957f15e21ec..18826aa15a7c 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
+ static inline pte_t pte_mkyoung(pte_t pte)
+ {
+ 	pte_val(pte) |= _PAGE_ACCESSED;
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ 	if (!(pte_val(pte) & _PAGE_NO_READ))
+ 		pte_val(pte) |= _PAGE_SILENT_READ;
+ 	else
+@@ -560,7 +560,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
+ {
+ 	pmd_val(pmd) |= _PAGE_ACCESSED;
+ 
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ 	if (!(pmd_val(pmd) & _PAGE_NO_READ))
+ 		pmd_val(pmd) |= _PAGE_SILENT_READ;
+ 	else
+diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
+index 6499d93ae68d..47bc45a67e9b 100644
+--- a/arch/mips/include/asm/syscall.h
++++ b/arch/mips/include/asm/syscall.h
+@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ 	/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+ 	if ((config_enabled(CONFIG_32BIT) ||
+ 	    test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+-	    (regs->regs[2] == __NR_syscall)) {
++	    (regs->regs[2] == __NR_syscall))
+ 		i++;
+-		n++;
+-	}
+ 
+ 	while (n--)
+ 		ret |= mips_get_syscall_arg(args++, task, regs, i++);
+diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
+index bf9f1a77f0e5..a2631a52ca99 100644
+--- a/arch/mips/loongson64/loongson-3/hpet.c
++++ b/arch/mips/loongson64/loongson-3/hpet.c
+@@ -13,6 +13,9 @@
+ #define SMBUS_PCI_REG64		0x64
+ #define SMBUS_PCI_REGB4		0xb4
+ 
++#define HPET_MIN_CYCLES		64
++#define HPET_MIN_PROG_DELTA	(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
++
+ static DEFINE_SPINLOCK(hpet_lock);
+ DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
+ 
+@@ -161,8 +164,9 @@ static int hpet_next_event(unsigned long delta,
+ 	cnt += delta;
+ 	hpet_write(HPET_T0_CMP, cnt);
+ 
+-	res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
+-	return res;
++	res = (int)(cnt - hpet_read(HPET_COUNTER));
++
++	return res < HPET_MIN_CYCLES ? -ETIME : 0;
+ }
+ 
+ static irqreturn_t hpet_irq_handler(int irq, void *data)
+@@ -237,7 +241,7 @@ void __init setup_hpet_timer(void)
+ 	cd->cpumask = cpumask_of(cpu);
+ 	clockevent_set_clock(cd, HPET_FREQ);
+ 	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+-	cd->min_delta_ns = 5000;
++	cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
+ 
+ 	clockevents_register_device(cd);
+ 	setup_irq(HPET_T0_IRQ, &hpet_irq);
+diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
+index 1a4738a8f2d3..509832a9836c 100644
+--- a/arch/mips/loongson64/loongson-3/smp.c
++++ b/arch/mips/loongson64/loongson-3/smp.c
+@@ -30,13 +30,13 @@
+ #include "smp.h"
+ 
+ DEFINE_PER_CPU(int, cpu_state);
+-DEFINE_PER_CPU(uint32_t, core0_c0count);
+ 
+ static void *ipi_set0_regs[16];
+ static void *ipi_clear0_regs[16];
+ static void *ipi_status0_regs[16];
+ static void *ipi_en0_regs[16];
+ static void *ipi_mailbox_buf[16];
++static uint32_t core0_c0count[NR_CPUS];
+ 
+ /* read a 32bit value from ipi register */
+ #define loongson3_ipi_read32(addr) readl(addr)
+@@ -275,12 +275,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
+ 	if (action & SMP_ASK_C0COUNT) {
+ 		BUG_ON(cpu != 0);
+ 		c0count = read_c0_count();
+-		for (i = 1; i < num_possible_cpus(); i++)
+-			per_cpu(core0_c0count, i) = c0count;
++		c0count = c0count ? c0count : 1;
++		for (i = 1; i < nr_cpu_ids; i++)
++			core0_c0count[i] = c0count;
++		__wbflush(); /* Let others see the result ASAP */
+ 	}
+ }
+ 
+-#define MAX_LOOPS 1111
++#define MAX_LOOPS 800
+ /*
+  * SMP init and finish on secondary CPUs
+  */
+@@ -305,16 +307,20 @@ static void loongson3_init_secondary(void)
+ 		cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
+ 
+ 	i = 0;
+-	__this_cpu_write(core0_c0count, 0);
++	core0_c0count[cpu] = 0;
+ 	loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
+-	while (!__this_cpu_read(core0_c0count)) {
++	while (!core0_c0count[cpu]) {
+ 		i++;
+ 		cpu_relax();
+ 	}
+ 
+ 	if (i > MAX_LOOPS)
+ 		i = MAX_LOOPS;
+-	initcount = __this_cpu_read(core0_c0count) + i;
++	if (cpu_data[cpu].package)
++		initcount = core0_c0count[cpu] + i;
++	else /* Local access is faster for loops */
++		initcount = core0_c0count[cpu] + i/2;
++
+ 	write_c0_count(initcount);
+ }
+ 
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 32e0be27673f..29f73e00253d 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
+ 	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+ 	pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
+ #endif
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ 	if (cpu_has_rixi) {
+ #ifdef _PAGE_NO_EXEC_SHIFT
+ 		pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index f69ecaa7ce33..52c1e273f8cd 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
+ 		eeh_pcid_put(dev);
+ 		if (driver->err_handler &&
+ 		    driver->err_handler->error_detected &&
+-		    driver->err_handler->slot_reset &&
+-		    driver->err_handler->resume)
++		    driver->err_handler->slot_reset)
+ 			return NULL;
+ 	}
+ 
+diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
+index 2559b16da525..17d9dcd29d45 100644
+--- a/arch/s390/include/asm/fpu/internal.h
++++ b/arch/s390/include/asm/fpu/internal.h
+@@ -48,6 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
+ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+ {
+ 	fpregs->pad = 0;
++	fpregs->fpc = fpu->fpc;
+ 	if (MACHINE_HAS_VX)
+ 		convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
+ 	else
+@@ -57,6 +58,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+ 
+ static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
+ {
++	fpu->fpc = fpregs->fpc;
+ 	if (MACHINE_HAS_VX)
+ 		convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
+ 	else
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index efaac2c3bb77..e9a983f40a24 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -506,7 +506,6 @@ struct kvm_vcpu_arch {
+ 	struct kvm_s390_sie_block *sie_block;
+ 	unsigned int      host_acrs[NUM_ACRS];
+ 	struct fpu	  host_fpregs;
+-	struct fpu	  guest_fpregs;
+ 	struct kvm_s390_local_interrupt local_int;
+ 	struct hrtimer    ckc_timer;
+ 	struct kvm_s390_pgm_info pgm;
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index 9cd248f637c7..dc6c9c604543 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -181,6 +181,7 @@ int main(void)
+ 	OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
+ 	OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
+ 	OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
++	OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area);
+ 	OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
+ 	OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
+ 	OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
+diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
+index 66c94417c0ba..4af60374eba0 100644
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
+ 
+ 	/* Restore high gprs from signal stack */
+ 	if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
+-			     sizeof(&sregs_ext->gprs_high)))
++			     sizeof(sregs_ext->gprs_high)))
+ 		return -EFAULT;
+ 	for (i = 0; i < NUM_GPRS; i++)
+ 		*(__u32 *)&regs->gprs[i] = gprs_high[i];
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 846589281b04..a08d0afd5ff6 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -1268,44 +1268,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ 	return 0;
+ }
+ 
+-/*
+- * Backs up the current FP/VX register save area on a particular
+- * destination.  Used to switch between different register save
+- * areas.
+- */
+-static inline void save_fpu_to(struct fpu *dst)
+-{
+-	dst->fpc = current->thread.fpu.fpc;
+-	dst->regs = current->thread.fpu.regs;
+-}
+-
+-/*
+- * Switches the FP/VX register save area from which to lazy
+- * restore register contents.
+- */
+-static inline void load_fpu_from(struct fpu *from)
+-{
+-	current->thread.fpu.fpc = from->fpc;
+-	current->thread.fpu.regs = from->regs;
+-}
+-
+ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+ 	/* Save host register state */
+ 	save_fpu_regs();
+-	save_fpu_to(&vcpu->arch.host_fpregs);
+-
+-	if (test_kvm_facility(vcpu->kvm, 129)) {
+-		current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+-		/*
+-		 * Use the register save area in the SIE-control block
+-		 * for register restore and save in kvm_arch_vcpu_put()
+-		 */
+-		current->thread.fpu.vxrs =
+-			(__vector128 *)&vcpu->run->s.regs.vrs;
+-	} else
+-		load_fpu_from(&vcpu->arch.guest_fpregs);
++	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
++	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+ 
++	/* Depending on MACHINE_HAS_VX, data stored to vrs either
++	 * has vector register or floating point register format.
++	 */
++	current->thread.fpu.regs = vcpu->run->s.regs.vrs;
++	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+ 	if (test_fp_ctl(current->thread.fpu.fpc))
+ 		/* User space provided an invalid FPC, let's clear it */
+ 		current->thread.fpu.fpc = 0;
+@@ -1321,19 +1295,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ 	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ 	gmap_disable(vcpu->arch.gmap);
+ 
++	/* Save guest register state */
+ 	save_fpu_regs();
++	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+ 
+-	if (test_kvm_facility(vcpu->kvm, 129))
+-		/*
+-		 * kvm_arch_vcpu_load() set up the register save area to
+-		 * the &vcpu->run->s.regs.vrs and, thus, the vector registers
+-		 * are already saved.  Only the floating-point control must be
+-		 * copied.
+-		 */
+-		vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+-	else
+-		save_fpu_to(&vcpu->arch.guest_fpregs);
+-	load_fpu_from(&vcpu->arch.host_fpregs);
++	/* Restore host register state */
++	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
++	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+ 
+ 	save_access_regs(vcpu->run->s.regs.acrs);
+ 	restore_access_regs(vcpu->arch.host_acrs);
+@@ -1351,8 +1319,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
+ 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
+ 	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
+ 	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
+-	vcpu->arch.guest_fpregs.fpc = 0;
+-	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
++	/* make sure the new fpc will be lazily loaded */
++	save_fpu_regs();
++	current->thread.fpu.fpc = 0;
+ 	vcpu->arch.sie_block->gbea = 1;
+ 	vcpu->arch.sie_block->pp = 0;
+ 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+@@ -1501,19 +1470,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ 	vcpu->arch.local_int.wq = &vcpu->wq;
+ 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+ 
+-	/*
+-	 * Allocate a save area for floating-point registers.  If the vector
+-	 * extension is available, register contents are saved in the SIE
+-	 * control block.  The allocated save area is still required in
+-	 * particular places, for example, in kvm_s390_vcpu_store_status().
+-	 */
+-	vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
+-					       GFP_KERNEL);
+-	if (!vcpu->arch.guest_fpregs.fprs) {
+-		rc = -ENOMEM;
+-		goto out_free_sie_block;
+-	}
+-
+ 	rc = kvm_vcpu_init(vcpu, kvm, id);
+ 	if (rc)
+ 		goto out_free_sie_block;
+@@ -1734,19 +1690,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ 
+ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
++	/* make sure the new values will be lazily loaded */
++	save_fpu_regs();
+ 	if (test_fp_ctl(fpu->fpc))
+ 		return -EINVAL;
+-	memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+-	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+-	save_fpu_regs();
+-	load_fpu_from(&vcpu->arch.guest_fpregs);
++	current->thread.fpu.fpc = fpu->fpc;
++	if (MACHINE_HAS_VX)
++		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
++	else
++		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+ 	return 0;
+ }
+ 
+ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+-	memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
+-	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
++	/* make sure we have the latest values */
++	save_fpu_regs();
++	if (MACHINE_HAS_VX)
++		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
++	else
++		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
++	fpu->fpc = current->thread.fpu.fpc;
+ 	return 0;
+ }
+ 
+@@ -2266,41 +2230,50 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
+ {
+ 	unsigned char archmode = 1;
++	freg_t fprs[NUM_FPRS];
+ 	unsigned int px;
+ 	u64 clkcomp;
+ 	int rc;
+ 
++	px = kvm_s390_get_prefix(vcpu);
+ 	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
+ 		if (write_guest_abs(vcpu, 163, &archmode, 1))
+ 			return -EFAULT;
+-		gpa = SAVE_AREA_BASE;
++		gpa = 0;
+ 	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
+ 		if (write_guest_real(vcpu, 163, &archmode, 1))
+ 			return -EFAULT;
+-		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
++		gpa = px;
++	} else
++		gpa -= __LC_FPREGS_SAVE_AREA;
++
++	/* manually convert vector registers if necessary */
++	if (MACHINE_HAS_VX) {
++		convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
++		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
++				     fprs, 128);
++	} else {
++		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
++				     vcpu->run->s.regs.vrs, 128);
+ 	}
+-	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
+-			     vcpu->arch.guest_fpregs.fprs, 128);
+-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
++	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
+ 			      vcpu->run->s.regs.gprs, 128);
+-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
++	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
+ 			      &vcpu->arch.sie_block->gpsw, 16);
+-	px = kvm_s390_get_prefix(vcpu);
+-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
++	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
+ 			      &px, 4);
+-	rc |= write_guest_abs(vcpu,
+-			      gpa + offsetof(struct save_area, fp_ctrl_reg),
+-			      &vcpu->arch.guest_fpregs.fpc, 4);
+-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
++	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
++			      &vcpu->run->s.regs.fpc, 4);
++	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
+ 			      &vcpu->arch.sie_block->todpr, 4);
+-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
++	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
+ 			      &vcpu->arch.sie_block->cputm, 8);
+ 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
+-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
++	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
+ 			      &clkcomp, 8);
+-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
++	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
+ 			      &vcpu->run->s.regs.acrs, 64);
+-	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
++	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
+ 			      &vcpu->arch.sie_block->gcr, 128);
+ 	return rc ? -EFAULT : 0;
+ }
+@@ -2313,19 +2286,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+ 	 * it into the save area
+ 	 */
+ 	save_fpu_regs();
+-	if (test_kvm_facility(vcpu->kvm, 129)) {
+-		/*
+-		 * If the vector extension is available, the vector registers
+-		 * which overlaps with floating-point registers are saved in
+-		 * the SIE-control block.  Hence, extract the floating-point
+-		 * registers and the FPC value and store them in the
+-		 * guest_fpregs structure.
+-		 */
+-		vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
+-		convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
+-				 current->thread.fpu.vxrs);
+-	} else
+-		save_fpu_to(&vcpu->arch.guest_fpregs);
++	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+ 	save_access_regs(vcpu->run->s.regs.acrs);
+ 
+ 	return kvm_s390_store_status_unloaded(vcpu, addr);
+diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
+index 4d1ee88864e8..18c8b819b0aa 100644
+--- a/arch/s390/mm/extable.c
++++ b/arch/s390/mm/extable.c
+@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
+ 	int i;
+ 
+ 	/* Normalize entries to being relative to the start of the section */
+-	for (p = start, i = 0; p < finish; p++, i += 8)
++	for (p = start, i = 0; p < finish; p++, i += 8) {
+ 		p->insn += i;
++		p->fixup += i + 4;
++	}
+ 	sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
+ 	/* Denormalize all entries */
+-	for (p = start, i = 0; p < finish; p++, i += 8)
++	for (p = start, i = 0; p < finish; p++, i += 8) {
+ 		p->insn -= i;
++		p->fixup -= i + 4;
++	}
+ }
+ 
+ #ifdef CONFIG_MODULES
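
For context (editor-added sketch, not from the patch): the s390 exception table stores two self-relative 32-bit offsets per 8-byte entry (insn at +0, fixup at +4), so both fields have to be rebased around the sort, or an entry that moves would silently retarget them. A self-contained userspace analogue, assuming that layout:

	/* Sketch: sorting entries whose fields are offsets relative to the
	 * field's own address.  Normalize to table-relative values, sort,
	 * then convert back to self-relative. */
	#include <stdint.h>
	#include <stdlib.h>

	struct ex_entry {
		int32_t insn;	/* target = (char *)&e->insn  + e->insn  */
		int32_t fixup;	/* target = (char *)&e->fixup + e->fixup */
	};

	static int cmp_ex(const void *a, const void *b)
	{
		const struct ex_entry *x = a, *y = b;

		return x->insn - y->insn;	/* only valid on normalized values */
	}

	void sort_entries(struct ex_entry *start, struct ex_entry *finish)
	{
		struct ex_entry *p;
		int i;

		/* Normalize: make both offsets relative to the table start. */
		for (p = start, i = 0; p < finish; p++, i += 8) {
			p->insn  += i;		/* insn lives at offset i      */
			p->fixup += i + 4;	/* fixup lives at offset i + 4 */
		}
		qsort(start, finish - start, sizeof(*start), cmp_ex);
		/* Denormalize: make them self-relative again. */
		for (p = start, i = 0; p < finish; p++, i += 8) {
			p->insn  -= i;
			p->fixup -= i + 4;
		}
	}

The bug fixed above is exactly the missing p->fixup adjustment: without it, entries that move during the sort keep their old fixup offsets and end up pointing at the wrong addresses.
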
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 30e7ddb27a3a..c690c8e16a96 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -413,7 +413,7 @@ out:
+ 
+ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+-	int ret;
++	long ret;
+ 
+ 	if (personality(current->personality) == PER_LINUX32 &&
+ 	    personality(personality) == PER_LINUX)
+diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
+index 47f1ff056a54..22a358ef1b0c 100644
+--- a/arch/um/os-Linux/start_up.c
++++ b/arch/um/os-Linux/start_up.c
+@@ -94,6 +94,8 @@ static int start_ptraced_child(void)
+ {
+ 	int pid, n, status;
+ 
++	fflush(stdout);
++
+ 	pid = fork();
+ 	if (pid == 0)
+ 		ptrace_child();
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 6a1ae3751e82..15cfebaa7688 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -267,6 +267,7 @@ ENTRY(entry_INT80_compat)
+ 	 * Interrupts are off on entry.
+ 	 */
+ 	PARAVIRT_ADJUST_EXCEPTION_FRAME
++	ASM_CLAC			/* Do this early to minimize exposure */
+ 	SWAPGS
+ 
+ 	/*
+diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
+index 881b4768644a..e7de5c9a4fbd 100644
+--- a/arch/x86/include/asm/irq.h
++++ b/arch/x86/include/asm/irq.h
+@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
+ 
+ #define __ARCH_HAS_DO_SOFTIRQ
+ 
++struct irq_desc;
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ #include <linux/cpumask.h>
+ extern int check_irq_vectors_for_cpu_disable(void);
+ extern void fixup_irqs(void);
+-extern void irq_force_complete_move(int);
++extern void irq_force_complete_move(struct irq_desc *desc);
+ #endif
+ 
+ #ifdef CONFIG_HAVE_KVM
+@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
+ extern void (*x86_platform_ipi_callback)(void);
+ extern void native_init_IRQ(void);
+ 
+-struct irq_desc;
+ extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
+ 
+ extern __visible unsigned int do_IRQ(struct pt_regs *regs);
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index f25321894ad2..fdb0fbfb1197 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
+ {
+ 	int pin, ioapic, irq, irq_entry;
+ 	const struct cpumask *mask;
++	struct irq_desc *desc;
+ 	struct irq_data *idata;
+ 	struct irq_chip *chip;
+ 
+@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
+ 		if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
+ 			continue;
+ 
+-		idata = irq_get_irq_data(irq);
++		desc = irq_to_desc(irq);
++		raw_spin_lock_irq(&desc->lock);
++		idata = irq_desc_get_irq_data(desc);
+ 
+ 		/*
+ 		 * Honour affinities which have been set in early boot
+@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
+ 		/* Might be lapic_chip for irq 0 */
+ 		if (chip->irq_set_affinity)
+ 			chip->irq_set_affinity(idata, mask, false);
++		raw_spin_unlock_irq(&desc->lock);
+ 	}
+ }
+ #endif
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 861bc59c8f25..a35f6b5473f4 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -30,7 +30,7 @@ struct apic_chip_data {
+ 
+ struct irq_domain *x86_vector_domain;
+ static DEFINE_RAW_SPINLOCK(vector_lock);
+-static cpumask_var_t vector_cpumask;
++static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
+ static struct irq_chip lapic_controller;
+ #ifdef	CONFIG_X86_IO_APIC
+ static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
+@@ -116,35 +116,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
+ 	 */
+ 	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+ 	static int current_offset = VECTOR_OFFSET_START % 16;
+-	int cpu, err;
++	int cpu, vector;
+ 
+-	if (d->move_in_progress)
++	/*
++	 * If there is still a move in progress or the previous move has not
++	 * been cleaned up completely, tell the caller to come back later.
++	 */
++	if (d->move_in_progress ||
++	    cpumask_intersects(d->old_domain, cpu_online_mask))
+ 		return -EBUSY;
+ 
+ 	/* Only try and allocate irqs on cpus that are present */
+-	err = -ENOSPC;
+ 	cpumask_clear(d->old_domain);
++	cpumask_clear(searched_cpumask);
+ 	cpu = cpumask_first_and(mask, cpu_online_mask);
+ 	while (cpu < nr_cpu_ids) {
+-		int new_cpu, vector, offset;
++		int new_cpu, offset;
+ 
++		/* Get the possible target cpus for @mask/@cpu from the apic */
+ 		apic->vector_allocation_domain(cpu, vector_cpumask, mask);
+ 
++		/*
++		 * Clear the offline cpus from @vector_cpumask for searching
++		 * and verify whether the result overlaps with @mask. If true,
++		 * then the call to apic->cpu_mask_to_apicid_and() will
++		 * succeed as well. If not, no point in trying to find a
++		 * vector in this mask.
++		 */
++		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
++		if (!cpumask_intersects(vector_searchmask, mask))
++			goto next_cpu;
++
+ 		if (cpumask_subset(vector_cpumask, d->domain)) {
+-			err = 0;
+ 			if (cpumask_equal(vector_cpumask, d->domain))
+-				break;
++				goto success;
+ 			/*
+-			 * New cpumask using the vector is a proper subset of
+-			 * the current in use mask. So cleanup the vector
+-			 * allocation for the members that are not used anymore.
++			 * Mark the cpus which are no longer in the mask for
++			 * cleanup.
+ 			 */
+-			cpumask_andnot(d->old_domain, d->domain,
+-				       vector_cpumask);
+-			d->move_in_progress =
+-			   cpumask_intersects(d->old_domain, cpu_online_mask);
+-			cpumask_and(d->domain, d->domain, vector_cpumask);
+-			break;
++			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
++			vector = d->cfg.vector;
++			goto update;
+ 		}
+ 
+ 		vector = current_vector;
+@@ -156,45 +168,60 @@ next:
+ 			vector = FIRST_EXTERNAL_VECTOR + offset;
+ 		}
+ 
+-		if (unlikely(current_vector == vector)) {
+-			cpumask_or(d->old_domain, d->old_domain,
+-				   vector_cpumask);
+-			cpumask_andnot(vector_cpumask, mask, d->old_domain);
+-			cpu = cpumask_first_and(vector_cpumask,
+-						cpu_online_mask);
+-			continue;
+-		}
++		/* If the search wrapped around, try the next cpu */
++		if (unlikely(current_vector == vector))
++			goto next_cpu;
+ 
+ 		if (test_bit(vector, used_vectors))
+ 			goto next;
+ 
+-		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
++		for_each_cpu(new_cpu, vector_searchmask) {
+ 			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
+ 				goto next;
+ 		}
+ 		/* Found one! */
+ 		current_vector = vector;
+ 		current_offset = offset;
+-		if (d->cfg.vector) {
++		/* Schedule the old vector for cleanup on all cpus */
++		if (d->cfg.vector)
+ 			cpumask_copy(d->old_domain, d->domain);
+-			d->move_in_progress =
+-			   cpumask_intersects(d->old_domain, cpu_online_mask);
+-		}
+-		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
++		for_each_cpu(new_cpu, vector_searchmask)
+ 			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
+-		d->cfg.vector = vector;
+-		cpumask_copy(d->domain, vector_cpumask);
+-		err = 0;
+-		break;
+-	}
++		goto update;
+ 
+-	if (!err) {
+-		/* cache destination APIC IDs into cfg->dest_apicid */
+-		err = apic->cpu_mask_to_apicid_and(mask, d->domain,
+-						   &d->cfg.dest_apicid);
++next_cpu:
++		/*
++		 * We exclude the current @vector_cpumask from the requested
++		 * @mask and try again with the next online cpu in the
++		 * result. We cannot modify @mask, so we use @vector_cpumask
++		 * as a temporary buffer here as it will be reassigned when
++		 * calling apic->vector_allocation_domain() above.
++		 */
++		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
++		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
++		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
++		continue;
+ 	}
++	return -ENOSPC;
+ 
+-	return err;
++update:
++	/*
++	 * Exclude offline cpus from the cleanup mask and set the
++	 * move_in_progress flag when the result is not empty.
++	 */
++	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
++	d->move_in_progress = !cpumask_empty(d->old_domain);
++	d->cfg.vector = vector;
++	cpumask_copy(d->domain, vector_cpumask);
++success:
++	/*
++	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
++	 * as we already established that mask & d->domain & cpu_online_mask
++	 * is not empty.
++	 */
++	BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
++					    &d->cfg.dest_apicid));
++	return 0;
+ }
+ 
+ static int assign_irq_vector(int irq, struct apic_chip_data *data,
+@@ -224,10 +251,8 @@ static int assign_irq_vector_policy(int irq, int node,
+ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ {
+ 	struct irq_desc *desc;
+-	unsigned long flags;
+ 	int cpu, vector;
+ 
+-	raw_spin_lock_irqsave(&vector_lock, flags);
+ 	BUG_ON(!data->cfg.vector);
+ 
+ 	vector = data->cfg.vector;
+@@ -237,10 +262,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ 	data->cfg.vector = 0;
+ 	cpumask_clear(data->domain);
+ 
+-	if (likely(!data->move_in_progress)) {
+-		raw_spin_unlock_irqrestore(&vector_lock, flags);
++	/*
++	 * If move is in progress or the old_domain mask is not empty,
++	 * i.e. the cleanup IPI has not been processed yet, we need to remove
++	 * the old references to desc from all cpus vector tables.
++	 */
++	if (!data->move_in_progress && cpumask_empty(data->old_domain))
+ 		return;
+-	}
+ 
+ 	desc = irq_to_desc(irq);
+ 	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
+@@ -253,7 +281,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ 		}
+ 	}
+ 	data->move_in_progress = 0;
+-	raw_spin_unlock_irqrestore(&vector_lock, flags);
+ }
+ 
+ void init_irq_alloc_info(struct irq_alloc_info *info,
+@@ -274,19 +301,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
+ static void x86_vector_free_irqs(struct irq_domain *domain,
+ 				 unsigned int virq, unsigned int nr_irqs)
+ {
++	struct apic_chip_data *apic_data;
+ 	struct irq_data *irq_data;
++	unsigned long flags;
+ 	int i;
+ 
+ 	for (i = 0; i < nr_irqs; i++) {
+ 		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
+ 		if (irq_data && irq_data->chip_data) {
++			raw_spin_lock_irqsave(&vector_lock, flags);
+ 			clear_irq_vector(virq + i, irq_data->chip_data);
+-			free_apic_chip_data(irq_data->chip_data);
++			apic_data = irq_data->chip_data;
++			irq_domain_reset_irq_data(irq_data);
++			raw_spin_unlock_irqrestore(&vector_lock, flags);
++			free_apic_chip_data(apic_data);
+ #ifdef	CONFIG_X86_IO_APIC
+ 			if (virq + i < nr_legacy_irqs())
+ 				legacy_irq_data[virq + i] = NULL;
+ #endif
+-			irq_domain_reset_irq_data(irq_data);
+ 		}
+ 	}
+ }
+@@ -404,6 +436,8 @@ int __init arch_early_irq_init(void)
+ 	arch_init_htirq_domain(x86_vector_domain);
+ 
+ 	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
++	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
++	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
+ 
+ 	return arch_early_ioapic_init();
+ }
+@@ -492,14 +526,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
+ 		return -EINVAL;
+ 
+ 	err = assign_irq_vector(irq, data, dest);
+-	if (err) {
+-		if (assign_irq_vector(irq, data,
+-				      irq_data_get_affinity_mask(irq_data)))
+-			pr_err("Failed to recover vector for irq %d\n", irq);
+-		return err;
+-	}
+-
+-	return IRQ_SET_MASK_OK;
++	return err ? err : IRQ_SET_MASK_OK;
+ }
+ 
+ static struct irq_chip lapic_controller = {
+@@ -511,20 +538,12 @@ static struct irq_chip lapic_controller = {
+ #ifdef CONFIG_SMP
+ static void __send_cleanup_vector(struct apic_chip_data *data)
+ {
+-	cpumask_var_t cleanup_mask;
+-
+-	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+-		unsigned int i;
+-
+-		for_each_cpu_and(i, data->old_domain, cpu_online_mask)
+-			apic->send_IPI_mask(cpumask_of(i),
+-					    IRQ_MOVE_CLEANUP_VECTOR);
+-	} else {
+-		cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
+-		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+-		free_cpumask_var(cleanup_mask);
+-	}
++	raw_spin_lock(&vector_lock);
++	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+ 	data->move_in_progress = 0;
++	if (!cpumask_empty(data->old_domain))
++		apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
++	raw_spin_unlock(&vector_lock);
+ }
+ 
+ void send_cleanup_vector(struct irq_cfg *cfg)
+@@ -568,12 +587,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+ 			goto unlock;
+ 
+ 		/*
+-		 * Check if the irq migration is in progress. If so, we
+-		 * haven't received the cleanup request yet for this irq.
++		 * Nothing to cleanup if irq migration is in progress
++		 * or this cpu is not set in the cleanup mask.
+ 		 */
+-		if (data->move_in_progress)
++		if (data->move_in_progress ||
++		    !cpumask_test_cpu(me, data->old_domain))
+ 			goto unlock;
+ 
++		/*
++		 * We have two cases to handle here:
++		 * 1) vector is unchanged but the target mask got reduced
++		 * 2) vector and the target mask has changed
++		 *
++		 * #1 is obvious, but in #2 we have two vectors with the same
++		 * irq descriptor: the old and the new vector. So we need to
++		 * make sure that we only cleanup the old vector. The new
++		 * vector has the current @vector number in the config and
++		 * this cpu is part of the target mask. We better leave that
++		 * one alone.
++		 */
+ 		if (vector == data->cfg.vector &&
+ 		    cpumask_test_cpu(me, data->domain))
+ 			goto unlock;
+@@ -591,6 +623,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+ 			goto unlock;
+ 		}
+ 		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
++		cpumask_clear_cpu(me, data->old_domain);
+ unlock:
+ 		raw_spin_unlock(&desc->lock);
+ 	}
+@@ -619,12 +652,48 @@ void irq_complete_move(struct irq_cfg *cfg)
+ 	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
+ }
+ 
+-void irq_force_complete_move(int irq)
++/*
++ * Called with @desc->lock held and interrupts disabled.
++ */
++void irq_force_complete_move(struct irq_desc *desc)
+ {
+-	struct irq_cfg *cfg = irq_cfg(irq);
++	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
++	struct apic_chip_data *data = apic_chip_data(irqdata);
++	struct irq_cfg *cfg = data ? &data->cfg : NULL;
+ 
+-	if (cfg)
+-		__irq_complete_move(cfg, cfg->vector);
++	if (!cfg)
++		return;
++
++	__irq_complete_move(cfg, cfg->vector);
++
++	/*
++	 * This is tricky. If the cleanup of @data->old_domain has not been
++	 * done yet, then the following setaffinity call will fail with
++	 * -EBUSY. This can leave the interrupt in a stale state.
++	 *
++	 * The cleanup cannot make progress because we hold @desc->lock. So in
++	 * case @data->old_domain is not yet cleaned up, we need to drop the
++	 * lock and acquire it again. @desc cannot go away, because the
++	 * hotplug code holds the sparse irq lock.
++	 */
++	raw_spin_lock(&vector_lock);
++	/* Clean out all offline cpus (including ourself) first. */
++	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
++	while (!cpumask_empty(data->old_domain)) {
++		raw_spin_unlock(&vector_lock);
++		raw_spin_unlock(&desc->lock);
++		cpu_relax();
++		raw_spin_lock(&desc->lock);
++		/*
++		 * Reevaluate apic_chip_data. It might have been cleared after
++		 * we dropped @desc->lock.
++		 */
++		data = apic_chip_data(irqdata);
++		if (!data)
++			return;
++		raw_spin_lock(&vector_lock);
++	}
++	raw_spin_unlock(&vector_lock);
+ }
+ #endif
+ 
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index f8062aaf5df9..61521dc19c10 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -462,7 +462,7 @@ void fixup_irqs(void)
+ 		 * non intr-remapping case, we can't wait till this interrupt
+ 		 * arrives at this cpu before completing the irq move.
+ 		 */
+-		irq_force_complete_move(irq);
++		irq_force_complete_move(desc);
+ 
+ 		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ 			break_affinity = 1;
+@@ -470,6 +470,15 @@ void fixup_irqs(void)
+ 		}
+ 
+ 		chip = irq_data_get_irq_chip(data);
++		/*
++		 * The interrupt descriptor might have been cleaned up
++		 * already, but it is not yet removed from the radix tree
++		 */
++		if (!chip) {
++			raw_spin_unlock(&desc->lock);
++			continue;
++		}
++
+ 		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
+ 			chip->irq_mask(data);
+ 
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 1505587d06e9..b9b09fec173b 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ 	u16 sel;
+ 
+ 	la = seg_base(ctxt, addr.seg) + addr.ea;
+-	*linear = la;
+ 	*max_size = 0;
+ 	switch (mode) {
+ 	case X86EMUL_MODE_PROT64:
++		*linear = la;
+ 		if (is_noncanonical_address(la))
+ 			goto bad;
+ 
+@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ 			goto bad;
+ 		break;
+ 	default:
++		*linear = la = (u32)la;
+ 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
+ 						addr.seg);
+ 		if (!usable)
+@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
+ 			if (size > *max_size)
+ 				goto bad;
+ 		}
+-		la &= (u32)-1;
+ 		break;
+ 	}
+ 	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
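
Editor-added aside (not from the patch): the hunk above matters because outside 64-bit mode the emulator's linear address wraps at 4 GiB, so the value reported back through *linear must be the u32-truncated sum, not the raw 64-bit base-plus-offset. A tiny sketch with made-up numbers:

	/* Sketch: segment base + effective address wraps modulo 2^32 in
	 * protected/legacy modes. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t seg_base = 0xffff0000u;	/* hypothetical segment base */
		uint64_t ea       = 0x00020000u;	/* hypothetical effective address */
		uint64_t la       = seg_base + ea;	/* 0x100010000: does not fit in 32 bits */
		uint32_t linear   = (uint32_t)la;	/* 0x00010000: what the guest must see */

		printf("raw sum = %#llx, linear = %#x\n",
		       (unsigned long long)la, linear);
		return 0;
	}
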
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 3058a22a658d..7be8a251363e 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
+ 			return ret;
+ 
+ 		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
+-		walker->ptes[level] = pte;
++		walker->ptes[level - 1] = pte;
+ 	}
+ 	return 0;
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 9a2ed8904513..6ef3856aab4b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2736,6 +2736,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 	}
+ 
+ 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
++	vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
+ }
+ 
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index b2fd67da1701..ef05755a1900 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ 		break;
+ 	}
+ 
+-	if (regno > nr_registers) {
++	if (regno >= nr_registers) {
+ 		WARN_ONCE(1, "decoded an instruction with an invalid register");
+ 		return -EINVAL;
+ 	}
+diff --git a/block/bio.c b/block/bio.c
+index 4f184d938942..d4d144363250 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
+ 	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ 		/*
+ 		 * if we're in a workqueue, the request is orphaned, so
+-		 * don't copy into a random user address space, just free.
++		 * don't copy into a random user address space, just free
++		 * and return -EINTR so user space doesn't expect any data.
+ 		 */
+-		if (current->mm && bio_data_dir(bio) == READ)
++		if (!current->mm)
++			ret = -EINTR;
++		else if (bio_data_dir(bio) == READ)
+ 			ret = bio_copy_to_iter(bio, bmd->iter);
+ 		if (bmd->is_our_pages)
+ 			bio_free_pages(bio);
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 3405f7a41e25..5fdac394207a 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -465,6 +465,15 @@ static struct dmi_system_id video_dmi_table[] = {
+ 	 * as brightness control does not work.
+ 	 */
+ 	{
++	 /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
++	 .callback = video_disable_backlight_sysfs_if,
++	 .ident = "Toshiba Portege R700",
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
++		},
++	},
++	{
+ 	 /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
+ 	 .callback = video_disable_backlight_sysfs_if,
+ 	 .ident = "Toshiba Portege R830",
+@@ -473,6 +482,15 @@ static struct dmi_system_id video_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
+ 		},
+ 	},
++	{
++	 /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
++	 .callback = video_disable_backlight_sysfs_if,
++	 .ident = "Toshiba Satellite R830",
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
++		},
++	},
+ 	/*
+ 	 * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
+ 	 * but the IDs actually follow the Device ID Scheme.
+diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
+index aa45d4802707..11d8209e6e5d 100644
+--- a/drivers/acpi/nfit.c
++++ b/drivers/acpi/nfit.c
+@@ -468,37 +468,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
+ 	nfit_mem->bdw = NULL;
+ }
+ 
+-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
++static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
+ 		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
+ {
+ 	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
+ 	struct nfit_memdev *nfit_memdev;
+ 	struct nfit_flush *nfit_flush;
+-	struct nfit_dcr *nfit_dcr;
+ 	struct nfit_bdw *nfit_bdw;
+ 	struct nfit_idt *nfit_idt;
+ 	u16 idt_idx, range_index;
+ 
+-	list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+-		if (nfit_dcr->dcr->region_index != dcr)
+-			continue;
+-		nfit_mem->dcr = nfit_dcr->dcr;
+-		break;
+-	}
+-
+-	if (!nfit_mem->dcr) {
+-		dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
+-				spa->range_index, __to_nfit_memdev(nfit_mem)
+-				? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
+-		return -ENODEV;
+-	}
+-
+-	/*
+-	 * We've found enough to create an nvdimm, optionally
+-	 * find an associated BDW
+-	 */
+-	list_add(&nfit_mem->list, &acpi_desc->dimms);
+-
+ 	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
+ 		if (nfit_bdw->bdw->region_index != dcr)
+ 			continue;
+@@ -507,12 +486,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+ 	}
+ 
+ 	if (!nfit_mem->bdw)
+-		return 0;
++		return;
+ 
+ 	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
+ 
+ 	if (!nfit_mem->spa_bdw)
+-		return 0;
++		return;
+ 
+ 	range_index = nfit_mem->spa_bdw->range_index;
+ 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+@@ -537,8 +516,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+ 		}
+ 		break;
+ 	}
+-
+-	return 0;
+ }
+ 
+ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+@@ -547,7 +524,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ 	struct nfit_mem *nfit_mem, *found;
+ 	struct nfit_memdev *nfit_memdev;
+ 	int type = nfit_spa_type(spa);
+-	u16 dcr;
+ 
+ 	switch (type) {
+ 	case NFIT_SPA_DCR:
+@@ -558,14 +534,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ 	}
+ 
+ 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+-		int rc;
++		struct nfit_dcr *nfit_dcr;
++		u32 device_handle;
++		u16 dcr;
+ 
+ 		if (nfit_memdev->memdev->range_index != spa->range_index)
+ 			continue;
+ 		found = NULL;
+ 		dcr = nfit_memdev->memdev->region_index;
++		device_handle = nfit_memdev->memdev->device_handle;
+ 		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
+-			if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
++			if (__to_nfit_memdev(nfit_mem)->device_handle
++					== device_handle) {
+ 				found = nfit_mem;
+ 				break;
+ 			}
+@@ -578,6 +558,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ 			if (!nfit_mem)
+ 				return -ENOMEM;
+ 			INIT_LIST_HEAD(&nfit_mem->list);
++			list_add(&nfit_mem->list, &acpi_desc->dimms);
++		}
++
++		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
++			if (nfit_dcr->dcr->region_index != dcr)
++				continue;
++			/*
++			 * Record the control region for the dimm.  For
++			 * the ACPI 6.1 case, where there are separate
++			 * control regions for the pmem vs blk
++			 * interfaces, be sure to record the extended
++			 * blk details.
++			 */
++			if (!nfit_mem->dcr)
++				nfit_mem->dcr = nfit_dcr->dcr;
++			else if (nfit_mem->dcr->windows == 0
++					&& nfit_dcr->dcr->windows)
++				nfit_mem->dcr = nfit_dcr->dcr;
++			break;
++		}
++
++		if (dcr && !nfit_mem->dcr) {
++			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
++					spa->range_index, dcr);
++			return -ENODEV;
+ 		}
+ 
+ 		if (type == NFIT_SPA_DCR) {
+@@ -594,6 +599,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ 				nfit_mem->idt_dcr = nfit_idt->idt;
+ 				break;
+ 			}
++			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
+ 		} else {
+ 			/*
+ 			 * A single dimm may belong to multiple SPA-PM
+@@ -602,13 +608,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ 			 */
+ 			nfit_mem->memdev_pmem = nfit_memdev->memdev;
+ 		}
+-
+-		if (found)
+-			continue;
+-
+-		rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
+-		if (rc)
+-			return rc;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index daaf1c4e1e0f..80e55cb0827b 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
+ 		},
+ 	},
+-	{
+-	.callback = video_detect_force_vendor,
+-	.ident = "Dell Inspiron 5737",
+-	.matches = {
+-		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
+-		},
+-	},
+ 
+ 	/*
+ 	 * These models have a working acpi_video backlight control, and using
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index a39e85f9efa9..7d00b7a015ea 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
+ 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ 				return -EFAULT;
+ 
+-			ptr += sizeof(void *);
++			ptr += sizeof(cookie);
+ 			list_for_each_entry(w, &proc->delivered_death, entry) {
+ 				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ 
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index cdf6215a9a22..7dbba387d12a 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
+ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ {
+ 	struct ata_port *ap = qc->ap;
+-	unsigned long flags;
+ 
+ 	if (ap->ops->error_handler) {
+ 		if (in_wq) {
+-			spin_lock_irqsave(ap->lock, flags);
+-
+ 			/* EH might have kicked in while host lock is
+ 			 * released.
+ 			 */
+@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ 				} else
+ 					ata_port_freeze(ap);
+ 			}
+-
+-			spin_unlock_irqrestore(ap->lock, flags);
+ 		} else {
+ 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ 				ata_qc_complete(qc);
+@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ 		}
+ 	} else {
+ 		if (in_wq) {
+-			spin_lock_irqsave(ap->lock, flags);
+ 			ata_sff_irq_on(ap);
+ 			ata_qc_complete(qc);
+-			spin_unlock_irqrestore(ap->lock, flags);
+ 		} else
+ 			ata_qc_complete(qc);
+ 	}
+@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+ {
+ 	struct ata_link *link = qc->dev->link;
+ 	struct ata_eh_info *ehi = &link->eh_info;
+-	unsigned long flags = 0;
+ 	int poll_next;
+ 
++	lockdep_assert_held(ap->lock);
++
+ 	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+ 
+ 	/* Make sure ata_sff_qc_issue() does not throw things
+@@ -1112,14 +1106,6 @@ fsm_start:
+ 			}
+ 		}
+ 
+-		/* Send the CDB (atapi) or the first data block (ata pio out).
+-		 * During the state transition, interrupt handler shouldn't
+-		 * be invoked before the data transfer is complete and
+-		 * hsm_task_state is changed. Hence, the following locking.
+-		 */
+-		if (in_wq)
+-			spin_lock_irqsave(ap->lock, flags);
+-
+ 		if (qc->tf.protocol == ATA_PROT_PIO) {
+ 			/* PIO data out protocol.
+ 			 * send first data block.
+@@ -1135,9 +1121,6 @@ fsm_start:
+ 			/* send CDB */
+ 			atapi_send_cdb(ap, qc);
+ 
+-		if (in_wq)
+-			spin_unlock_irqrestore(ap->lock, flags);
+-
+ 		/* if polling, ata_sff_pio_task() handles the rest.
+ 		 * otherwise, interrupt handler takes over from here.
+ 		 */
+@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
+ 	u8 status;
+ 	int poll_next;
+ 
++	spin_lock_irq(ap->lock);
++
+ 	BUG_ON(ap->sff_pio_task_link == NULL);
+ 	/* qc can be NULL if timeout occurred */
+ 	qc = ata_qc_from_tag(ap, link->active_tag);
+ 	if (!qc) {
+ 		ap->sff_pio_task_link = NULL;
+-		return;
++		goto out_unlock;
+ 	}
+ 
+ fsm_start:
+@@ -1381,11 +1366,14 @@ fsm_start:
+ 	 */
+ 	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+ 	if (status & ATA_BUSY) {
++		spin_unlock_irq(ap->lock);
+ 		ata_msleep(ap, 2);
++		spin_lock_irq(ap->lock);
++
+ 		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+ 		if (status & ATA_BUSY) {
+ 			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
+-			return;
++			goto out_unlock;
+ 		}
+ 	}
+ 
+@@ -1402,6 +1390,8 @@ fsm_start:
+ 	 */
+ 	if (poll_next)
+ 		goto fsm_start;
++out_unlock:
++	spin_unlock_irq(ap->lock);
+ }
+ 
+ /**
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 92f0ee388f9e..968897108c76 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = {
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
+ 	  .driver_info = BTUSB_BCM_PATCHRAM },
+ 
++	/* Toshiba Corp - Broadcom based */
++	{ USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
++	  .driver_info = BTUSB_BCM_PATCHRAM },
++
+ 	/* Intel Bluetooth USB Bootloader (RAM module) */
+ 	{ USB_DEVICE(0x8087, 0x0a5a),
+ 	  .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
+diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
+index 2fe37f708dc7..813003d6ce09 100644
+--- a/drivers/clk/samsung/clk-cpu.c
++++ b/drivers/clk/samsung/clk-cpu.c
+@@ -148,6 +148,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+ 	unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
+ 	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
+ 	unsigned long div0, div1 = 0, mux_reg;
++	unsigned long flags;
+ 
+ 	/* find out the divider values to use for clock data */
+ 	while ((cfg_data->prate * 1000) != ndata->new_rate) {
+@@ -156,7 +157,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+ 		cfg_data++;
+ 	}
+ 
+-	spin_lock(cpuclk->lock);
++	spin_lock_irqsave(cpuclk->lock, flags);
+ 
+ 	/*
+ 	 * For the selected PLL clock frequency, get the pre-defined divider
+@@ -212,7 +213,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+ 				DIV_MASK_ALL);
+ 	}
+ 
+-	spin_unlock(cpuclk->lock);
++	spin_unlock_irqrestore(cpuclk->lock, flags);
+ 	return 0;
+ }
+ 
+@@ -223,6 +224,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
+ 	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+ 	unsigned long div = 0, div_mask = DIV_MASK;
+ 	unsigned long mux_reg;
++	unsigned long flags;
+ 
+ 	/* find out the divider values to use for clock data */
+ 	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
+@@ -233,7 +235,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
+ 		}
+ 	}
+ 
+-	spin_lock(cpuclk->lock);
++	spin_lock_irqsave(cpuclk->lock, flags);
+ 
+ 	/* select mout_apll as the alternate parent */
+ 	mux_reg = readl(base + E4210_SRC_CPU);
+@@ -246,7 +248,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
+ 	}
+ 
+ 	exynos_set_safe_div(base, div, div_mask);
+-	spin_unlock(cpuclk->lock);
++	spin_unlock_irqrestore(cpuclk->lock, flags);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
+index 6ee91401918e..4da2af9694a2 100644
+--- a/drivers/clocksource/tcb_clksrc.c
++++ b/drivers/clocksource/tcb_clksrc.c
+@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
+ 
+ 	__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
+ 	__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
+-	clk_disable(tcd->clk);
++	if (!clockevent_state_detached(d))
++		clk_disable(tcd->clk);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
+index a92e94b40b5b..dfc3bb410b00 100644
+--- a/drivers/clocksource/vt8500_timer.c
++++ b/drivers/clocksource/vt8500_timer.c
+@@ -50,6 +50,8 @@
+ 
+ #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+ 
++#define MIN_OSCR_DELTA		16
++
+ static void __iomem *regbase;
+ 
+ static cycle_t vt8500_timer_read(struct clocksource *cs)
+@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
+ 		cpu_relax();
+ 	writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+ 
+-	if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
++	if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
+ 		return -ETIME;
+ 
+ 	writel(1, regbase + TIMER_IER_VAL);
+@@ -151,7 +153,7 @@ static void __init vt8500_timer_init(struct device_node *np)
+ 		pr_err("%s: setup_irq failed for %s\n", __func__,
+ 							clockevent.name);
+ 	clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+-					4, 0xf0000000);
++					MIN_OSCR_DELTA * 2, 0xf0000000);
+ }
+ 
+ CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index b260576ddb12..d994b0f652d3 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -356,16 +356,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
+ 	if (!have_governor_per_policy())
+ 		cdata->gdbs_data = dbs_data;
+ 
++	policy->governor_data = dbs_data;
++
+ 	ret = sysfs_create_group(get_governor_parent_kobj(policy),
+ 				 get_sysfs_attr(dbs_data));
+ 	if (ret)
+ 		goto reset_gdbs_data;
+ 
+-	policy->governor_data = dbs_data;
+-
+ 	return 0;
+ 
+ reset_gdbs_data:
++	policy->governor_data = NULL;
++
+ 	if (!have_governor_per_policy())
+ 		cdata->gdbs_data = NULL;
+ 	cdata->exit(dbs_data, !policy->governor->initialized);
+@@ -386,16 +388,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
+ 	if (!cdbs->shared || cdbs->shared->policy)
+ 		return -EBUSY;
+ 
+-	policy->governor_data = NULL;
+ 	if (!--dbs_data->usage_count) {
+ 		sysfs_remove_group(get_governor_parent_kobj(policy),
+ 				   get_sysfs_attr(dbs_data));
+ 
++		policy->governor_data = NULL;
++
+ 		if (!have_governor_per_policy())
+ 			cdata->gdbs_data = NULL;
+ 
+ 		cdata->exit(dbs_data, policy->governor->initialized == 1);
+ 		kfree(dbs_data);
++	} else {
++		policy->governor_data = NULL;
+ 	}
+ 
+ 	free_common_dbs_info(policy, cdata);
+diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
+index 1d99c97defa9..096377232747 100644
+--- a/drivers/cpufreq/pxa2xx-cpufreq.c
++++ b/drivers/cpufreq/pxa2xx-cpufreq.c
+@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
+ 	}
+ }
+ #else
+-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
++static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
+ {
+ 	return 0;
+ }
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 370c661c7d7b..fa00f3a186da 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1688,6 +1688,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+ 	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+ 		at_xdmac_remove_xfer(atchan, desc);
+ 
++	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+ 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+ 	spin_unlock_irqrestore(&atchan->lock, flags);
+ 
+@@ -1820,6 +1821,8 @@ static int atmel_xdmac_resume(struct device *dev)
+ 		atchan = to_at_xdmac_chan(chan);
+ 		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
+ 		if (at_xdmac_chan_is_cyclic(atchan)) {
++			if (at_xdmac_chan_is_paused(atchan))
++				at_xdmac_device_resume(chan);
+ 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
+ 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
+ 			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 7067b6ddc1db..4f099ea29f83 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -536,16 +536,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
+ 
+ /* Called with dwc->lock held and all DMAC interrupts disabled */
+ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+-		u32 status_err, u32 status_xfer)
++		u32 status_block, u32 status_err, u32 status_xfer)
+ {
+ 	unsigned long flags;
+ 
+-	if (dwc->mask) {
++	if (status_block & dwc->mask) {
+ 		void (*callback)(void *param);
+ 		void *callback_param;
+ 
+ 		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+ 				channel_readl(dwc, LLP));
++		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ 
+ 		callback = dwc->cdesc->period_callback;
+ 		callback_param = dwc->cdesc->period_callback_param;
+@@ -577,6 +578,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+ 		channel_writel(dwc, CTL_LO, 0);
+ 		channel_writel(dwc, CTL_HI, 0);
+ 
++		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ 		dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ 		dma_writel(dw, CLEAR.XFER, dwc->mask);
+ 
+@@ -585,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+ 
+ 		spin_unlock_irqrestore(&dwc->lock, flags);
+ 	}
++
++	/* Re-enable interrupts */
++	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ }
+ 
+ /* ------------------------------------------------------------------------- */
+@@ -593,10 +598,12 @@ static void dw_dma_tasklet(unsigned long data)
+ {
+ 	struct dw_dma *dw = (struct dw_dma *)data;
+ 	struct dw_dma_chan *dwc;
++	u32 status_block;
+ 	u32 status_xfer;
+ 	u32 status_err;
+ 	int i;
+ 
++	status_block = dma_readl(dw, RAW.BLOCK);
+ 	status_xfer = dma_readl(dw, RAW.XFER);
+ 	status_err = dma_readl(dw, RAW.ERROR);
+ 
+@@ -605,16 +612,15 @@ static void dw_dma_tasklet(unsigned long data)
+ 	for (i = 0; i < dw->dma.chancnt; i++) {
+ 		dwc = &dw->chan[i];
+ 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+-			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
++			dwc_handle_cyclic(dw, dwc, status_block, status_err,
++					status_xfer);
+ 		else if (status_err & (1 << i))
+ 			dwc_handle_error(dw, dwc);
+ 		else if (status_xfer & (1 << i))
+ 			dwc_scan_descriptors(dw, dwc);
+ 	}
+ 
+-	/*
+-	 * Re-enable interrupts.
+-	 */
++	/* Re-enable interrupts */
+ 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+ 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+ }
+@@ -635,6 +641,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+ 	 * softirq handler.
+ 	 */
+ 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+ 
+ 	status = dma_readl(dw, STATUS_INT);
+@@ -645,6 +652,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+ 
+ 		/* Try to recover */
+ 		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
++		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
+ 		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
+ 		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
+ 		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
+@@ -1111,6 +1119,7 @@ static void dw_dma_off(struct dw_dma *dw)
+ 	dma_writel(dw, CFG, 0);
+ 
+ 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ 	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+ 	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+ 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+@@ -1216,6 +1225,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ 
+ 	/* Disable interrupts */
+ 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
++	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
+ 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
+ 
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+@@ -1245,7 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ int dw_dma_cyclic_start(struct dma_chan *chan)
+ {
+ 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+-	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
++	struct dw_dma		*dw = to_dw_dma(chan->device);
+ 	unsigned long		flags;
+ 
+ 	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+@@ -1255,25 +1265,10 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 
+-	/* Assert channel is idle */
+-	if (dma_readl(dw, CH_EN) & dwc->mask) {
+-		dev_err(chan2dev(&dwc->chan),
+-			"%s: BUG: Attempted to start non-idle channel\n",
+-			__func__);
+-		dwc_dump_chan_regs(dwc);
+-		spin_unlock_irqrestore(&dwc->lock, flags);
+-		return -EBUSY;
+-	}
+-
+-	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+-	dma_writel(dw, CLEAR.XFER, dwc->mask);
++	/* Enable interrupts to perform cyclic transfer */
++	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ 
+-	/* Setup DMAC channel registers */
+-	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+-	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+-	channel_writel(dwc, CTL_HI, 0);
+-
+-	channel_set_bit(dw, CH_EN, dwc->mask);
++	dwc_dostart(dwc, dwc->cdesc->desc[0]);
+ 
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+@@ -1479,6 +1474,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
+ 
+ 	dwc_chan_disable(dw, dwc);
+ 
++	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ 	dma_writel(dw, CLEAR.XFER, dwc->mask);
+ 
+@@ -1567,9 +1563,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ 	/* Force dma off, just in case */
+ 	dw_dma_off(dw);
+ 
+-	/* Disable BLOCK interrupts as well */
+-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+-
+ 	/* Create a pool of consistent memory blocks for hardware descriptors */
+ 	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
+ 					 sizeof(struct dw_desc), 4, 0);
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 592af5f0cf39..53587377e672 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+  */
+ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+ {
+-	int status;
+-
+ 	if (!edac_dev->edac_check)
+ 		return;
+ 
+-	status = cancel_delayed_work(&edac_dev->work);
+-	if (status == 0) {
+-		/* workq instance might be running, wait for it */
+-		flush_workqueue(edac_workqueue);
+-	}
++	edac_dev->op_state = OP_OFFLINE;
++
++	cancel_delayed_work_sync(&edac_dev->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index 77ecd6a4179a..1b2c2187b347 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -586,18 +586,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+  */
+ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+ {
+-	int status;
+-
+-	if (mci->op_state != OP_RUNNING_POLL)
+-		return;
+-
+-	status = cancel_delayed_work(&mci->work);
+-	if (status == 0) {
+-		edac_dbg(0, "not canceled, flush the queue\n");
++	mci->op_state = OP_OFFLINE;
+ 
+-		/* workq instance might be running, wait for it */
+-		flush_workqueue(edac_workqueue);
+-	}
++	cancel_delayed_work_sync(&mci->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index a75acea0f674..58aed67b7eba 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -880,21 +880,26 @@ static struct device_type mci_attr_type = {
+ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
+ 				 const struct attribute_group **groups)
+ {
++	char *name;
+ 	int i, err;
+ 
+ 	/*
+ 	 * The memory controller needs its own bus, in order to avoid
+ 	 * namespace conflicts at /sys/bus/edac.
+ 	 */
+-	mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+-	if (!mci->bus->name)
++	name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
++	if (!name)
+ 		return -ENOMEM;
+ 
++	mci->bus->name = name;
++
+ 	edac_dbg(0, "creating bus %s\n", mci->bus->name);
+ 
+ 	err = bus_register(mci->bus);
+-	if (err < 0)
+-		goto fail_free_name;
++	if (err < 0) {
++		kfree(name);
++		return err;
++	}
+ 
+ 	/* get the /sys/devices/system/edac subsys reference */
+ 	mci->dev.type = &mci_attr_type;
+@@ -961,8 +966,8 @@ fail_unregister_dimm:
+ 	device_unregister(&mci->dev);
+ fail_unregister_bus:
+ 	bus_unregister(mci->bus);
+-fail_free_name:
+-	kfree(mci->bus->name);
++	kfree(name);
++
+ 	return err;
+ }
+ 
+@@ -993,10 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+ 
+ void edac_unregister_sysfs(struct mem_ctl_info *mci)
+ {
++	const char *name = mci->bus->name;
++
+ 	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+ 	device_unregister(&mci->dev);
+ 	bus_unregister(mci->bus);
+-	kfree(mci->bus->name);
++	kfree(name);
+ }
+ 
+ static void mc_attr_release(struct device *dev)
+diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
+index 2cf44b4db80c..b4b38603b804 100644
+--- a/drivers/edac/edac_pci.c
++++ b/drivers/edac/edac_pci.c
+@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
+  */
+ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
+ {
+-	int status;
+-
+ 	edac_dbg(0, "\n");
+ 
+-	status = cancel_delayed_work(&pci->work);
+-	if (status == 0)
+-		flush_workqueue(edac_workqueue);
++	pci->op_state = OP_OFFLINE;
++
++	cancel_delayed_work_sync(&pci->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
+diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
+index 756eca8c4cf8..10e6774ab2a2 100644
+--- a/drivers/firmware/efi/efivars.c
++++ b/drivers/firmware/efi/efivars.c
+@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
+ 	}
+ 
+ 	if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
+-	    efivar_validate(name, data, size) == false) {
++	    efivar_validate(vendor, name, data, size) == false) {
+ 		printk(KERN_ERR "efivars: Malformed variable content\n");
+ 		return -EINVAL;
+ 	}
+@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ 	}
+ 
+ 	if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
+-	    efivar_validate(name, data, size) == false) {
++	    efivar_validate(new_var->VendorGuid, name, data,
++			    size) == false) {
+ 		printk(KERN_ERR "efivars: Malformed variable content\n");
+ 		return -EINVAL;
+ 	}
+@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ static int
+ efivar_create_sysfs_entry(struct efivar_entry *new_var)
+ {
+-	int i, short_name_size;
++	int short_name_size;
+ 	char *short_name;
+-	unsigned long variable_name_size;
+-	efi_char16_t *variable_name;
++	unsigned long utf8_name_size;
++	efi_char16_t *variable_name = new_var->var.VariableName;
+ 	int ret;
+ 
+-	variable_name = new_var->var.VariableName;
+-	variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
+-
+ 	/*
+-	 * Length of the variable bytes in ASCII, plus the '-' separator,
++	 * Length of the variable bytes in UTF8, plus the '-' separator,
+ 	 * plus the GUID, plus trailing NUL
+ 	 */
+-	short_name_size = variable_name_size / sizeof(efi_char16_t)
+-				+ 1 + EFI_VARIABLE_GUID_LEN + 1;
+-
+-	short_name = kzalloc(short_name_size, GFP_KERNEL);
++	utf8_name_size = ucs2_utf8size(variable_name);
++	short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
+ 
++	short_name = kmalloc(short_name_size, GFP_KERNEL);
+ 	if (!short_name)
+ 		return -ENOMEM;
+ 
+-	/* Convert Unicode to normal chars (assume top bits are 0),
+-	   ala UTF-8 */
+-	for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
+-		short_name[i] = variable_name[i] & 0xFF;
+-	}
++	ucs2_as_utf8(short_name, variable_name, short_name_size);
++
+ 	/* This is ugly, but necessary to separate one vendor's
+ 	   private variables from another's.         */
+-
+-	*(short_name + strlen(short_name)) = '-';
++	short_name[utf8_name_size] = '-';
+ 	efi_guid_to_str(&new_var->var.VendorGuid,
+-			 short_name + strlen(short_name));
++			 short_name + utf8_name_size + 1);
+ 
+ 	new_var->kobj.kset = efivars_kset;
+ 
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 70a0fb10517f..7f2ea21c730d 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
+ }
+ 
+ struct variable_validate {
++	efi_guid_t vendor;
+ 	char *name;
+ 	bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
+ 			 unsigned long len);
+ };
+ 
++/*
++ * This is the list of variables we need to validate, as well as the
++ * whitelist for what we think is safe not to default to immutable.
++ *
++ * If it has a validate() method that's not NULL, it'll go into the
++ * validation routine.  If not, it is assumed valid, but still used for
++ * whitelisting.
++ *
++ * Note that it's sorted by {vendor,name}, but globbed names must come after
++ * any other name with the same prefix.
++ */
+ static const struct variable_validate variable_validate[] = {
+-	{ "BootNext", validate_uint16 },
+-	{ "BootOrder", validate_boot_order },
+-	{ "DriverOrder", validate_boot_order },
+-	{ "Boot*", validate_load_option },
+-	{ "Driver*", validate_load_option },
+-	{ "ConIn", validate_device_path },
+-	{ "ConInDev", validate_device_path },
+-	{ "ConOut", validate_device_path },
+-	{ "ConOutDev", validate_device_path },
+-	{ "ErrOut", validate_device_path },
+-	{ "ErrOutDev", validate_device_path },
+-	{ "Timeout", validate_uint16 },
+-	{ "Lang", validate_ascii_string },
+-	{ "PlatformLang", validate_ascii_string },
+-	{ "", NULL },
++	{ EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
++	{ EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
++	{ EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
++	{ EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
++	{ EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
++	{ LINUX_EFI_CRASH_GUID, "*", NULL },
++	{ NULL_GUID, "", NULL },
+ };
+ 
++static bool
++variable_matches(const char *var_name, size_t len, const char *match_name,
++		 int *match)
++{
++	for (*match = 0; ; (*match)++) {
++		char c = match_name[*match];
++		char u = var_name[*match];
++
++		/* Wildcard in the matching name means we've matched */
++		if (c == '*')
++			return true;
++
++		/* Case sensitive match */
++		if (!c && *match == len)
++			return true;
++
++		if (c != u)
++			return false;
++
++		if (!c)
++			return true;
++	}
++	return true;
++}
++
+ bool
+-efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
++		unsigned long data_size)
+ {
+ 	int i;
+-	u16 *unicode_name = var_name;
++	unsigned long utf8_size;
++	u8 *utf8_name;
+ 
+-	for (i = 0; variable_validate[i].validate != NULL; i++) {
+-		const char *name = variable_validate[i].name;
+-		int match;
++	utf8_size = ucs2_utf8size(var_name);
++	utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
++	if (!utf8_name)
++		return false;
+ 
+-		for (match = 0; ; match++) {
+-			char c = name[match];
+-			u16 u = unicode_name[match];
++	ucs2_as_utf8(utf8_name, var_name, utf8_size);
++	utf8_name[utf8_size] = '\0';
+ 
+-			/* All special variables are plain ascii */
+-			if (u > 127)
+-				return true;
++	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
++		const char *name = variable_validate[i].name;
++		int match = 0;
+ 
+-			/* Wildcard in the matching name means we've matched */
+-			if (c == '*')
+-				return variable_validate[i].validate(var_name,
+-							     match, data, len);
++		if (efi_guidcmp(vendor, variable_validate[i].vendor))
++			continue;
+ 
+-			/* Case sensitive match */
+-			if (c != u)
++		if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
++			if (variable_validate[i].validate == NULL)
+ 				break;
+-
+-			/* Reached the end of the string while matching */
+-			if (!c)
+-				return variable_validate[i].validate(var_name,
+-							     match, data, len);
++			kfree(utf8_name);
++			return variable_validate[i].validate(var_name, match,
++							     data, data_size);
+ 		}
+ 	}
+-
++	kfree(utf8_name);
+ 	return true;
+ }
+ EXPORT_SYMBOL_GPL(efivar_validate);
+ 
++bool
++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
++			     size_t len)
++{
++	int i;
++	bool found = false;
++	int match = 0;
++
++	/*
++	 * Check if our variable is in the validated variables list
++	 */
++	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
++		if (efi_guidcmp(variable_validate[i].vendor, vendor))
++			continue;
++
++		if (variable_matches(var_name, len,
++				     variable_validate[i].name, &match)) {
++			found = true;
++			break;
++		}
++	}
++
++	/*
++	 * If it's in our list, it is removable.
++	 */
++	return found;
++}
++EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
++
+ static efi_status_t
+ check_var_size(u32 attributes, unsigned long size)
+ {
+@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ 
+ 	*set = false;
+ 
+-	if (efivar_validate(name, data, *size) == false)
++	if (efivar_validate(*vendor, name, data, *size) == false)
+ 		return -EINVAL;
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
+index 04c270757030..ca066018ea34 100644
+--- a/drivers/gpu/drm/amd/amdgpu/Makefile
++++ b/drivers/gpu/drm/amd/amdgpu/Makefile
+@@ -22,7 +22,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+ 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
+ 
+ # add asic specific block
+-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
++amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
+ 	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
+ 	amdgpu_amdkfd_gfx_v7.o
+ 
+@@ -31,6 +31,7 @@ amdgpu-y += \
+ 
+ # add GMC block
+ amdgpu-y += \
++	gmc_v7_0.o \
+ 	gmc_v8_0.o
+ 
+ # add IH block
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 048cfe073dae..bb1099c549df 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -604,8 +604,6 @@ struct amdgpu_sa_manager {
+ 	uint32_t		align;
+ };
+ 
+-struct amdgpu_sa_bo;
+-
+ /* sub-allocation buffer */
+ struct amdgpu_sa_bo {
+ 	struct list_head		olist;
+@@ -2314,6 +2312,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+ 				     uint32_t flags);
+ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
++bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
++				  unsigned long end);
+ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
+ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ 				 struct ttm_mem_reg *mem);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d5b421330145..c961fe093e12 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1744,15 +1744,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ 	}
+ 
+ 	/* post card */
+-	amdgpu_atom_asic_init(adev->mode_info.atom_context);
++	if (!amdgpu_card_posted(adev))
++		amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ 
+ 	r = amdgpu_resume(adev);
++	if (r)
++		DRM_ERROR("amdgpu_resume failed (%d).\n", r);
+ 
+ 	amdgpu_fence_driver_resume(adev);
+ 
+-	r = amdgpu_ib_ring_tests(adev);
+-	if (r)
+-		DRM_ERROR("ib ring test failed (%d).\n", r);
++	if (resume) {
++		r = amdgpu_ib_ring_tests(adev);
++		if (r)
++			DRM_ERROR("ib ring test failed (%d).\n", r);
++	}
+ 
+ 	r = amdgpu_late_init(adev);
+ 	if (r)
+@@ -1788,6 +1793,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ 	}
+ 
+ 	drm_kms_helper_poll_enable(dev);
++	drm_helper_hpd_irq_event(dev);
+ 
+ 	if (fbcon) {
+ 		amdgpu_fbdev_set_suspend(adev, 0);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index 5580d3420c3a..0c713a908304 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
+ 
+ 	struct drm_crtc *crtc = &amdgpuCrtc->base;
+ 	unsigned long flags;
+-	unsigned i;
+-	int vpos, hpos, stat, min_udelay;
++	unsigned i, repcnt = 4;
++	int vpos, hpos, stat, min_udelay = 0;
+ 	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+ 
+ 	amdgpu_flip_wait_fence(adev, &work->excl);
+@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
+ 	 * In practice this won't execute very often unless on very fast
+ 	 * machines because the time window for this to happen is very small.
+ 	 */
+-	for (;;) {
++	while (amdgpuCrtc->enabled && repcnt--) {
+ 		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+ 		 * start in hpos, and to the "fudged earlier" vblank start in
+ 		 * vpos.
+@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
+ 		/* Sleep at least until estimated real start of hw vblank */
+ 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ 		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
++		if (min_udelay > vblank->framedur_ns / 2000) {
++			/* Don't wait ridiculously long - something is wrong */
++			repcnt = 0;
++			break;
++		}
+ 		usleep_range(min_udelay, 2 * min_udelay);
+ 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ 	};
+ 
++	if (!repcnt)
++		DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
++				 "framedur %d, linedur %d, stat %d, vpos %d, "
++				 "hpos %d\n", work->crtc_id, min_udelay,
++				 vblank->framedur_ns / 1000,
++				 vblank->linedur_ns / 1000, stat, vpos, hpos);
++
+ 	/* do the flip (mmio) */
+ 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
+ 	/* set the flip status */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 0508c5cd103a..8d6668cedf6d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -250,11 +250,11 @@ static struct pci_device_id pciidlist[] = {
+ 	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
+ #endif
+ 	/* topaz */
+-	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+-	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+-	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+-	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+-	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
++	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
++	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
++	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
++	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
++	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+ 	/* tonga */
+ 	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+ 	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+index b1969f2b2038..d4e2780c0796 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
+ 
+ 		list_for_each_entry(bo, &node->bos, mn_list) {
+ 
+-			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
++			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
++							  end))
+ 				continue;
+ 
+ 			r = amdgpu_bo_reserve(bo, true);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index c3ce103b6a33..a2a16acee34d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -399,7 +399,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ 		}
+ 		if (fpfn > bo->placements[i].fpfn)
+ 			bo->placements[i].fpfn = fpfn;
+-		if (lpfn && lpfn < bo->placements[i].lpfn)
++		if (!bo->placements[i].lpfn ||
++		    (lpfn && lpfn < bo->placements[i].lpfn))
+ 			bo->placements[i].lpfn = lpfn;
+ 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 22a8c7d3a3ab..03fe25142b78 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -595,8 +595,6 @@ force:
+ 
+ 	/* update display watermarks based on new power state */
+ 	amdgpu_display_bandwidth_update(adev);
+-	/* update displays */
+-	amdgpu_dpm_display_configuration_changed(adev);
+ 
+ 	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+ 	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+@@ -616,6 +614,9 @@ force:
+ 
+ 	amdgpu_dpm_post_set_power_state(adev);
+ 
++	/* update displays */
++	amdgpu_dpm_display_configuration_changed(adev);
++
+ 	if (adev->pm.funcs->force_performance_level) {
+ 		if (adev->pm.dpm.thermal_active) {
+ 			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+index 8b88edb0434b..ca72a2e487b9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
+ 
+ 		for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
+ 			if (fences[i])
+-				fences[count++] = fences[i];
++				fences[count++] = fence_get(fences[i]);
+ 
+ 		if (count) {
+ 			spin_unlock(&sa_manager->wq.lock);
+ 			t = fence_wait_any_timeout(fences, count, false,
+ 						   MAX_SCHEDULE_TIMEOUT);
++			for (i = 0; i < count; ++i)
++				fence_put(fences[i]);
++
+ 			r = (t > 0) ? 0 : t;
+ 			spin_lock(&sa_manager->wq.lock);
+ 		} else {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+index dd005c336c97..181ce39ef5e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+@@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
+ 		fence = to_amdgpu_fence(sync->sync_to[i]);
+ 
+ 		/* check if we really need to sync */
+-		if (!amdgpu_fence_need_sync(fence, ring))
++		if (!amdgpu_enable_scheduler &&
++		    !amdgpu_fence_need_sync(fence, ring))
+ 			continue;
+ 
+ 		/* prevent GPU deadlocks */
+@@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
+ 		}
+ 
+ 		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
+-			r = fence_wait(&fence->base, true);
++			r = fence_wait(sync->sync_to[i], true);
+ 			if (r)
+ 				return r;
+ 			continue;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 8a1752ff3d8e..1cbb16e15307 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+ 						       0, PAGE_SIZE,
+ 						       PCI_DMA_BIDIRECTIONAL);
+ 		if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
+-			while (--i) {
++			while (i--) {
+ 				pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
+ 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ 				gtt->ttm.dma_address[i] = 0;
+@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
+ 	return !!gtt->userptr;
+ }
+ 
++bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
++				  unsigned long end)
++{
++	struct amdgpu_ttm_tt *gtt = (void *)ttm;
++	unsigned long size;
++
++	if (gtt == NULL)
++		return false;
++
++	if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
++		return false;
++
++	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
++	if (gtt->userptr > end || gtt->userptr + size <= start)
++		return false;
++
++	return true;
++}
++
+ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
+ {
+ 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+@@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ 			flags |= AMDGPU_PTE_SNOOPED;
+ 	}
+ 
+-	if (adev->asic_type >= CHIP_TOPAZ)
++	if (adev->asic_type >= CHIP_TONGA)
+ 		flags |= AMDGPU_PTE_EXECUTABLE;
+ 
+ 	flags |= AMDGPU_PTE_READABLE;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index b53d273eb7a1..39adbb6470d1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ 		return -EINVAL;
+ 
+ 	/* make sure object fit at this offset */
+-	eaddr = saddr + size;
++	eaddr = saddr + size - 1;
+ 	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
+ 		return -EINVAL;
+ 
+ 	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
+-	if (last_pfn > adev->vm_manager.max_pfn) {
+-		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
++	if (last_pfn >= adev->vm_manager.max_pfn) {
++		dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
+ 			last_pfn, adev->vm_manager.max_pfn);
+ 		return -EINVAL;
+ 	}
+@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ 
+ 	spin_lock(&vm->it_lock);
+-	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
++	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
+ 	spin_unlock(&vm->it_lock);
+ 	if (it) {
+ 		struct amdgpu_bo_va_mapping *tmp;
+@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ 
+ 	INIT_LIST_HEAD(&mapping->list);
+ 	mapping->it.start = saddr;
+-	mapping->it.last = eaddr - 1;
++	mapping->it.last = eaddr;
+ 	mapping->offset = offset;
+ 	mapping->flags = flags;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index e1dcab98e249..4cb45f4602aa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -90,7 +90,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_me.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
+-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
+ MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
+ 
+ MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
+@@ -807,7 +806,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
+ 	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+ 	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ 
+-	if (adev->asic_type != CHIP_STONEY) {
++	if ((adev->asic_type != CHIP_STONEY) &&
++	    (adev->asic_type != CHIP_TOPAZ)) {
+ 		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ 		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ 		if (!err) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+index ed8abb58a785..272110cc18c2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+ 
+ MODULE_FIRMWARE("radeon/bonaire_mc.bin");
+ MODULE_FIRMWARE("radeon/hawaii_mc.bin");
++MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
++
++static const u32 golden_settings_iceland_a11[] =
++{
++	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
++	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
++	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
++	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
++};
++
++static const u32 iceland_mgcg_cgcg_init[] =
++{
++	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
++};
++
++static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
++{
++	switch (adev->asic_type) {
++	case CHIP_TOPAZ:
++		amdgpu_program_register_sequence(adev,
++						 iceland_mgcg_cgcg_init,
++						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
++		amdgpu_program_register_sequence(adev,
++						 golden_settings_iceland_a11,
++						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
++		break;
++	default:
++		break;
++	}
++}
+ 
+ /**
+- * gmc8_mc_wait_for_idle - wait for MC idle callback.
++ * gmc7_mc_wait_for_idle - wait for MC idle callback.
+  *
+  * @adev: amdgpu_device pointer
+  *
+@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
+ 	case CHIP_HAWAII:
+ 		chip_name = "hawaii";
+ 		break;
++	case CHIP_TOPAZ:
++		chip_name = "topaz";
++		break;
+ 	case CHIP_KAVERI:
+ 	case CHIP_KABINI:
+ 		return 0;
+ 	default: BUG();
+ 	}
+ 
+-	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++	if (adev->asic_type == CHIP_TOPAZ)
++		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
++	else
++		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++
+ 	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+ 	if (err)
+ 		goto out;
+@@ -980,6 +1017,8 @@ static int gmc_v7_0_hw_init(void *handle)
+ 	int r;
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
++	gmc_v7_0_init_golden_registers(adev);
++
+ 	gmc_v7_0_mc_program(adev);
+ 
+ 	if (!(adev->flags & AMD_IS_APU)) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index d39028440814..ba4ad00ba8b4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -42,9 +42,7 @@
+ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+ 
+-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
+ 
+ static const u32 golden_settings_tonga_a11[] =
+ {
+@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
+ 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+ };
+ 
+-static const u32 golden_settings_iceland_a11[] =
+-{
+-	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+-	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+-	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+-	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+-};
+-
+-static const u32 iceland_mgcg_cgcg_init[] =
+-{
+-	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+-};
+-
+ static const u32 cz_mgcg_cgcg_init[] =
+ {
+ 	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
+ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
+ {
+ 	switch (adev->asic_type) {
+-	case CHIP_TOPAZ:
+-		amdgpu_program_register_sequence(adev,
+-						 iceland_mgcg_cgcg_init,
+-						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+-		amdgpu_program_register_sequence(adev,
+-						 golden_settings_iceland_a11,
+-						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+-		break;
+ 	case CHIP_FIJI:
+ 		amdgpu_program_register_sequence(adev,
+ 						 fiji_mgcg_cgcg_init,
+@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
+ 	DRM_DEBUG("\n");
+ 
+ 	switch (adev->asic_type) {
+-	case CHIP_TOPAZ:
+-		chip_name = "topaz";
+-		break;
+ 	case CHIP_TONGA:
+ 		chip_name = "tonga";
+ 		break;
+ 	case CHIP_FIJI:
+-		chip_name = "fiji";
+-		break;
+ 	case CHIP_CARRIZO:
+ 	case CHIP_STONEY:
+ 		return 0;
+@@ -1003,7 +975,7 @@ static int gmc_v8_0_hw_init(void *handle)
+ 
+ 	gmc_v8_0_mc_program(adev);
+ 
+-	if (!(adev->flags & AMD_IS_APU)) {
++	if (adev->asic_type == CHIP_TONGA) {
+ 		r = gmc_v8_0_mc_load_microcode(adev);
+ 		if (r) {
+ 			DRM_ERROR("Failed to load MC firmware!\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+index 966d4b2ed9da..090486c18249 100644
+--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
++++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
+ 		case AMDGPU_UCODE_ID_CP_ME:
+ 			return UCODE_ID_CP_ME_MASK;
+ 		case AMDGPU_UCODE_ID_CP_MEC1:
+-			return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
++			return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
+ 		case AMDGPU_UCODE_ID_CP_MEC2:
+ 			return UCODE_ID_CP_MEC_MASK;
+ 		case AMDGPU_UCODE_ID_RLC_G:
+@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
+-			&toc->entry[toc->num_entries++])) {
+-		DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
+-		return -EINVAL;
+-	}
+-
+ 	if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
+ 			&toc->entry[toc->num_entries++])) {
+ 		DRM_ERROR("Failed to get firmware entry for SDMA0\n");
+@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
+ 			UCODE_ID_CP_ME_MASK |
+ 			UCODE_ID_CP_PFP_MASK |
+ 			UCODE_ID_CP_MEC_MASK |
+-			UCODE_ID_CP_MEC_JT1_MASK |
+-			UCODE_ID_CP_MEC_JT2_MASK;
++			UCODE_ID_CP_MEC_JT1_MASK;
++
+ 
+ 	if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
+ 		DRM_ERROR("Fail to request SMU load ucode\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+index 204903897b4f..63d6cb3c1110 100644
+--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
+ 
+ static int tonga_dpm_suspend(void *handle)
+ {
+-	return 0;
++	return tonga_dpm_hw_fini(handle);
+ }
+ 
+ static int tonga_dpm_resume(void *handle)
+ {
+-	int ret;
+-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-
+-	mutex_lock(&adev->pm.mutex);
+-
+-	ret = tonga_smu_start(adev);
+-	if (ret) {
+-		DRM_ERROR("SMU start failed\n");
+-		goto fail;
+-	}
+-
+-fail:
+-	mutex_unlock(&adev->pm.mutex);
+-	return ret;
++	return tonga_dpm_hw_init(handle);
+ }
+ 
+ static int tonga_dpm_set_clockgating_state(void *handle,
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 2adc1c855e85..7628eb44cce2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -60,6 +60,7 @@
+ #include "vi.h"
+ #include "vi_dpm.h"
+ #include "gmc_v8_0.h"
++#include "gmc_v7_0.h"
+ #include "gfx_v8_0.h"
+ #include "sdma_v2_4.h"
+ #include "sdma_v3_0.h"
+@@ -1128,10 +1129,10 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
+ 	},
+ 	{
+ 		.type = AMD_IP_BLOCK_TYPE_GMC,
+-		.major = 8,
+-		.minor = 0,
++		.major = 7,
++		.minor = 4,
+ 		.rev = 0,
+-		.funcs = &gmc_v8_0_ip_funcs,
++		.funcs = &gmc_v7_0_ip_funcs,
+ 	},
+ 	{
+ 		.type = AMD_IP_BLOCK_TYPE_IH,
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 809959d56d78..39d7e2e15c11 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -798,6 +798,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
+ 	return mstb;
+ }
+ 
++static void drm_dp_free_mst_port(struct kref *kref);
++
++static void drm_dp_free_mst_branch_device(struct kref *kref)
++{
++	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
++	if (mstb->port_parent) {
++		if (list_empty(&mstb->port_parent->next))
++			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
++	}
++	kfree(mstb);
++}
++
+ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+ {
+ 	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+@@ -805,6 +817,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+ 	bool wake_tx = false;
+ 
+ 	/*
++	 * init kref again to be used by ports to remove mst branch when it is
++	 * not needed anymore
++	 */
++	kref_init(kref);
++
++	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
++		kref_get(&mstb->port_parent->kref);
++
++	/*
+ 	 * destroy all ports - don't need lock
+ 	 * as there are no more references to the mst branch
+ 	 * device at this point.
+@@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+ 
+ 	if (wake_tx)
+ 		wake_up(&mstb->mgr->tx_waitq);
+-	kfree(mstb);
++
++	kref_put(kref, drm_dp_free_mst_branch_device);
+ }
+ 
+ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+@@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
+ 			 * from an EDID retrieval */
+ 
+ 			mutex_lock(&mgr->destroy_connector_lock);
++			kref_get(&port->parent->kref);
+ 			list_add(&port->next, &mgr->destroy_connector_list);
+ 			mutex_unlock(&mgr->destroy_connector_lock);
+ 			schedule_work(&mgr->destroy_connector_work);
+@@ -973,17 +996,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
+ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
+ 				 u8 *rad)
+ {
+-	int lct = port->parent->lct;
++	int parent_lct = port->parent->lct;
+ 	int shift = 4;
+-	int idx = lct / 2;
+-	if (lct > 1) {
+-		memcpy(rad, port->parent->rad, idx);
+-		shift = (lct % 2) ? 4 : 0;
++	int idx = (parent_lct - 1) / 2;
++	if (parent_lct > 1) {
++		memcpy(rad, port->parent->rad, idx + 1);
++		shift = (parent_lct % 2) ? 4 : 0;
+ 	} else
+ 		rad[0] = 0;
+ 
+ 	rad[idx] |= port->port_num << shift;
+-	return lct + 1;
++	return parent_lct + 1;
+ }
+ 
+ /*
+@@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+ 	return send_link;
+ }
+ 
+-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
+-				   struct drm_dp_mst_port *port)
++static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+ {
+ 	int ret;
+-	if (port->dpcd_rev >= 0x12) {
+-		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
+-		if (!port->guid_valid) {
+-			ret = drm_dp_send_dpcd_write(mstb->mgr,
+-						     port,
+-						     DP_GUID,
+-						     16, port->guid);
+-			port->guid_valid = true;
++
++	memcpy(mstb->guid, guid, 16);
++
++	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
++		if (mstb->port_parent) {
++			ret = drm_dp_send_dpcd_write(
++					mstb->mgr,
++					mstb->port_parent,
++					DP_GUID,
++					16,
++					mstb->guid);
++		} else {
++
++			ret = drm_dp_dpcd_write(
++					mstb->mgr->aux,
++					DP_GUID,
++					mstb->guid,
++					16);
+ 		}
+ 	}
+ }
+@@ -1039,7 +1071,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
+ 	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
+ 	for (i = 0; i < (mstb->lct - 1); i++) {
+ 		int shift = (i % 2) ? 0 : 4;
+-		int port_num = mstb->rad[i / 2] >> shift;
++		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
+ 		snprintf(temp, sizeof(temp), "-%d", port_num);
+ 		strlcat(proppath, temp, proppath_size);
+ 	}
+@@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ 	port->dpcd_rev = port_msg->dpcd_revision;
+ 	port->num_sdp_streams = port_msg->num_sdp_streams;
+ 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
+-	memcpy(port->guid, port_msg->peer_guid, 16);
+ 
+ 	/* manage mstb port lists with mgr lock - take a reference
+ 	   for this list */
+@@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ 
+ 	if (old_ddps != port->ddps) {
+ 		if (port->ddps) {
+-			drm_dp_check_port_guid(mstb, port);
+ 			if (!port->input)
+ 				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+ 		} else {
+-			port->guid_valid = false;
+ 			port->available_pbn = 0;
+ 			}
+ 	}
+@@ -1157,10 +1186,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
+ 
+ 	if (old_ddps != port->ddps) {
+ 		if (port->ddps) {
+-			drm_dp_check_port_guid(mstb, port);
+ 			dowork = true;
+ 		} else {
+-			port->guid_valid = false;
+ 			port->available_pbn = 0;
+ 		}
+ 	}
+@@ -1190,7 +1217,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
+ 
+ 	for (i = 0; i < lct - 1; i++) {
+ 		int shift = (i % 2) ? 0 : 4;
+-		int port_num = rad[i / 2] >> shift;
++		int port_num = (rad[i / 2] >> shift) & 0xf;
+ 
+ 		list_for_each_entry(port, &mstb->ports, next) {
+ 			if (port->port_num == port_num) {
+@@ -1210,6 +1237,48 @@ out:
+ 	return mstb;
+ }
+ 
++static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
++	struct drm_dp_mst_branch *mstb,
++	uint8_t *guid)
++{
++	struct drm_dp_mst_branch *found_mstb;
++	struct drm_dp_mst_port *port;
++
++	if (memcmp(mstb->guid, guid, 16) == 0)
++		return mstb;
++
++
++	list_for_each_entry(port, &mstb->ports, next) {
++		if (!port->mstb)
++			continue;
++
++		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
++
++		if (found_mstb)
++			return found_mstb;
++	}
++
++	return NULL;
++}
++
++static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
++	struct drm_dp_mst_topology_mgr *mgr,
++	uint8_t *guid)
++{
++	struct drm_dp_mst_branch *mstb;
++
++	/* find the port by iterating down */
++	mutex_lock(&mgr->lock);
++
++	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
++
++	if (mstb)
++		kref_get(&mstb->kref);
++
++	mutex_unlock(&mgr->lock);
++	return mstb;
++}
++
+ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ 					       struct drm_dp_mst_branch *mstb)
+ {
+@@ -1320,6 +1389,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ 				  struct drm_dp_sideband_msg_tx *txmsg)
+ {
+ 	struct drm_dp_mst_branch *mstb = txmsg->dst;
++	u8 req_type;
+ 
+ 	/* both msg slots are full */
+ 	if (txmsg->seqno == -1) {
+@@ -1336,7 +1406,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ 			txmsg->seqno = 1;
+ 		mstb->tx_slots[txmsg->seqno] = txmsg;
+ 	}
+-	hdr->broadcast = 0;
++
++	req_type = txmsg->msg[0] & 0x7f;
++	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
++		req_type == DP_RESOURCE_STATUS_NOTIFY)
++		hdr->broadcast = 1;
++	else
++		hdr->broadcast = 0;
+ 	hdr->path_msg = txmsg->path_msg;
+ 	hdr->lct = mstb->lct;
+ 	hdr->lcr = mstb->lct - 1;
+@@ -1438,26 +1514,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+ }
+ 
+ /* called holding qlock */
+-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
++static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
++				       struct drm_dp_sideband_msg_tx *txmsg)
+ {
+-	struct drm_dp_sideband_msg_tx *txmsg;
+ 	int ret;
+ 
+ 	/* construct a chunk from the first msg in the tx_msg queue */
+-	if (list_empty(&mgr->tx_msg_upq)) {
+-		mgr->tx_up_in_progress = false;
+-		return;
+-	}
+-
+-	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
+ 	ret = process_single_tx_qlock(mgr, txmsg, true);
+-	if (ret == 1) {
+-		/* up txmsgs aren't put in slots - so free after we send it */
+-		list_del(&txmsg->next);
+-		kfree(txmsg);
+-	} else if (ret)
++
++	if (ret != 1)
+ 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+-	mgr->tx_up_in_progress = true;
++
++	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ }
+ 
+ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
+@@ -1507,6 +1575,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ 				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
+ 				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
+ 			}
++
++			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
++
+ 			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+ 				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
+ 			}
+@@ -1554,6 +1625,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ 	return 0;
+ }
+ 
++static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
++{
++	if (!mstb->port_parent)
++		return NULL;
++
++	if (mstb->port_parent->mstb != mstb)
++		return mstb->port_parent;
++
++	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
++}
++
++static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
++									 struct drm_dp_mst_branch *mstb,
++									 int *port_num)
++{
++	struct drm_dp_mst_branch *rmstb = NULL;
++	struct drm_dp_mst_port *found_port;
++	mutex_lock(&mgr->lock);
++	if (mgr->mst_primary) {
++		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
++
++		if (found_port) {
++			rmstb = found_port->parent;
++			kref_get(&rmstb->kref);
++			*port_num = found_port->port_num;
++		}
++	}
++	mutex_unlock(&mgr->lock);
++	return rmstb;
++}
++
+ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ 				   struct drm_dp_mst_port *port,
+ 				   int id,
+@@ -1561,11 +1663,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ {
+ 	struct drm_dp_sideband_msg_tx *txmsg;
+ 	struct drm_dp_mst_branch *mstb;
+-	int len, ret;
++	int len, ret, port_num;
+ 
++	port_num = port->port_num;
+ 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+-	if (!mstb)
+-		return -EINVAL;
++	if (!mstb) {
++		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
++
++		if (!mstb)
++			return -EINVAL;
++	}
+ 
+ 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ 	if (!txmsg) {
+@@ -1574,7 +1681,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ 	}
+ 
+ 	txmsg->dst = mstb;
+-	len = build_allocate_payload(txmsg, port->port_num,
++	len = build_allocate_payload(txmsg, port_num,
+ 				     id,
+ 				     pbn);
+ 
+@@ -1844,11 +1951,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
+ 	drm_dp_encode_up_ack_reply(txmsg, req_type);
+ 
+ 	mutex_lock(&mgr->qlock);
+-	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
+-	if (!mgr->tx_up_in_progress) {
+-		process_single_up_tx_qlock(mgr);
+-	}
++
++	process_single_up_tx_qlock(mgr, txmsg);
++
+ 	mutex_unlock(&mgr->qlock);
++
++	kfree(txmsg);
+ 	return 0;
+ }
+ 
+@@ -1927,31 +2035,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ 		mgr->mst_primary = mstb;
+ 		kref_get(&mgr->mst_primary->kref);
+ 
+-		{
+-			struct drm_dp_payload reset_pay;
+-			reset_pay.start_slot = 0;
+-			reset_pay.num_slots = 0x3f;
+-			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+-		}
+-
+ 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+-					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
++							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ 		if (ret < 0) {
+ 			goto out_unlock;
+ 		}
+ 
+-
+-		/* sort out guid */
+-		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
+-		if (ret != 16) {
+-			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
+-			goto out_unlock;
+-		}
+-
+-		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
+-		if (!mgr->guid_valid) {
+-			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
+-			mgr->guid_valid = true;
++		{
++			struct drm_dp_payload reset_pay;
++			reset_pay.start_slot = 0;
++			reset_pay.num_slots = 0x3f;
++			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+ 		}
+ 
+ 		queue_work(system_long_wq, &mgr->work);
+@@ -2145,28 +2239,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 
+ 	if (mgr->up_req_recv.have_eomt) {
+ 		struct drm_dp_sideband_msg_req_body msg;
+-		struct drm_dp_mst_branch *mstb;
++		struct drm_dp_mst_branch *mstb = NULL;
+ 		bool seqno;
+-		mstb = drm_dp_get_mst_branch_device(mgr,
+-						    mgr->up_req_recv.initial_hdr.lct,
+-						    mgr->up_req_recv.initial_hdr.rad);
+-		if (!mstb) {
+-			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+-			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+-			return 0;
++
++		if (!mgr->up_req_recv.initial_hdr.broadcast) {
++			mstb = drm_dp_get_mst_branch_device(mgr,
++							    mgr->up_req_recv.initial_hdr.lct,
++							    mgr->up_req_recv.initial_hdr.rad);
++			if (!mstb) {
++				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++				return 0;
++			}
+ 		}
+ 
+ 		seqno = mgr->up_req_recv.initial_hdr.seqno;
+ 		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+ 
+ 		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+-			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
++			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
++
++			if (!mstb)
++				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
++
++			if (!mstb) {
++				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++				return 0;
++			}
++
+ 			drm_dp_update_port(mstb, &msg.u.conn_stat);
++
+ 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
+ 			(*mgr->cbs->hotplug)(mgr);
+ 
+ 		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+-			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
++			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
++			if (!mstb)
++				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
++
++			if (!mstb) {
++				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++				return 0;
++			}
++
+ 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
+ 		}
+ 
+@@ -2346,6 +2463,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
+ 		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+ 		if (pbn == port->vcpi.pbn) {
+ 			*slots = port->vcpi.num_slots;
++			drm_dp_put_port(port);
+ 			return true;
+ 		}
+ 	}
+@@ -2505,32 +2623,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
+  */
+ int drm_dp_calc_pbn_mode(int clock, int bpp)
+ {
+-	fixed20_12 pix_bw;
+-	fixed20_12 fbpp;
+-	fixed20_12 result;
+-	fixed20_12 margin, tmp;
+-	u32 res;
+-
+-	pix_bw.full = dfixed_const(clock);
+-	fbpp.full = dfixed_const(bpp);
+-	tmp.full = dfixed_const(8);
+-	fbpp.full = dfixed_div(fbpp, tmp);
+-
+-	result.full = dfixed_mul(pix_bw, fbpp);
+-	margin.full = dfixed_const(54);
+-	tmp.full = dfixed_const(64);
+-	margin.full = dfixed_div(margin, tmp);
+-	result.full = dfixed_div(result, margin);
+-
+-	margin.full = dfixed_const(1006);
+-	tmp.full = dfixed_const(1000);
+-	margin.full = dfixed_div(margin, tmp);
+-	result.full = dfixed_mul(result, margin);
+-
+-	result.full = dfixed_div(result, tmp);
+-	result.full = dfixed_ceil(result);
+-	res = dfixed_trunc(result);
+-	return res;
++	u64 kbps;
++	s64 peak_kbps;
++	u32 numerator;
++	u32 denominator;
++
++	kbps = clock * bpp;
++
++	/*
++	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
++	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
++	 * common multiplier to render an integer PBN for all link rate/lane
++	 * counts combinations
++	 * calculate
++	 * peak_kbps *= (1006/1000)
++	 * peak_kbps *= (64/54)
++	 * peak_kbps *= 8    convert to bytes
++	 */
++
++	numerator = 64 * 1006;
++	denominator = 54 * 8 * 1000 * 1000;
++
++	kbps *= numerator;
++	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
++
++	return drm_fixp2int_ceil(peak_kbps);
+ }
+ EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
+ 
+@@ -2538,11 +2655,23 @@ static int test_calc_pbn_mode(void)
+ {
+ 	int ret;
+ 	ret = drm_dp_calc_pbn_mode(154000, 30);
+-	if (ret != 689)
++	if (ret != 689) {
++		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++				154000, 30, 689, ret);
+ 		return -EINVAL;
++	}
+ 	ret = drm_dp_calc_pbn_mode(234000, 30);
+-	if (ret != 1047)
++	if (ret != 1047) {
++		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++				234000, 30, 1047, ret);
++		return -EINVAL;
++	}
++	ret = drm_dp_calc_pbn_mode(297000, 24);
++	if (ret != 1063) {
++		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++				297000, 24, 1063, ret);
+ 		return -EINVAL;
++	}
+ 	return 0;
+ }
+ 
+@@ -2683,6 +2812,13 @@ static void drm_dp_tx_work(struct work_struct *work)
+ 	mutex_unlock(&mgr->qlock);
+ }
+ 
++static void drm_dp_free_mst_port(struct kref *kref)
++{
++	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
++	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
++	kfree(port);
++}
++
+ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ {
+ 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
+@@ -2703,13 +2839,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ 		list_del(&port->next);
+ 		mutex_unlock(&mgr->destroy_connector_lock);
+ 
++		kref_init(&port->kref);
++		INIT_LIST_HEAD(&port->next);
++
+ 		mgr->cbs->destroy_connector(mgr, port->connector);
+ 
+ 		drm_dp_port_teardown_pdt(port, port->pdt);
+ 
+-		if (!port->input && port->vcpi.vcpi > 0)
+-			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+-		kfree(port);
++		if (!port->input && port->vcpi.vcpi > 0) {
++			if (mgr->mst_state) {
++				drm_dp_mst_reset_vcpi_slots(mgr, port);
++				drm_dp_update_payload_part1(mgr);
++				drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
++			}
++		}
++
++		kref_put(&port->kref, drm_dp_free_mst_port);
+ 		send_hotplug = true;
+ 	}
+ 	if (send_hotplug)
+@@ -2736,7 +2881,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ 	mutex_init(&mgr->qlock);
+ 	mutex_init(&mgr->payload_lock);
+ 	mutex_init(&mgr->destroy_connector_lock);
+-	INIT_LIST_HEAD(&mgr->tx_msg_upq);
+ 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
+ 	INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index 607f493ae801..8090989185b2 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -221,6 +221,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
+ 		diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
+ 	}
+ 
++	/*
++	 * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
++	 * interval? If so then vblank irqs keep running and it will likely
++	 * happen that the hardware vblank counter is not trustworthy as it
++	 * might reset at some point in that interval and vblank timestamps
++	 * are not trustworthy either in that interval. Iow. this can result
++	 * in a bogus diff >> 1 which must be avoided as it would cause
++	 * random large forward jumps of the software vblank counter.
++	 */
++	if (diff > 1 && (vblank->inmodeset & 0x2)) {
++		DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
++			      " due to pre-modeset.\n", pipe, diff);
++		diff = 1;
++	}
++
++	/*
++	 * FIXME: Need to replace this hack with proper seqlocks.
++	 *
++	 * Restrict the bump of the software vblank counter to a safe maximum
++	 * value of +1 whenever there is the possibility that concurrent readers
++	 * of vblank timestamps could be active at the moment, as the current
++	 * implementation of the timestamp caching and updating is not safe
++	 * against concurrent readers for calls to store_vblank() with a bump
++	 * of anything but +1. A bump != 1 would very likely return corrupted
++	 * timestamps to userspace, because the same slot in the cache could
++	 * be concurrently written by store_vblank() and read by one of those
++	 * readers without the read-retry logic detecting the collision.
++	 *
++	 * Concurrent readers can exist when we are called from the
++	 * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
++	 * irq callers. However, all those calls to us are happening with the
++	 * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
++	 * can't increase while we are executing. Therefore a zero refcount at
++	 * this point is safe for arbitrary counter bumps if we are called
++	 * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
++	 * we must also accept a refcount of 1, as whenever we are called from
++	 * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
++	 * we must let that one pass through in order to not lose vblank counts
++	 * during vblank irq off - which would completely defeat the whole
++	 * point of this routine.
++	 *
++	 * Whenever we are called from vblank irq, we have to assume concurrent
++	 * readers exist or can show up any time during our execution, even if
++	 * the refcount is currently zero, as vblank irqs are usually only
++	 * enabled due to the presence of readers, and because when we are called
++	 * from vblank irq we can't hold the vbl_lock to protect us from sudden
++	 * bumps in vblank refcount. Therefore also restrict bumps to +1 when
++	 * called from vblank irq.
++	 */
++	if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
++	    (flags & DRM_CALLED_FROM_VBLIRQ))) {
++		DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
++			      "refcount %u, vblirq %u\n", pipe, diff,
++			      atomic_read(&vblank->refcount),
++			      (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
++		diff = 1;
++	}
++
+ 	DRM_DEBUG_VBL("updating vblank count on crtc %u:"
+ 		      " current=%u, diff=%u, hw=%u hw_last=%u\n",
+ 		      pipe, vblank->count, diff, cur_vblank, vblank->last);
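The FIXME above asks for proper seqlocks; for readers unfamiliar with that pattern, here is a minimal sketch of what a seqlock-protected count/timestamp read could look like (not part of this patch; the seqlock field and the single cached time value are hypothetical simplifications, only the <linux/seqlock.h> API is real):

	static u32 vblank_count_and_time(struct drm_vblank_crtc *vblank,
					 struct timeval *time)
	{
		unsigned int seq;
		u32 count;

		do {
			/* retry until count and time come from one
			 * consistent store_vblank() update */
			seq = read_seqbegin(&vblank->seqlock);
			count = vblank->count;
			*time = vblank->time;
		} while (read_seqretry(&vblank->seqlock, seq));

		return count;
	}

With store_vblank() wrapped in write_seqlock()/write_sequnlock(), arbitrary counter bumps would be safe against concurrent readers and the +1 clamps above could go away.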
+@@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
+ 	spin_lock_irqsave(&dev->event_lock, irqflags);
+ 
+ 	spin_lock(&dev->vbl_lock);
+-	vblank_disable_and_save(dev, pipe);
++	DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
++		      pipe, vblank->enabled, vblank->inmodeset);
++
++	/* Avoid redundant vblank disables without previous drm_vblank_on(). */
++	if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
++		vblank_disable_and_save(dev, pipe);
++
+ 	wake_up(&vblank->queue);
+ 
+ 	/*
+@@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
+ 		return;
+ 
+ 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
++	DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
++		      pipe, vblank->enabled, vblank->inmodeset);
++
+ 	/* Drop our private "prevent drm_vblank_get" refcount */
+ 	if (vblank->inmodeset) {
+ 		atomic_dec(&vblank->refcount);
+@@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
+ 	 * re-enable interrupts if there are users left, or the
+ 	 * user wishes vblank interrupts to be enabled all the time.
+ 	 */
+-	if (atomic_read(&vblank->refcount) != 0 ||
+-	    (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
++	if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
+ 		WARN_ON(drm_vblank_enable(dev, pipe));
+ 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ }
+@@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
+ 	if (vblank->inmodeset) {
+ 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ 		dev->vblank_disable_allowed = true;
++		drm_reset_vblank_timestamp(dev, pipe);
+ 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ 
+ 		if (vblank->inmodeset & 0x2)
+diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
+index c707fa6fca85..e3bdc8b1c32c 100644
+--- a/drivers/gpu/drm/gma500/gem.c
++++ b/drivers/gpu/drm/gma500/gem.c
+@@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
+ 		return ret;
+ 	}
+ 	/* We have the initial and handle reference but need only one now */
+-	drm_gem_object_unreference(&r->gem);
++	drm_gem_object_unreference_unlocked(&r->gem);
+ 	*handlep = handle;
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index b4741d121a74..61fcb3b22297 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -402,6 +402,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
+ 	if (ret)
+ 		goto cleanup_gem_stolen;
+ 
++	intel_setup_gmbus(dev);
++
+ 	/* Important: The output setup functions called by modeset_init need
+ 	 * working irqs for e.g. gmbus and dp aux transfers. */
+ 	intel_modeset_init(dev);
+@@ -451,6 +453,7 @@ cleanup_gem:
+ cleanup_irq:
+ 	intel_guc_ucode_fini(dev);
+ 	drm_irq_uninstall(dev);
++	intel_teardown_gmbus(dev);
+ cleanup_gem_stolen:
+ 	i915_gem_cleanup_stolen(dev);
+ cleanup_vga_switcheroo:
+@@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 
+ 	/* Try to make sure MCHBAR is enabled before poking at it */
+ 	intel_setup_mchbar(dev);
+-	intel_setup_gmbus(dev);
+ 	intel_opregion_setup(dev);
+ 
+ 	i915_gem_load(dev);
+@@ -1099,7 +1101,6 @@ out_gem_unload:
+ 	if (dev->pdev->msi_enabled)
+ 		pci_disable_msi(dev->pdev);
+ 
+-	intel_teardown_gmbus(dev);
+ 	intel_teardown_mchbar(dev);
+ 	pm_qos_remove_request(&dev_priv->pm_qos);
+ 	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+@@ -1198,7 +1199,6 @@ int i915_driver_unload(struct drm_device *dev)
+ 
+ 	intel_csr_ucode_fini(dev);
+ 
+-	intel_teardown_gmbus(dev);
+ 	intel_teardown_mchbar(dev);
+ 
+ 	destroy_workqueue(dev_priv->hotplug.dp_wq);
+diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
+index 02ceb7a4b481..0433d25f9d23 100644
+--- a/drivers/gpu/drm/i915/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/i915_gem_context.c
+@@ -340,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev)
+ 			i915_gem_context_unreference(lctx);
+ 			ring->last_context = NULL;
+ 		}
++
++		/* Force the GPU state to be reinitialised on enabling */
++		if (ring->default_context)
++			ring->default_context->legacy_hw_ctx.initialized = false;
+ 	}
+ }
+ 
+@@ -708,7 +712,7 @@ static int do_switch(struct drm_i915_gem_request *req)
+ 	if (ret)
+ 		goto unpin_out;
+ 
+-	if (!to->legacy_hw_ctx.initialized) {
++	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
+ 		hw_flags |= MI_RESTORE_INHIBIT;
+ 		/* NB: If we inhibit the restore, the context is not allowed to
+ 		 * die because future work may end up depending on valid address
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 0d228f909dcb..0f42a2782afc 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -2354,9 +2354,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
+ 				spt_irq_handler(dev, pch_iir);
+ 			else
+ 				cpt_irq_handler(dev, pch_iir);
+-		} else
+-			DRM_ERROR("The master control interrupt lied (SDE)!\n");
+-
++		} else {
++			/*
++			 * Like on previous PCH there seems to be something
++			 * fishy going on with forwarding PCH interrupts.
++			 */
++			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
++		}
+ 	}
+ 
+ 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index a6752a61d99f..7e6158b889da 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -1582,7 +1582,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+ 			 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ 			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ 			 wrpll_params.central_freq;
+-	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
++	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
++		   intel_encoder->type == INTEL_OUTPUT_DP_MST) {
+ 		switch (crtc_state->port_clock / 2) {
+ 		case 81000:
+ 			ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 32cf97346978..f859a5b87ed4 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -11930,11 +11930,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
+ 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ 	}
+ 
+-	/* Clamp bpp to 8 on screens without EDID 1.4 */
+-	if (connector->base.display_info.bpc == 0 && bpp > 24) {
+-		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+-			      bpp);
+-		pipe_config->pipe_bpp = 24;
++	/* Clamp bpp to default limit on screens without EDID 1.4 */
++	if (connector->base.display_info.bpc == 0) {
++		int type = connector->base.connector_type;
++		int clamp_bpp = 24;
++
++		/* Fall back to 18 bpp when DP sink capability is unknown. */
++		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
++		    type == DRM_MODE_CONNECTOR_eDP)
++			clamp_bpp = 18;
++
++		if (bpp > clamp_bpp) {
++			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
++				      bpp, clamp_bpp);
++			pipe_config->pipe_bpp = clamp_bpp;
++		}
+ 	}
+ }
+ 
+@@ -13537,11 +13547,12 @@ intel_check_primary_plane(struct drm_plane *plane,
+ 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ 	bool can_position = false;
+ 
+-	/* use scaler when colorkey is not required */
+-	if (INTEL_INFO(plane->dev)->gen >= 9 &&
+-	    state->ckey.flags == I915_SET_COLORKEY_NONE) {
+-		min_scale = 1;
+-		max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
++	if (INTEL_INFO(plane->dev)->gen >= 9) {
++		/* use scaler when colorkey is not required */
++		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
++			min_scale = 1;
++			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
++		}
+ 		can_position = true;
+ 	}
+ 
+@@ -15565,6 +15576,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ 	mutex_lock(&dev->struct_mutex);
+ 	intel_cleanup_gt_powersave(dev);
+ 	mutex_unlock(&dev->struct_mutex);
++
++	intel_teardown_gmbus(dev);
+ }
+ 
+ /*
+diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+index a5e99ac305da..a8912aecc31f 100644
+--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
++++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+@@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ 	gpio = *data++;
+ 
+ 	/* pull up/down */
+-	action = *data++;
++	action = *data++ & 1;
++
++	if (gpio >= ARRAY_SIZE(gtable)) {
++		DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
++		goto out;
++	}
+ 
+ 	function = gtable[gpio].function_reg;
+ 	pad = gtable[gpio].pad_reg;
+@@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ 	vlv_gpio_nc_write(dev_priv, pad, val);
+ 	mutex_unlock(&dev_priv->sb_lock);
+ 
++out:
+ 	return data;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
+index b17785719598..d7a6437d9da2 100644
+--- a/drivers/gpu/drm/i915/intel_hotplug.c
++++ b/drivers/gpu/drm/i915/intel_hotplug.c
+@@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
+ 	list_for_each_entry(connector, &mode_config->connector_list, head) {
+ 		struct intel_connector *intel_connector = to_intel_connector(connector);
+ 		connector->polled = intel_connector->polled;
+-		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+-			connector->polled = DRM_CONNECTOR_POLL_HPD;
++
++		/* MST has a dynamic intel_connector->encoder and its reprobing
++		 * is all handled by the MST helpers. */
+ 		if (intel_connector->mst_port)
++			continue;
++
++		if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
++		    intel_connector->encoder->hpd_pin > HPD_NONE)
+ 			connector->polled = DRM_CONNECTOR_POLL_HPD;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index 8324654037b6..f3bee54c414f 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -675,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
+ 	return 0;
+ 
+ err:
+-	while (--pin) {
++	while (pin--) {
+ 		if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+ 			continue;
+ 
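The while (--pin) -> while (pin--) change above is the classic partial-cleanup off-by-one; the identical fix appears again in radeon_ttm.c further down. A standalone illustration, with setup()/teardown() as placeholders:

	int i, err = 0;

	for (i = 0; i < n; i++) {
		err = setup(i);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* i is the index that failed; undo 0 .. i-1 */
	while (i--)
		teardown(i);
	/* while (--i) would skip index 0, and if the very first
	 * setup() failed (i == 0) it would wrap around and run
	 * far past the array. */
	return err;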
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 88e12bdf79e2..d69547a65dbb 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -1706,6 +1706,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
+ 	if (flush_domains) {
+ 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
++		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 9461a238f5d5..f6b2a814e629 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
+ 	if (flush_domains) {
+ 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
++		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ 	}
+ 	if (invalidate_domains) {
+@@ -419,6 +420,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
+ 	if (flush_domains) {
+ 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
++		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ 	}
+ 	if (invalidate_domains) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 2e7cbe933533..2a5ed7460354 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
+ 
+ 		NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+ 
++		mutex_lock(&drm->dev->mode_config.mutex);
+ 		if (plugged)
+ 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ 		else
+ 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++		mutex_unlock(&drm->dev->mode_config.mutex);
++
+ 		drm_helper_hpd_irq_event(connector->dev);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index 64c8d932d5f1..58a3f7cf2fb3 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -634,10 +634,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
+ 		nv_crtc->lut.depth = 0;
+ 	}
+ 
+-	/* Make sure that drm and hw vblank irqs get resumed if needed. */
+-	for (head = 0; head < dev->mode_config.num_crtc; head++)
+-		drm_vblank_on(dev, head);
+-
+ 	/* This should ensure we don't hit a locking problem when someone
+ 	 * wakes us up via a connector.  We should never go into suspend
+ 	 * while the display is on anyways.
+@@ -647,6 +643,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
+ 
+ 	drm_helper_resume_force_mode(dev);
+ 
++	/* Make sure that drm and hw vblank irqs get resumed if needed. */
++	for (head = 0; head < dev->mode_config.num_crtc; head++)
++		drm_vblank_on(dev, head);
++
+ 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
+index 60e32c4e4e49..35ecc0d0458f 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
++++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
+@@ -24,7 +24,7 @@
+ static int nouveau_platform_probe(struct platform_device *pdev)
+ {
+ 	const struct nvkm_device_tegra_func *func;
+-	struct nvkm_device *device;
++	struct nvkm_device *device = NULL;
+ 	struct drm_device *drm;
+ 	int ret;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+index 7f8a42721eb2..e7e581d6a8ff 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
+ 
+ 	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
+ 		return -ENOMEM;
+-	*pdevice = &tdev->device;
++
+ 	tdev->func = func;
+ 	tdev->pdev = pdev;
+ 	tdev->irq = -1;
+ 
+ 	tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
+-	if (IS_ERR(tdev->vdd))
+-		return PTR_ERR(tdev->vdd);
++	if (IS_ERR(tdev->vdd)) {
++		ret = PTR_ERR(tdev->vdd);
++		goto free;
++	}
+ 
+ 	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
+-	if (IS_ERR(tdev->rst))
+-		return PTR_ERR(tdev->rst);
++	if (IS_ERR(tdev->rst)) {
++		ret = PTR_ERR(tdev->rst);
++		goto free;
++	}
+ 
+ 	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
+-	if (IS_ERR(tdev->clk))
+-		return PTR_ERR(tdev->clk);
++	if (IS_ERR(tdev->clk)) {
++		ret = PTR_ERR(tdev->clk);
++		goto free;
++	}
+ 
+ 	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
+-	if (IS_ERR(tdev->clk_pwr))
+-		return PTR_ERR(tdev->clk_pwr);
++	if (IS_ERR(tdev->clk_pwr)) {
++		ret = PTR_ERR(tdev->clk_pwr);
++		goto free;
++	}
+ 
+ 	nvkm_device_tegra_probe_iommu(tdev);
+ 
+ 	ret = nvkm_device_tegra_power_up(tdev);
+ 	if (ret)
+-		return ret;
++		goto remove;
+ 
+ 	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+ 	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
+@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
+ 			       cfg, dbg, detect, mmio, subdev_mask,
+ 			       &tdev->device);
+ 	if (ret)
+-		return ret;
++		goto powerdown;
++
++	*pdevice = &tdev->device;
+ 
+ 	return 0;
++
++powerdown:
++	nvkm_device_tegra_power_down(tdev);
++remove:
++	nvkm_device_tegra_remove_iommu(tdev);
++free:
++	kfree(tdev);
++	return ret;
+ }
+ #else
+ int
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+index 74e2f7c6c07e..9688970eca47 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
+ 		.outp = outp,
+ 	}, *dp = &_dp;
+ 	u32 datarate = 0;
++	u8  pwr;
+ 	int ret;
+ 
+ 	if (!outp->base.info.location && disp->func->sor.magic)
+@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
+ 	/* disable link interrupt handling during link training */
+ 	nvkm_notify_put(&outp->irq);
+ 
++	/* ensure sink is not in a low-power state */
++	if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
++		if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
++			pwr &= ~DPCD_SC00_SET_POWER;
++			pwr |=  DPCD_SC00_SET_POWER_D0;
++			nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
++		}
++	}
++
+ 	/* enable down-spreading and execute pre-train script from vbios */
+ 	dp_link_train_init(dp, outp->dpcd[3] & 0x01);
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+index 9596290329c7..6e10c5e0ef11 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+@@ -71,5 +71,11 @@
+ #define DPCD_LS0C_LANE1_POST_CURSOR2                                       0x0c
+ #define DPCD_LS0C_LANE0_POST_CURSOR2                                       0x03
+ 
++/* DPCD Sink Control */
++#define DPCD_SC00                                                       0x00600
++#define DPCD_SC00_SET_POWER                                                0x03
++#define DPCD_SC00_SET_POWER_D0                                             0x01
++#define DPCD_SC00_SET_POWER_D3                                             0x03
++
+ void nvkm_dp_train(struct work_struct *);
+ #endif
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 2ae8577497ca..7c2e78201ead 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+ 		       cmd->command_size))
+ 		return -EFAULT;
+ 
+-	reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
++	reloc_info = kmalloc_array(cmd->relocs_num,
++				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
+ 	if (!reloc_info)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
+index 752072771388..367a916f364e 100644
+--- a/drivers/gpu/drm/radeon/dce6_afmt.c
++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
+@@ -301,6 +301,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
+ 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ 	 */
+ 	if (ASIC_IS_DCE8(rdev)) {
++		unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
++			DENTIST_DPREFCLK_WDIVIDER_MASK) >>
++			DENTIST_DPREFCLK_WDIVIDER_SHIFT;
++		div = radeon_audio_decode_dfs_div(div);
++
++		if (div)
++			clock = clock * 100 / div;
++
+ 		WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
+ 		WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
+ 	} else {
+diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+index 9953356fe263..3cf04a2f44bb 100644
+--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
+ 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
+ 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ 	 */
++	if (ASIC_IS_DCE41(rdev)) {
++		unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
++			DENTIST_DPREFCLK_WDIVIDER_MASK) >>
++			DENTIST_DPREFCLK_WDIVIDER_SHIFT;
++		div = radeon_audio_decode_dfs_div(div);
++
++		if (div)
++			clock = 100 * clock / div;
++	}
++
+ 	WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+ 	WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+ }
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index 4aa5f755572b..13b6029d65cc 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -511,6 +511,11 @@
+ #define DCCG_AUDIO_DTO1_CNTL              0x05cc
+ #       define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
+ 
++#define DCE41_DENTIST_DISPCLK_CNTL			0x049c
++#       define DENTIST_DPREFCLK_WDIVIDER(x)		(((x) & 0x7f) << 24)
++#       define DENTIST_DPREFCLK_WDIVIDER_MASK		(0x7f << 24)
++#       define DENTIST_DPREFCLK_WDIVIDER_SHIFT		24
++
+ /* DCE 4.0 AFMT */
+ #define HDMI_CONTROL                         0x7030
+ #       define HDMI_KEEPOUT_MODE             (1 << 0)
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 87db64983ea8..5580568088bb 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -268,6 +268,7 @@ struct radeon_clock {
+ 	uint32_t current_dispclk;
+ 	uint32_t dp_extclk;
+ 	uint32_t max_pixel_clock;
++	uint32_t vco_freq;
+ };
+ 
+ /*
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 8f285244c839..de9a2ffcf5f7 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 	}
+ 
+ 	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+-	if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
++	if (((dev->pdev->device == 0x9802) ||
++	     (dev->pdev->device == 0x9805) ||
++	     (dev->pdev->device == 0x9806)) &&
+ 	    (dev->pdev->subsystem_vendor == 0x1734) &&
+ 	    (dev->pdev->subsystem_device == 0x11bd)) {
+ 		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 		}
+ 	}
+ 
+-	/* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+-	if ((dev->pdev->device == 0x9805) &&
+-	    (dev->pdev->subsystem_vendor == 0x1734) &&
+-	    (dev->pdev->subsystem_device == 0x11bd)) {
+-		if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+-			return false;
+-	}
+-
+ 	return true;
+ }
+ 
+@@ -1112,6 +1106,31 @@ union firmware_info {
+ 	ATOM_FIRMWARE_INFO_V2_2 info_22;
+ };
+ 
++union igp_info {
++	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
++	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
++	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
++	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
++	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
++};
++
++static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
++{
++	struct radeon_mode_info *mode_info = &rdev->mode_info;
++	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
++	union igp_info *igp_info;
++	u8 frev, crev;
++	u16 data_offset;
++
++	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
++			&frev, &crev, &data_offset)) {
++		igp_info = (union igp_info *)(mode_info->atom_context->bios +
++			data_offset);
++		rdev->clock.vco_freq =
++			le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
++	}
++}
++
+ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ {
+ 	struct radeon_device *rdev = dev->dev_private;
+@@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ 		rdev->mode_info.firmware_flags =
+ 			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+ 
++		if (ASIC_IS_DCE8(rdev))
++			rdev->clock.vco_freq =
++				le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
++		else if (ASIC_IS_DCE5(rdev))
++			rdev->clock.vco_freq = rdev->clock.current_dispclk;
++		else if (ASIC_IS_DCE41(rdev))
++			radeon_atombios_get_dentist_vco_freq(rdev);
++		else
++			rdev->clock.vco_freq = rdev->clock.current_dispclk;
++
++		if (rdev->clock.vco_freq == 0)
++			rdev->clock.vco_freq = 360000;	/* 3.6 GHz */
++
+ 		return true;
+ 	}
+ 
+ 	return false;
+ }
+ 
+-union igp_info {
+-	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+-};
+-
+ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+ {
+ 	struct radeon_mode_info *mode_info = &rdev->mode_info;
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index 2c02e99b5f95..b214663b370d 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ 	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+-	struct radeon_connector_atom_dig *dig_connector =
+-		radeon_connector->con_priv;
+ 
+ 	if (!dig || !dig->afmt)
+ 		return;
+@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ 		radeon_audio_write_speaker_allocation(encoder);
+ 		radeon_audio_write_sad_regs(encoder);
+ 		radeon_audio_write_latency_fields(encoder, mode);
+-		if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+-			radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+-		else
+-			radeon_audio_set_dto(encoder, dig_connector->dp_clock);
++		radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
+ 		radeon_audio_set_audio_packet(encoder);
+ 		radeon_audio_select_pin(encoder);
+ 
+@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
+ 	if (radeon_encoder->audio && radeon_encoder->audio->dpms)
+ 		radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
+ }
++
++unsigned int radeon_audio_decode_dfs_div(unsigned int div)
++{
++	if (div >= 8 && div < 64)
++		return (div - 8) * 25 + 200;
++	else if (div >= 64 && div < 96)
++		return (div - 64) * 50 + 1600;
++	else if (div >= 96 && div < 128)
++		return (div - 96) * 100 + 3200;
++	else
++		return 0;
++}
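radeon_audio_decode_dfs_div() above turns the raw DENTIST_DPREFCLK_WDIVIDER field into a divider scaled by 100, which the DCE8 and DCE4.1 callers earlier in this patch then apply as clock = clock * 100 / div. Working the three ranges through (nothing here beyond the arithmetic in the function itself):

	raw  8..63   ->  200..1575   dividers  2.00..15.75 in 0.25 steps
	raw 64..95   -> 1600..3150   dividers 16.0 ..31.5  in 0.5  steps
	raw 96..127  -> 3200..6300   dividers 32   ..63    in 1    steps

	example: raw 16 -> (16 - 8) * 25 + 200 = 400, so the DTO source
	clock becomes clock * 100 / 400, i.e. the reference divided by 4.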
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
+index 059cc3012062..5c70cceaa4a6 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.h
++++ b/drivers/gpu/drm/radeon/radeon_audio.h
+@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
+ void radeon_audio_mode_set(struct drm_encoder *encoder,
+ 	struct drm_display_mode *mode);
+ void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
++unsigned int radeon_audio_decode_dfs_div(unsigned int div);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index c566993a2ec3..d690df545b4d 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1744,6 +1744,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ 	}
+ 
+ 	drm_kms_helper_poll_enable(dev);
++	drm_helper_hpd_irq_event(dev);
+ 
+ 	/* set the power state here in case we are a PX system or headless */
+ 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 1eca0acac016..13767d21835f 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
+ 	struct drm_crtc *crtc = &radeon_crtc->base;
+ 	unsigned long flags;
+ 	int r;
+-	int vpos, hpos, stat, min_udelay;
++	int vpos, hpos, stat, min_udelay = 0;
++	unsigned repcnt = 4;
+ 	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+ 
+         down_read(&rdev->exclusive_lock);
+@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
+ 	 * In practice this won't execute very often unless on very fast
+ 	 * machines because the time window for this to happen is very small.
+ 	 */
+-	for (;;) {
++	while (radeon_crtc->enabled && repcnt--) {
+ 		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
+ 		 * start in hpos, and to the "fudged earlier" vblank start in
+ 		 * vpos.
+@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
+ 		/* Sleep at least until estimated real start of hw vblank */
+ 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ 		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
++		if (min_udelay > vblank->framedur_ns / 2000) {
++			/* Don't wait ridiculously long - something is wrong */
++			repcnt = 0;
++			break;
++		}
+ 		usleep_range(min_udelay, 2 * min_udelay);
+ 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ 	};
+ 
++	if (!repcnt)
++		DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
++				 "framedur %d, linedur %d, stat %d, vpos %d, "
++				 "hpos %d\n", work->crtc_id, min_udelay,
++				 vblank->framedur_ns / 1000,
++				 vblank->linedur_ns / 1000, stat, vpos, hpos);
++
+ 	/* do the flip (mmio) */
+ 	radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+ 
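To put numbers on the new bail-out above (purely illustrative; assuming a 1080p@60-ish timing with linedur_ns around 14800 and framedur_ns around 16666667):

	framedur_ns / 2000         ~ 8333 us   (half a frame)
	max(linedur_ns / 1000, 5)  = 14 us per line

	hpos = -100  ->  min_udelay = 101 * 14 = 1414 us   (normal, sleep)
	hpos = -700  ->  min_udelay = 701 * 14 = 9814 us   (> 8333, give up)

so a scanout position that claims to be more than roughly half a frame away from vblank start is treated as bogus (for example a crtc that got switched off underneath us) and logged, instead of being slept on across the four retries.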
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 84d45633d28c..fb6ad143873f 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -33,6 +33,7 @@
+ #include <linux/slab.h>
+ #include <drm/drmP.h>
+ #include <drm/radeon_drm.h>
++#include <drm/drm_cache.h>
+ #include "radeon.h"
+ #include "radeon_trace.h"
+ 
+@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
+ 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+ 			      "better performance thanks to write-combining\n");
+ 	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
++#else
++	/* For architectures that don't support WC memory,
++	 * mask out the WC flag from the BO
++	 */
++	if (!drm_arch_can_wc_memory())
++		bo->flags &= ~RADEON_GEM_GTT_WC;
+ #endif
+ 
+ 	radeon_ttm_placement_from_domain(bo, domain);
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 59abebd6b5dc..2081a60d08fb 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1075,8 +1075,6 @@ force:
+ 
+ 	/* update display watermarks based on new power state */
+ 	radeon_bandwidth_update(rdev);
+-	/* update displays */
+-	radeon_dpm_display_configuration_changed(rdev);
+ 
+ 	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ 	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+@@ -1097,6 +1095,9 @@ force:
+ 
+ 	radeon_dpm_post_set_power_state(rdev);
+ 
++	/* update displays */
++	radeon_dpm_display_configuration_changed(rdev);
++
+ 	if (rdev->asic->dpm.force_performance_level) {
+ 		if (rdev->pm.dpm.thermal_active) {
+ 			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
+diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
+index c507896aca45..197b157b73d0 100644
+--- a/drivers/gpu/drm/radeon/radeon_sa.c
++++ b/drivers/gpu/drm/radeon/radeon_sa.c
+@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
+ 			/* see if we can skip over some allocations */
+ 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+ 
++		for (i = 0; i < RADEON_NUM_RINGS; ++i)
++			radeon_fence_ref(fences[i]);
++
+ 		spin_unlock(&sa_manager->wq.lock);
+ 		r = radeon_fence_wait_any(rdev, fences, false);
++		for (i = 0; i < RADEON_NUM_RINGS; ++i)
++			radeon_fence_unref(&fences[i]);
+ 		spin_lock(&sa_manager->wq.lock);
+ 		/* if we have nothing to wait for block */
+ 		if (r == -ENOENT) {
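The radeon_fence_ref()/radeon_fence_unref() pair added above follows the usual rule for waiting on objects looked up under a lock: take a reference while wq.lock is still held, drop the lock, wait, then drop the reference after re-taking the lock, so a fence freed by another thread during radeon_fence_wait_any() cannot be used after free. As a generic sketch (placeholder names, not new driver code):

	spin_lock(&mgr->lock);
	obj = pick_candidate(mgr);   /* only valid while the lock is held */
	get_ref(obj);                /* keep it alive across the unlock   */
	spin_unlock(&mgr->lock);

	wait_on(obj);                /* may sleep; other threads may drop
	                              * their own references meanwhile    */

	spin_lock(&mgr->lock);
	put_ref(obj);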
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index e34307459e50..e06ac546a90f 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+ 						       0, PAGE_SIZE,
+ 						       PCI_DMA_BIDIRECTIONAL);
+ 		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+-			while (--i) {
++			while (i--) {
+ 				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ 				gtt->ttm.dma_address[i] = 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index 48d97c040f49..3979632b9225 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -455,15 +455,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 
+ 	if (soffset) {
+ 		/* make sure object fit at this offset */
+-		eoffset = soffset + size;
++		eoffset = soffset + size - 1;
+ 		if (soffset >= eoffset) {
+ 			r = -EINVAL;
+ 			goto error_unreserve;
+ 		}
+ 
+ 		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+-		if (last_pfn > rdev->vm_manager.max_pfn) {
+-			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
++		if (last_pfn >= rdev->vm_manager.max_pfn) {
++			dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
+ 				last_pfn, rdev->vm_manager.max_pfn);
+ 			r = -EINVAL;
+ 			goto error_unreserve;
+@@ -478,7 +478,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 	eoffset /= RADEON_GPU_PAGE_SIZE;
+ 	if (soffset || eoffset) {
+ 		struct interval_tree_node *it;
+-		it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
++		it = interval_tree_iter_first(&vm->va, soffset, eoffset);
+ 		if (it && it != &bo_va->it) {
+ 			struct radeon_bo_va *tmp;
+ 			tmp = container_of(it, struct radeon_bo_va, it);
+@@ -518,7 +518,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 	if (soffset || eoffset) {
+ 		spin_lock(&vm->status_lock);
+ 		bo_va->it.start = soffset;
+-		bo_va->it.last = eoffset - 1;
++		bo_va->it.last = eoffset;
+ 		list_add(&bo_va->vm_status, &vm->cleared);
+ 		spin_unlock(&vm->status_lock);
+ 		interval_tree_insert(&bo_va->it, &vm->va);
+@@ -888,7 +888,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
+ 	unsigned i;
+ 
+ 	start >>= radeon_vm_block_size;
+-	end >>= radeon_vm_block_size;
++	end = (end - 1) >> radeon_vm_block_size;
+ 
+ 	for (i = start; i <= end; ++i)
+ 		radeon_bo_fence(vm->page_tables[i].bo, fence, true);
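The radeon_vm_bo_set_addr() changes above move eoffset and it.last to an inclusive last address, which is what the >= max_pfn comparison and the (end - 1) shift in radeon_vm_fence_pts() then have to match. Purely as an illustration of the convention (made-up numbers, RADEON_GPU_PAGE_SIZE = 4096):

	soffset = 0x100000, size = 0x3000 (three pages)

	eoffset   = 0x100000 + 0x3000 - 1 = 0x102fff   (last byte covered)
	last_pfn  = 0x102fff / 4096       = 0x102      (last page covered)
	it.start .. it.last = 0x100 .. 0x102           (inclusive interval)

so the interval-tree lookup, the max_pfn limit check and the page-table fencing all agree on which page is the last one touched, without the scattered -1 adjustments the old exclusive convention needed.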
+diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
+index 4c4a7218a3bd..d1a7b58dd291 100644
+--- a/drivers/gpu/drm/radeon/sid.h
++++ b/drivers/gpu/drm/radeon/sid.h
+@@ -915,6 +915,11 @@
+ #define DCCG_AUDIO_DTO1_PHASE                           0x05c0
+ #define DCCG_AUDIO_DTO1_MODULE                          0x05c4
+ 
++#define DENTIST_DISPCLK_CNTL				0x0490
++#	define DENTIST_DPREFCLK_WDIVIDER(x)		(((x) & 0x7f) << 24)
++#	define DENTIST_DPREFCLK_WDIVIDER_MASK		(0x7f << 24)
++#	define DENTIST_DPREFCLK_WDIVIDER_SHIFT		24
++
+ #define AFMT_AUDIO_SRC_CONTROL                          0x713c
+ #define		AFMT_AUDIO_SRC_SELECT(x)		(((x) & 7) << 0)
+ /* AFMT_AUDIO_SRC_SELECT
+diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
+index 07a0d378e122..a01efe39a820 100644
+--- a/drivers/gpu/drm/radeon/vce_v1_0.c
++++ b/drivers/gpu/drm/radeon/vce_v1_0.c
+@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
+ 		return -EINVAL;
+ 	}
+ 
+-	for (i = 0; i < sign->num; ++i) {
+-		if (sign->val[i].chip_id == chip_id)
++	for (i = 0; i < le32_to_cpu(sign->num); ++i) {
++		if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
+ 			break;
+ 	}
+ 
+-	if (i == sign->num)
++	if (i == le32_to_cpu(sign->num))
+ 		return -EINVAL;
+ 
+ 	data += (256 - 64) / 4;
+@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
+ 	data[1] = sign->val[i].nonce[1];
+ 	data[2] = sign->val[i].nonce[2];
+ 	data[3] = sign->val[i].nonce[3];
+-	data[4] = sign->len + 64;
++	data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
+ 
+ 	memset(&data[5], 0, 44);
+ 	memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
+ 
+-	data += data[4] / 4;
++	data += le32_to_cpu(data[4]) / 4;
+ 	data[0] = sign->val[i].sigval[0];
+ 	data[1] = sign->val[i].sigval[1];
+ 	data[2] = sign->val[i].sigval[2];
+ 	data[3] = sign->val[i].sigval[3];
+ 
+-	rdev->vce.keyselect = sign->val[i].keyselect;
++	rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+index 6377e8151000..67cebb23c940 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+@@ -247,7 +247,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
+ {
+ 	struct vmw_cmdbuf_man *man = header->man;
+ 
+-	BUG_ON(!spin_is_locked(&man->lock));
++	lockdep_assert_held_once(&man->lock);
+ 
+ 	if (header->inline_space) {
+ 		vmw_cmdbuf_header_inline_free(header);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index c49812b80dd0..24fb348a44e1 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -25,6 +25,7 @@
+  *
+  **************************************************************************/
+ #include <linux/module.h>
++#include <linux/console.h>
+ 
+ #include <drm/drmP.h>
+ #include "vmwgfx_drv.h"
+@@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ static int __init vmwgfx_init(void)
+ {
+ 	int ret;
++
++#ifdef CONFIG_VGA_CONSOLE
++	if (vgacon_text_force())
++		return -EINVAL;
++#endif
++
+ 	ret = drm_pci_init(&driver, &vmw_pci_driver);
+ 	if (ret)
+ 		DRM_ERROR("Failed initializing DRM.\n");
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 9b4bb9e74d73..7c2e118a77b0 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -763,21 +763,25 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
+ 	uint32_t format;
+ 	struct drm_vmw_size content_base_size;
+ 	struct vmw_resource *res;
++	unsigned int bytes_pp;
+ 	int ret;
+ 
+ 	switch (mode_cmd->depth) {
+ 	case 32:
+ 	case 24:
+ 		format = SVGA3D_X8R8G8B8;
++		bytes_pp = 4;
+ 		break;
+ 
+ 	case 16:
+ 	case 15:
+ 		format = SVGA3D_R5G6B5;
++		bytes_pp = 2;
+ 		break;
+ 
+ 	case 8:
+ 		format = SVGA3D_P8;
++		bytes_pp = 1;
+ 		break;
+ 
+ 	default:
+@@ -785,7 +789,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
+ 		return -EINVAL;
+ 	}
+ 
+-	content_base_size.width  = mode_cmd->width;
++	content_base_size.width  = mode_cmd->pitch / bytes_pp;
+ 	content_base_size.height = mode_cmd->height;
+ 	content_base_size.depth  = 1;
+ 
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index c4dcab048cb8..9098f13f2f44 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -630,10 +630,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
+ 	 *    on the ring. We will not signal if more data is
+ 	 *    to be placed.
+ 	 *
++	 * Based on the channel signal state, we will decide
++	 * which signaling policy will be applied.
++	 *
+ 	 * If we cannot write to the ring-buffer; signal the host
+ 	 * even if we may not have written anything. This is a rare
+ 	 * enough condition that it should not matter.
+ 	 */
++
++	if (channel->signal_policy)
++		signal = true;
++	else
++		kick_q = true;
++
+ 	if (((ret == 0) && kick_q && signal) || (ret))
+ 		vmbus_setevent(channel);
+ 
+@@ -733,10 +742,19 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
+ 	 *    on the ring. We will not signal if more data is
+ 	 *    to be placed.
+ 	 *
++	 * Based on the channel signal state, we will decide
++	 * which signaling policy will be applied.
++	 *
+ 	 * If we cannot write to the ring-buffer; signal the host
+ 	 * even if we may not have written anything. This is a rare
+ 	 * enough condition that it should not matter.
+ 	 */
++
++	if (channel->signal_policy)
++		signal = true;
++	else
++		kick_q = true;
++
+ 	if (((ret == 0) && kick_q && signal) || (ret))
+ 		vmbus_setevent(channel);
+ 
+diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
+index f155b8380481..2b3105c8aed3 100644
+--- a/drivers/hwmon/ads1015.c
++++ b/drivers/hwmon/ads1015.c
+@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
+ 	struct ads1015_data *data = i2c_get_clientdata(client);
+ 	unsigned int pga = data->channel_data[channel].pga;
+ 	int fullscale = fullscale_table[pga];
+-	const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
++	const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
+ 
+ 	return DIV_ROUND_CLOSEST(reg * fullscale, mask);
+ }
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index c8487894b312..c43318d3416e 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -932,6 +932,17 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+ 	{
+ 		/*
++		 * CPU fan speed going up and down on Dell Studio XPS 8000
++		 * for unknown reasons.
++		 */
++		.ident = "Dell Studio XPS 8000",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
++		},
++	},
++	{
++		/*
+ 		 * CPU fan speed going up and down on Dell Studio XPS 8100
+ 		 * for unknown reasons.
+ 		 */
+diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
+index 82de3deeb18a..685568b1236d 100644
+--- a/drivers/hwmon/gpio-fan.c
++++ b/drivers/hwmon/gpio-fan.c
+@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
+ 				  unsigned long *state)
+ {
+ 	struct gpio_fan_data *fan_data = cdev->devdata;
+-	int r;
+ 
+ 	if (!fan_data)
+ 		return -EINVAL;
+ 
+-	r = get_fan_speed_index(fan_data);
+-	if (r < 0)
+-		return r;
+-
+-	*state = r;
++	*state = fan_data->speed_index;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
+index e25492137d8b..93738dfbf631 100644
+--- a/drivers/hwtracing/coresight/coresight.c
++++ b/drivers/hwtracing/coresight/coresight.c
+@@ -548,7 +548,7 @@ static int coresight_name_match(struct device *dev, void *data)
+ 	to_match = data;
+ 	i_csdev = to_coresight_device(dev);
+ 
+-	if (!strcmp(to_match, dev_name(&i_csdev->dev)))
++	if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
+ 		return 1;
+ 
+ 	return 0;
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index f62d69799a9c..27fa0cb09538 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	switch (dev->device) {
+ 	case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
+ 	case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
++	case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
++	case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
+ 	case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
+ 		priv->features |= FEATURE_I2C_BLOCK_READ;
+ 		priv->features |= FEATURE_IRQ;
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 0a26dd6d9b19..d6d2b3582910 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -782,11 +782,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
+ 	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
+ 
+ 	/* Check if the device started its remove_one */
+-	spin_lock_irq(&cm.lock);
++	spin_lock_irqsave(&cm.lock, flags);
+ 	if (!cm_dev->going_down)
+ 		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
+ 				   msecs_to_jiffies(wait_time));
+-	spin_unlock_irq(&cm.lock);
++	spin_unlock_irqrestore(&cm.lock, flags);
+ 
+ 	cm_id_priv->timewait_info = NULL;
+ }
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 2d762a2ecd81..17a15c56028c 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -453,7 +453,7 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
+ 	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
+ 		return ret;
+ 
+-	if (dev_type == ARPHRD_ETHER)
++	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
+ 		ndev = dev_get_by_index(&init_net, bound_if_index);
+ 
+ 	ret = ib_find_cached_gid_by_port(device, gid, port, ndev, NULL);
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+index cb78b1e9bcd9..f504ba73e5dc 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
+ 	error = l2t_send(tdev, skb, l2e);
+ 	if (error < 0)
+ 		kfree_skb(skb);
+-	return error;
++	return error < 0 ? error : 0;
+ }
+ 
+ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+ 	error = cxgb3_ofld_send(tdev, skb);
+ 	if (error < 0)
+ 		kfree_skb(skb);
+-	return error;
++	return error < 0 ? error : 0;
+ }
+ 
+ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 7e97cb55a6bf..c4e091528390 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -275,7 +275,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ 	props->max_sge = min(max_rq_sg, max_sq_sg);
+ 	props->max_sge_rd = props->max_sge;
+ 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+-	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
++	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
+ 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+ 	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
+ 	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
+diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
+index 40f85bb3e0d3..3eff35c2d453 100644
+--- a/drivers/infiniband/hw/qib/qib_qp.c
++++ b/drivers/infiniband/hw/qib/qib_qp.c
+@@ -100,9 +100,10 @@ static u32 credit_table[31] = {
+ 	32768                   /* 1E */
+ };
+ 
+-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
++static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
++			 gfp_t gfp)
+ {
+-	unsigned long page = get_zeroed_page(GFP_KERNEL);
++	unsigned long page = get_zeroed_page(gfp);
+ 
+ 	/*
+ 	 * Free the page if someone raced with us installing it.
+@@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
+  */
+ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+-		     enum ib_qp_type type, u8 port)
++		     enum ib_qp_type type, u8 port, gfp_t gfp)
+ {
+ 	u32 i, offset, max_scan, qpn;
+ 	struct qpn_map *map;
+@@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+ 	max_scan = qpt->nmaps - !offset;
+ 	for (i = 0;;) {
+ 		if (unlikely(!map->page)) {
+-			get_map_page(qpt, map);
++			get_map_page(qpt, map, gfp);
+ 			if (unlikely(!map->page))
+ 				break;
+ 		}
+@@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ 	size_t sz;
+ 	size_t sg_list_sz;
+ 	struct ib_qp *ret;
++	gfp_t gfp;
++
+ 
+ 	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
+ 	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
+-	    init_attr->create_flags) {
+-		ret = ERR_PTR(-EINVAL);
+-		goto bail;
+-	}
++	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
++		return ERR_PTR(-EINVAL);
++
++	/* GFP_NOIO is applicable in RC QPs only */
++	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
++	    init_attr->qp_type != IB_QPT_RC)
++		return ERR_PTR(-EINVAL);
++
++	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
++			GFP_NOIO : GFP_KERNEL;
+ 
+ 	/* Check receive queue parameters if no SRQ is specified. */
+ 	if (!init_attr->srq) {
+@@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ 		sz = sizeof(struct qib_sge) *
+ 			init_attr->cap.max_send_sge +
+ 			sizeof(struct qib_swqe);
+-		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
++		swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
++				gfp, PAGE_KERNEL);
+ 		if (swq == NULL) {
+ 			ret = ERR_PTR(-ENOMEM);
+ 			goto bail;
+@@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ 		} else if (init_attr->cap.max_recv_sge > 1)
+ 			sg_list_sz = sizeof(*qp->r_sg_list) *
+ 				(init_attr->cap.max_recv_sge - 1);
+-		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
++		qp = kzalloc(sz + sg_list_sz, gfp);
+ 		if (!qp) {
+ 			ret = ERR_PTR(-ENOMEM);
+ 			goto bail_swq;
+ 		}
+ 		RCU_INIT_POINTER(qp->next, NULL);
+-		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
++		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
+ 		if (!qp->s_hdr) {
+ 			ret = ERR_PTR(-ENOMEM);
+ 			goto bail_qp;
+@@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ 			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ 			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ 				sizeof(struct qib_rwqe);
+-			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
+-						   qp->r_rq.size * sz);
++			if (gfp != GFP_NOIO)
++				qp->r_rq.wq = vmalloc_user(
++						sizeof(struct qib_rwq) +
++						qp->r_rq.size * sz);
++			else
++				qp->r_rq.wq = __vmalloc(
++						sizeof(struct qib_rwq) +
++						qp->r_rq.size * sz,
++						gfp, PAGE_KERNEL);
++
+ 			if (!qp->r_rq.wq) {
+ 				ret = ERR_PTR(-ENOMEM);
+ 				goto bail_qp;
+@@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ 		dev = to_idev(ibpd->device);
+ 		dd = dd_from_dev(dev);
+ 		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
+-				init_attr->port_num);
++				init_attr->port_num, gfp);
+ 		if (err < 0) {
+ 			ret = ERR_PTR(err);
+ 			vfree(qp->r_rq.wq);
+diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+index f8ea069a3eaf..b2fb5286dbd9 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	struct qib_ibdev *dev = to_idev(ibqp->device);
+ 	struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+ 	struct qib_mcast *mcast = NULL;
+-	struct qib_mcast_qp *p, *tmp;
++	struct qib_mcast_qp *p, *tmp, *delp = NULL;
+ 	struct rb_node *n;
+ 	int last = 0;
+ 	int ret;
+ 
+-	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+-		ret = -EINVAL;
+-		goto bail;
+-	}
++	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
++		return -EINVAL;
+ 
+ 	spin_lock_irq(&ibp->lock);
+ 
+@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	while (1) {
+ 		if (n == NULL) {
+ 			spin_unlock_irq(&ibp->lock);
+-			ret = -EINVAL;
+-			goto bail;
++			return -EINVAL;
+ 		}
+ 
+ 		mcast = rb_entry(n, struct qib_mcast, rb_node);
+@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 		 */
+ 		list_del_rcu(&p->list);
+ 		mcast->n_attached--;
++		delp = p;
+ 
+ 		/* If this was the last attached QP, remove the GID too. */
+ 		if (list_empty(&mcast->qp_list)) {
+@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	}
+ 
+ 	spin_unlock_irq(&ibp->lock);
++	/* QP not attached */
++	if (!delp)
++		return -EINVAL;
++	/*
++	 * Wait for any list walkers to finish before freeing the
++	 * list element.
++	 */
++	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
++	qib_mcast_qp_free(delp);
+ 
+-	if (p) {
+-		/*
+-		 * Wait for any list walkers to finish before freeing the
+-		 * list element.
+-		 */
+-		wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+-		qib_mcast_qp_free(p);
+-	}
+ 	if (last) {
+ 		atomic_dec(&mcast->refcount);
+ 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 		dev->n_mcast_grps_allocated--;
+ 		spin_unlock_irq(&dev->n_mcast_grps_lock);
+ 	}
+-
+-	ret = 0;
+-
+-bail:
+-	return ret;
++	return 0;
+ }
+ 
+ int qib_mcast_tree_empty(struct qib_ibport *ibp)
+diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
+index b12a5d58546f..37199b9b2cfa 100644
+--- a/drivers/irqchip/irq-atmel-aic-common.c
++++ b/drivers/irqchip/irq-atmel-aic-common.c
+@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
+ 	    priority > AT91_AIC_IRQ_MAX_PRIORITY)
+ 		return -EINVAL;
+ 
+-	*val &= AT91_AIC_PRIOR;
++	*val &= ~AT91_AIC_PRIOR;
+ 	*val |= priority;
+ 
+ 	return 0;
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index e23d1d18f9d6..a159529f9d53 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -597,11 +597,6 @@ static void its_unmask_irq(struct irq_data *d)
+ 	lpi_set_config(d, true);
+ }
+ 
+-static void its_eoi_irq(struct irq_data *d)
+-{
+-	gic_write_eoir(d->hwirq);
+-}
+-
+ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ 			    bool force)
+ {
+@@ -638,7 +633,7 @@ static struct irq_chip its_irq_chip = {
+ 	.name			= "ITS",
+ 	.irq_mask		= its_mask_irq,
+ 	.irq_unmask		= its_unmask_irq,
+-	.irq_eoi		= its_eoi_irq,
++	.irq_eoi		= irq_chip_eoi_parent,
+ 	.irq_set_affinity	= its_set_affinity,
+ 	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
+ };
+diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
+index c22e2d40cb30..efe50845939d 100644
+--- a/drivers/irqchip/irq-mxs.c
++++ b/drivers/irqchip/irq-mxs.c
+@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
+ 		writel(0, icoll_priv.intr + i);
+ 
+ 	icoll_add_domain(np, ASM9260_NUM_IRQS);
++	set_handle_irq(icoll_handle_irq);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
+index 8587d0f8d8c0..f6cb1b8bb981 100644
+--- a/drivers/irqchip/irq-omap-intc.c
++++ b/drivers/irqchip/irq-omap-intc.c
+@@ -47,6 +47,7 @@
+ #define INTC_ILR0		0x0100
+ 
+ #define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
++#define SPURIOUSIRQ_MASK	(0x1ffffff << 7)
+ #define INTCPS_NR_ILR_REGS	128
+ #define INTCPS_NR_MIR_REGS	4
+ 
+@@ -330,11 +331,35 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
+ static asmlinkage void __exception_irq_entry
+ omap_intc_handle_irq(struct pt_regs *regs)
+ {
++	extern unsigned long irq_err_count;
+ 	u32 irqnr;
+ 
+ 	irqnr = intc_readl(INTC_SIR);
++
++	/*
++	 * A spurious IRQ can result if interrupt that triggered the
++	 * sorting is no longer active during the sorting (10 INTC
++	 * functional clock cycles after interrupt assertion). Or a
++	 * change in interrupt mask affected the result during sorting
++	 * time. There is no special handling required except ignoring
++	 * the SIR register value just read and retrying.
++	 * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
++	 *
++	 * Many a times, a spurious interrupt situation has been fixed
++	 * by adding a flush for the posted write acking the IRQ in
++	 * the device driver. Typically, this is going be the device
++	 * driver whose interrupt was handled just before the spurious
++	 * IRQ occurred. Pay attention to those device drivers if you
++	 * run into hitting the spurious IRQ condition below.
++	 */
++	if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
++		pr_err_once("%s: spurious irq!\n", __func__);
++		irq_err_count++;
++		omap_ack_irq(NULL);
++		return;
++	}
++
+ 	irqnr &= ACTIVEIRQ_MASK;
+-	WARN_ONCE(!irqnr, "Spurious IRQ ?\n");
+ 	handle_domain_irq(domain, irqnr, regs);
+ }
+ 
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 83392f856dfd..22b9e34ceb75 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
+ 	do {
+ 		ret = btree_root(gc_root, c, &op, &writes, &stats);
+ 		closure_sync(&writes);
++		cond_resched();
+ 
+ 		if (ret && ret != -EAGAIN)
+ 			pr_warn("gc failed!");
+@@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ 		rw_lock(true, b, b->level);
+ 
+ 		if (b->key.ptr[0] != btree_ptr ||
+-		    b->seq != seq + 1)
++                   b->seq != seq + 1) {
++                       op->lock = b->level;
+ 			goto out;
++               }
+ 	}
+ 
+ 	SET_KEY_PTRS(check_key, 1);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 679a093a3bf6..8d0ead98eb6e 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -685,6 +685,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
+ 	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
+ 	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
+ 	     "Couldn't create device <-> cache set symlinks");
++
++	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
+ }
+ 
+ static void bcache_device_detach(struct bcache_device *d)
+@@ -847,8 +849,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
+ 	buf[SB_LABEL_SIZE] = '\0';
+ 	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
+ 
+-	if (atomic_xchg(&dc->running, 1))
++	if (atomic_xchg(&dc->running, 1)) {
++		kfree(env[1]);
++		kfree(env[2]);
+ 		return;
++	}
+ 
+ 	if (!d->c &&
+ 	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
+@@ -1933,6 +1938,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ 			else
+ 				err = "device busy";
+ 			mutex_unlock(&bch_register_lock);
++			if (attr == &ksysfs_register_quiet)
++				goto out;
+ 		}
+ 		goto err;
+ 	}
+@@ -1971,8 +1978,7 @@ out:
+ err_close:
+ 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ err:
+-	if (attr != &ksysfs_register_quiet)
+-		pr_info("error opening %s: %s", path, err);
++	pr_info("error opening %s: %s", path, err);
+ 	ret = -EINVAL;
+ 	goto out;
+ }
+@@ -2066,8 +2072,10 @@ static int __init bcache_init(void)
+ 	closure_debug_init();
+ 
+ 	bcache_major = register_blkdev(0, "bcache");
+-	if (bcache_major < 0)
++	if (bcache_major < 0) {
++		unregister_reboot_notifier(&reboot);
+ 		return bcache_major;
++	}
+ 
+ 	if (!(bcache_wq = create_workqueue("bcache")) ||
+ 	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index b23f88d9f18c..b9346cd9cda1 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+ 
+ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+ {
++	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
++
++	BUG_ON(KEY_INODE(k) != dc->disk.id);
++
+ 	return KEY_DIRTY(k);
+ }
+ 
+@@ -372,11 +376,24 @@ next:
+ 	}
+ }
+ 
++/*
++ * Returns true if we scanned the entire disk
++ */
+ static bool refill_dirty(struct cached_dev *dc)
+ {
+ 	struct keybuf *buf = &dc->writeback_keys;
++	struct bkey start = KEY(dc->disk.id, 0, 0);
+ 	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+-	bool searched_from_start = false;
++	struct bkey start_pos;
++
++	/*
++	 * make sure keybuf pos is inside the range for this disk - at bringup
++	 * we might not be attached yet so this disk's inode nr isn't
++	 * initialized then
++	 */
++	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
++	    bkey_cmp(&buf->last_scanned, &end) > 0)
++		buf->last_scanned = start;
+ 
+ 	if (dc->partial_stripes_expensive) {
+ 		refill_full_stripes(dc);
+@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
+ 			return false;
+ 	}
+ 
+-	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+-		buf->last_scanned = KEY(dc->disk.id, 0, 0);
+-		searched_from_start = true;
+-	}
+-
++	start_pos = buf->last_scanned;
+ 	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+ 
+-	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
++	if (bkey_cmp(&buf->last_scanned, &end) < 0)
++		return false;
++
++	/*
++	 * If we get to the end start scanning again from the beginning, and
++	 * only scan up to where we initially started scanning from:
++	 */
++	buf->last_scanned = start;
++	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
++
++	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
+ }
+ 
+ static int bch_writeback_thread(void *arg)
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 0a9dab187b79..073a042aed24 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
+ 
+ static inline void bch_writeback_queue(struct cached_dev *dc)
+ {
+-	wake_up_process(dc->writeback_thread);
++	if (!IS_ERR_OR_NULL(dc->writeback_thread))
++		wake_up_process(dc->writeback_thread);
+ }
+ 
+ static inline void bch_writeback_add(struct cached_dev *dc)
+diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
+index fae34e7a0b1e..12b5216c2cfe 100644
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -69,7 +69,7 @@ struct dm_exception_store_type {
+ 	 * Update the metadata with this exception.
+ 	 */
+ 	void (*commit_exception) (struct dm_exception_store *store,
+-				  struct dm_exception *e,
++				  struct dm_exception *e, int valid,
+ 				  void (*callback) (void *, int success),
+ 				  void *callback_context);
+ 
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 3164b8bce294..4d3909393f2c 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -695,7 +695,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ }
+ 
+ static void persistent_commit_exception(struct dm_exception_store *store,
+-					struct dm_exception *e,
++					struct dm_exception *e, int valid,
+ 					void (*callback) (void *, int success),
+ 					void *callback_context)
+ {
+@@ -704,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
+ 	struct core_exception ce;
+ 	struct commit_callback *cb;
+ 
++	if (!valid)
++		ps->valid = 0;
++
+ 	ce.old_chunk = e->old_chunk;
+ 	ce.new_chunk = e->new_chunk;
+ 	write_exception(ps, ps->current_committed++, &ce);
+diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
+index 9b7c8c8049d6..4d50a12cf00c 100644
+--- a/drivers/md/dm-snap-transient.c
++++ b/drivers/md/dm-snap-transient.c
+@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
+ }
+ 
+ static void transient_commit_exception(struct dm_exception_store *store,
+-				       struct dm_exception *e,
++				       struct dm_exception *e, int valid,
+ 				       void (*callback) (void *, int success),
+ 				       void *callback_context)
+ {
+ 	/* Just succeed */
+-	callback(callback_context, 1);
++	callback(callback_context, valid);
+ }
+ 
+ static void transient_usage(struct dm_exception_store *store,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index c06b74e91cd6..61f184ad081c 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1438,8 +1438,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
+ 	dm_table_event(s->ti->table);
+ }
+ 
+-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
++static void pending_complete(void *context, int success)
+ {
++	struct dm_snap_pending_exception *pe = context;
+ 	struct dm_exception *e;
+ 	struct dm_snapshot *s = pe->snap;
+ 	struct bio *origin_bios = NULL;
+@@ -1509,24 +1510,13 @@ out:
+ 	free_pending_exception(pe);
+ }
+ 
+-static void commit_callback(void *context, int success)
+-{
+-	struct dm_snap_pending_exception *pe = context;
+-
+-	pending_complete(pe, success);
+-}
+-
+ static void complete_exception(struct dm_snap_pending_exception *pe)
+ {
+ 	struct dm_snapshot *s = pe->snap;
+ 
+-	if (unlikely(pe->copy_error))
+-		pending_complete(pe, 0);
+-
+-	else
+-		/* Update the metadata if we are persistent */
+-		s->store->type->commit_exception(s->store, &pe->e,
+-						 commit_callback, pe);
++	/* Update the metadata if we are persistent */
++	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
++					 pending_complete, pe);
+ }
+ 
+ /*
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 63903a5a5d9e..a1cc797fe88f 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -3453,8 +3453,8 @@ static void pool_postsuspend(struct dm_target *ti)
+ 	struct pool_c *pt = ti->private;
+ 	struct pool *pool = pt->pool;
+ 
+-	cancel_delayed_work(&pool->waker);
+-	cancel_delayed_work(&pool->no_space_timeout);
++	cancel_delayed_work_sync(&pool->waker);
++	cancel_delayed_work_sync(&pool->no_space_timeout);
+ 	flush_workqueue(pool->wq);
+ 	(void) commit(pool);
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 5df40480228b..dd834927bc66 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
+ 
+ 	if (clone)
+ 		free_rq_clone(clone);
++	else if (!tio->md->queue->mq_ops)
++		free_rq_tio(tio);
+ }
+ 
+ /*
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index fca6dbcf9a47..7e44005595c1 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -152,12 +152,9 @@ static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
+ 
+ static int brb_pop(struct bop_ring_buffer *brb)
+ {
+-	struct block_op *bop;
+-
+ 	if (brb_empty(brb))
+ 		return -ENODATA;
+ 
+-	bop = brb->bops + brb->begin;
+ 	brb->begin = brb_next(brb, brb->begin);
+ 
+ 	return 0;
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index c38ef1a72b4a..e2a3833170e3 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -2313,9 +2313,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
+ 		dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
+ 				 __func__, c->delivery_system, fe->ops.info.type);
+ 
+-		/* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
+-		 * do it, it is done for it. */
+-		info->caps |= FE_CAN_INVERSION_AUTO;
++		/* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
++		if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
++			info->caps |= FE_CAN_INVERSION_AUTO;
+ 		err = 0;
+ 		break;
+ 	}
+diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
+index 0e209b56c76c..c6abeb4fba9d 100644
+--- a/drivers/media/dvb-frontends/tda1004x.c
++++ b/drivers/media/dvb-frontends/tda1004x.c
+@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
+ {
+ 	struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
+ 	struct tda1004x_state* state = fe->demodulator_priv;
++	int status;
+ 
+ 	dprintk("%s\n", __func__);
+ 
++	status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
++	if (status == -1)
++		return -EIO;
++
++	/* Only update the properties cache if device is locked */
++	if (!(status & 8))
++		return 0;
++
+ 	// inversion status
+ 	fe_params->inversion = INVERSION_OFF;
+ 	if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
+diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
+index 7830aef3db45..40f77685cc4a 100644
+--- a/drivers/media/rc/sunxi-cir.c
++++ b/drivers/media/rc/sunxi-cir.c
+@@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
+ 	if (!ir)
+ 		return -ENOMEM;
+ 
++	spin_lock_init(&ir->ir_lock);
++
+ 	if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
+ 		ir->fifo_size = 64;
+ 	else
+diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
+index ce157edd45fa..0e1ca2b00e61 100644
+--- a/drivers/media/tuners/si2157.c
++++ b/drivers/media/tuners/si2157.c
+@@ -168,6 +168,7 @@ static int si2157_init(struct dvb_frontend *fe)
+ 		len = fw->data[fw->size - remaining];
+ 		if (len > SI2157_ARGLEN) {
+ 			dev_err(&client->dev, "Bad firmware length\n");
++			ret = -EINVAL;
+ 			goto err_release_firmware;
+ 		}
+ 		memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
+diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
+index 146071b8e116..bfff1d1c70ab 100644
+--- a/drivers/media/usb/gspca/ov534.c
++++ b/drivers/media/usb/gspca/ov534.c
+@@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ 	struct v4l2_fract *tpf = &cp->timeperframe;
+ 	struct sd *sd = (struct sd *) gspca_dev;
+ 
+-	/* Set requested framerate */
+-	sd->frame_rate = tpf->denominator / tpf->numerator;
++	if (tpf->numerator == 0 || tpf->denominator == 0)
++		/* Set default framerate */
++		sd->frame_rate = 30;
++	else
++		/* Set requested framerate */
++		sd->frame_rate = tpf->denominator / tpf->numerator;
++
+ 	if (gspca_dev->streaming)
+ 		set_frame_rate(gspca_dev);
+ 
+diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
+index c70ff406b07a..c028a5c2438e 100644
+--- a/drivers/media/usb/gspca/topro.c
++++ b/drivers/media/usb/gspca/topro.c
+@@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ 	struct v4l2_fract *tpf = &cp->timeperframe;
+ 	int fr, i;
+ 
+-	sd->framerate = tpf->denominator / tpf->numerator;
++	if (tpf->numerator == 0 || tpf->denominator == 0)
++		sd->framerate = 30;
++	else
++		sd->framerate = tpf->denominator / tpf->numerator;
++
+ 	if (gspca_dev->streaming)
+ 		setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
+ 
+diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
+index 27b4b9e7c0c2..502984c724ff 100644
+--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
+@@ -822,10 +822,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+ 		return res | POLLERR;
+ 
+ 	/*
+-	 * For output streams you can write as long as there are fewer buffers
+-	 * queued than there are buffers available.
++	 * For output streams you can call write() as long as there are fewer
++	 * buffers queued than there are buffers available.
+ 	 */
+-	if (q->is_output && q->queued_count < q->num_buffers)
++	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
+ 		return res | POLLOUT | POLLWRNORM;
+ 
+ 	if (list_empty(&q->done_list)) {
+diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
+index c241e15cacb1..cbd4331fb45c 100644
+--- a/drivers/misc/cxl/vphb.c
++++ b/drivers/misc/cxl/vphb.c
+@@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+ 	mask <<= shift;
+ 	val <<= shift;
+ 
+-	v = (in_le32(ioaddr) & ~mask) || (val & mask);
++	v = (in_le32(ioaddr) & ~mask) | (val & mask);
+ 
+ 	out_le32(ioaddr, v);
+ 	return PCIBIOS_SUCCESSFUL;
+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index b2f2486b3d75..80f9afcb1382 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
+ {
+ 	struct mei_cl *cl = file->private_data;
+ 
+-	return mei_cl_notify_request(cl, file, request);
++	if (request != MEI_HBM_NOTIFICATION_START &&
++	    request != MEI_HBM_NOTIFICATION_STOP)
++		return -EINVAL;
++
++	return mei_cl_notify_request(cl, file, (u8)request);
+ }
+ 
+ /**
+@@ -657,7 +661,9 @@ out:
+  * @file: pointer to file structure
+  * @band: band bitmap
+  *
+- * Return: poll mask
++ * Return: negative on error,
++ *         0 if it did no changes,
++ *         and positive a process was added or deleted
+  */
+ static int mei_fasync(int fd, struct file *file, int band)
+ {
+@@ -665,7 +671,7 @@ static int mei_fasync(int fd, struct file *file, int band)
+ 	struct mei_cl *cl = file->private_data;
+ 
+ 	if (!mei_cl_is_connected(cl))
+-		return POLLERR;
++		return -ENODEV;
+ 
+ 	return fasync_helper(fd, file, band, &cl->ev_async);
+ }
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 3a9a79ec4343..3d5087b03999 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1076,8 +1076,7 @@ static int mmc_select_hs400(struct mmc_card *card)
+ 	mmc_set_clock(host, max_dtr);
+ 
+ 	/* Switch card to HS mode */
+-	val = EXT_CSD_TIMING_HS |
+-	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
++	val = EXT_CSD_TIMING_HS;
+ 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ 			   EXT_CSD_HS_TIMING, val,
+ 			   card->ext_csd.generic_cmd6_time,
+@@ -1160,8 +1159,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+ 	mmc_set_clock(host, max_dtr);
+ 
+ 	/* Switch HS400 to HS DDR */
+-	val = EXT_CSD_TIMING_HS |
+-	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
++	val = EXT_CSD_TIMING_HS;
+ 	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
+ 			   val, card->ext_csd.generic_cmd6_time,
+ 			   true, send_status, true);
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 141eaa923e18..967535d76e34 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -626,9 +626,9 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
+ 	 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ 	 */
+ 	if (!mmc_host_is_spi(card->host) &&
+-		(card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
+-		 card->sd_bus_speed == UHS_DDR50_BUS_SPEED ||
+-		 card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
++		(card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
++		 card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
++		 card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
+ 		err = mmc_execute_tuning(card);
+ 
+ 		/*
+@@ -638,7 +638,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
+ 		 * difference between v3.00 and 3.01 spec means that CMD19
+ 		 * tuning is also available for DDR50 mode.
+ 		 */
+-		if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) {
++		if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
+ 			pr_warn("%s: ddr50 tuning failed\n",
+ 				mmc_hostname(card->host));
+ 			err = 0;
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 16d838e6d623..467b3cf80c44 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -535,8 +535,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
+ 	 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ 	 */
+ 	if (!mmc_host_is_spi(card->host) &&
+-	    ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
+-	     (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
++	    ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
++	      (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
+ 		err = mmc_execute_tuning(card);
+ out:
+ 	return err;
+@@ -630,7 +630,7 @@ try_again:
+ 	 */
+ 	if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
+ 		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+-					ocr);
++					ocr_card);
+ 		if (err == -EAGAIN) {
+ 			sdio_reset(host);
+ 			mmc_go_idle(host);
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index fb266745f824..acece3299756 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = {
+ 	{
+ 		.id     = 0x00280180,
+ 		.mask   = 0x00ffffff,
+-		.data	= &variant_u300,
++		.data	= &variant_nomadik,
+ 	},
+ 	{
+ 		.id     = 0x00480180,
+diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
+index ce08896b9d69..28a057fae0a1 100644
+--- a/drivers/mmc/host/pxamci.c
++++ b/drivers/mmc/host/pxamci.c
+@@ -804,7 +804,7 @@ static int pxamci_probe(struct platform_device *pdev)
+ 		dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
+ 		goto out;
+ 	} else {
+-		mmc->caps |= host->pdata->gpio_card_ro_invert ?
++		mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+ 			0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ 	}
+ 
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index f6047fc94062..a5cda926d38e 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
+ 	.ops = &sdhci_acpi_ops_int,
+ };
+ 
++static int bxt_get_cd(struct mmc_host *mmc)
++{
++	int gpio_cd = mmc_gpio_get_cd(mmc);
++	struct sdhci_host *host = mmc_priv(mmc);
++	unsigned long flags;
++	int ret = 0;
++
++	if (!gpio_cd)
++		return 0;
++
++	pm_runtime_get_sync(mmc->parent);
++
++	spin_lock_irqsave(&host->lock, flags);
++
++	if (host->flags & SDHCI_DEVICE_DEAD)
++		goto out;
++
++	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
++out:
++	spin_unlock_irqrestore(&host->lock, flags);
++
++	pm_runtime_mark_last_busy(mmc->parent);
++	pm_runtime_put_autosuspend(mmc->parent);
++
++	return ret;
++}
++
+ static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
+ 				      const char *hid, const char *uid)
+ {
+@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
+ 
+ 	/* Platform specific code during sd probe slot goes here */
+ 
++	if (hid && !strcmp(hid, "80865ACA"))
++		host->mmc_host_ops.get_cd = bxt_get_cd;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index cf7ad458b4f4..45ee07d3a761 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -277,7 +277,7 @@ static int spt_select_drive_strength(struct sdhci_host *host,
+ 	if (sdhci_pci_spt_drive_strength > 0)
+ 		drive_strength = sdhci_pci_spt_drive_strength & 0xf;
+ 	else
+-		drive_strength = 1; /* 33-ohm */
++		drive_strength = 0; /* Default 50-ohm */
+ 
+ 	if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0)
+ 		drive_strength = 0; /* Default 50-ohm */
+@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
+ 	sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
+ }
+ 
++static int bxt_get_cd(struct mmc_host *mmc)
++{
++	int gpio_cd = mmc_gpio_get_cd(mmc);
++	struct sdhci_host *host = mmc_priv(mmc);
++	unsigned long flags;
++	int ret = 0;
++
++	if (!gpio_cd)
++		return 0;
++
++	pm_runtime_get_sync(mmc->parent);
++
++	spin_lock_irqsave(&host->lock, flags);
++
++	if (host->flags & SDHCI_DEVICE_DEAD)
++		goto out;
++
++	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
++out:
++	spin_unlock_irqrestore(&host->lock, flags);
++
++	pm_runtime_mark_last_busy(mmc->parent);
++	pm_runtime_put_autosuspend(mmc->parent);
++
++	return ret;
++}
++
+ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+@@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
+ 	slot->cd_con_id = NULL;
+ 	slot->cd_idx = 0;
+ 	slot->cd_override_level = true;
++	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
++	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
++		slot->host->mmc_host_ops.get_cd = bxt_get_cd;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index b48565ed5616..8814eb6b83bf 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -540,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ 
+ 		BUG_ON(len > 65536);
+ 
+-		/* tran, valid */
+-		sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
+-		desc += host->desc_sz;
++		if (len) {
++			/* tran, valid */
++			sdhci_adma_write_desc(host, desc, addr, len,
++					      ADMA2_TRAN_VALID);
++			desc += host->desc_sz;
++		}
+ 
+ 		/*
+ 		 * If this triggers then we have a calculation bug
+@@ -1364,7 +1367,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ 	sdhci_runtime_pm_get(host);
+ 
+ 	/* Firstly check card presence */
+-	present = sdhci_do_get_cd(host);
++	present = mmc->ops->get_cd(mmc);
+ 
+ 	spin_lock_irqsave(&host->lock, flags);
+ 
+@@ -2760,7 +2763,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
+ 
+ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+ {
+-	if (host->runtime_suspended || host->bus_on)
++	if (host->bus_on)
+ 		return;
+ 	host->bus_on = true;
+ 	pm_runtime_get_noresume(host->mmc->parent);
+@@ -2768,7 +2771,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+ 
+ static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+ {
+-	if (host->runtime_suspended || !host->bus_on)
++	if (!host->bus_on)
+ 		return;
+ 	host->bus_on = false;
+ 	pm_runtime_put_noidle(host->mmc->parent);
+@@ -2861,6 +2864,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
+ 
+ 	host = mmc_priv(mmc);
+ 	host->mmc = mmc;
++	host->mmc_host_ops = sdhci_ops;
++	mmc->ops = &host->mmc_host_ops;
+ 
+ 	return host;
+ }
+@@ -3057,7 +3062,6 @@ int sdhci_add_host(struct sdhci_host *host)
+ 	/*
+ 	 * Set host parameters.
+ 	 */
+-	mmc->ops = &sdhci_ops;
+ 	max_clk = host->max_clk;
+ 
+ 	if (host->ops->get_min_clock)
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 9d4aa31b683a..9c331ac5ad6b 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -425,6 +425,7 @@ struct sdhci_host {
+ 
+ 	/* Internal data */
+ 	struct mmc_host *mmc;	/* MMC structure */
++	struct mmc_host_ops mmc_host_ops;	/* MMC host ops */
+ 	u64 dma_mask;		/* custom DMA mask */
+ 
+ #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
+index 4498e92116b8..b47122d3e8d8 100644
+--- a/drivers/mmc/host/usdhi6rol0.c
++++ b/drivers/mmc/host/usdhi6rol0.c
+@@ -1634,7 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
+ 	struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
+ 	struct mmc_request *mrq = host->mrq;
+ 	struct mmc_data *data = mrq ? mrq->data : NULL;
+-	struct scatterlist *sg = host->sg ?: data->sg;
++	struct scatterlist *sg;
+ 
+ 	dev_warn(mmc_dev(host->mmc),
+ 		 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
+@@ -1666,6 +1666,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
+ 	case USDHI6_WAIT_FOR_MWRITE:
+ 	case USDHI6_WAIT_FOR_READ:
+ 	case USDHI6_WAIT_FOR_WRITE:
++		sg = host->sg ?: data->sg;
+ 		dev_dbg(mmc_dev(host->mmc),
+ 			"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
+ 			data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index f1692e418fe4..28bbca0af238 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
+ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+ 						struct rtnl_link_stats64 *stats);
+ static void bond_slave_arr_handler(struct work_struct *work);
++static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
++				  int mod);
+ 
+ /*---------------------------- General routines -----------------------------*/
+ 
+@@ -2418,7 +2420,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 		 struct slave *slave)
+ {
+ 	struct arphdr *arp = (struct arphdr *)skb->data;
+-	struct slave *curr_active_slave;
++	struct slave *curr_active_slave, *curr_arp_slave;
+ 	unsigned char *arp_ptr;
+ 	__be32 sip, tip;
+ 	int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+@@ -2465,26 +2467,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 		     &sip, &tip);
+ 
+ 	curr_active_slave = rcu_dereference(bond->curr_active_slave);
++	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+ 
+-	/* Backup slaves won't see the ARP reply, but do come through
+-	 * here for each ARP probe (so we swap the sip/tip to validate
+-	 * the probe).  In a "redundant switch, common router" type of
+-	 * configuration, the ARP probe will (hopefully) travel from
+-	 * the active, through one switch, the router, then the other
+-	 * switch before reaching the backup.
++	/* We 'trust' the received ARP enough to validate it if:
++	 *
++	 * (a) the slave receiving the ARP is active (which includes the
++	 * current ARP slave, if any), or
++	 *
++	 * (b) the receiving slave isn't active, but there is a currently
++	 * active slave and it received valid arp reply(s) after it became
++	 * the currently active slave, or
++	 *
++	 * (c) there is an ARP slave that sent an ARP during the prior ARP
++	 * interval, and we receive an ARP reply on any slave.  We accept
++	 * these because switch FDB update delays may deliver the ARP
++	 * reply to a slave other than the sender of the ARP request.
+ 	 *
+-	 * We 'trust' the arp requests if there is an active slave and
+-	 * it received valid arp reply(s) after it became active. This
+-	 * is done to avoid endless looping when we can't reach the
++	 * Note: for (b), backup slaves are receiving the broadcast ARP
++	 * request, not a reply.  This request passes from the sending
++	 * slave through the L2 switch(es) to the receiving slave.  Since
++	 * this is checking the request, sip/tip are swapped for
++	 * validation.
++	 *
++	 * This is done to avoid endless looping when we can't reach the
+ 	 * arp_ip_target and fool ourselves with our own arp requests.
+ 	 */
+-
+ 	if (bond_is_active_slave(slave))
+ 		bond_validate_arp(bond, slave, sip, tip);
+ 	else if (curr_active_slave &&
+ 		 time_after(slave_last_rx(bond, curr_active_slave),
+ 			    curr_active_slave->last_link_up))
+ 		bond_validate_arp(bond, slave, tip, sip);
++	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
++		 bond_time_in_interval(bond,
++				       dev_trans_start(curr_arp_slave->dev), 1))
++		bond_validate_arp(bond, slave, sip, tip);
+ 
+ out_unlock:
+ 	if (arp != (struct arphdr *)skb->data)
+diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
+index fc5b75675cd8..eb7192fab593 100644
+--- a/drivers/net/can/usb/ems_usb.c
++++ b/drivers/net/can/usb/ems_usb.c
+@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
+  */
+ #define EMS_USB_ARM7_CLOCK 8000000
+ 
++#define CPC_TX_QUEUE_TRIGGER_LOW	25
++#define CPC_TX_QUEUE_TRIGGER_HIGH	35
++
+ /*
+  * CAN-Message representation in a CPC_MSG. Message object type is
+  * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
+@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
+ 	switch (urb->status) {
+ 	case 0:
+ 		dev->free_slots = dev->intr_in_buffer[1];
++		if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
++			if (netif_queue_stopped(netdev)){
++				netif_wake_queue(netdev);
++			}
++		}
+ 		break;
+ 
+ 	case -ECONNRESET: /* unlink */
+@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
+ 	/* Release context */
+ 	context->echo_index = MAX_TX_URBS;
+ 
+-	if (netif_queue_stopped(netdev))
+-		netif_wake_queue(netdev);
+ }
+ 
+ /*
+@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
+ 	int err, i;
+ 
+ 	dev->intr_in_buffer[0] = 0;
+-	dev->free_slots = 15; /* initial size */
++	dev->free_slots = 50; /* initial size */
+ 
+ 	for (i = 0; i < MAX_RX_URBS; i++) {
+ 		struct urb *urb = NULL;
+@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
+ 
+ 		/* Slow down tx path */
+ 		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
+-		    dev->free_slots < 5) {
++		    dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
+ 			netif_stop_queue(netdev);
+ 		}
+ 	}
+diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
+index b06dba05594a..2dea39b5cb0b 100644
+--- a/drivers/net/dsa/mv88e6xxx.c
++++ b/drivers/net/dsa/mv88e6xxx.c
+@@ -1519,7 +1519,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+ 
+ 	/* no PVID with ranges, otherwise it's a bug */
+ 	if (pvid)
+-		err = _mv88e6xxx_port_pvid_set(ds, port, vid);
++		err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
+ unlock:
+ 	mutex_unlock(&ps->smi_mutex);
+ 
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 79789d8e52da..ca5ac5d6f4e6 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ 	return ret;
+ }
+ 
++static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
++{
++	/* Check if we will never have enough descriptors,
++	 * as gso_segs can be more than current ring size
++	 */
++	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
++}
++
+ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+ 
+ /* Use GSO to workaround all TSO packets that meet HW bug conditions
+@@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		 * vlan encapsulated.
+ 		 */
+ 		if (skb->protocol == htons(ETH_P_8021Q) ||
+-		    skb->protocol == htons(ETH_P_8021AD))
+-			return tg3_tso_bug(tp, tnapi, txq, skb);
++		    skb->protocol == htons(ETH_P_8021AD)) {
++			if (tg3_tso_bug_gso_check(tnapi, skb))
++				return tg3_tso_bug(tp, tnapi, txq, skb);
++			goto drop;
++		}
+ 
+ 		if (!skb_is_gso_v6(skb)) {
+ 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+-			    tg3_flag(tp, TSO_BUG))
+-				return tg3_tso_bug(tp, tnapi, txq, skb);
+-
++			    tg3_flag(tp, TSO_BUG)) {
++				if (tg3_tso_bug_gso_check(tnapi, skb))
++					return tg3_tso_bug(tp, tnapi, txq, skb);
++				goto drop;
++			}
+ 			ip_csum = iph->check;
+ 			ip_tot_len = iph->tot_len;
+ 			iph->check = 0;
+@@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (would_hit_hwbug) {
+ 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+ 
+-		if (mss) {
++		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
+ 			/* If it's a TSO packet, do GSO instead of
+ 			 * allocating and copying to a large linear SKB
+ 			 */
+diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
+index 1671fa3332c2..7ba6d530b0c0 100644
+--- a/drivers/net/ethernet/cisco/enic/enic.h
++++ b/drivers/net/ethernet/cisco/enic/enic.h
+@@ -33,7 +33,7 @@
+ 
+ #define DRV_NAME		"enic"
+ #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
+-#define DRV_VERSION		"2.3.0.12"
++#define DRV_VERSION		"2.3.0.20"
+ #define DRV_COPYRIGHT		"Copyright 2008-2013 Cisco Systems, Inc"
+ 
+ #define ENIC_BARS_MAX		6
+diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
+index 1ffd1050860b..1fdf5fe12a95 100644
+--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
++++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
+@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ 			  int wait)
+ {
+ 	struct devcmd2_controller *dc2c = vdev->devcmd2;
+-	struct devcmd2_result *result = dc2c->result + dc2c->next_result;
++	struct devcmd2_result *result;
++	u8 color;
+ 	unsigned int i;
+ 	int delay, err;
+ 	u32 fetch_index, new_posted;
+@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ 	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+ 		return 0;
+ 
++	result = dc2c->result + dc2c->next_result;
++	color = dc2c->color;
++
++	dc2c->next_result++;
++	if (dc2c->next_result == dc2c->result_size) {
++		dc2c->next_result = 0;
++		dc2c->color = dc2c->color ? 0 : 1;
++	}
++
+ 	for (delay = 0; delay < wait; delay++) {
+-		if (result->color == dc2c->color) {
+-			dc2c->next_result++;
+-			if (dc2c->next_result == dc2c->result_size) {
+-				dc2c->next_result = 0;
+-				dc2c->color = dc2c->color ? 0 : 1;
+-			}
++		if (result->color == color) {
+ 			if (result->error) {
+ 				err = result->error;
+ 				if (err != ERR_ECMDUNKNOWN ||
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index 038f9ce391e6..1494997c4f7e 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ 	.enable		= mlx4_en_phc_enable,
+ };
+ 
++#define MLX4_EN_WRAP_AROUND_SEC	10ULL
++
++/* This function calculates the max shift that enables the user range
++ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
++ */
++static u32 freq_to_shift(u16 freq)
++{
++	u32 freq_khz = freq * 1000;
++	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
++	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
++		max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
++	/* calculate max possible multiplier in order to fit in 64bit */
++	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
++
++	/* This comes from the reverse of clocksource_khz2mult */
++	return ilog2(div_u64(max_mul * freq_khz, 1000000));
++}
++
+ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ {
+ 	struct mlx4_dev *dev = mdev->dev;
+@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ 	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
+ 	mdev->cycles.read = mlx4_en_read_clock;
+ 	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
+-	/* Using shift to make calculation more accurate. Since current HW
+-	 * clock frequency is 427 MHz, and cycles are given using a 48 bits
+-	 * register, the biggest shift when calculating using u64, is 14
+-	 * (max_cycles * multiplier < 2^64)
+-	 */
+-	mdev->cycles.shift = 14;
++	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
+ 	mdev->cycles.mult =
+ 		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+ 	mdev->nominal_c_mult = mdev->cycles.mult;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 7869f97de5da..67e9633ea9c7 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -2381,8 +2381,6 @@ out:
+ 	/* set offloads */
+ 	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ 				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+-	priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+-	priv->dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+ 
+ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+@@ -2393,8 +2391,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+ 	/* unset offloads */
+ 	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ 				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+-	priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+-	priv->dev->features    &= ~NETIF_F_GSO_UDP_TUNNEL;
+ 
+ 	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+ 				  VXLAN_STEER_BY_OUTER_MAC, 0);
+@@ -3020,6 +3016,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
+ 	}
+ 
++	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
++		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
++		dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
++	}
++
+ 	mdev->pndev[port] = dev;
+ 	mdev->upper[port] = NULL;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
+index ee99e67187f5..3904b5fc0b7c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
+@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
+ 	stats->collisions = 0;
+ 	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+ 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
+-	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
++	stats->rx_over_errors = 0;
+ 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+ 	stats->rx_frame_errors = 0;
+ 	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+-	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
++	stats->rx_missed_errors = 0;
+ 	stats->tx_aborted_errors = 0;
+ 	stats->tx_carrier_errors = 0;
+ 	stats->tx_fifo_errors = 0;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 617fb22b5d81..7dbeafa65934 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -45,6 +45,7 @@
+ #include <linux/if_bridge.h>
+ #include <linux/workqueue.h>
+ #include <linux/jiffies.h>
++#include <linux/rtnetlink.h>
+ #include <net/switchdev.h>
+ 
+ #include "spectrum.h"
+@@ -812,6 +813,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+ 
+ 	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+ 
++	rtnl_lock();
+ 	do {
+ 		mlxsw_reg_sfn_pack(sfn_pl);
+ 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+@@ -824,6 +826,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+ 			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+ 
+ 	} while (num_rec);
++	rtnl_unlock();
+ 
+ 	kfree(sfn_pl);
+ 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
+index e9f2349e98bc..52ec3d6e056a 100644
+--- a/drivers/net/ethernet/rocker/rocker.c
++++ b/drivers/net/ethernet/rocker/rocker.c
+@@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
+ 	info.addr = lw->addr;
+ 	info.vid = lw->vid;
+ 
++	rtnl_lock();
+ 	if (learned && removing)
+ 		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+ 					 lw->rocker_port->dev, &info.info);
+ 	else if (learned && !removing)
+ 		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+ 					 lw->rocker_port->dev, &info.info);
++	rtnl_unlock();
+ 
+ 	rocker_port_kfree(lw->trans, work);
+ }
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index 47b711739ba9..e6cefd0e3262 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -845,6 +845,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
+ 	struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ 	struct sk_buff *skb;
+ 	unsigned long flags;
++	u8 overflow;
++
++	overflow = (phy_rxts->ns_hi >> 14) & 0x3;
++	if (overflow)
++		pr_debug("rx timestamp queue overflow, count %d\n", overflow);
+ 
+ 	spin_lock_irqsave(&dp83640->rx_lock, flags);
+ 
+@@ -887,6 +892,7 @@ static void decode_txts(struct dp83640_private *dp83640,
+ 	struct skb_shared_hwtstamps shhwtstamps;
+ 	struct sk_buff *skb;
+ 	u64 ns;
++	u8 overflow;
+ 
+ 	/* We must already have the skb that triggered this. */
+ 
+@@ -896,6 +902,17 @@ static void decode_txts(struct dp83640_private *dp83640,
+ 		pr_debug("have timestamp but tx_queue empty\n");
+ 		return;
+ 	}
++
++	overflow = (phy_txts->ns_hi >> 14) & 0x3;
++	if (overflow) {
++		pr_debug("tx timestamp queue overflow, count %d\n", overflow);
++		while (skb) {
++			skb_complete_tx_timestamp(skb, NULL);
++			skb = skb_dequeue(&dp83640->tx_queue);
++		}
++		return;
++	}
++
+ 	ns = phy2txts(phy_txts);
+ 	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ 	shhwtstamps.hwtstamp = ns_to_ktime(ns);
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 0a37f840fcc5..4e0068e775f9 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
+ 
+ 		if (!__pppoe_xmit(sk_pppox(relay_po), skb))
+ 			goto abort_put;
++
++		sock_put(sk_pppox(relay_po));
+ 	} else {
+ 		if (sock_queue_rcv_skb(sk, skb))
+ 			goto abort_kfree;
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 597c53e0a2ec..f7e8c79349ad 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
+ 	return i < MAX_CALLID;
+ }
+ 
+-static int add_chan(struct pppox_sock *sock)
++static int add_chan(struct pppox_sock *sock,
++		    struct pptp_addr *sa)
+ {
+ 	static int call_id;
+ 
+ 	spin_lock(&chan_lock);
+-	if (!sock->proto.pptp.src_addr.call_id)	{
++	if (!sa->call_id)	{
+ 		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
+ 		if (call_id == MAX_CALLID) {
+ 			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
+ 			if (call_id == MAX_CALLID)
+ 				goto out_err;
+ 		}
+-		sock->proto.pptp.src_addr.call_id = call_id;
+-	} else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
++		sa->call_id = call_id;
++	} else if (test_bit(sa->call_id, callid_bitmap)) {
+ 		goto out_err;
++	}
+ 
+-	set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+-	rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
++	sock->proto.pptp.src_addr = *sa;
++	set_bit(sa->call_id, callid_bitmap);
++	rcu_assign_pointer(callid_sock[sa->call_id], sock);
+ 	spin_unlock(&chan_lock);
+ 
+ 	return 0;
+@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 	struct sock *sk = sock->sk;
+ 	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ 	struct pppox_sock *po = pppox_sk(sk);
+-	struct pptp_opt *opt = &po->proto.pptp;
+ 	int error = 0;
+ 
+ 	if (sockaddr_len < sizeof(struct sockaddr_pppox))
+@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 
+ 	lock_sock(sk);
+ 
+-	opt->src_addr = sp->sa_addr.pptp;
+-	if (add_chan(po))
++	if (sk->sk_state & PPPOX_DEAD) {
++		error = -EALREADY;
++		goto out;
++	}
++
++	if (sk->sk_state & PPPOX_BOUND) {
+ 		error = -EBUSY;
++		goto out;
++	}
++
++	if (add_chan(po, &sp->sa_addr.pptp))
++		error = -EBUSY;
++	else
++		sk->sk_state |= PPPOX_BOUND;
+ 
++out:
+ 	release_sock(sk);
+ 	return error;
+ }
+@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	}
+ 
+ 	opt->dst_addr = sp->sa_addr.pptp;
+-	sk->sk_state = PPPOX_CONNECTED;
++	sk->sk_state |= PPPOX_CONNECTED;
+ 
+  end:
+ 	release_sock(sk);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 5fccc5a8153f..982e0acd1a36 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -492,6 +492,7 @@ static const struct usb_device_id products[] = {
+ 
+ 	/* 3. Combined interface devices matching on interface number */
+ 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
++	{QMI_FIXED_INTF(0x05c6, 0x6001, 3)},	/* 4G LTE usb-modem U901 */
+ 	{QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 405a7b6cca25..e0fcda4ddd55 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1984,11 +1984,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 				     vxlan->cfg.port_max, true);
+ 
+ 	if (info) {
+-		if (info->key.tun_flags & TUNNEL_CSUM)
+-			flags |= VXLAN_F_UDP_CSUM;
+-		else
+-			flags &= ~VXLAN_F_UDP_CSUM;
+-
+ 		ttl = info->key.ttl;
+ 		tos = info->key.tos;
+ 
+@@ -2003,8 +1998,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 			goto drop;
+ 		sk = vxlan->vn4_sock->sock->sk;
+ 
+-		if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
+-			df = htons(IP_DF);
++		if (info) {
++			if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
++				df = htons(IP_DF);
++
++			if (info->key.tun_flags & TUNNEL_CSUM)
++				flags |= VXLAN_F_UDP_CSUM;
++			else
++				flags &= ~VXLAN_F_UDP_CSUM;
++		}
+ 
+ 		memset(&fl4, 0, sizeof(fl4));
+ 		fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
+@@ -2102,6 +2104,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ 			return;
+ 		}
+ 
++		if (info) {
++			if (info->key.tun_flags & TUNNEL_CSUM)
++				flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
++			else
++				flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
++		}
++
+ 		ttl = ttl ? : ip6_dst_hoplimit(ndst);
+ 		err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
+ 				      0, ttl, src_port, dst_port, htonl(vni << 8), md,
+diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
+index e18629a16fb0..0961f33de05e 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
++++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
+@@ -1154,6 +1154,9 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
+ 
+ 	priv->ucode_loaded = false;
+ 	iwl_trans_stop_device(priv->trans);
++	ret = iwl_trans_start_hw(priv->trans);
++	if (ret)
++		goto out;
+ 
+ 	priv->wowlan = true;
+ 
+diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
+index d6e0c1b5c20c..8215d7405f64 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
+@@ -1267,6 +1267,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ 		return -EBUSY;
+ 	}
+ 
++	/* we don't support "match all" in the firmware */
++	if (!req->n_match_sets)
++		return -EOPNOTSUPP;
++
+ 	ret = iwl_mvm_check_running_scans(mvm, type);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index 639761fb2bfb..d58c094f2f04 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -384,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
+ 	{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+@@ -401,10 +402,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
+-	{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+-	{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 90283453073c..8c7204738aa3 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -7,6 +7,7 @@
+  *
+  * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -33,6 +34,7 @@
+  *
+  * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -924,9 +926,16 @@ monitor:
+ 	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+ 		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
+ 			       trans_pcie->fw_mon_phys >> dest->base_shift);
+-		iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+-			       (trans_pcie->fw_mon_phys +
+-				trans_pcie->fw_mon_size) >> dest->end_shift);
++		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++				       (trans_pcie->fw_mon_phys +
++					trans_pcie->fw_mon_size - 256) >>
++						dest->end_shift);
++		else
++			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++				       (trans_pcie->fw_mon_phys +
++					trans_pcie->fw_mon_size) >>
++						dest->end_shift);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
+index f46c9d7f6528..7f471bff435c 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
+@@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ 								      hw_queue);
+ 			if (rx_remained_cnt == 0)
+ 				return;
+-
++			buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
++				rtlpci->rx_ring[rxring_idx].idx];
++			pdesc = (struct rtl_rx_desc *)skb->data;
+ 		} else {	/* rx descriptor */
+ 			pdesc = &rtlpci->rx_ring[rxring_idx].desc[
+ 				rtlpci->rx_ring[rxring_idx].idx];
+@@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ 		new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ 		if (unlikely(!new_skb))
+ 			goto no_new;
+-		if (rtlpriv->use_new_trx_flow) {
+-			buffer_desc =
+-			  &rtlpci->rx_ring[rxring_idx].buffer_desc
+-				[rtlpci->rx_ring[rxring_idx].idx];
+-			/*means rx wifi info*/
+-			pdesc = (struct rtl_rx_desc *)skb->data;
+-		}
+ 		memset(&rx_status , 0 , sizeof(rx_status));
+ 		rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
+ 						 &rx_status, (u8 *)pdesc, skb);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+index 11344121c55e..47e32cb0ec1a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+@@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+ 	u8 tid;
+ 
+ 	rtl8188ee_bt_reg_init(hw);
+-	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+-
+ 	rtlpriv->dm.dm_initialgain_enable = 1;
+ 	rtlpriv->dm.dm_flag = 0;
+ 	rtlpriv->dm.disable_framebursting = 0;
+@@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		rtlpriv->cfg->mod_params->sw_crypto;
++	rtlpriv->cfg->mod_params->disable_watchdog =
++		rtlpriv->cfg->mod_params->disable_watchdog;
+ 	if (rtlpriv->cfg->mod_params->disable_watchdog)
+ 		pr_info("watchdog disabled\n");
+ 	if (!rtlpriv->psc.inactiveps)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+index de6cb6c3a48c..4780bdc63b2b 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+@@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		rtlpriv->cfg->mod_params->sw_crypto;
+ 	if (!rtlpriv->psc.inactiveps)
+ 		pr_info("rtl8192ce: Power Save off (module option)\n");
+ 	if (!rtlpriv->psc.fwctrl_lps)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+index fd4a5353d216..7c6f7f0d18c6 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+@@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->dm.disable_framebursting = false;
+ 	rtlpriv->dm.thermalvalue = 0;
+ 	rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		rtlpriv->cfg->mod_params->sw_crypto;
+ 
+ 	/* for firmware buf */
+ 	rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+index b19d0398215f..c6e09a19de1a 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+@@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+ 
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+index e1fd27c888bf..31baca41ac2f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+@@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		rtlpriv->cfg->mod_params->sw_crypto;
+ 	if (!rtlpriv->psc.inactiveps)
+ 		pr_info("Power Save off (module option)\n");
+ 	if (!rtlpriv->psc.fwctrl_lps)
+@@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+ 
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+index 3859b3e3d158..ff49a8c0ff61 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+@@ -150,6 +150,11 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		rtlpriv->cfg->mod_params->sw_crypto;
++	rtlpriv->cfg->mod_params->disable_watchdog =
++		rtlpriv->cfg->mod_params->disable_watchdog;
+ 	if (rtlpriv->cfg->mod_params->disable_watchdog)
+ 		pr_info("watchdog disabled\n");
+ 	rtlpriv->psc.reg_fwctrl_lps = 3;
+@@ -267,6 +272,8 @@ static struct rtl_mod_params rtl8723e_mod_params = {
+ 	.swctrl_lps = false,
+ 	.fwctrl_lps = true,
+ 	.debug = DBG_EMERG,
++	.msi_support = false,
++	.disable_watchdog = false,
+ };
+ 
+ static struct rtl_hal_cfg rtl8723e_hal_cfg = {
+@@ -383,12 +390,14 @@ module_param_named(debug, rtl8723e_mod_params.debug, int, 0444);
+ module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444);
+ module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444);
++module_param_named(msi, rtl8723e_mod_params.msi_support, bool, 0444);
+ module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog,
+ 		   bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+ MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
++MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+ MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+index d091f1d5f91e..a78eaeda0008 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+@@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+ 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ 
+ 	rtl8723be_bt_reg_init(hw);
+-	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+ 	rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+ 
+ 	rtlpriv->dm.dm_initialgain_enable = 1;
+@@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ 	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		 rtlpriv->cfg->mod_params->sw_crypto;
++	rtlpriv->cfg->mod_params->disable_watchdog =
++		 rtlpriv->cfg->mod_params->disable_watchdog;
+ 	if (rtlpriv->cfg->mod_params->disable_watchdog)
+ 		pr_info("watchdog disabled\n");
+ 	rtlpriv->psc.reg_fwctrl_lps = 3;
+@@ -267,6 +270,9 @@ static struct rtl_mod_params rtl8723be_mod_params = {
+ 	.inactiveps = true,
+ 	.swctrl_lps = false,
+ 	.fwctrl_lps = true,
++	.msi_support = false,
++	.disable_watchdog = false,
++	.debug = DBG_EMERG,
+ };
+ 
+ static struct rtl_hal_cfg rtl8723be_hal_cfg = {
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index 4fa916dffc91..72a2c1969646 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -636,6 +636,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
+ 		msi_base = be32_to_cpup(msi_map + 2);
+ 		rid_len = be32_to_cpup(msi_map + 3);
+ 
++		if (rid_base & ~map_mask) {
++			dev_err(parent_dev,
++				"Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
++				map_mask, rid_base);
++			return rid_out;
++		}
++
+ 		msi_controller_node = of_find_node_by_phandle(phandle);
+ 
+ 		matched = (masked_rid >= rid_base &&
+@@ -655,7 +662,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
+ 	if (!matched)
+ 		return rid_out;
+ 
+-	rid_out = masked_rid + msi_base;
++	rid_out = masked_rid - rid_base + msi_base;
+ 	dev_dbg(dev,
+ 		"msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
+ 		dev_name(parent_dev), map_mask, rid_base, msi_base,
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index ff538568a617..0b3e0bfa7be5 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
+ {
+ 	pci_lock_rescan_remove();
+ 
+-	if (slot->flags & SLOT_IS_GOING_AWAY)
++	if (slot->flags & SLOT_IS_GOING_AWAY) {
++		pci_unlock_rescan_remove();
+ 		return -ENODEV;
++	}
+ 
+ 	/* configure all functions */
+ 	if (!(slot->flags & SLOT_ENABLED))
+diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
+index 0bf82a20a0fb..48d21e0edd56 100644
+--- a/drivers/pci/pcie/aer/aerdrv.c
++++ b/drivers/pci/pcie/aer/aerdrv.c
+@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
+ 	rpc->rpd = dev;
+ 	INIT_WORK(&rpc->dpc_handler, aer_isr);
+ 	mutex_init(&rpc->rpc_mutex);
+-	init_waitqueue_head(&rpc->wait_release);
+ 
+ 	/* Use PCIe bus function to store rpc into PCIe device */
+ 	set_service_data(dev, rpc);
+@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
+ 		if (rpc->isr)
+ 			free_irq(dev->irq, dev);
+ 
+-		wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
+-
++		flush_work(&rpc->dpc_handler);
+ 		aer_disable_rootport(rpc);
+ 		kfree(rpc);
+ 		set_service_data(dev, NULL);
+diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
+index 84420b7c9456..945c939a86c5 100644
+--- a/drivers/pci/pcie/aer/aerdrv.h
++++ b/drivers/pci/pcie/aer/aerdrv.h
+@@ -72,7 +72,6 @@ struct aer_rpc {
+ 					 * recovery on the same
+ 					 * root port hierarchy
+ 					 */
+-	wait_queue_head_t wait_release;
+ };
+ 
+ struct aer_broadcast_data {
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index fba785e9df75..4e14de0f0f98 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
+ 	while (get_e_source(rpc, &e_src))
+ 		aer_isr_one_error(p_device, &e_src);
+ 	mutex_unlock(&rpc->rpc_mutex);
+-
+-	wake_up(&rpc->wait_release);
+ }
+ 
+ /**
+diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
+index c777b97207d5..5f70fee59a94 100644
+--- a/drivers/pci/xen-pcifront.c
++++ b/drivers/pci/xen-pcifront.c
+@@ -53,7 +53,7 @@ struct pcifront_device {
+ };
+ 
+ struct pcifront_sd {
+-	int domain;
++	struct pci_sysdata sd;
+ 	struct pcifront_device *pdev;
+ };
+ 
+@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
+ 				    unsigned int domain, unsigned int bus,
+ 				    struct pcifront_device *pdev)
+ {
+-	sd->domain = domain;
++	/* Because we do not expose that information via XenBus. */
++	sd->sd.node = first_online_node;
++	sd->sd.domain = domain;
+ 	sd->pdev = pdev;
+ }
+ 
+@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
+ 	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
+ 		 domain, bus);
+ 
+-	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
+-	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
++	bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
++	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ 	if (!bus_entry || !sd) {
+ 		err = -ENOMEM;
+ 		goto err_out;
+diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
+index 8c7f27db6ad3..e7e574dc667a 100644
+--- a/drivers/phy/phy-core.c
++++ b/drivers/phy/phy-core.c
+@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
+ 
+ int phy_power_on(struct phy *phy)
+ {
+-	int ret;
++	int ret = 0;
+ 
+ 	if (!phy)
+-		return 0;
++		goto out;
+ 
+ 	if (phy->pwr) {
+ 		ret = regulator_enable(phy->pwr);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 	}
+ 
+ 	ret = phy_pm_runtime_get_sync(phy);
+ 	if (ret < 0 && ret != -ENOTSUPP)
+-		return ret;
++		goto err_pm_sync;
++
+ 	ret = 0; /* Override possible ret == -ENOTSUPP */
+ 
+ 	mutex_lock(&phy->mutex);
+@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
+ 		ret = phy->ops->power_on(phy);
+ 		if (ret < 0) {
+ 			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
+-			goto out;
++			goto err_pwr_on;
+ 		}
+ 	}
+ 	++phy->power_count;
+ 	mutex_unlock(&phy->mutex);
+ 	return 0;
+ 
+-out:
++err_pwr_on:
+ 	mutex_unlock(&phy->mutex);
+ 	phy_pm_runtime_put_sync(phy);
++err_pm_sync:
+ 	if (phy->pwr)
+ 		regulator_disable(phy->pwr);
+-
++out:
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(phy_power_on);
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index a313dfc0245f..d78ee151c9e4 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -865,6 +865,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ 		},
+ 	},
+ 	{
++		.ident = "Lenovo ideapad Y700-17ISK",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
++		},
++	},
++	{
+ 		.ident = "Lenovo Yoga 2 11 / 13 / Pro",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -893,6 +900,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ 		},
+ 	},
+ 	{
++		.ident = "Lenovo Yoga 700",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
++		},
++	},
++	{
+ 		.ident = "Lenovo Yoga 900",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index c01302989ee4..b0f62141ea4d 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -2484,6 +2484,14 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
+ 	brightness = __get_lcd_brightness(dev);
+ 	if (brightness < 0)
+ 		return 0;
++	/*
++	 * If transflective backlight is supported and the brightness is zero
++	 * (lowest brightness level), the set_lcd_brightness function will
++	 * activate the transflective backlight, making the LCD appear to be
++	 * turned off, simply increment the brightness level to avoid that.
++	 */
++	if (dev->tr_backlight_supported && brightness == 0)
++		brightness++;
+ 	ret = set_lcd_brightness(dev, brightness);
+ 	if (ret) {
+ 		pr_debug("Backlight method is read-only, disabling backlight support\n");
+diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
+index 8df0b0e62976..00676208080e 100644
+--- a/drivers/regulator/Kconfig
++++ b/drivers/regulator/Kconfig
+@@ -446,6 +446,7 @@ config REGULATOR_MC13892
+ config REGULATOR_MT6311
+ 	tristate "MediaTek MT6311 PMIC"
+ 	depends on I2C
++	select REGMAP_I2C
+ 	help
+ 	  Say y here to select this option to enable the power regulator of
+ 	  MediaTek MT6311 PMIC.
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index 35de22fdb7a0..f2e1a39ce0f3 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -27,8 +27,8 @@
+ #define AXP20X_IO_ENABLED		0x03
+ #define AXP20X_IO_DISABLED		0x07
+ 
+-#define AXP22X_IO_ENABLED		0x04
+-#define AXP22X_IO_DISABLED		0x03
++#define AXP22X_IO_ENABLED		0x03
++#define AXP22X_IO_DISABLED		0x04
+ 
+ #define AXP20X_WORKMODE_DCDC2_MASK	BIT(2)
+ #define AXP20X_WORKMODE_DCDC3_MASK	BIT(1)
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index a263c10359e1..4abfbdb285ec 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -3031,6 +3031,7 @@ static void dasd_setup_queue(struct dasd_block *block)
+ 		max = block->base->discipline->max_blocks << block->s2b_shift;
+ 	}
+ 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
++	block->request_queue->limits.max_dev_sectors = max;
+ 	blk_queue_logical_block_size(block->request_queue,
+ 				     block->bp_block);
+ 	blk_queue_max_hw_sectors(block->request_queue, max);
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index 184b1dbeb554..286782c60da4 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ 		spin_unlock_irqrestore(&lcu->lock, flags);
+ 		cancel_work_sync(&lcu->suc_data.worker);
+ 		spin_lock_irqsave(&lcu->lock, flags);
+-		if (device == lcu->suc_data.device)
++		if (device == lcu->suc_data.device) {
++			dasd_put_device(device);
+ 			lcu->suc_data.device = NULL;
++		}
+ 	}
+ 	was_pending = 0;
+ 	if (device == lcu->ruac_data.device) {
+@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ 		was_pending = 1;
+ 		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
+ 		spin_lock_irqsave(&lcu->lock, flags);
+-		if (device == lcu->ruac_data.device)
++		if (device == lcu->ruac_data.device) {
++			dasd_put_device(device);
+ 			lcu->ruac_data.device = NULL;
++		}
+ 	}
+ 	private->lcu = NULL;
+ 	spin_unlock_irqrestore(&lcu->lock, flags);
+@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
+ 	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
+ 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
+ 			    " alias data in lcu (rc = %d), retry later", rc);
+-		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
++		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
++			dasd_put_device(device);
+ 	} else {
++		dasd_put_device(device);
+ 		lcu->ruac_data.device = NULL;
+ 		lcu->flags &= ~UPDATE_PENDING;
+ 	}
+@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
+ 	 */
+ 	if (!usedev)
+ 		return -EINVAL;
++	dasd_get_device(usedev);
+ 	lcu->ruac_data.device = usedev;
+-	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
++	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
++		dasd_put_device(usedev);
+ 	return 0;
+ }
+ 
+@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
+ 	ASCEBC((char *) &cqr->magic, 4);
+ 	ccw = cqr->cpaddr;
+ 	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
+-	ccw->flags = 0 ;
++	ccw->flags = CCW_FLAG_SLI;
+ 	ccw->count = 16;
+ 	ccw->cda = (__u32)(addr_t) cqr->data;
+ 	((char *)cqr->data)[0] = reason;
+@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
+ 	/* 3. read new alias configuration */
+ 	_schedule_lcu_update(lcu, device);
+ 	lcu->suc_data.device = NULL;
++	dasd_put_device(device);
+ 	spin_unlock_irqrestore(&lcu->lock, flags);
+ }
+ 
+@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
+ 	}
+ 	lcu->suc_data.reason = reason;
+ 	lcu->suc_data.device = device;
++	dasd_get_device(device);
+ 	spin_unlock(&lcu->lock);
+-	schedule_work(&lcu->suc_data.worker);
++	if (!schedule_work(&lcu->suc_data.worker))
++		dasd_put_device(device);
+ };
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 16a1935cc9c1..e197c6f39de2 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -2192,7 +2192,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
+ 	/* Clear outstanding commands array. */
+ 	for (que = 0; que < ha->max_req_queues; que++) {
+ 		req = ha->req_q_map[que];
+-		if (!req)
++		if (!req || !test_bit(que, ha->req_qid_map))
+ 			continue;
+ 		req->out_ptr = (void *)(req->ring + req->length);
+ 		*req->out_ptr = 0;
+@@ -2209,7 +2209,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
+ 
+ 	for (que = 0; que < ha->max_rsp_queues; que++) {
+ 		rsp = ha->rsp_q_map[que];
+-		if (!rsp)
++		if (!rsp || !test_bit(que, ha->rsp_qid_map))
+ 			continue;
+ 		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ 		*rsp->in_ptr = 0;
+@@ -4961,7 +4961,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
+ 
+ 	for (i = 1; i < ha->max_rsp_queues; i++) {
+ 		rsp = ha->rsp_q_map[i];
+-		if (rsp) {
++		if (rsp && test_bit(i, ha->rsp_qid_map)) {
+ 			rsp->options &= ~BIT_0;
+ 			ret = qla25xx_init_rsp_que(base_vha, rsp);
+ 			if (ret != QLA_SUCCESS)
+@@ -4976,8 +4976,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
+ 	}
+ 	for (i = 1; i < ha->max_req_queues; i++) {
+ 		req = ha->req_q_map[i];
+-		if (req) {
+-		/* Clear outstanding commands array. */
++		if (req && test_bit(i, ha->req_qid_map)) {
++			/* Clear outstanding commands array. */
+ 			req->options &= ~BIT_0;
+ 			ret = qla25xx_init_req_que(base_vha, req);
+ 			if (ret != QLA_SUCCESS)
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index ccf6a7f99024..0e59731f95ad 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3018,9 +3018,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ 		    "MSI-X: Failed to enable support "
+ 		    "-- %d/%d\n Retry with %d vectors.\n",
+ 		    ha->msix_count, ret, ret);
++		ha->msix_count = ret;
++		ha->max_rsp_queues = ha->msix_count - 1;
+ 	}
+-	ha->msix_count = ret;
+-	ha->max_rsp_queues = ha->msix_count - 1;
+ 	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
+ 				ha->msix_count, GFP_KERNEL);
+ 	if (!ha->msix_entries) {
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index c5dd594f6c31..cf7ba52bae66 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
+ 	/* Delete request queues */
+ 	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+ 		req = ha->req_q_map[cnt];
+-		if (req) {
++		if (req && test_bit(cnt, ha->req_qid_map)) {
+ 			ret = qla25xx_delete_req_que(vha, req);
+ 			if (ret != QLA_SUCCESS) {
+ 				ql_log(ql_log_warn, vha, 0x00ea,
+@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
+ 	/* Delete response queues */
+ 	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+ 		rsp = ha->rsp_q_map[cnt];
+-		if (rsp) {
++		if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+ 			ret = qla25xx_delete_rsp_que(vha, rsp);
+ 			if (ret != QLA_SUCCESS) {
+ 				ql_log(ql_log_warn, vha, 0x00eb,
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index bfa9a64c316b..fc6674db4f2d 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -397,6 +397,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
+ 	int cnt;
+ 
+ 	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
++		if (!test_bit(cnt, ha->req_qid_map))
++			continue;
++
+ 		req = ha->req_q_map[cnt];
+ 		qla2x00_free_req_que(ha, req);
+ 	}
+@@ -404,6 +407,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
+ 	ha->req_q_map = NULL;
+ 
+ 	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
++		if (!test_bit(cnt, ha->rsp_qid_map))
++			continue;
++
+ 		rsp = ha->rsp_q_map[cnt];
+ 		qla2x00_free_rsp_que(ha, rsp);
+ 	}
+diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
+index ddbe2e7ac14d..c3e622524604 100644
+--- a/drivers/scsi/qla2xxx/qla_tmpl.c
++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
+@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ 	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+ 		for (i = 0; i < vha->hw->max_req_queues; i++) {
+ 			struct req_que *req = vha->hw->req_q_map[i];
++
++			if (!test_bit(i, vha->hw->req_qid_map))
++				continue;
++
+ 			if (req || !buf) {
+ 				length = req ?
+ 				    req->length : REQUEST_ENTRY_CNT_24XX;
+@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ 	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+ 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
++
++			if (!test_bit(i, vha->hw->rsp_qid_map))
++				continue;
++
+ 			if (rsp || !buf) {
+ 				length = rsp ?
+ 				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
+@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+ 	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+ 		for (i = 0; i < vha->hw->max_req_queues; i++) {
+ 			struct req_que *req = vha->hw->req_q_map[i];
++
++			if (!test_bit(i, vha->hw->req_qid_map))
++				continue;
++
+ 			if (req || !buf) {
+ 				qla27xx_insert16(i, buf, len);
+ 				qla27xx_insert16(1, buf, len);
+@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+ 	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+ 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
++
++			if (!test_bit(i, vha->hw->rsp_qid_map))
++				continue;
++
+ 			if (rsp || !buf) {
+ 				qla27xx_insert16(i, buf, len);
+ 				qla27xx_insert16(1, buf, len);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 84fa4c46eaa6..bb669d32ccd0 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
+ 	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+ 		rw_max = q->limits.io_opt =
+-			logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
++			sdkp->opt_xfer_blocks * sdp->sector_size;
+ 	else
+ 		rw_max = BLK_DEF_MAX_SECTORS;
+ 
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index aebad36391c9..8feac599e9ab 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
+ 
+ 	as->use_cs_gpios = true;
+ 	if (atmel_spi_is_v2(as) &&
++	    pdev->dev.of_node &&
+ 	    !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
+ 		as->use_cs_gpios = false;
+ 		master->num_chipselect = 4;
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index 1f8903d356e5..ed8283e7397a 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -1024,6 +1024,16 @@ static int omap2_mcspi_setup(struct spi_device *spi)
+ 		spi->controller_state = cs;
+ 		/* Link this to context save list */
+ 		list_add_tail(&cs->node, &ctx->cs);
++
++		if (gpio_is_valid(spi->cs_gpio)) {
++			ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
++			if (ret) {
++				dev_err(&spi->dev, "failed to request gpio\n");
++				return ret;
++			}
++			gpio_direction_output(spi->cs_gpio,
++					 !(spi->mode & SPI_CS_HIGH));
++		}
+ 	}
+ 
+ 	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
+@@ -1032,15 +1042,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
+ 			return ret;
+ 	}
+ 
+-	if (gpio_is_valid(spi->cs_gpio)) {
+-		ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
+-		if (ret) {
+-			dev_err(&spi->dev, "failed to request gpio\n");
+-			return ret;
+-		}
+-		gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+-	}
+-
+ 	ret = pm_runtime_get_sync(mcspi->dev);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
+index 79ac19246548..70b8f4fabfad 100644
+--- a/drivers/staging/panel/panel.c
++++ b/drivers/staging/panel/panel.c
+@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
+ 	lcd_send_serial(0x1F);	/* R/W=W, RS=0 */
+ 	lcd_send_serial(cmd & 0x0F);
+ 	lcd_send_serial((cmd >> 4) & 0x0F);
+-	/* the shortest command takes at least 40 us */
+-	usleep_range(40, 100);
++	udelay(40);		/* the shortest command takes at least 40 us */
+ 	spin_unlock_irq(&pprt_lock);
+ }
+ 
+@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
+ 	lcd_send_serial(0x5F);	/* R/W=W, RS=1 */
+ 	lcd_send_serial(data & 0x0F);
+ 	lcd_send_serial((data >> 4) & 0x0F);
+-	/* the shortest data takes at least 40 us */
+-	usleep_range(40, 100);
++	udelay(40);		/* the shortest data takes at least 40 us */
+ 	spin_unlock_irq(&pprt_lock);
+ }
+ 
+@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
+ 	spin_lock_irq(&pprt_lock);
+ 	/* present the data to the data port */
+ 	w_dtr(pprt, cmd);
+-	/* maintain the data during 20 us before the strobe */
+-	usleep_range(20, 100);
++	udelay(20);	/* maintain the data during 20 us before the strobe */
+ 
+ 	bits.e = BIT_SET;
+ 	bits.rs = BIT_CLR;
+ 	bits.rw = BIT_CLR;
+ 	set_ctrl_bits();
+ 
+-	usleep_range(40, 100);	/* maintain the strobe during 40 us */
++	udelay(40);	/* maintain the strobe during 40 us */
+ 
+ 	bits.e = BIT_CLR;
+ 	set_ctrl_bits();
+ 
+-	usleep_range(120, 500);	/* the shortest command takes at least 120 us */
++	udelay(120);	/* the shortest command takes at least 120 us */
+ 	spin_unlock_irq(&pprt_lock);
+ }
+ 
+@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
+ 	spin_lock_irq(&pprt_lock);
+ 	/* present the data to the data port */
+ 	w_dtr(pprt, data);
+-	/* maintain the data during 20 us before the strobe */
+-	usleep_range(20, 100);
++	udelay(20);	/* maintain the data during 20 us before the strobe */
+ 
+ 	bits.e = BIT_SET;
+ 	bits.rs = BIT_SET;
+ 	bits.rw = BIT_CLR;
+ 	set_ctrl_bits();
+ 
+-	usleep_range(40, 100);	/* maintain the strobe during 40 us */
++	udelay(40);	/* maintain the strobe during 40 us */
+ 
+ 	bits.e = BIT_CLR;
+ 	set_ctrl_bits();
+ 
+-	usleep_range(45, 100);	/* the shortest data takes at least 45 us */
++	udelay(45);	/* the shortest data takes at least 45 us */
+ 	spin_unlock_irq(&pprt_lock);
+ }
+ 
+@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
+ 	spin_lock_irq(&pprt_lock);
+ 	/* present the data to the control port */
+ 	w_ctr(pprt, cmd);
+-	usleep_range(60, 120);
++	udelay(60);
+ 	spin_unlock_irq(&pprt_lock);
+ }
+ 
+@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
+ 	spin_lock_irq(&pprt_lock);
+ 	/* present the data to the data port */
+ 	w_dtr(pprt, data);
+-	usleep_range(60, 120);
++	udelay(60);
+ 	spin_unlock_irq(&pprt_lock);
+ }
+ 
+@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
+ 		lcd_send_serial(0x5F);	/* R/W=W, RS=1 */
+ 		lcd_send_serial(' ' & 0x0F);
+ 		lcd_send_serial((' ' >> 4) & 0x0F);
+-		usleep_range(40, 100);	/* the shortest data takes at least 40 us */
++		udelay(40);	/* the shortest data takes at least 40 us */
+ 	}
+ 	spin_unlock_irq(&pprt_lock);
+ 
+@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
+ 		w_dtr(pprt, ' ');
+ 
+ 		/* maintain the data during 20 us before the strobe */
+-		usleep_range(20, 100);
++		udelay(20);
+ 
+ 		bits.e = BIT_SET;
+ 		bits.rs = BIT_SET;
+@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
+ 		set_ctrl_bits();
+ 
+ 		/* maintain the strobe during 40 us */
+-		usleep_range(40, 100);
++		udelay(40);
+ 
+ 		bits.e = BIT_CLR;
+ 		set_ctrl_bits();
+ 
+ 		/* the shortest data takes at least 45 us */
+-		usleep_range(45, 100);
++		udelay(45);
+ 	}
+ 	spin_unlock_irq(&pprt_lock);
+ 
+@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
+ 	for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
+ 		/* present the data to the data port */
+ 		w_dtr(pprt, ' ');
+-		usleep_range(60, 120);
++		udelay(60);
+ 	}
+ 
+ 	spin_unlock_irq(&pprt_lock);
+diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
+index 3b5835b28128..a5bbb338f275 100644
+--- a/drivers/staging/speakup/serialio.c
++++ b/drivers/staging/speakup/serialio.c
+@@ -6,6 +6,11 @@
+ #include "spk_priv.h"
+ #include "serialio.h"
+ 
++#include <linux/serial_core.h>
++/* WARNING:  Do not change this to <linux/serial.h> without testing that
++ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
++#include <asm/serial.h>
++
+ #ifndef SERIAL_PORT_DFNS
+ #define SERIAL_PORT_DFNS
+ #endif
+@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
+ 	int baud = 9600, quot = 0;
+ 	unsigned int cval = 0;
+ 	int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
+-	const struct old_serial_port *ser = rs_table + index;
++	const struct old_serial_port *ser;
+ 	int err;
+ 
++	if (index >= ARRAY_SIZE(rs_table)) {
++		pr_info("no port info for ttyS%d\n", index);
++		return NULL;
++	}
++	ser = rs_table + index;
++
+ 	/*	Divisor, bytesize and parity */
+ 	quot = ser->baud_base / baud;
+ 	cval = cflag & (CSIZE | CSTOPB);
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index 28fb3016370f..88029cc6de5e 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
+ 
+ 	if (dev) {
+ 		spin_lock_irqsave(&dev->se_tmr_lock, flags);
+-		list_del(&tmr->tmr_list);
++		list_del_init(&tmr->tmr_list);
+ 		spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+ 	}
+ 
+ 	kfree(tmr);
+ }
+ 
+-static void core_tmr_handle_tas_abort(
+-	struct se_node_acl *tmr_nacl,
+-	struct se_cmd *cmd,
+-	int tas)
++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+ {
+-	bool remove = true;
++	unsigned long flags;
++	bool remove = true, send_tas;
+ 	/*
+ 	 * TASK ABORTED status (TAS) bit support
+ 	 */
+-	if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	send_tas = (cmd->transport_state & CMD_T_TAS);
++	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
++	if (send_tas) {
+ 		remove = false;
+ 		transport_send_task_abort(cmd);
+ 	}
+@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
+ 	return 1;
+ }
+ 
++static bool __target_check_io_state(struct se_cmd *se_cmd,
++				    struct se_session *tmr_sess, int tas)
++{
++	struct se_session *sess = se_cmd->se_sess;
++
++	assert_spin_locked(&sess->sess_cmd_lock);
++	WARN_ON_ONCE(!irqs_disabled());
++	/*
++	 * If command already reached CMD_T_COMPLETE state within
++	 * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
++	 * this se_cmd has been passed to fabric driver and will
++	 * not be aborted.
++	 *
++	 * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
++	 * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
++	 * long as se_cmd->cmd_kref is still active unless zero.
++	 */
++	spin_lock(&se_cmd->t_state_lock);
++	if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
++		pr_debug("Attempted to abort io tag: %llu already complete or"
++			" fabric stop, skipping\n", se_cmd->tag);
++		spin_unlock(&se_cmd->t_state_lock);
++		return false;
++	}
++	if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
++		pr_debug("Attempted to abort io tag: %llu already shutdown,"
++			" skipping\n", se_cmd->tag);
++		spin_unlock(&se_cmd->t_state_lock);
++		return false;
++	}
++	se_cmd->transport_state |= CMD_T_ABORTED;
++
++	if ((tmr_sess != se_cmd->se_sess) && tas)
++		se_cmd->transport_state |= CMD_T_TAS;
++
++	spin_unlock(&se_cmd->t_state_lock);
++
++	return kref_get_unless_zero(&se_cmd->cmd_kref);
++}
++
+ void core_tmr_abort_task(
+ 	struct se_device *dev,
+ 	struct se_tmr_req *tmr,
+@@ -130,34 +172,22 @@ void core_tmr_abort_task(
+ 		if (tmr->ref_task_tag != ref_tag)
+ 			continue;
+ 
+-		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+-			continue;
+-
+ 		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
+ 			se_cmd->se_tfo->get_fabric_name(), ref_tag);
+ 
+-		spin_lock(&se_cmd->t_state_lock);
+-		if (se_cmd->transport_state & CMD_T_COMPLETE) {
+-			printk("ABORT_TASK: ref_tag: %llu already complete,"
+-			       " skipping\n", ref_tag);
+-			spin_unlock(&se_cmd->t_state_lock);
++		if (!__target_check_io_state(se_cmd, se_sess, 0)) {
+ 			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+-
+ 			target_put_sess_cmd(se_cmd);
+-
+ 			goto out;
+ 		}
+-		se_cmd->transport_state |= CMD_T_ABORTED;
+-		spin_unlock(&se_cmd->t_state_lock);
+-
+ 		list_del_init(&se_cmd->se_cmd_list);
+ 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ 
+ 		cancel_work_sync(&se_cmd->work);
+ 		transport_wait_for_tasks(se_cmd);
+ 
+-		target_put_sess_cmd(se_cmd);
+ 		transport_cmd_finish_abort(se_cmd, true);
++		target_put_sess_cmd(se_cmd);
+ 
+ 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+ 				" ref_tag: %llu\n", ref_tag);
+@@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list(
+ 	struct list_head *preempt_and_abort_list)
+ {
+ 	LIST_HEAD(drain_tmr_list);
++	struct se_session *sess;
+ 	struct se_tmr_req *tmr_p, *tmr_pp;
+ 	struct se_cmd *cmd;
+ 	unsigned long flags;
++	bool rc;
+ 	/*
+ 	 * Release all pending and outgoing TMRs aside from the received
+ 	 * LUN_RESET tmr..
+@@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list(
+ 		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
+ 			continue;
+ 
++		sess = cmd->se_sess;
++		if (WARN_ON_ONCE(!sess))
++			continue;
++
++		spin_lock(&sess->sess_cmd_lock);
+ 		spin_lock(&cmd->t_state_lock);
+-		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
++		if (!(cmd->transport_state & CMD_T_ACTIVE) ||
++		     (cmd->transport_state & CMD_T_FABRIC_STOP)) {
+ 			spin_unlock(&cmd->t_state_lock);
++			spin_unlock(&sess->sess_cmd_lock);
+ 			continue;
+ 		}
+ 		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+ 			spin_unlock(&cmd->t_state_lock);
++			spin_unlock(&sess->sess_cmd_lock);
+ 			continue;
+ 		}
++		if (sess->sess_tearing_down || cmd->cmd_wait_set) {
++			spin_unlock(&cmd->t_state_lock);
++			spin_unlock(&sess->sess_cmd_lock);
++			continue;
++		}
++		cmd->transport_state |= CMD_T_ABORTED;
+ 		spin_unlock(&cmd->t_state_lock);
+ 
++		rc = kref_get_unless_zero(&cmd->cmd_kref);
++		if (!rc) {
++			printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
++			spin_unlock(&sess->sess_cmd_lock);
++			continue;
++		}
++		spin_unlock(&sess->sess_cmd_lock);
++
+ 		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+ 	}
+ 	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+@@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list(
+ 			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+ 			tmr_p->function, tmr_p->response, cmd->t_state);
+ 
++		cancel_work_sync(&cmd->work);
++		transport_wait_for_tasks(cmd);
++
+ 		transport_cmd_finish_abort(cmd, 1);
++		target_put_sess_cmd(cmd);
+ 	}
+ }
+ 
+ static void core_tmr_drain_state_list(
+ 	struct se_device *dev,
+ 	struct se_cmd *prout_cmd,
+-	struct se_node_acl *tmr_nacl,
++	struct se_session *tmr_sess,
+ 	int tas,
+ 	struct list_head *preempt_and_abort_list)
+ {
+ 	LIST_HEAD(drain_task_list);
++	struct se_session *sess;
+ 	struct se_cmd *cmd, *next;
+ 	unsigned long flags;
++	int rc;
+ 
+ 	/*
+ 	 * Complete outstanding commands with TASK_ABORTED SAM status.
+@@ -282,6 +342,16 @@ static void core_tmr_drain_state_list(
+ 		if (prout_cmd == cmd)
+ 			continue;
+ 
++		sess = cmd->se_sess;
++		if (WARN_ON_ONCE(!sess))
++			continue;
++
++		spin_lock(&sess->sess_cmd_lock);
++		rc = __target_check_io_state(cmd, tmr_sess, tas);
++		spin_unlock(&sess->sess_cmd_lock);
++		if (!rc)
++			continue;
++
+ 		list_move_tail(&cmd->state_list, &drain_task_list);
+ 		cmd->state_active = false;
+ 	}
+@@ -289,7 +359,7 @@ static void core_tmr_drain_state_list(
+ 
+ 	while (!list_empty(&drain_task_list)) {
+ 		cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
+-		list_del(&cmd->state_list);
++		list_del_init(&cmd->state_list);
+ 
+ 		pr_debug("LUN_RESET: %s cmd: %p"
+ 			" ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
+@@ -313,16 +383,11 @@ static void core_tmr_drain_state_list(
+ 		 * loop above, but we do it down here given that
+ 		 * cancel_work_sync may block.
+ 		 */
+-		if (cmd->t_state == TRANSPORT_COMPLETE)
+-			cancel_work_sync(&cmd->work);
+-
+-		spin_lock_irqsave(&cmd->t_state_lock, flags);
+-		target_stop_cmd(cmd, &flags);
+-
+-		cmd->transport_state |= CMD_T_ABORTED;
+-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++		cancel_work_sync(&cmd->work);
++		transport_wait_for_tasks(cmd);
+ 
+-		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
++		core_tmr_handle_tas_abort(cmd, tas);
++		target_put_sess_cmd(cmd);
+ 	}
+ }
+ 
+@@ -334,6 +399,7 @@ int core_tmr_lun_reset(
+ {
+ 	struct se_node_acl *tmr_nacl = NULL;
+ 	struct se_portal_group *tmr_tpg = NULL;
++	struct se_session *tmr_sess = NULL;
+ 	int tas;
+         /*
+ 	 * TASK_ABORTED status bit, this is configurable via ConfigFS
+@@ -352,8 +418,9 @@ int core_tmr_lun_reset(
+ 	 * or struct se_device passthrough..
+ 	 */
+ 	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+-		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+-		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
++		tmr_sess = tmr->task_cmd->se_sess;
++		tmr_nacl = tmr_sess->se_node_acl;
++		tmr_tpg = tmr_sess->se_tpg;
+ 		if (tmr_nacl && tmr_tpg) {
+ 			pr_debug("LUN_RESET: TMR caller fabric: %s"
+ 				" initiator port %s\n",
+@@ -366,7 +433,7 @@ int core_tmr_lun_reset(
+ 		dev->transport->name, tas);
+ 
+ 	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+-	core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
++	core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
+ 				preempt_and_abort_list);
+ 
+ 	/*
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 4fdcee2006d1..94f4ffac723f 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -528,9 +528,6 @@ void transport_deregister_session(struct se_session *se_sess)
+ }
+ EXPORT_SYMBOL(transport_deregister_session);
+ 
+-/*
+- * Called with cmd->t_state_lock held.
+- */
+ static void target_remove_from_state_list(struct se_cmd *cmd)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+@@ -555,10 +552,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+ {
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&cmd->t_state_lock, flags);
+-	if (write_pending)
+-		cmd->t_state = TRANSPORT_WRITE_PENDING;
+-
+ 	if (remove_from_lists) {
+ 		target_remove_from_state_list(cmd);
+ 
+@@ -568,6 +561,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+ 		cmd->se_lun = NULL;
+ 	}
+ 
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	if (write_pending)
++		cmd->t_state = TRANSPORT_WRITE_PENDING;
++
+ 	/*
+ 	 * Determine if frontend context caller is requesting the stopping of
+ 	 * this command for frontend exceptions.
+@@ -621,6 +618,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
+ 
+ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ {
++	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
++
+ 	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+ 		transport_lun_remove_cmd(cmd);
+ 	/*
+@@ -632,7 +631,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ 
+ 	if (transport_cmd_check_stop_to_fabric(cmd))
+ 		return;
+-	if (remove)
++	if (remove && ack_kref)
+ 		transport_put_cmd(cmd);
+ }
+ 
+@@ -700,7 +699,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+ 	 * Check for case where an explicit ABORT_TASK has been received
+ 	 * and transport_wait_for_tasks() will be waiting for completion..
+ 	 */
+-	if (cmd->transport_state & CMD_T_ABORTED &&
++	if (cmd->transport_state & CMD_T_ABORTED ||
+ 	    cmd->transport_state & CMD_T_STOP) {
+ 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ 		complete_all(&cmd->t_transport_stop_comp);
+@@ -1850,19 +1849,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
+ 	return true;
+ }
+ 
++static int __transport_check_aborted_status(struct se_cmd *, int);
++
+ void target_execute_cmd(struct se_cmd *cmd)
+ {
+ 	/*
+-	 * If the received CDB has aleady been aborted stop processing it here.
+-	 */
+-	if (transport_check_aborted_status(cmd, 1))
+-		return;
+-
+-	/*
+ 	 * Determine if frontend context caller is requesting the stopping of
+ 	 * this command for frontend exceptions.
++	 *
++	 * If the received CDB has already been aborted, stop processing it here.
+ 	 */
+ 	spin_lock_irq(&cmd->t_state_lock);
++	if (__transport_check_aborted_status(cmd, 1)) {
++		spin_unlock_irq(&cmd->t_state_lock);
++		return;
++	}
+ 	if (cmd->transport_state & CMD_T_STOP) {
+ 		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+ 			__func__, __LINE__, cmd->tag);
+@@ -2213,20 +2214,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
+ }
+ 
+ /**
+- * transport_release_cmd - free a command
+- * @cmd:       command to free
++ * transport_put_cmd - release a reference to a command
++ * @cmd:       command to release
+  *
+- * This routine unconditionally frees a command, and reference counting
+- * or list removal must be done in the caller.
++ * This routine releases our reference to the command and frees it if possible.
+  */
+-static int transport_release_cmd(struct se_cmd *cmd)
++static int transport_put_cmd(struct se_cmd *cmd)
+ {
+ 	BUG_ON(!cmd->se_tfo);
+-
+-	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+-		core_tmr_release_req(cmd->se_tmr_req);
+-	if (cmd->t_task_cdb != cmd->__t_task_cdb)
+-		kfree(cmd->t_task_cdb);
+ 	/*
+ 	 * If this cmd has been setup with target_get_sess_cmd(), drop
+ 	 * the kref and call ->release_cmd() in kref callback.
+@@ -2234,18 +2229,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
+ 	return target_put_sess_cmd(cmd);
+ }
+ 
+-/**
+- * transport_put_cmd - release a reference to a command
+- * @cmd:       command to release
+- *
+- * This routine releases our reference to the command and frees it if possible.
+- */
+-static int transport_put_cmd(struct se_cmd *cmd)
+-{
+-	transport_free_pages(cmd);
+-	return transport_release_cmd(cmd);
+-}
+-
+ void *transport_kmap_data_sg(struct se_cmd *cmd)
+ {
+ 	struct scatterlist *sg = cmd->t_data_sg;
+@@ -2441,34 +2424,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
+ 	}
+ }
+ 
+-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
++static bool
++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
++			   unsigned long *flags);
++
++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
+ {
+ 	unsigned long flags;
++
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
++	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++}
++
++int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
++{
+ 	int ret = 0;
++	bool aborted = false, tas = false;
+ 
+ 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+ 		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+-			 transport_wait_for_tasks(cmd);
++			target_wait_free_cmd(cmd, &aborted, &tas);
+ 
+-		ret = transport_release_cmd(cmd);
++		if (!aborted || tas)
++			ret = transport_put_cmd(cmd);
+ 	} else {
+ 		if (wait_for_tasks)
+-			transport_wait_for_tasks(cmd);
++			target_wait_free_cmd(cmd, &aborted, &tas);
+ 		/*
+ 		 * Handle WRITE failure case where transport_generic_new_cmd()
+ 		 * has already added se_cmd to state_list, but fabric has
+ 		 * failed command before I/O submission.
+ 		 */
+-		if (cmd->state_active) {
+-			spin_lock_irqsave(&cmd->t_state_lock, flags);
++		if (cmd->state_active)
+ 			target_remove_from_state_list(cmd);
+-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+-		}
+ 
+ 		if (cmd->se_lun)
+ 			transport_lun_remove_cmd(cmd);
+ 
+-		ret = transport_put_cmd(cmd);
++		if (!aborted || tas)
++			ret = transport_put_cmd(cmd);
++	}
++	/*
++	 * If the task has been internally aborted due to TMR ABORT_TASK
++	 * or LUN_RESET, target_core_tmr.c is responsible for performing
++	 * the remaining calls to target_put_sess_cmd(), and not the
++	 * callers of this function.
++	 */
++	if (aborted) {
++		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
++		wait_for_completion(&cmd->cmd_wait_comp);
++		cmd->se_tfo->release_cmd(cmd);
++		ret = 1;
+ 	}
+ 	return ret;
+ }
+@@ -2508,26 +2515,46 @@ out:
+ }
+ EXPORT_SYMBOL(target_get_sess_cmd);
+ 
++static void target_free_cmd_mem(struct se_cmd *cmd)
++{
++	transport_free_pages(cmd);
++
++	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
++		core_tmr_release_req(cmd->se_tmr_req);
++	if (cmd->t_task_cdb != cmd->__t_task_cdb)
++		kfree(cmd->t_task_cdb);
++}
++
+ static void target_release_cmd_kref(struct kref *kref)
+ {
+ 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+ 	struct se_session *se_sess = se_cmd->se_sess;
+ 	unsigned long flags;
++	bool fabric_stop;
+ 
+ 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ 	if (list_empty(&se_cmd->se_cmd_list)) {
+ 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++		target_free_cmd_mem(se_cmd);
+ 		se_cmd->se_tfo->release_cmd(se_cmd);
+ 		return;
+ 	}
+-	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
++
++	spin_lock(&se_cmd->t_state_lock);
++	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
++	spin_unlock(&se_cmd->t_state_lock);
++
++	if (se_cmd->cmd_wait_set || fabric_stop) {
++		list_del_init(&se_cmd->se_cmd_list);
+ 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++		target_free_cmd_mem(se_cmd);
+ 		complete(&se_cmd->cmd_wait_comp);
+ 		return;
+ 	}
+-	list_del(&se_cmd->se_cmd_list);
++	list_del_init(&se_cmd->se_cmd_list);
+ 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ 
++	target_free_cmd_mem(se_cmd);
+ 	se_cmd->se_tfo->release_cmd(se_cmd);
+ }
+ 
+@@ -2539,6 +2566,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
+ 	struct se_session *se_sess = se_cmd->se_sess;
+ 
+ 	if (!se_sess) {
++		target_free_cmd_mem(se_cmd);
+ 		se_cmd->se_tfo->release_cmd(se_cmd);
+ 		return 1;
+ 	}
+@@ -2555,6 +2583,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+ {
+ 	struct se_cmd *se_cmd;
+ 	unsigned long flags;
++	int rc;
+ 
+ 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ 	if (se_sess->sess_tearing_down) {
+@@ -2564,8 +2593,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+ 	se_sess->sess_tearing_down = 1;
+ 	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+ 
+-	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+-		se_cmd->cmd_wait_set = 1;
++	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
++		rc = kref_get_unless_zero(&se_cmd->cmd_kref);
++		if (rc) {
++			se_cmd->cmd_wait_set = 1;
++			spin_lock(&se_cmd->t_state_lock);
++			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++			spin_unlock(&se_cmd->t_state_lock);
++		}
++	}
+ 
+ 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ }
+@@ -2578,15 +2614,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
+ {
+ 	struct se_cmd *se_cmd, *tmp_cmd;
+ 	unsigned long flags;
++	bool tas;
+ 
+ 	list_for_each_entry_safe(se_cmd, tmp_cmd,
+ 				&se_sess->sess_wait_list, se_cmd_list) {
+-		list_del(&se_cmd->se_cmd_list);
++		list_del_init(&se_cmd->se_cmd_list);
+ 
+ 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+ 			" %d\n", se_cmd, se_cmd->t_state,
+ 			se_cmd->se_tfo->get_cmd_state(se_cmd));
+ 
++		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
++		tas = (se_cmd->transport_state & CMD_T_TAS);
++		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
++
++		if (!target_put_sess_cmd(se_cmd)) {
++			if (tas)
++				target_put_sess_cmd(se_cmd);
++		}
++
+ 		wait_for_completion(&se_cmd->cmd_wait_comp);
+ 		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+ 			" fabric state: %d\n", se_cmd, se_cmd->t_state,
+@@ -2608,53 +2654,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
+ 	wait_for_completion(&lun->lun_ref_comp);
+ }
+ 
+-/**
+- * transport_wait_for_tasks - wait for completion to occur
+- * @cmd:	command to wait
+- *
+- * Called from frontend fabric context to wait for storage engine
+- * to pause and/or release frontend generated struct se_cmd.
+- */
+-bool transport_wait_for_tasks(struct se_cmd *cmd)
++static bool
++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
++			   bool *aborted, bool *tas, unsigned long *flags)
++	__releases(&cmd->t_state_lock)
++	__acquires(&cmd->t_state_lock)
+ {
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	assert_spin_locked(&cmd->t_state_lock);
++	WARN_ON_ONCE(!irqs_disabled());
++
++	if (fabric_stop)
++		cmd->transport_state |= CMD_T_FABRIC_STOP;
++
++	if (cmd->transport_state & CMD_T_ABORTED)
++		*aborted = true;
++
++	if (cmd->transport_state & CMD_T_TAS)
++		*tas = true;
++
+ 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+-	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ 		return false;
+-	}
+ 
+ 	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+-	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ 		return false;
+-	}
+ 
+-	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++	if (!(cmd->transport_state & CMD_T_ACTIVE))
++		return false;
++
++	if (fabric_stop && *aborted)
+ 		return false;
+-	}
+ 
+ 	cmd->transport_state |= CMD_T_STOP;
+ 
+-	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
+-		cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
++	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
++		 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
++		 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+ 
+-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+ 
+ 	wait_for_completion(&cmd->t_transport_stop_comp);
+ 
+-	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	spin_lock_irqsave(&cmd->t_state_lock, *flags);
+ 	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+ 
+-	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
+-		cmd->tag);
++	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
++		 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
++
++	return true;
++}
+ 
++/**
++ * transport_wait_for_tasks - wait for completion to occur
++ * @cmd:	command to wait
++ *
++ * Called from frontend fabric context to wait for storage engine
++ * to pause and/or release frontend generated struct se_cmd.
++ */
++bool transport_wait_for_tasks(struct se_cmd *cmd)
++{
++	unsigned long flags;
++	bool ret, aborted = false, tas = false;
++
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
+ 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ 
+-	return true;
++	return ret;
+ }
+ EXPORT_SYMBOL(transport_wait_for_tasks);
+ 
+@@ -2836,28 +2904,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
+ }
+ EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+ 
+-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++	__releases(&cmd->t_state_lock)
++	__acquires(&cmd->t_state_lock)
+ {
++	assert_spin_locked(&cmd->t_state_lock);
++	WARN_ON_ONCE(!irqs_disabled());
++
+ 	if (!(cmd->transport_state & CMD_T_ABORTED))
+ 		return 0;
+-
+ 	/*
+ 	 * If cmd has been aborted but either no status is to be sent or it has
+ 	 * already been sent, just return
+ 	 */
+-	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
++	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
++		if (send_status)
++			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+ 		return 1;
++	}
+ 
+-	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
+-		 cmd->t_task_cdb[0], cmd->tag);
++	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
++		" 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
+ 
+ 	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
+ 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ 	trace_target_cmd_complete(cmd);
++
++	spin_unlock_irq(&cmd->t_state_lock);
+ 	cmd->se_tfo->queue_status(cmd);
++	spin_lock_irq(&cmd->t_state_lock);
+ 
+ 	return 1;
+ }
++
++int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++{
++	int ret;
++
++	spin_lock_irq(&cmd->t_state_lock);
++	ret = __transport_check_aborted_status(cmd, send_status);
++	spin_unlock_irq(&cmd->t_state_lock);
++
++	return ret;
++}
+ EXPORT_SYMBOL(transport_check_aborted_status);
+ 
+ void transport_send_task_abort(struct se_cmd *cmd)
+@@ -2879,11 +2968,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
+ 	 */
+ 	if (cmd->data_direction == DMA_TO_DEVICE) {
+ 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+-			cmd->transport_state |= CMD_T_ABORTED;
++			spin_lock_irqsave(&cmd->t_state_lock, flags);
++			if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
++				spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++				goto send_abort;
++			}
+ 			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
++			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ 			return;
+ 		}
+ 	}
++send_abort:
+ 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ 
+ 	transport_lun_remove_cmd(cmd);
+@@ -2900,8 +2995,17 @@ static void target_tmr_work(struct work_struct *work)
+ 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+ 	struct se_device *dev = cmd->se_dev;
+ 	struct se_tmr_req *tmr = cmd->se_tmr_req;
++	unsigned long flags;
+ 	int ret;
+ 
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	if (cmd->transport_state & CMD_T_ABORTED) {
++		tmr->response = TMR_FUNCTION_REJECTED;
++		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++		goto check_stop;
++	}
++	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+ 	switch (tmr->function) {
+ 	case TMR_ABORT_TASK:
+ 		core_tmr_abort_task(dev, tmr, cmd->se_sess);
+@@ -2934,9 +3038,17 @@ static void target_tmr_work(struct work_struct *work)
+ 		break;
+ 	}
+ 
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	if (cmd->transport_state & CMD_T_ABORTED) {
++		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++		goto check_stop;
++	}
+ 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
++	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+ 	cmd->se_tfo->queue_tm_rsp(cmd);
+ 
++check_stop:
+ 	transport_cmd_check_stop_to_fabric(cmd);
+ }
+ 
+diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
+index 2f9f7086ac3d..ea9366ad3e6b 100644
+--- a/drivers/thermal/step_wise.c
++++ b/drivers/thermal/step_wise.c
+@@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
+ 	next_target = instance->target;
+ 	dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
+ 
++	if (!instance->initialized) {
++		if (throttle) {
++			next_target = (cur_state + 1) >= instance->upper ?
++					instance->upper :
++					((cur_state + 1) < instance->lower ?
++					instance->lower : (cur_state + 1));
++		} else {
++			next_target = THERMAL_NO_TARGET;
++		}
++
++		return next_target;
++	}
++
+ 	switch (trend) {
+ 	case THERMAL_TREND_RAISING:
+ 		if (throttle) {
+@@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+ 		dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
+ 					old_target, (int)instance->target);
+ 
+-		if (old_target == instance->target)
++		if (instance->initialized && old_target == instance->target)
+ 			continue;
+ 
+ 		/* Activate a passive thermal instance */
+@@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+ 			instance->target == THERMAL_NO_TARGET)
+ 			update_passive_instance(tz, trip_type, -1);
+ 
+-
++		instance->initialized = true;
+ 		instance->cdev->updated = false; /* cdev needs update */
+ 	}
+ 
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index d9e525cc9c1c..ba08b5521382 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -37,6 +37,7 @@
+ #include <linux/of.h>
+ #include <net/netlink.h>
+ #include <net/genetlink.h>
++#include <linux/suspend.h>
+ 
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/thermal.h>
+@@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
+ static DEFINE_MUTEX(thermal_list_lock);
+ static DEFINE_MUTEX(thermal_governor_lock);
+ 
++static atomic_t in_suspend;
++
+ static struct thermal_governor *def_governor;
+ 
+ static struct thermal_governor *__find_governor(const char *name)
+@@ -532,14 +535,31 @@ static void update_temperature(struct thermal_zone_device *tz)
+ 	mutex_unlock(&tz->lock);
+ 
+ 	trace_thermal_temperature(tz);
+-	dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+-				tz->last_temperature, tz->temperature);
++	if (tz->last_temperature == THERMAL_TEMP_INVALID)
++		dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
++			tz->temperature);
++	else
++		dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
++			tz->last_temperature, tz->temperature);
++}
++
++static void thermal_zone_device_reset(struct thermal_zone_device *tz)
++{
++	struct thermal_instance *pos;
++
++	tz->temperature = THERMAL_TEMP_INVALID;
++	tz->passive = 0;
++	list_for_each_entry(pos, &tz->thermal_instances, tz_node)
++		pos->initialized = false;
+ }
+ 
+ void thermal_zone_device_update(struct thermal_zone_device *tz)
+ {
+ 	int count;
+ 
++	if (atomic_read(&in_suspend))
++		return;
++
+ 	if (!tz->ops->get_temp)
+ 		return;
+ 
+@@ -1321,6 +1341,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ 	if (!result) {
+ 		list_add_tail(&dev->tz_node, &tz->thermal_instances);
+ 		list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
++		atomic_set(&tz->need_update, 1);
+ 	}
+ 	mutex_unlock(&cdev->lock);
+ 	mutex_unlock(&tz->lock);
+@@ -1430,6 +1451,7 @@ __thermal_cooling_device_register(struct device_node *np,
+ 				  const struct thermal_cooling_device_ops *ops)
+ {
+ 	struct thermal_cooling_device *cdev;
++	struct thermal_zone_device *pos = NULL;
+ 	int result;
+ 
+ 	if (type && strlen(type) >= THERMAL_NAME_LENGTH)
+@@ -1474,6 +1496,12 @@ __thermal_cooling_device_register(struct device_node *np,
+ 	/* Update binding information for 'this' new cdev */
+ 	bind_cdev(cdev);
+ 
++	mutex_lock(&thermal_list_lock);
++	list_for_each_entry(pos, &thermal_tz_list, node)
++		if (atomic_cmpxchg(&pos->need_update, 1, 0))
++			thermal_zone_device_update(pos);
++	mutex_unlock(&thermal_list_lock);
++
+ 	return cdev;
+ }
+ 
+@@ -1806,6 +1834,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ 	tz->trips = trips;
+ 	tz->passive_delay = passive_delay;
+ 	tz->polling_delay = polling_delay;
++	/* A new thermal zone needs to be updated anyway. */
++	atomic_set(&tz->need_update, 1);
+ 
+ 	dev_set_name(&tz->device, "thermal_zone%d", tz->id);
+ 	result = device_register(&tz->device);
+@@ -1900,7 +1930,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ 
+ 	INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
+ 
+-	thermal_zone_device_update(tz);
++	thermal_zone_device_reset(tz);
++	/* Update the new thermal zone and mark it as already updated. */
++	if (atomic_cmpxchg(&tz->need_update, 1, 0))
++		thermal_zone_device_update(tz);
+ 
+ 	return tz;
+ 
+@@ -2140,6 +2173,36 @@ static void thermal_unregister_governors(void)
+ 	thermal_gov_power_allocator_unregister();
+ }
+ 
++static int thermal_pm_notify(struct notifier_block *nb,
++				unsigned long mode, void *_unused)
++{
++	struct thermal_zone_device *tz;
++
++	switch (mode) {
++	case PM_HIBERNATION_PREPARE:
++	case PM_RESTORE_PREPARE:
++	case PM_SUSPEND_PREPARE:
++		atomic_set(&in_suspend, 1);
++		break;
++	case PM_POST_HIBERNATION:
++	case PM_POST_RESTORE:
++	case PM_POST_SUSPEND:
++		atomic_set(&in_suspend, 0);
++		list_for_each_entry(tz, &thermal_tz_list, node) {
++			thermal_zone_device_reset(tz);
++			thermal_zone_device_update(tz);
++		}
++		break;
++	default:
++		break;
++	}
++	return 0;
++}
++
++static struct notifier_block thermal_pm_nb = {
++	.notifier_call = thermal_pm_notify,
++};
++
+ static int __init thermal_init(void)
+ {
+ 	int result;
+@@ -2160,6 +2223,11 @@ static int __init thermal_init(void)
+ 	if (result)
+ 		goto exit_netlink;
+ 
++	result = register_pm_notifier(&thermal_pm_nb);
++	if (result)
++		pr_warn("Thermal: Can not register suspend notifier, return %d\n",
++			result);
++
+ 	return 0;
+ 
+ exit_netlink:
+@@ -2179,6 +2247,7 @@ error:
+ 
+ static void __exit thermal_exit(void)
+ {
++	unregister_pm_notifier(&thermal_pm_nb);
+ 	of_thermal_destroy_zones();
+ 	genetlink_exit();
+ 	class_unregister(&thermal_class);
+diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
+index d7ac1fccd659..749d41abfbab 100644
+--- a/drivers/thermal/thermal_core.h
++++ b/drivers/thermal/thermal_core.h
+@@ -41,6 +41,7 @@ struct thermal_instance {
+ 	struct thermal_zone_device *tz;
+ 	struct thermal_cooling_device *cdev;
+ 	int trip;
++	bool initialized;
+ 	unsigned long upper;	/* Highest cooling state for this trip point */
+ 	unsigned long lower;	/* Lowest cooling state for this trip point */
+ 	unsigned long target;	/* expected cooling state */
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e4c70dce3e7c..fa4e23930614 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1841,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = {
+ 	},
+ #endif
+ 
++	/* Samsung phone in firmware update mode */
++	{ USB_DEVICE(0x04e8, 0x685d),
++	.driver_info = IGNORE_DEVICE,
++	},
++
+ 	/* Exclude Infineon Flash Loader utility */
+ 	{ USB_DEVICE(0x058b, 0x0041),
+ 	.driver_info = IGNORE_DEVICE,
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 36f1cb74588c..78be201d81f4 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -853,7 +853,6 @@ struct dwc3 {
+ 	unsigned		pullups_connected:1;
+ 	unsigned		resize_fifos:1;
+ 	unsigned		setup_packet_pending:1;
+-	unsigned		start_config_issued:1;
+ 	unsigned		three_stage_setup:1;
+ 	unsigned		usb3_lpm_capable:1;
+ 
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 5320e939e090..b13912d5fa99 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ 	int ret;
+ 	u32 reg;
+ 
+-	dwc->start_config_issued = false;
+ 	cfg = le16_to_cpu(ctrl->wValue);
+ 
+ 	switch (state) {
+@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ 		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
+ 		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
+ 		break;
+-	case USB_REQ_SET_INTERFACE:
+-		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
+-		dwc->start_config_issued = false;
+-		/* Fall through */
+ 	default:
+ 		dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
+ 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index a58376fd65fe..69ffe6e8d77f 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
+ 	dep->trb_pool_dma = 0;
+ }
+ 
++static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
++
++/**
++ * dwc3_gadget_start_config - Configure EP resources
++ * @dwc: pointer to our controller context structure
++ * @dep: endpoint that is being enabled
++ *
++ * The assignment of transfer resources cannot perfectly follow the
++ * data book due to the fact that the controller driver does not have
++ * all knowledge of the configuration in advance. It is given this
++ * information piecemeal by the composite gadget framework after every
++ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
++ * programming model in this scenario can cause errors. For two
++ * reasons:
++ *
++ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
++ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
++ * multiple interfaces.
++ *
++ * 2) The databook does not mention doing more DEPXFERCFG for new
++ * endpoint on alt setting (8.1.6).
++ *
++ * The following simplified method is used instead:
++ *
++ * All hardware endpoints can be assigned a transfer resource and this
++ * setting will stay persistent until either a core reset or
++ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
++ * do DEPXFERCFG for every hardware endpoint as well. We are
++ * guaranteed that there are as many transfer resources as endpoints.
++ *
++ * This function is called for each endpoint when it is being enabled
++ * but is triggered only when called for EP0-out, which always happens
++ * first, and which should only happen in one of the above conditions.
++ */
+ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
+ {
+ 	struct dwc3_gadget_ep_cmd_params params;
+ 	u32			cmd;
++	int			i;
++	int			ret;
++
++	if (dep->number)
++		return 0;
+ 
+ 	memset(&params, 0x00, sizeof(params));
++	cmd = DWC3_DEPCMD_DEPSTARTCFG;
+ 
+-	if (dep->number != 1) {
+-		cmd = DWC3_DEPCMD_DEPSTARTCFG;
+-		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
+-		if (dep->number > 1) {
+-			if (dwc->start_config_issued)
+-				return 0;
+-			dwc->start_config_issued = true;
+-			cmd |= DWC3_DEPCMD_PARAM(2);
+-		}
++	ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
++	if (ret)
++		return ret;
+ 
+-		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
++	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
++		struct dwc3_ep *dep = dwc->eps[i];
++
++		if (!dep)
++			continue;
++
++		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	return 0;
+@@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
+ 		struct dwc3_trb	*trb_st_hw;
+ 		struct dwc3_trb	*trb_link;
+ 
+-		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+-		if (ret)
+-			return ret;
+-
+ 		dep->endpoint.desc = desc;
+ 		dep->comp_desc = comp_desc;
+ 		dep->type = usb_endpoint_type(desc);
+@@ -1604,8 +1642,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
+ 	}
+ 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ 
+-	dwc->start_config_issued = false;
+-
+ 	/* Start with SuperSpeed Default */
+ 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ 
+@@ -2202,7 +2238,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
+ 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ 
+ 	dwc3_disconnect_gadget(dwc);
+-	dwc->start_config_issued = false;
+ 
+ 	dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ 	dwc->setup_packet_pending = false;
+@@ -2253,7 +2288,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+ 
+ 	dwc3_stop_active_transfers(dwc);
+ 	dwc3_clear_stall_all_ep(dwc);
+-	dwc->start_config_issued = false;
+ 
+ 	/* Reset device address to zero */
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 1dd9919081f8..a7caf53d8b5e 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
++	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
++	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ 	{ USB_DEVICE(0x1BA4, 0x0002) },	/* Silicon Labs 358x factory default */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index db86e512e0fc..8849439a8f18 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TOSHIBA_PRODUCT_G450			0x0d45
+ 
+ #define ALINK_VENDOR_ID				0x1e0e
++#define SIMCOM_PRODUCT_SIM7100E			0x9001 /* Yes, ALINK_VENDOR_ID */
+ #define ALINK_PRODUCT_PH300			0x9100
+ #define ALINK_PRODUCT_3GU			0x9200
+ 
+@@ -607,6 +608,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ 	.reserved = BIT(3) | BIT(4),
+ };
+ 
++static const struct option_blacklist_info simcom_sim7100e_blacklist = {
++	.reserved = BIT(5) | BIT(6),
++};
++
+ static const struct option_blacklist_info telit_le910_blacklist = {
+ 	.sendsetup = BIT(0),
+ 	.reserved = BIT(1) | BIT(2),
+@@ -1122,6 +1127,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
++	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+ 	{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
++	  .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ 	  .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ 	},
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 7efc32945810..7d3e5d0e9aa4 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
+ 	 */
+ 	if (vb->num_pfns != 0)
+ 		tell_host(vb, vb->deflate_vq);
+-	mutex_unlock(&vb->balloon_lock);
+ 	release_pages_balloon(vb);
++	mutex_unlock(&vb->balloon_lock);
+ 	return num_freed_pages;
+ }
+ 
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index 78f804af6c20..2046a68ad0ba 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -545,6 +545,7 @@ err_enable_device:
+ static void virtio_pci_remove(struct pci_dev *pci_dev)
+ {
+ 	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
++	struct device *dev = get_device(&vp_dev->vdev.dev);
+ 
+ 	unregister_virtio_device(&vp_dev->vdev);
+ 
+@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
+ 		virtio_pci_modern_remove(vp_dev);
+ 
+ 	pci_disable_device(pci_dev);
++	put_device(dev);
+ }
+ 
+ static struct pci_driver virtio_pci_driver = {
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index 73dafdc494aa..fb0221434f81 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
+ 	/*
+ 	 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
+ 	 * to access the BARs where the MSI-X entries reside.
++	 * But VF devices are unique in that the PF needs to be checked.
+ 	 */
+-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
++	pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
+ 	if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
+ 		return -ENXIO;
+ 
+@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
+ 	struct xen_pcibk_dev_data *dev_data = NULL;
+ 	struct xen_pci_op *op = &pdev->op;
+ 	int test_intx = 0;
++#ifdef CONFIG_PCI_MSI
++	unsigned int nr = 0;
++#endif
+ 
+ 	*op = pdev->sh_info->op;
+ 	barrier();
+@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
+ 			op->err = xen_pcibk_disable_msi(pdev, dev, op);
+ 			break;
+ 		case XEN_PCI_OP_enable_msix:
++			nr = op->value;
+ 			op->err = xen_pcibk_enable_msix(pdev, dev, op);
+ 			break;
+ 		case XEN_PCI_OP_disable_msix:
+@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
+ 	if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
+ 		unsigned int i;
+ 
+-		for (i = 0; i < op->value; i++)
++		for (i = 0; i < nr; i++)
+ 			pdev->sh_info->op.msix_entries[i].vector =
+ 				op->msix_entries[i].vector;
+ 	}
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index ad4eb1024d1f..51387d75c7bf 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -939,12 +939,12 @@ out:
+ 	spin_unlock_irqrestore(&info->v2p_lock, flags);
+ 
+ out_free:
+-	mutex_lock(&tpg->tv_tpg_mutex);
+-	tpg->tv_tpg_fe_count--;
+-	mutex_unlock(&tpg->tv_tpg_mutex);
+-
+-	if (err)
++	if (err) {
++		mutex_lock(&tpg->tv_tpg_mutex);
++		tpg->tv_tpg_fe_count--;
++		mutex_unlock(&tpg->tv_tpg_mutex);
+ 		kfree(new);
++	}
+ 
+ 	return err;
+ }
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 0ddca6734494..4958360a44f7 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1582,8 +1582,23 @@ int btrfs_init_fs_root(struct btrfs_root *root)
+ 	ret = get_anon_bdev(&root->anon_dev);
+ 	if (ret)
+ 		goto free_writers;
++
++	mutex_lock(&root->objectid_mutex);
++	ret = btrfs_find_highest_objectid(root,
++					&root->highest_objectid);
++	if (ret) {
++		mutex_unlock(&root->objectid_mutex);
++		goto free_root_dev;
++	}
++
++	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
++
++	mutex_unlock(&root->objectid_mutex);
++
+ 	return 0;
+ 
++free_root_dev:
++	free_anon_bdev(root->anon_dev);
+ free_writers:
+ 	btrfs_free_subvolume_writers(root->subv_writers);
+ fail:
+@@ -2667,6 +2682,7 @@ int open_ctree(struct super_block *sb,
+ 	if (btrfs_check_super_csum(bh->b_data)) {
+ 		printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
+ 		err = -EINVAL;
++		brelse(bh);
+ 		goto fail_alloc;
+ 	}
+ 
+@@ -2899,6 +2915,18 @@ retry_root_backup:
+ 	tree_root->commit_root = btrfs_root_node(tree_root);
+ 	btrfs_set_root_refs(&tree_root->root_item, 1);
+ 
++	mutex_lock(&tree_root->objectid_mutex);
++	ret = btrfs_find_highest_objectid(tree_root,
++					&tree_root->highest_objectid);
++	if (ret) {
++		mutex_unlock(&tree_root->objectid_mutex);
++		goto recovery_tree_root;
++	}
++
++	ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
++
++	mutex_unlock(&tree_root->objectid_mutex);
++
+ 	ret = btrfs_read_roots(fs_info, tree_root);
+ 	if (ret)
+ 		goto recovery_tree_root;
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index 767a6056ac45..07573dc1614a 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -515,7 +515,7 @@ out:
+ 	return ret;
+ }
+ 
+-static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
++int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
+ {
+ 	struct btrfs_path *path;
+ 	int ret;
+@@ -555,13 +555,6 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
+ 	int ret;
+ 	mutex_lock(&root->objectid_mutex);
+ 
+-	if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
+-		ret = btrfs_find_highest_objectid(root,
+-						  &root->highest_objectid);
+-		if (ret)
+-			goto out;
+-	}
+-
+ 	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
+ 		ret = -ENOSPC;
+ 		goto out;
+diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
+index ddb347bfee23..c8e864b2d530 100644
+--- a/fs/btrfs/inode-map.h
++++ b/fs/btrfs/inode-map.h
+@@ -9,5 +9,6 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
+ 			 struct btrfs_trans_handle *trans);
+ 
+ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
++int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
+ 
+ #endif
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 54b5f0de623b..52fc1b5e9f03 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -6493,7 +6493,7 @@ out_unlock_inode:
+ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ 		      struct dentry *dentry)
+ {
+-	struct btrfs_trans_handle *trans;
++	struct btrfs_trans_handle *trans = NULL;
+ 	struct btrfs_root *root = BTRFS_I(dir)->root;
+ 	struct inode *inode = d_inode(old_dentry);
+ 	u64 index;
+@@ -6519,6 +6519,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ 	trans = btrfs_start_transaction(root, 5);
+ 	if (IS_ERR(trans)) {
+ 		err = PTR_ERR(trans);
++		trans = NULL;
+ 		goto fail;
+ 	}
+ 
+@@ -6552,9 +6553,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ 		btrfs_log_new_name(trans, inode, NULL, parent);
+ 	}
+ 
+-	btrfs_end_transaction(trans, root);
+ 	btrfs_balance_delayed_items(root);
+ fail:
++	if (trans)
++		btrfs_end_transaction(trans, root);
+ 	if (drop_inode) {
+ 		inode_dec_link_count(inode);
+ 		iput(inode);
+@@ -8548,15 +8550,28 @@ int btrfs_readpage(struct file *file, struct page *page)
+ static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
+ {
+ 	struct extent_io_tree *tree;
+-
++	struct inode *inode = page->mapping->host;
++	int ret;
+ 
+ 	if (current->flags & PF_MEMALLOC) {
+ 		redirty_page_for_writepage(wbc, page);
+ 		unlock_page(page);
+ 		return 0;
+ 	}
++
++	/*
++	 * If we are under memory pressure we will call this directly from the
++	 * VM, we need to make sure we have the inode referenced for the ordered
++	 * extent.  If not, just return as if we didn't do anything.
++	 */
++	if (!igrab(inode)) {
++		redirty_page_for_writepage(wbc, page);
++		return AOP_WRITEPAGE_ACTIVATE;
++	}
+ 	tree = &BTRFS_I(page->mapping->host)->io_tree;
+-	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
++	ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
++	btrfs_add_delayed_iput(inode);
++	return ret;
+ }
+ 
+ static int btrfs_writepages(struct address_space *mapping,
+@@ -9650,9 +9665,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
+ 	/*
+ 	 * 2 items for inode item and ref
+ 	 * 2 items for dir items
++	 * 1 item for updating parent inode item
++	 * 1 item for the inline extent item
+ 	 * 1 item for xattr if selinux is on
+ 	 */
+-	trans = btrfs_start_transaction(root, 5);
++	trans = btrfs_start_transaction(root, 7);
+ 	if (IS_ERR(trans))
+ 		return PTR_ERR(trans);
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 08fd3f0f34fd..f07d01bc4875 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -568,6 +568,10 @@ static noinline int create_subvol(struct inode *dir,
+ 		goto fail;
+ 	}
+ 
++	mutex_lock(&new_root->objectid_mutex);
++	new_root->highest_objectid = new_dirid;
++	mutex_unlock(&new_root->objectid_mutex);
++
+ 	/*
+ 	 * insert the directory item
+ 	 */
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 355a458cba1a..63a6152be04b 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1469,7 +1469,21 @@ static int read_symlink(struct btrfs_root *root,
+ 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ 	if (ret < 0)
+ 		goto out;
+-	BUG_ON(ret);
++	if (ret) {
++		/*
++		 * An empty symlink inode. Can happen in rare error paths when
++		 * creating a symlink (transaction committed before the inode
++		 * eviction handler removed the symlink inode items and a crash
++		 * happened in between, or the subvol was snapshotted in between).
++		 * Print an informative message to dmesg/syslog so that the user
++		 * can delete the symlink.
++		 */
++		btrfs_err(root->fs_info,
++			  "Found empty symlink inode %llu at root %llu",
++			  ino, root->root_key.objectid);
++		ret = -EIO;
++		goto out;
++	}
+ 
+ 	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ 			struct btrfs_file_extent_item);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 24154e422945..fe609b81dd1b 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1956,6 +1956,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
+  * there are other factors that may change the result (like a new metadata
+  * chunk).
+  *
++ * If metadata is exhausted, f_bavail will be 0.
++ *
+  * FIXME: not accurate for mixed block groups, total and free/used are ok,
+  * available appears slightly larger.
+  */
+@@ -1967,11 +1969,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	struct btrfs_space_info *found;
+ 	u64 total_used = 0;
+ 	u64 total_free_data = 0;
++	u64 total_free_meta = 0;
+ 	int bits = dentry->d_sb->s_blocksize_bits;
+ 	__be32 *fsid = (__be32 *)fs_info->fsid;
+ 	unsigned factor = 1;
+ 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
+ 	int ret;
++	u64 thresh = 0;
+ 
+ 	/*
+ 	 * holding chunk_muext to avoid allocating new chunks, holding
+@@ -1997,6 +2001,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 				}
+ 			}
+ 		}
++		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
++			total_free_meta += found->disk_total - found->disk_used;
+ 
+ 		total_used += found->disk_used;
+ 	}
+@@ -2019,6 +2025,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	buf->f_bavail += div_u64(total_free_data, factor);
+ 	buf->f_bavail = buf->f_bavail >> bits;
+ 
++	/*
++	 * We calculate the remaining metadata space minus global reserve. If
++	 * this is (supposedly) smaller than zero, there's no space. But this
++	 * does not hold in practice; the exhausted state happens while there's still
++	 * some positive delta. So we apply some guesswork and compare the
++	 * delta to a 4M threshold.  (Practically observed delta was ~2M.)
++	 *
++	 * We probably cannot calculate the exact threshold value because this
++	 * depends on the internal reservations requested by various
++	 * operations, so some operations that consume a few metadata will
++	 * succeed even if the Avail is zero. But this is better than the other
++	 * way around.
++	 */
++	thresh = 4 * 1024 * 1024;
++
++	if (total_free_meta - thresh < block_rsv->size)
++		buf->f_bavail = 0;
++
+ 	buf->f_type = BTRFS_SUPER_MAGIC;
+ 	buf->f_bsize = dentry->d_sb->s_blocksize;
+ 	buf->f_namelen = BTRFS_NAME_LEN;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 9e084477d320..9c62a6f9757a 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -232,6 +232,7 @@ static struct btrfs_device *__alloc_device(void)
+ 	spin_lock_init(&dev->reada_lock);
+ 	atomic_set(&dev->reada_in_flight, 0);
+ 	atomic_set(&dev->dev_stats_ccnt, 0);
++	btrfs_device_data_ordered_init(dev);
+ 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+ 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+ 
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index 602e8441bc0f..01171d8a6ee9 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
+ 		dio->io_error = -EIO;
+ 
+ 	if (dio->is_async && dio->rw == READ && dio->should_dirty) {
+-		bio_check_pages_dirty(bio);	/* transfers ownership */
+ 		err = bio->bi_error;
++		bio_check_pages_dirty(bio);	/* transfers ownership */
+ 	} else {
+ 		bio_for_each_segment_all(bvec, bio, i) {
+ 			struct page *page = bvec->bv_page;
+diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
+index 90001da9abfd..66842e55c48c 100644
+--- a/fs/efivarfs/file.c
++++ b/fs/efivarfs/file.c
+@@ -10,6 +10,7 @@
+ #include <linux/efi.h>
+ #include <linux/fs.h>
+ #include <linux/slab.h>
++#include <linux/mount.h>
+ 
+ #include "internal.h"
+ 
+@@ -103,9 +104,78 @@ out_free:
+ 	return size;
+ }
+ 
++static int
++efivarfs_ioc_getxflags(struct file *file, void __user *arg)
++{
++	struct inode *inode = file->f_mapping->host;
++	unsigned int i_flags;
++	unsigned int flags = 0;
++
++	i_flags = inode->i_flags;
++	if (i_flags & S_IMMUTABLE)
++		flags |= FS_IMMUTABLE_FL;
++
++	if (copy_to_user(arg, &flags, sizeof(flags)))
++		return -EFAULT;
++	return 0;
++}
++
++static int
++efivarfs_ioc_setxflags(struct file *file, void __user *arg)
++{
++	struct inode *inode = file->f_mapping->host;
++	unsigned int flags;
++	unsigned int i_flags = 0;
++	int error;
++
++	if (!inode_owner_or_capable(inode))
++		return -EACCES;
++
++	if (copy_from_user(&flags, arg, sizeof(flags)))
++		return -EFAULT;
++
++	if (flags & ~FS_IMMUTABLE_FL)
++		return -EOPNOTSUPP;
++
++	if (!capable(CAP_LINUX_IMMUTABLE))
++		return -EPERM;
++
++	if (flags & FS_IMMUTABLE_FL)
++		i_flags |= S_IMMUTABLE;
++
++
++	error = mnt_want_write_file(file);
++	if (error)
++		return error;
++
++	mutex_lock(&inode->i_mutex);
++	inode_set_flags(inode, i_flags, S_IMMUTABLE);
++	mutex_unlock(&inode->i_mutex);
++
++	mnt_drop_write_file(file);
++
++	return 0;
++}
++
++long
++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
++{
++	void __user *arg = (void __user *)p;
++
++	switch (cmd) {
++	case FS_IOC_GETFLAGS:
++		return efivarfs_ioc_getxflags(file, arg);
++	case FS_IOC_SETFLAGS:
++		return efivarfs_ioc_setxflags(file, arg);
++	}
++
++	return -ENOTTY;
++}
++
+ const struct file_operations efivarfs_file_operations = {
+ 	.open	= simple_open,
+ 	.read	= efivarfs_file_read,
+ 	.write	= efivarfs_file_write,
+ 	.llseek	= no_llseek,
++	.unlocked_ioctl = efivarfs_file_ioctl,
+ };
+diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
+index 3381b9da9ee6..e2ab6d0497f2 100644
+--- a/fs/efivarfs/inode.c
++++ b/fs/efivarfs/inode.c
+@@ -15,7 +15,8 @@
+ #include "internal.h"
+ 
+ struct inode *efivarfs_get_inode(struct super_block *sb,
+-				const struct inode *dir, int mode, dev_t dev)
++				const struct inode *dir, int mode,
++				dev_t dev, bool is_removable)
+ {
+ 	struct inode *inode = new_inode(sb);
+ 
+@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
+ 		inode->i_ino = get_next_ino();
+ 		inode->i_mode = mode;
+ 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
++		inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
+ 		switch (mode & S_IFMT) {
+ 		case S_IFREG:
+ 			inode->i_fop = &efivarfs_file_operations;
+@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
+ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ 			  umode_t mode, bool excl)
+ {
+-	struct inode *inode;
++	struct inode *inode = NULL;
+ 	struct efivar_entry *var;
+ 	int namelen, i = 0, err = 0;
++	bool is_removable = false;
+ 
+ 	if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
+ 		return -EINVAL;
+ 
+-	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
+-	if (!inode)
+-		return -ENOMEM;
+-
+ 	var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
+-	if (!var) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
++	if (!var)
++		return -ENOMEM;
+ 
+ 	/* length of the variable name itself: remove GUID and separator */
+ 	namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
+@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ 	efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
+ 			&var->var.VendorGuid);
+ 
++	if (efivar_variable_is_removable(var->var.VendorGuid,
++					 dentry->d_name.name, namelen))
++		is_removable = true;
++
++	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
++	if (!inode) {
++		err = -ENOMEM;
++		goto out;
++	}
++
+ 	for (i = 0; i < namelen; i++)
+ 		var->var.VariableName[i] = dentry->d_name.name[i];
+ 
+@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ out:
+ 	if (err) {
+ 		kfree(var);
+-		iput(inode);
++		if (inode)
++			iput(inode);
+ 	}
+ 	return err;
+ }
+diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
+index b5ff16addb7c..b4505188e799 100644
+--- a/fs/efivarfs/internal.h
++++ b/fs/efivarfs/internal.h
+@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
+ extern const struct inode_operations efivarfs_dir_inode_operations;
+ extern bool efivarfs_valid_name(const char *str, int len);
+ extern struct inode *efivarfs_get_inode(struct super_block *sb,
+-			const struct inode *dir, int mode, dev_t dev);
++			const struct inode *dir, int mode, dev_t dev,
++			bool is_removable);
+ 
+ extern struct list_head efivarfs_list;
+ 
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index 86a2121828c3..abb244b06024 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ 	struct dentry *dentry, *root = sb->s_root;
+ 	unsigned long size = 0;
+ 	char *name;
+-	int len, i;
++	int len;
+ 	int err = -ENOMEM;
++	bool is_removable = false;
+ 
+ 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ 	if (!entry)
+@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ 	memcpy(entry->var.VariableName, name16, name_size);
+ 	memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+ 
+-	len = ucs2_strlen(entry->var.VariableName);
++	len = ucs2_utf8size(entry->var.VariableName);
+ 
+ 	/* name, plus '-', plus GUID, plus NUL*/
+ 	name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
+ 	if (!name)
+ 		goto fail;
+ 
+-	for (i = 0; i < len; i++)
+-		name[i] = entry->var.VariableName[i] & 0xFF;
++	ucs2_as_utf8(name, entry->var.VariableName, len);
++
++	if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
++		is_removable = true;
+ 
+ 	name[len] = '-';
+ 
+@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ 
+ 	name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
+ 
+-	inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
++	inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
++				   is_removable);
+ 	if (!inode)
+ 		goto fail_name;
+ 
+@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+ 	sb->s_d_op		= &efivarfs_d_ops;
+ 	sb->s_time_gran         = 1;
+ 
+-	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
++	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
+ 	if (!inode)
+ 		return -ENOMEM;
+ 	inode->i_op = &efivarfs_dir_inode_operations;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ea433a7f4bca..06bda0361e7c 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -657,6 +657,34 @@ has_zeroout:
+ 	return retval;
+ }
+ 
++/*
++ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
++ * we have to be careful as someone else may be manipulating b_state as well.
++ */
++static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
++{
++	unsigned long old_state;
++	unsigned long new_state;
++
++	flags &= EXT4_MAP_FLAGS;
++
++	/* Dummy buffer_head? Set non-atomically. */
++	if (!bh->b_page) {
++		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
++		return;
++	}
++	/*
++	 * Someone else may be modifying b_state. Be careful! This is ugly but
++	 * once we get rid of using bh as a container for mapping information
++	 * to pass to / from get_block functions, this can go away.
++	 */
++	do {
++		old_state = READ_ONCE(bh->b_state);
++		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
++	} while (unlikely(
++		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
++}
++
+ /* Maximum number of blocks we map for direct IO at once. */
+ #define DIO_MAX_BLOCKS 4096
+ 
+@@ -693,7 +721,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
+ 		ext4_io_end_t *io_end = ext4_inode_aio(inode);
+ 
+ 		map_bh(bh, inode->i_sb, map.m_pblk);
+-		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
++		ext4_update_bh_state(bh, map.m_flags);
+ 		if (IS_DAX(inode) && buffer_unwritten(bh)) {
+ 			/*
+ 			 * dgc: I suspect unwritten conversion on ext4+DAX is
+@@ -1669,7 +1697,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
+ 		return ret;
+ 
+ 	map_bh(bh, inode->i_sb, map.m_pblk);
+-	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
++	ext4_update_bh_state(bh, map.m_flags);
+ 
+ 	if (buffer_unwritten(bh)) {
+ 		/* A delayed write to unwritten bh should be marked
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 023f6a1f23cd..e5232bbcbe3d 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -317,6 +317,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
+ 	struct inode_switch_wbs_context *isw =
+ 		container_of(work, struct inode_switch_wbs_context, work);
+ 	struct inode *inode = isw->inode;
++	struct super_block *sb = inode->i_sb;
+ 	struct address_space *mapping = inode->i_mapping;
+ 	struct bdi_writeback *old_wb = inode->i_wb;
+ 	struct bdi_writeback *new_wb = isw->new_wb;
+@@ -423,6 +424,7 @@ skip_switch:
+ 	wb_put(new_wb);
+ 
+ 	iput(inode);
++	deactivate_super(sb);
+ 	kfree(isw);
+ }
+ 
+@@ -469,11 +471,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
+ 
+ 	/* while holding I_WB_SWITCH, no one else can update the association */
+ 	spin_lock(&inode->i_lock);
++
+ 	if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
+-	    inode_to_wb(inode) == isw->new_wb) {
+-		spin_unlock(&inode->i_lock);
+-		goto out_free;
+-	}
++	    inode_to_wb(inode) == isw->new_wb)
++		goto out_unlock;
++
++	if (!atomic_inc_not_zero(&inode->i_sb->s_active))
++		goto out_unlock;
++
+ 	inode->i_state |= I_WB_SWITCH;
+ 	spin_unlock(&inode->i_lock);
+ 
+@@ -489,6 +494,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
+ 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
+ 	return;
+ 
++out_unlock:
++	spin_unlock(&inode->i_lock);
+ out_free:
+ 	if (isw->new_wb)
+ 		wb_put(isw->new_wb);
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 2ac99db3750e..5a7b3229b956 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ 
+ 	init_special_inode(inode, mode, dev);
+ 	err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
+-	if (!err)
++	if (err)
+ 		goto out_free;
+ 
+ 	err = read_name(inode, name);
+ 	__putname(name);
+ 	if (err)
+ 		goto out_put;
+-	if (err)
+-		goto out_put;
+ 
+ 	d_instantiate(dentry, inode);
+ 	return 0;
+diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
+index ae4d5a1fa4c9..bffb908acbd4 100644
+--- a/fs/hpfs/namei.c
++++ b/fs/hpfs/namei.c
+@@ -375,12 +375,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
+ 	struct inode *inode = d_inode(dentry);
+ 	dnode_secno dno;
+ 	int r;
+-	int rep = 0;
+ 	int err;
+ 
+ 	hpfs_lock(dir->i_sb);
+ 	hpfs_adjust_length(name, &len);
+-again:
++
+ 	err = -ENOENT;
+ 	de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
+ 	if (!de)
+@@ -400,33 +399,9 @@ again:
+ 		hpfs_error(dir->i_sb, "there was error when removing dirent");
+ 		err = -EFSERROR;
+ 		break;
+-	case 2:		/* no space for deleting, try to truncate file */
+-
++	case 2:		/* no space for deleting */
+ 		err = -ENOSPC;
+-		if (rep++)
+-			break;
+-
+-		dentry_unhash(dentry);
+-		if (!d_unhashed(dentry)) {
+-			hpfs_unlock(dir->i_sb);
+-			return -ENOSPC;
+-		}
+-		if (generic_permission(inode, MAY_WRITE) ||
+-		    !S_ISREG(inode->i_mode) ||
+-		    get_write_access(inode)) {
+-			d_rehash(dentry);
+-		} else {
+-			struct iattr newattrs;
+-			/*pr_info("truncating file before delete.\n");*/
+-			newattrs.ia_size = 0;
+-			newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+-			err = notify_change(dentry, &newattrs, NULL);
+-			put_write_access(inode);
+-			if (!err)
+-				goto again;
+-		}
+-		hpfs_unlock(dir->i_sb);
+-		return -ENOSPC;
++		break;
+ 	default:
+ 		drop_nlink(inode);
+ 		err = 0;
+diff --git a/fs/locks.c b/fs/locks.c
+index 0d2b3267e2a3..6333263b7bc8 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2182,7 +2182,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		goto out;
+ 	}
+ 
+-again:
+ 	error = flock_to_posix_lock(filp, file_lock, &flock);
+ 	if (error)
+ 		goto out;
+@@ -2224,19 +2223,22 @@ again:
+ 	 * Attempt to detect a close/fcntl race and recover by
+ 	 * releasing the lock that was just acquired.
+ 	 */
+-	/*
+-	 * we need that spin_lock here - it prevents reordering between
+-	 * update of i_flctx->flc_posix and check for it done in close().
+-	 * rcu_read_lock() wouldn't do.
+-	 */
+-	spin_lock(&current->files->file_lock);
+-	f = fcheck(fd);
+-	spin_unlock(&current->files->file_lock);
+-	if (!error && f != filp && flock.l_type != F_UNLCK) {
+-		flock.l_type = F_UNLCK;
+-		goto again;
++	if (!error && file_lock->fl_type != F_UNLCK) {
++		/*
++		 * We need that spin_lock here - it prevents reordering between
++		 * update of i_flctx->flc_posix and check for it done in
++		 * close(). rcu_read_lock() wouldn't do.
++		 */
++		spin_lock(&current->files->file_lock);
++		f = fcheck(fd);
++		spin_unlock(&current->files->file_lock);
++		if (f != filp) {
++			file_lock->fl_type = F_UNLCK;
++			error = do_lock_file_wait(filp, cmd, file_lock);
++			WARN_ON_ONCE(error);
++			error = -EBADF;
++		}
+ 	}
+-
+ out:
+ 	locks_free_lock(file_lock);
+ 	return error;
+@@ -2322,7 +2324,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		goto out;
+ 	}
+ 
+-again:
+ 	error = flock64_to_posix_lock(filp, file_lock, &flock);
+ 	if (error)
+ 		goto out;
+@@ -2364,14 +2365,22 @@ again:
+ 	 * Attempt to detect a close/fcntl race and recover by
+ 	 * releasing the lock that was just acquired.
+ 	 */
+-	spin_lock(&current->files->file_lock);
+-	f = fcheck(fd);
+-	spin_unlock(&current->files->file_lock);
+-	if (!error && f != filp && flock.l_type != F_UNLCK) {
+-		flock.l_type = F_UNLCK;
+-		goto again;
++	if (!error && file_lock->fl_type != F_UNLCK) {
++		/*
++		 * We need that spin_lock here - it prevents reordering between
++		 * update of i_flctx->flc_posix and check for it done in
++		 * close(). rcu_read_lock() wouldn't do.
++		 */
++		spin_lock(&current->files->file_lock);
++		f = fcheck(fd);
++		spin_unlock(&current->files->file_lock);
++		if (f != filp) {
++			file_lock->fl_type = F_UNLCK;
++			error = do_lock_file_wait(filp, cmd, file_lock);
++			WARN_ON_ONCE(error);
++			error = -EBADF;
++		}
+ 	}
+-
+ out:
+ 	locks_free_lock(file_lock);
+ 	return error;
+diff --git a/fs/namei.c b/fs/namei.c
+index 0c3974cd3ecd..d8ee4da93650 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1711,6 +1711,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
+ 		return 0;
+ 	if (!follow)
+ 		return 0;
++	/* make sure that d_is_symlink above matches inode */
++	if (nd->flags & LOOKUP_RCU) {
++		if (read_seqcount_retry(&link->dentry->d_seq, seq))
++			return -ECHILD;
++	}
+ 	return pick_link(nd, link, inode, seq);
+ }
+ 
+@@ -1742,11 +1747,11 @@ static int walk_component(struct nameidata *nd, int flags)
+ 		if (err < 0)
+ 			return err;
+ 
+-		inode = d_backing_inode(path.dentry);
+ 		seq = 0;	/* we are already out of RCU mode */
+ 		err = -ENOENT;
+ 		if (d_is_negative(path.dentry))
+ 			goto out_path_put;
++		inode = d_backing_inode(path.dentry);
+ 	}
+ 
+ 	if (flags & WALK_PUT)
+@@ -3130,12 +3135,12 @@ retry_lookup:
+ 		return error;
+ 
+ 	BUG_ON(nd->flags & LOOKUP_RCU);
+-	inode = d_backing_inode(path.dentry);
+ 	seq = 0;	/* out of RCU mode, so the value doesn't matter */
+ 	if (unlikely(d_is_negative(path.dentry))) {
+ 		path_to_nameidata(&path, nd);
+ 		return -ENOENT;
+ 	}
++	inode = d_backing_inode(path.dentry);
+ finish_lookup:
+ 	if (nd->depth)
+ 		put_link(nd);
+@@ -3144,11 +3149,6 @@ finish_lookup:
+ 	if (unlikely(error))
+ 		return error;
+ 
+-	if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
+-		path_to_nameidata(&path, nd);
+-		return -ELOOP;
+-	}
+-
+ 	if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
+ 		path_to_nameidata(&path, nd);
+ 	} else {
+@@ -3167,6 +3167,10 @@ finish_open:
+ 		return error;
+ 	}
+ 	audit_inode(nd->name, nd->path.dentry, 0);
++	if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
++		error = -ELOOP;
++		goto out;
++	}
+ 	error = -EISDIR;
+ 	if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
+ 		goto out;
+@@ -3210,6 +3214,10 @@ opened:
+ 			goto exit_fput;
+ 	}
+ out:
++	if (unlikely(error > 0)) {
++		WARN_ON(1);
++		error = -EINVAL;
++	}
+ 	if (got_write)
+ 		mnt_drop_write(nd->path.mnt);
+ 	path_put(&save_parent);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index f496ed721d27..98a44157353a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2461,9 +2461,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ 		dentry = d_add_unique(dentry, igrab(state->inode));
+ 		if (dentry == NULL) {
+ 			dentry = opendata->dentry;
+-		} else if (dentry != ctx->dentry) {
++		} else {
+ 			dput(ctx->dentry);
+-			ctx->dentry = dget(dentry);
++			ctx->dentry = dentry;
+ 		}
+ 		nfs_set_verifier(dentry,
+ 				nfs_save_change_attribute(d_inode(opendata->dir)));
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 7f604727f487..e6795c7c76a8 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -956,6 +956,7 @@ clean_orphan:
+ 		tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
+ 				update_isize, end);
+ 		if (tmp_ret < 0) {
++			ocfs2_inode_unlock(inode, 1);
+ 			ret = tmp_ret;
+ 			mlog_errno(ret);
+ 			brelse(di_bh);
+diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
+index 0419485891f2..0f1c6f315cdc 100644
+--- a/include/asm-generic/cputime_nsecs.h
++++ b/include/asm-generic/cputime_nsecs.h
+@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
+  */
+ static inline cputime_t timespec_to_cputime(const struct timespec *val)
+ {
+-	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
++	u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ 	return (__force cputime_t) ret;
+ }
+ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+  */
+ static inline cputime_t timeval_to_cputime(const struct timeval *val)
+ {
+-	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
++	u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
++			val->tv_usec * NSEC_PER_USEC;
+ 	return (__force cputime_t) ret;
+ }
+ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index 7bfb063029d8..461a0558bca4 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -35,4 +35,13 @@
+ 
+ void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+ 
++static inline bool drm_arch_can_wc_memory(void)
++{
++#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
++	return false;
++#else
++	return true;
++#endif
++}
++
+ #endif
+diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
+index 5340099741ae..f356f9716474 100644
+--- a/include/drm/drm_dp_mst_helper.h
++++ b/include/drm/drm_dp_mst_helper.h
+@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
+ /**
+  * struct drm_dp_mst_port - MST port
+  * @kref: reference count for this port.
+- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
+- * @guid: guid for DP 1.2 device on this port.
+  * @port_num: port number
+  * @input: if this port is an input port.
+  * @mcs: message capability status - DP 1.2 spec.
+@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
+ struct drm_dp_mst_port {
+ 	struct kref kref;
+ 
+-	/* if dpcd 1.2 device is on this port - its GUID info */
+-	bool guid_valid;
+-	u8 guid[16];
+-
+ 	u8 port_num;
+ 	bool input;
+ 	bool mcs;
+@@ -109,10 +103,12 @@ struct drm_dp_mst_port {
+  * @tx_slots: transmission slots for this device.
+  * @last_seqno: last sequence number used to talk to this.
+  * @link_address_sent: if a link address message has been sent to this device yet.
++ * @guid: guid for DP 1.2 branch device. port under this branch can be
++ * identified by port #.
+  *
+  * This structure represents an MST branch device, there is one
+- * primary branch device at the root, along with any others connected
+- * to downstream ports
++ * primary branch device at the root, along with any other branches connected
++ * to downstream port of parent branches.
+  */
+ struct drm_dp_mst_branch {
+ 	struct kref kref;
+@@ -131,6 +127,9 @@ struct drm_dp_mst_branch {
+ 	struct drm_dp_sideband_msg_tx *tx_slots[2];
+ 	int last_seqno;
+ 	bool link_address_sent;
++
++	/* global unique identifier to identify branch devices */
++	u8 guid[16];
+ };
+ 
+ 
+@@ -405,11 +404,9 @@ struct drm_dp_payload {
+  * @conn_base_id: DRM connector ID this mgr is connected to.
+  * @down_rep_recv: msg receiver state for down replies.
+  * @up_req_recv: msg receiver state for up requests.
+- * @lock: protects mst state, primary, guid, dpcd.
++ * @lock: protects mst state, primary, dpcd.
+  * @mst_state: if this manager is enabled for an MST capable port.
+  * @mst_primary: pointer to the primary branch device.
+- * @guid_valid: GUID valid for the primary branch device.
+- * @guid: GUID for primary port.
+  * @dpcd: cache of DPCD for primary port.
+  * @pbn_div: PBN to slots divisor.
+  *
+@@ -431,13 +428,11 @@ struct drm_dp_mst_topology_mgr {
+ 	struct drm_dp_sideband_msg_rx up_req_recv;
+ 
+ 	/* pointer to info about the initial MST device */
+-	struct mutex lock; /* protects mst_state + primary + guid + dpcd */
++	struct mutex lock; /* protects mst_state + primary + dpcd */
+ 
+ 	bool mst_state;
+ 	struct drm_dp_mst_branch *mst_primary;
+-	/* primary MST device GUID */
+-	bool guid_valid;
+-	u8 guid[16];
++
+ 	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ 	u8 sink_count;
+ 	int pbn_div;
+@@ -450,9 +445,7 @@ struct drm_dp_mst_topology_mgr {
+ 	   the mstb tx_slots and txmsg->state once they are queued */
+ 	struct mutex qlock;
+ 	struct list_head tx_msg_downq;
+-	struct list_head tx_msg_upq;
+ 	bool tx_down_in_progress;
+-	bool tx_up_in_progress;
+ 
+ 	/* payload info + lock for it */
+ 	struct mutex payload_lock;
+diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
+index d639049a613d..553210c02ee0 100644
+--- a/include/drm/drm_fixed.h
++++ b/include/drm/drm_fixed.h
+@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
+ #define DRM_FIXED_ONE		(1ULL << DRM_FIXED_POINT)
+ #define DRM_FIXED_DECIMAL_MASK	(DRM_FIXED_ONE - 1)
+ #define DRM_FIXED_DIGITS_MASK	(~DRM_FIXED_DECIMAL_MASK)
++#define DRM_FIXED_EPSILON	1LL
++#define DRM_FIXED_ALMOST_ONE	(DRM_FIXED_ONE - DRM_FIXED_EPSILON)
+ 
+ static inline s64 drm_int2fixp(int a)
+ {
+ 	return ((s64)a) << DRM_FIXED_POINT;
+ }
+ 
+-static inline int drm_fixp2int(int64_t a)
++static inline int drm_fixp2int(s64 a)
+ {
+ 	return ((s64)a) >> DRM_FIXED_POINT;
+ }
+ 
+-static inline unsigned drm_fixp_msbset(int64_t a)
++static inline int drm_fixp2int_ceil(s64 a)
++{
++	if (a > 0)
++		return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
++	else
++		return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
++}
++
++static inline unsigned drm_fixp_msbset(s64 a)
+ {
+ 	unsigned shift, sign = (a >> 63) & 1;
+ 
+@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
+ 	return result;
+ }
+ 
++static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
++{
++	s64 res;
++	bool a_neg = a < 0;
++	bool b_neg = b < 0;
++	u64 a_abs = a_neg ? -a : a;
++	u64 b_abs = b_neg ? -b : b;
++	u64 rem;
++
++	/* determine integer part */
++	u64 res_abs  = div64_u64_rem(a_abs, b_abs, &rem);
++
++	/* determine fractional part */
++	{
++		u32 i = DRM_FIXED_POINT;
++
++		do {
++			rem <<= 1;
++			res_abs <<= 1;
++			if (rem >= b_abs) {
++				res_abs |= 1;
++				rem -= b_abs;
++			}
++		} while (--i != 0);
++	}
++
++	/* round up LSB */
++	{
++		u64 summand = (rem << 1) >= b_abs;
++
++		res_abs += summand;
++	}
++
++	res = (s64) res_abs;
++	if (a_neg ^ b_neg)
++		res = -res;
++	return res;
++}
++
+ static inline s64 drm_fixp_exp(s64 x)
+ {
+ 	s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
+diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
+index 71b1d6cdcb5d..8dbd7879fdc6 100644
+--- a/include/linux/ceph/messenger.h
++++ b/include/linux/ceph/messenger.h
+@@ -220,6 +220,7 @@ struct ceph_connection {
+ 	struct ceph_entity_addr actual_peer_addr;
+ 
+ 	/* message out temps */
++	struct ceph_msg_header out_hdr;
+ 	struct ceph_msg *out_msg;        /* sending message (== tail of
+ 					    out_sent) */
+ 	bool out_msg_done;
+@@ -229,7 +230,6 @@ struct ceph_connection {
+ 	int out_kvec_left;   /* kvec's left in out_kvec */
+ 	int out_skip;        /* skip this many bytes */
+ 	int out_kvec_bytes;  /* total bytes left */
+-	bool out_kvec_is_msg; /* kvec refers to out_msg */
+ 	int out_more;        /* there is more data after the kvecs */
+ 	__le64 out_temp_ack; /* for writing an ack */
+ 	struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index 06b77f9dd3f2..8e30faeab183 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -133,6 +133,12 @@ struct cgroup_subsys_state {
+ 	 */
+ 	u64 serial_nr;
+ 
++	/*
++	 * Incremented by online self and children.  Used to guarantee that
++	 * parents are not offlined before their children.
++	 */
++	atomic_t online_cnt;
++
+ 	/* percpu_ref killing and RCU release */
+ 	struct rcu_head rcu_head;
+ 	struct work_struct destroy_work;
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index 85a868ccb493..fea160ee5803 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+ 	task_unlock(current);
+ }
+ 
++extern void cpuset_post_attach_flush(void);
++
+ #else /* !CONFIG_CPUSETS */
+ 
+ static inline bool cpusets_enabled(void) { return false; }
+@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
+ 	return false;
+ }
+ 
++static inline void cpuset_post_attach_flush(void)
++{
++}
++
+ #endif /* !CONFIG_CPUSETS */
+ 
+ #endif /* _LINUX_CPUSET_H */
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 569b5a866bb1..47be3ad7d3e5 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ 				       struct list_head *head, bool remove);
+ 
+-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
++		     unsigned long data_size);
++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
++				  size_t len);
+ 
+ extern struct work_struct efivar_work;
+ void efivar_run_worker(void);
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 8fdc17b84739..ae6a711dcd1d 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -630,6 +630,11 @@ struct hv_input_signal_event_buffer {
+ 	struct hv_input_signal_event event;
+ };
+ 
++enum hv_signal_policy {
++	HV_SIGNAL_POLICY_DEFAULT = 0,
++	HV_SIGNAL_POLICY_EXPLICIT,
++};
++
+ struct vmbus_channel {
+ 	/* Unique channel id */
+ 	int id;
+@@ -757,8 +762,21 @@ struct vmbus_channel {
+ 	 * link up channels based on their CPU affinity.
+ 	 */
+ 	struct list_head percpu_list;
++	/*
++	 * Host signaling policy: The default policy will be
++	 * based on the ring buffer state. We will also support
++	 * a policy where the client driver can have explicit
++	 * signaling control.
++	 */
++	enum hv_signal_policy  signal_policy;
+ };
+ 
++static inline void set_channel_signal_state(struct vmbus_channel *c,
++					    enum hv_signal_policy policy)
++{
++	c->signal_policy = policy;
++}
++
+ static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
+ {
+ 	c->batched_reading = state;
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index c0e961474a52..5455b660bd88 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -544,9 +544,7 @@ extern int  nfs_readpage_async(struct nfs_open_context *, struct inode *,
+ 
+ static inline loff_t nfs_size_to_loff_t(__u64 size)
+ {
+-	if (size > (__u64) OFFSET_MAX - 1)
+-		return OFFSET_MAX - 1;
+-	return (loff_t) size;
++	return min_t(u64, size, OFFSET_MAX);
+ }
+ 
+ static inline ino_t
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index 50777b5b1e4c..92d112aeec68 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -15,10 +15,7 @@ struct shmem_inode_info {
+ 	unsigned int		seals;		/* shmem seals */
+ 	unsigned long		flags;
+ 	unsigned long		alloced;	/* data pages alloced to file */
+-	union {
+-		unsigned long	swapped;	/* subtotal assigned to swap */
+-		char		*symlink;	/* unswappable short symlink */
+-	};
++	unsigned long		swapped;	/* subtotal assigned to swap */
+ 	struct shared_policy	policy;		/* NUMA memory alloc policy */
+ 	struct list_head	swaplist;	/* chain of maybes on swap */
+ 	struct simple_xattrs	xattrs;		/* list of xattrs */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 9147f9f34cbe..75f136a22a5e 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -219,6 +219,7 @@ struct sk_buff;
+ #else
+ #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+ #endif
++extern int sysctl_max_skb_frags;
+ 
+ typedef struct skb_frag_struct skb_frag_t;
+ 
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 613c29bd6baf..e13a1ace50e9 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -43,6 +43,9 @@
+ /* Default weight of a bound cooling device */
+ #define THERMAL_WEIGHT_DEFAULT 0
+ 
++/* use value, which < 0K, to indicate an invalid/uninitialized temperature */
++#define THERMAL_TEMP_INVALID	-274000
++
+ /* Unit conversion macros */
+ #define DECI_KELVIN_TO_CELSIUS(t)	({			\
+ 	long _t = (t);						\
+@@ -167,6 +170,7 @@ struct thermal_attr {
+  * @forced_passive:	If > 0, temperature at which to switch on all ACPI
+  *			processor cooling devices.  Currently only used by the
+  *			step-wise governor.
++ * @need_update:	if equals 1, thermal_zone_device_update needs to be invoked.
+  * @ops:	operations this &thermal_zone_device supports
+  * @tzp:	thermal zone parameters
+  * @governor:	pointer to the governor for this thermal zone
+@@ -194,6 +198,7 @@ struct thermal_zone_device {
+ 	int emul_temperature;
+ 	int passive;
+ 	unsigned int forced_passive;
++	atomic_t need_update;
+ 	struct thermal_zone_device_ops *ops;
+ 	struct thermal_zone_params *tzp;
+ 	struct thermal_governor *governor;
+diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
+index cbb20afdbc01..bb679b48f408 100644
+--- a/include/linux/ucs2_string.h
++++ b/include/linux/ucs2_string.h
+@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
+ unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+ int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+ 
++unsigned long ucs2_utf8size(const ucs2_char_t *src);
++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
++			   unsigned long maxlength);
++
+ #endif /* _LINUX_UCS2_STRING_H_ */
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index 2a91a0561a47..9b4c418bebd8 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -6,8 +6,8 @@
+ #include <linux/mutex.h>
+ #include <net/sock.h>
+ 
+-void unix_inflight(struct file *fp);
+-void unix_notinflight(struct file *fp);
++void unix_inflight(struct user_struct *user, struct file *fp);
++void unix_notinflight(struct user_struct *user, struct file *fp);
+ void unix_gc(void);
+ void wait_for_unix_gc(void);
+ struct sock *unix_get_socket(struct file *filp);
+diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
+index 6816f0fa5693..30a56ab2ccfb 100644
+--- a/include/net/dst_metadata.h
++++ b/include/net/dst_metadata.h
+@@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
+ 	return dst && !(dst->flags & DST_METADATA);
+ }
+ 
++static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
++				       const struct sk_buff *skb_b)
++{
++	const struct metadata_dst *a, *b;
++
++	if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
++		return 0;
++
++	a = (const struct metadata_dst *) skb_dst(skb_a);
++	b = (const struct metadata_dst *) skb_dst(skb_b);
++
++	if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
++		return 1;
++
++	return memcmp(&a->u.tun_info, &b->u.tun_info,
++		      sizeof(a->u.tun_info) + a->u.tun_info.options_len);
++}
++
+ struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
+ struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
+ 
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 481fe1c9044c..49dcad4fe99e 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+ 					    struct sock *newsk,
+ 					    const struct request_sock *req);
+ 
+-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+-			      struct sock *child);
++struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
++				      struct request_sock *req,
++				      struct sock *child);
+ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+ 				   unsigned long timeout);
+ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 877f682989b8..295d291269e2 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
+ 
+ void ip6_route_input(struct sk_buff *skb);
+ 
+-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+-				   struct flowi6 *fl6);
++struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
++					 struct flowi6 *fl6, int flags);
++
++static inline struct dst_entry *ip6_route_output(struct net *net,
++						 const struct sock *sk,
++						 struct flowi6 *fl6)
++{
++	return ip6_route_output_flags(net, sk, fl6, 0);
++}
++
+ struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+ 				   int flags);
+ 
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 9f4df68105ab..3f98233388fb 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -61,6 +61,7 @@ struct fib_nh_exception {
+ 	struct rtable __rcu		*fnhe_rth_input;
+ 	struct rtable __rcu		*fnhe_rth_output;
+ 	unsigned long			fnhe_stamp;
++	struct rcu_head			rcu;
+ };
+ 
+ struct fnhe_hash_bucket {
+diff --git a/include/net/scm.h b/include/net/scm.h
+index 262532d111f5..59fa93c01d2a 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -21,6 +21,7 @@ struct scm_creds {
+ struct scm_fp_list {
+ 	short			count;
+ 	short			max;
++	struct user_struct	*user;
+ 	struct file		*fp[SCM_MAX_FD];
+ };
+ 
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index f80e74c5ad18..414d822bc1db 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -449,7 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
+ 
+ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+ void tcp_v4_mtu_reduced(struct sock *sk);
+-void tcp_req_err(struct sock *sk, u32 seq);
++void tcp_req_err(struct sock *sk, u32 seq, bool abort);
+ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+ struct sock *tcp_create_openreq_child(const struct sock *sk,
+ 				      struct request_sock *req,
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index aabf0aca0171..689f4d207122 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -138,6 +138,7 @@ enum se_cmd_flags_table {
+ 	SCF_COMPARE_AND_WRITE		= 0x00080000,
+ 	SCF_COMPARE_AND_WRITE_POST	= 0x00100000,
+ 	SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
++	SCF_ACK_KREF			= 0x00400000,
+ };
+ 
+ /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
+@@ -490,6 +491,8 @@ struct se_cmd {
+ #define CMD_T_DEV_ACTIVE	(1 << 7)
+ #define CMD_T_REQUEST_STOP	(1 << 8)
+ #define CMD_T_BUSY		(1 << 9)
++#define CMD_T_TAS		(1 << 10)
++#define CMD_T_FABRIC_STOP	(1 << 11)
+ 	spinlock_t		t_state_lock;
+ 	struct kref		cmd_kref;
+ 	struct completion	t_transport_stop_comp;
+diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
+index c2e5d6cb34e3..ebd10e624598 100644
+--- a/include/uapi/linux/Kbuild
++++ b/include/uapi/linux/Kbuild
+@@ -307,7 +307,7 @@ header-y += nfs_mount.h
+ header-y += nl80211.h
+ header-y += n_r3964.h
+ header-y += nubus.h
+-header-y += nvme.h
++header-y += nvme_ioctl.h
+ header-y += nvram.h
+ header-y += omap3isp.h
+ header-y += omapfb.h
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index d1d3e8f57de9..2e7f7ab739e4 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
+ 		/* adjust offset of jmps if necessary */
+ 		if (i < pos && i + insn->off + 1 > pos)
+ 			insn->off += delta;
+-		else if (i > pos && i + insn->off + 1 < pos)
++		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
+ 			insn->off -= delta;
+ 	}
+ }
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 470f6536b9e8..fb1ecfd2decd 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -57,7 +57,7 @@
+ #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
+ #include <linux/kthread.h>
+ #include <linux/delay.h>
+-
++#include <linux/cpuset.h>
+ #include <linux/atomic.h>
+ 
+ /*
+@@ -2764,6 +2764,7 @@ out_unlock_rcu:
+ out_unlock_threadgroup:
+ 	percpu_up_write(&cgroup_threadgroup_rwsem);
+ 	cgroup_kn_unlock(of->kn);
++	cpuset_post_attach_flush();
+ 	return ret ?: nbytes;
+ }
+ 
+@@ -4783,6 +4784,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
+ 	INIT_LIST_HEAD(&css->sibling);
+ 	INIT_LIST_HEAD(&css->children);
+ 	css->serial_nr = css_serial_nr_next++;
++	atomic_set(&css->online_cnt, 0);
+ 
+ 	if (cgroup_parent(cgrp)) {
+ 		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
+@@ -4805,6 +4807,10 @@ static int online_css(struct cgroup_subsys_state *css)
+ 	if (!ret) {
+ 		css->flags |= CSS_ONLINE;
+ 		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
++
++		atomic_inc(&css->online_cnt);
++		if (css->parent)
++			atomic_inc(&css->parent->online_cnt);
+ 	}
+ 	return ret;
+ }
+@@ -5036,10 +5042,15 @@ static void css_killed_work_fn(struct work_struct *work)
+ 		container_of(work, struct cgroup_subsys_state, destroy_work);
+ 
+ 	mutex_lock(&cgroup_mutex);
+-	offline_css(css);
+-	mutex_unlock(&cgroup_mutex);
+ 
+-	css_put(css);
++	do {
++		offline_css(css);
++		css_put(css);
++		/* @css can't go away while we're holding cgroup_mutex */
++		css = css->parent;
++	} while (css && atomic_dec_and_test(&css->online_cnt));
++
++	mutex_unlock(&cgroup_mutex);
+ }
+ 
+ /* css kill confirmation processing requires process context, bounce */
+@@ -5048,8 +5059,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+ 	struct cgroup_subsys_state *css =
+ 		container_of(ref, struct cgroup_subsys_state, refcnt);
+ 
+-	INIT_WORK(&css->destroy_work, css_killed_work_fn);
+-	queue_work(cgroup_destroy_wq, &css->destroy_work);
++	if (atomic_dec_and_test(&css->online_cnt)) {
++		INIT_WORK(&css->destroy_work, css_killed_work_fn);
++		queue_work(cgroup_destroy_wq, &css->destroy_work);
++	}
+ }
+ 
+ /**
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 02a8ea5c9963..2ade632197d5 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -286,6 +286,8 @@ static struct cpuset top_cpuset = {
+ static DEFINE_MUTEX(cpuset_mutex);
+ static DEFINE_SPINLOCK(callback_lock);
+ 
++static struct workqueue_struct *cpuset_migrate_mm_wq;
++
+ /*
+  * CPU / memory hotplug is handled asynchronously.
+  */
+@@ -971,31 +973,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ }
+ 
+ /*
+- * cpuset_migrate_mm
+- *
+- *    Migrate memory region from one set of nodes to another.
+- *
+- *    Temporarilly set tasks mems_allowed to target nodes of migration,
+- *    so that the migration code can allocate pages on these nodes.
+- *
+- *    While the mm_struct we are migrating is typically from some
+- *    other task, the task_struct mems_allowed that we are hacking
+- *    is for our current task, which must allocate new pages for that
+- *    migrating memory region.
++ * Migrate memory region from one set of nodes to another.  This is
++ * performed asynchronously as it can be called from process migration path
++ * holding locks involved in process management.  All mm migrations are
++ * performed in the queued order and can be waited for by flushing
++ * cpuset_migrate_mm_wq.
+  */
+ 
++struct cpuset_migrate_mm_work {
++	struct work_struct	work;
++	struct mm_struct	*mm;
++	nodemask_t		from;
++	nodemask_t		to;
++};
++
++static void cpuset_migrate_mm_workfn(struct work_struct *work)
++{
++	struct cpuset_migrate_mm_work *mwork =
++		container_of(work, struct cpuset_migrate_mm_work, work);
++
++	/* on a wq worker, no need to worry about %current's mems_allowed */
++	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
++	mmput(mwork->mm);
++	kfree(mwork);
++}
++
+ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
+ 							const nodemask_t *to)
+ {
+-	struct task_struct *tsk = current;
+-
+-	tsk->mems_allowed = *to;
++	struct cpuset_migrate_mm_work *mwork;
+ 
+-	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
++	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
++	if (mwork) {
++		mwork->mm = mm;
++		mwork->from = *from;
++		mwork->to = *to;
++		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
++		queue_work(cpuset_migrate_mm_wq, &mwork->work);
++	} else {
++		mmput(mm);
++	}
++}
+ 
+-	rcu_read_lock();
+-	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
+-	rcu_read_unlock();
++void cpuset_post_attach_flush(void)
++{
++	flush_workqueue(cpuset_migrate_mm_wq);
+ }
+ 
+ /*
+@@ -1096,7 +1118,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
+ 		mpol_rebind_mm(mm, &cs->mems_allowed);
+ 		if (migrate)
+ 			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
+-		mmput(mm);
++		else
++			mmput(mm);
+ 	}
+ 	css_task_iter_end(&it);
+ 
+@@ -1541,11 +1564,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ 			 * @old_mems_allowed is the right nodesets that we
+ 			 * migrate mm from.
+ 			 */
+-			if (is_memory_migrate(cs)) {
++			if (is_memory_migrate(cs))
+ 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
+ 						  &cpuset_attach_nodemask_to);
+-			}
+-			mmput(mm);
++			else
++				mmput(mm);
+ 		}
+ 	}
+ 
+@@ -1710,6 +1733,7 @@ out_unlock:
+ 	mutex_unlock(&cpuset_mutex);
+ 	kernfs_unbreak_active_protection(of->kn);
+ 	css_put(&cs->css);
++	flush_workqueue(cpuset_migrate_mm_wq);
+ 	return retval ?: nbytes;
+ }
+ 
+@@ -2355,6 +2379,9 @@ void __init cpuset_init_smp(void)
+ 	top_cpuset.effective_mems = node_states[N_MEMORY];
+ 
+ 	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
++
++	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
++	BUG_ON(!cpuset_migrate_mm_wq);
+ }
+ 
+ /**
+diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
+index a302cf9a2126..57bff7857e87 100644
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -138,7 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+ 	unsigned int flags = 0, irq = desc->irq_data.irq;
+ 	struct irqaction *action = desc->action;
+ 
+-	do {
++	/* action might have become NULL since we dropped the lock */
++	while (action) {
+ 		irqreturn_t res;
+ 
+ 		trace_irq_handler_entry(irq, action);
+@@ -173,7 +174,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+ 
+ 		retval |= res;
+ 		action = action->next;
+-	} while (action);
++	}
+ 
+ 	add_interrupt_randomness(irq, flags);
+ 
+diff --git a/kernel/memremap.c b/kernel/memremap.c
+index 7a4e473cea4d..25ced161ebeb 100644
+--- a/kernel/memremap.c
++++ b/kernel/memremap.c
+@@ -133,8 +133,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
+ 	if (addr) {
+ 		*ptr = addr;
+ 		devres_add(dev, ptr);
+-	} else
++	} else {
+ 		devres_free(ptr);
++		return ERR_PTR(-ENXIO);
++	}
+ 
+ 	return addr;
+ }
+diff --git a/kernel/resource.c b/kernel/resource.c
+index f150dbbe6f62..249b1eb1e6e1 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -1083,9 +1083,10 @@ struct resource * __request_region(struct resource *parent,
+ 		if (!conflict)
+ 			break;
+ 		if (conflict != parent) {
+-			parent = conflict;
+-			if (!(conflict->flags & IORESOURCE_BUSY))
++			if (!(conflict->flags & IORESOURCE_BUSY)) {
++				parent = conflict;
+ 				continue;
++			}
+ 		}
+ 		if (conflict->flags & flags & IORESOURCE_MUXED) {
+ 			add_wait_queue(&muxed_resource_wait, &wait);
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 580ac2d4024f..15a1795bbba1 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
+ 		put_seccomp_filter(thread);
+ 		smp_store_release(&thread->seccomp.filter,
+ 				  caller->seccomp.filter);
++
++		/*
++		 * Don't let an unprivileged task work around
++		 * the no_new_privs restriction by creating
++		 * a thread that sets it up, enters seccomp,
++		 * then dies.
++		 */
++		if (task_no_new_privs(caller))
++			task_set_no_new_privs(thread);
++
+ 		/*
+ 		 * Opt the other thread into seccomp if needed.
+ 		 * As threads are considered to be trust-realm
+ 		 * equivalent (see ptrace_may_access), it is safe to
+ 		 * allow one thread to transition the other.
+ 		 */
+-		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
+-			/*
+-			 * Don't let an unprivileged task work around
+-			 * the no_new_privs restriction by creating
+-			 * a thread that sets it up, enters seccomp,
+-			 * then dies.
+-			 */
+-			if (task_no_new_privs(caller))
+-				task_set_no_new_privs(thread);
+-
++		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
+ 			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+-		}
+ 	}
+ }
+ 
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index ce033c7aa2e8..9cff0ab82b63 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
+ static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
+ {
+ 	struct posix_clock *clk = get_posix_clock(fp);
+-	int result = 0;
++	unsigned int result = 0;
+ 
+ 	if (!clk)
+-		return -ENODEV;
++		return POLLERR;
+ 
+ 	if (clk->ops.poll)
+ 		result = clk->ops.poll(clk, fp, wait);
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 7c7ec4515983..22c57e191a23 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -977,9 +977,9 @@ static void tick_nohz_switch_to_nohz(void)
+ 	/* Get the next period */
+ 	next = tick_init_jiffy_update();
+ 
+-	hrtimer_forward_now(&ts->sched_timer, tick_period);
+ 	hrtimer_set_expires(&ts->sched_timer, next);
+-	tick_program_event(next, 1);
++	hrtimer_forward_now(&ts->sched_timer, tick_period);
++	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
+ 	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
+ }
+ 
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index d563c1960302..99188ee5d9d0 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -305,8 +305,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
+ 
+ 	delta = timekeeping_get_delta(tkr);
+ 
+-	nsec = delta * tkr->mult + tkr->xtime_nsec;
+-	nsec >>= tkr->shift;
++	nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
+ 
+ 	/* If arch requires, add in get_arch_timeoffset() */
+ 	return nsec + arch_gettimeoffset();
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 4f6ef6912e00..debf6e878076 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -869,7 +869,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
+ 		 * The ftrace subsystem is for showing formats only.
+ 		 * They can not be enabled or disabled via the event files.
+ 		 */
+-		if (call->class && call->class->reg)
++		if (call->class && call->class->reg &&
++		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+ 			return file;
+ 	}
+ 
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index c579dbab2e36..450c21fd0e6e 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -568,6 +568,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
+ 						  int node)
+ {
+ 	assert_rcu_or_wq_mutex_or_pool_mutex(wq);
++
++	/*
++	 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
++	 * delayed item is pending.  The plan is to keep CPU -> NODE
++	 * mapping valid and stable across CPU on/offlines.  Once that
++	 * happens, this workaround can be removed.
++	 */
++	if (unlikely(node == NUMA_NO_NODE))
++		return wq->dfl_pwq;
++
+ 	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
+ }
+ 
+@@ -1458,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+ 	timer_stats_timer_set_start_info(&dwork->timer);
+ 
+ 	dwork->wq = wq;
+-	/* timer isn't guaranteed to run in this cpu, record earlier */
+-	if (cpu == WORK_CPU_UNBOUND)
+-		cpu = raw_smp_processor_id();
+ 	dwork->cpu = cpu;
+ 	timer->expires = jiffies + delay;
+ 
+-	add_timer_on(timer, cpu);
++	if (unlikely(cpu != WORK_CPU_UNBOUND))
++		add_timer_on(timer, cpu);
++	else
++		add_timer(timer);
+ }
+ 
+ /**
+diff --git a/lib/Kconfig b/lib/Kconfig
+index f0df318104e7..1a48744253d7 100644
+--- a/lib/Kconfig
++++ b/lib/Kconfig
+@@ -210,9 +210,11 @@ config RANDOM32_SELFTEST
+ # compression support is select'ed if needed
+ #
+ config 842_COMPRESS
++	select CRC32
+ 	tristate
+ 
+ config 842_DECOMPRESS
++	select CRC32
+ 	tristate
+ 
+ config ZLIB_INFLATE
+diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
+index 6f500ef2301d..f0b323abb4c6 100644
+--- a/lib/ucs2_string.c
++++ b/lib/ucs2_string.c
+@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+         }
+ }
+ EXPORT_SYMBOL(ucs2_strncmp);
++
++unsigned long
++ucs2_utf8size(const ucs2_char_t *src)
++{
++	unsigned long i;
++	unsigned long j = 0;
++
++	for (i = 0; i < ucs2_strlen(src); i++) {
++		u16 c = src[i];
++
++		if (c >= 0x800)
++			j += 3;
++		else if (c >= 0x80)
++			j += 2;
++		else
++			j += 1;
++	}
++
++	return j;
++}
++EXPORT_SYMBOL(ucs2_utf8size);
++
++/*
++ * copy at most maxlength bytes of whole utf8 characters to dest from the
++ * ucs2 string src.
++ *
++ * The return value is the number of characters copied, not including the
++ * final NUL character.
++ */
++unsigned long
++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
++{
++	unsigned int i;
++	unsigned long j = 0;
++	unsigned long limit = ucs2_strnlen(src, maxlength);
++
++	for (i = 0; maxlength && i < limit; i++) {
++		u16 c = src[i];
++
++		if (c >= 0x800) {
++			if (maxlength < 3)
++				break;
++			maxlength -= 3;
++			dest[j++] = 0xe0 | (c & 0xf000) >> 12;
++			dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
++			dest[j++] = 0x80 | (c & 0x003f);
++		} else if (c >= 0x80) {
++			if (maxlength < 2)
++				break;
++			maxlength -= 2;
++			dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
++			dest[j++] = 0x80 | (c & 0x03f);
++		} else {
++			maxlength -= 1;
++			dest[j++] = c & 0x7f;
++		}
++	}
++	if (maxlength)
++		dest[j] = '\0';
++	return j;
++}
++EXPORT_SYMBOL(ucs2_as_utf8);
+diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
+index d3116be5a00f..300117f1a08f 100644
+--- a/mm/balloon_compaction.c
++++ b/mm/balloon_compaction.c
+@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ 	bool dequeued_page;
+ 
+ 	dequeued_page = false;
++	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ 	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
+ 		/*
+ 		 * Block others from accessing the 'page' while we get around
+@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ 				continue;
+ 			}
+ #endif
+-			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ 			balloon_page_delete(page);
+ 			__count_vm_event(BALLOON_DEFLATE);
+-			spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ 			unlock_page(page);
+ 			dequeued_page = true;
+ 			break;
+ 		}
+ 	}
++	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ 
+ 	if (!dequeued_page) {
+ 		/*
+diff --git a/mm/memory.c b/mm/memory.c
+index c387430f06c3..b80bf4746b67 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3399,8 +3399,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	if (unlikely(pmd_none(*pmd)) &&
+ 	    unlikely(__pte_alloc(mm, vma, pmd, address)))
+ 		return VM_FAULT_OOM;
+-	/* if an huge pmd materialized from under us just retry later */
+-	if (unlikely(pmd_trans_huge(*pmd)))
++	/*
++	 * If a huge pmd materialized under us just retry later.  Use
++	 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
++	 * didn't become pmd_trans_huge under us and then back to pmd_none, as
++	 * a result of MADV_DONTNEED running immediately after a huge pmd fault
++	 * in a different thread of this mm, in turn leading to a misleading
++	 * pmd_trans_huge() retval.  All we have to ensure is that it is a
++	 * regular pmd that we can walk with pte_offset_map() and we can do that
++	 * through an atomic read in C, which is what pmd_trans_unstable()
++	 * provides.
++	 */
++	if (unlikely(pmd_trans_unstable(pmd)))
+ 		return 0;
+ 	/*
+ 	 * A regular pmd is established and it can't morph into a huge pmd
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 7890d0bb5e23..6d17e0ab42d4 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1578,7 +1578,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
+ 					 (GFP_HIGHUSER_MOVABLE |
+ 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
+ 					  __GFP_NORETRY | __GFP_NOWARN) &
+-					 ~(__GFP_IO | __GFP_FS), 0);
++					 ~__GFP_RECLAIM, 0);
+ 
+ 	return newpage;
+ }
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 2afcdbbdb685..ea5a70cfc1d8 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -620,8 +620,7 @@ static void shmem_evict_inode(struct inode *inode)
+ 			list_del_init(&info->swaplist);
+ 			mutex_unlock(&shmem_swaplist_mutex);
+ 		}
+-	} else
+-		kfree(info->symlink);
++	}
+ 
+ 	simple_xattrs_free(&info->xattrs);
+ 	WARN_ON(inode->i_blocks);
+@@ -2462,13 +2461,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
+ 	info = SHMEM_I(inode);
+ 	inode->i_size = len-1;
+ 	if (len <= SHORT_SYMLINK_LEN) {
+-		info->symlink = kmemdup(symname, len, GFP_KERNEL);
+-		if (!info->symlink) {
++		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
++		if (!inode->i_link) {
+ 			iput(inode);
+ 			return -ENOMEM;
+ 		}
+ 		inode->i_op = &shmem_short_symlink_operations;
+-		inode->i_link = info->symlink;
+ 	} else {
+ 		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
+ 		if (error) {
+@@ -3083,6 +3081,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
+ static void shmem_destroy_callback(struct rcu_head *head)
+ {
+ 	struct inode *inode = container_of(head, struct inode, i_rcu);
++	kfree(inode->i_link);
+ 	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
+ }
+ 
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index 9e9cca3689a0..795ddd8b2f77 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	/* check that it's our buffer */
+ 	if (lowpan_is_ipv6(*skb_network_header(skb))) {
++		/* Pull off the 1-byte of 6lowpan header. */
++		skb_pull(skb, 1);
++
+ 		/* Copy the packet so that the IPv6 header is
+ 		 * properly aligned.
+ 		 */
+@@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
+ 
+ 		local_skb->protocol = htons(ETH_P_IPV6);
+ 		local_skb->pkt_type = PACKET_HOST;
++		local_skb->dev = dev;
+ 
+ 		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
+ 
+@@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
+ 		if (!local_skb)
+ 			goto drop;
+ 
++		local_skb->dev = dev;
++
+ 		ret = iphc_decompress(local_skb, dev, chan);
+ 		if (ret < 0) {
+ 			kfree_skb(local_skb);
+@@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
+ 
+ 		local_skb->protocol = htons(ETH_P_IPV6);
+ 		local_skb->pkt_type = PACKET_HOST;
+-		local_skb->dev = dev;
+ 
+ 		if (give_skb_to_upper(local_skb, dev)
+ 				!= NET_RX_SUCCESS) {
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 85b82f7adbd2..24e9410923d0 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -722,8 +722,12 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
+ 	if (hci_update_random_address(req, false, &own_addr_type))
+ 		return;
+ 
++	/* Set window to be the same value as the interval to enable
++	 * continuous scanning.
++	 */
+ 	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
+-	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
++	cp.scan_window = cp.scan_interval;
++
+ 	bacpy(&cp.peer_addr, &conn->dst);
+ 	cp.peer_addr_type = conn->dst_type;
+ 	cp.own_address_type = own_addr_type;
+diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
+index 981f8a202c27..02778c5bc149 100644
+--- a/net/bluetooth/hci_request.c
++++ b/net/bluetooth/hci_request.c
+@@ -175,21 +175,29 @@ static u8 update_white_list(struct hci_request *req)
+ 	 * command to remove it from the controller.
+ 	 */
+ 	list_for_each_entry(b, &hdev->le_white_list, list) {
+-		struct hci_cp_le_del_from_white_list cp;
++		/* If the device is neither in pend_le_conns nor
++		 * pend_le_reports then remove it from the whitelist.
++		 */
++		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
++					       &b->bdaddr, b->bdaddr_type) &&
++		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
++					       &b->bdaddr, b->bdaddr_type)) {
++			struct hci_cp_le_del_from_white_list cp;
++
++			cp.bdaddr_type = b->bdaddr_type;
++			bacpy(&cp.bdaddr, &b->bdaddr);
+ 
+-		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
+-					      &b->bdaddr, b->bdaddr_type) ||
+-		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
+-					      &b->bdaddr, b->bdaddr_type)) {
+-			white_list_entries++;
++			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
++				    sizeof(cp), &cp);
+ 			continue;
+ 		}
+ 
+-		cp.bdaddr_type = b->bdaddr_type;
+-		bacpy(&cp.bdaddr, &b->bdaddr);
++		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
++			/* White list can not be used with RPAs */
++			return 0x00;
++		}
+ 
+-		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
+-			    sizeof(cp), &cp);
++		white_list_entries++;
+ 	}
+ 
+ 	/* Since all no longer valid white list entries have been
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index ffed8a1d4f27..4b175df35184 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -1072,22 +1072,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
+ 			hcon->dst_type = smp->remote_irk->addr_type;
+ 			queue_work(hdev->workqueue, &conn->id_addr_update_work);
+ 		}
+-
+-		/* When receiving an indentity resolving key for
+-		 * a remote device that does not use a resolvable
+-		 * private address, just remove the key so that
+-		 * it is possible to use the controller white
+-		 * list for scanning.
+-		 *
+-		 * Userspace will have been told to not store
+-		 * this key at this point. So it is safe to
+-		 * just remove it.
+-		 */
+-		if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
+-			list_del_rcu(&smp->remote_irk->list);
+-			kfree_rcu(smp->remote_irk, rcu);
+-			smp->remote_irk = NULL;
+-		}
+ 	}
+ 
+ 	if (smp->csrk) {
+diff --git a/net/bridge/br.c b/net/bridge/br.c
+index a1abe4936fe1..3addc05b9a16 100644
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
+ 	.notifier_call = br_device_event
+ };
+ 
++/* called with RTNL */
+ static int br_switchdev_event(struct notifier_block *unused,
+ 			      unsigned long event, void *ptr)
+ {
+@@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused,
+ 	struct switchdev_notifier_fdb_info *fdb_info;
+ 	int err = NOTIFY_DONE;
+ 
+-	rtnl_lock();
+ 	p = br_port_get_rtnl(dev);
+ 	if (!p)
+ 		goto out;
+@@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused,
+ 	}
+ 
+ out:
+-	rtnl_unlock();
+ 	return err;
+ }
+ 
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 9981039ef4ff..63ae5dd24fc5 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -672,6 +672,8 @@ static void reset_connection(struct ceph_connection *con)
+ 	}
+ 	con->in_seq = 0;
+ 	con->in_seq_acked = 0;
++
++	con->out_skip = 0;
+ }
+ 
+ /*
+@@ -771,6 +773,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
+ 
+ static void con_out_kvec_reset(struct ceph_connection *con)
+ {
++	BUG_ON(con->out_skip);
++
+ 	con->out_kvec_left = 0;
+ 	con->out_kvec_bytes = 0;
+ 	con->out_kvec_cur = &con->out_kvec[0];
+@@ -779,9 +783,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
+ static void con_out_kvec_add(struct ceph_connection *con,
+ 				size_t size, void *data)
+ {
+-	int index;
++	int index = con->out_kvec_left;
+ 
+-	index = con->out_kvec_left;
++	BUG_ON(con->out_skip);
+ 	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
+ 
+ 	con->out_kvec[index].iov_len = size;
+@@ -790,6 +794,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
+ 	con->out_kvec_bytes += size;
+ }
+ 
++/*
++ * Chop off a kvec from the end.  Return residual number of bytes for
++ * that kvec, i.e. how many bytes would have been written if the kvec
++ * hadn't been nuked.
++ */
++static int con_out_kvec_skip(struct ceph_connection *con)
++{
++	int off = con->out_kvec_cur - con->out_kvec;
++	int skip = 0;
++
++	if (con->out_kvec_bytes > 0) {
++		skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
++		BUG_ON(con->out_kvec_bytes < skip);
++		BUG_ON(!con->out_kvec_left);
++		con->out_kvec_bytes -= skip;
++		con->out_kvec_left--;
++	}
++
++	return skip;
++}
++
+ #ifdef CONFIG_BLOCK
+ 
+ /*
+@@ -1175,6 +1200,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
+ 	return new_piece;
+ }
+ 
++static size_t sizeof_footer(struct ceph_connection *con)
++{
++	return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
++	    sizeof(struct ceph_msg_footer) :
++	    sizeof(struct ceph_msg_footer_old);
++}
++
+ static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
+ {
+ 	BUG_ON(!msg);
+@@ -1197,7 +1229,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
+ 	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
+ 
+ 	dout("prepare_write_message_footer %p\n", con);
+-	con->out_kvec_is_msg = true;
+ 	con->out_kvec[v].iov_base = &m->footer;
+ 	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
+ 		if (con->ops->sign_message)
+@@ -1225,7 +1256,6 @@ static void prepare_write_message(struct ceph_connection *con)
+ 	u32 crc;
+ 
+ 	con_out_kvec_reset(con);
+-	con->out_kvec_is_msg = true;
+ 	con->out_msg_done = false;
+ 
+ 	/* Sneak an ack in there first?  If we can get it into the same
+@@ -1265,18 +1295,19 @@ static void prepare_write_message(struct ceph_connection *con)
+ 
+ 	/* tag + hdr + front + middle */
+ 	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
+-	con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
++	con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
+ 	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
+ 
+ 	if (m->middle)
+ 		con_out_kvec_add(con, m->middle->vec.iov_len,
+ 			m->middle->vec.iov_base);
+ 
+-	/* fill in crc (except data pages), footer */
++	/* fill in hdr crc and finalize hdr */
+ 	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
+ 	con->out_msg->hdr.crc = cpu_to_le32(crc);
+-	con->out_msg->footer.flags = 0;
++	memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
+ 
++	/* fill in front and middle crc, footer */
+ 	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
+ 	con->out_msg->footer.front_crc = cpu_to_le32(crc);
+ 	if (m->middle) {
+@@ -1288,6 +1319,7 @@ static void prepare_write_message(struct ceph_connection *con)
+ 	dout("%s front_crc %u middle_crc %u\n", __func__,
+ 	     le32_to_cpu(con->out_msg->footer.front_crc),
+ 	     le32_to_cpu(con->out_msg->footer.middle_crc));
++	con->out_msg->footer.flags = 0;
+ 
+ 	/* is there a data payload? */
+ 	con->out_msg->footer.data_crc = 0;
+@@ -1492,7 +1524,6 @@ static int write_partial_kvec(struct ceph_connection *con)
+ 		}
+ 	}
+ 	con->out_kvec_left = 0;
+-	con->out_kvec_is_msg = false;
+ 	ret = 1;
+ out:
+ 	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
+@@ -1584,6 +1615,7 @@ static int write_partial_skip(struct ceph_connection *con)
+ {
+ 	int ret;
+ 
++	dout("%s %p %d left\n", __func__, con, con->out_skip);
+ 	while (con->out_skip > 0) {
+ 		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
+ 
+@@ -2313,9 +2345,9 @@ static int read_partial_message(struct ceph_connection *con)
+ 			ceph_pr_addr(&con->peer_addr.in_addr),
+ 			seq, con->in_seq + 1);
+ 		con->in_base_pos = -front_len - middle_len - data_len -
+-			sizeof(m->footer);
++			sizeof_footer(con);
+ 		con->in_tag = CEPH_MSGR_TAG_READY;
+-		return 0;
++		return 1;
+ 	} else if ((s64)seq - (s64)con->in_seq > 1) {
+ 		pr_err("read_partial_message bad seq %lld expected %lld\n",
+ 		       seq, con->in_seq + 1);
+@@ -2338,10 +2370,10 @@ static int read_partial_message(struct ceph_connection *con)
+ 			/* skip this message */
+ 			dout("alloc_msg said skip message\n");
+ 			con->in_base_pos = -front_len - middle_len - data_len -
+-				sizeof(m->footer);
++				sizeof_footer(con);
+ 			con->in_tag = CEPH_MSGR_TAG_READY;
+ 			con->in_seq++;
+-			return 0;
++			return 1;
+ 		}
+ 
+ 		BUG_ON(!con->in_msg);
+@@ -2506,13 +2538,13 @@ more:
+ 
+ more_kvec:
+ 	/* kvec data queued? */
+-	if (con->out_skip) {
+-		ret = write_partial_skip(con);
++	if (con->out_kvec_left) {
++		ret = write_partial_kvec(con);
+ 		if (ret <= 0)
+ 			goto out;
+ 	}
+-	if (con->out_kvec_left) {
+-		ret = write_partial_kvec(con);
++	if (con->out_skip) {
++		ret = write_partial_skip(con);
+ 		if (ret <= 0)
+ 			goto out;
+ 	}
+@@ -3050,16 +3082,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
+ 		ceph_msg_put(msg);
+ 	}
+ 	if (con->out_msg == msg) {
+-		dout("%s %p msg %p - was sending\n", __func__, con, msg);
+-		con->out_msg = NULL;
+-		if (con->out_kvec_is_msg) {
+-			con->out_skip = con->out_kvec_bytes;
+-			con->out_kvec_is_msg = false;
++		BUG_ON(con->out_skip);
++		/* footer */
++		if (con->out_msg_done) {
++			con->out_skip += con_out_kvec_skip(con);
++		} else {
++			BUG_ON(!msg->data_length);
++			if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
++				con->out_skip += sizeof(msg->footer);
++			else
++				con->out_skip += sizeof(msg->old_footer);
+ 		}
++		/* data, middle, front */
++		if (msg->data_length)
++			con->out_skip += msg->cursor.total_resid;
++		if (msg->middle)
++			con->out_skip += con_out_kvec_skip(con);
++		con->out_skip += con_out_kvec_skip(con);
++
++		dout("%s %p msg %p - was sending, will write %d skip %d\n",
++		     __func__, con, msg, con->out_kvec_bytes, con->out_skip);
+ 		msg->hdr.seq = 0;
+-
++		con->out_msg = NULL;
+ 		ceph_msg_put(msg);
+ 	}
++
+ 	mutex_unlock(&con->mutex);
+ }
+ 
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index f8f235930d88..a28e47ff1b1b 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -2843,8 +2843,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
+ 	mutex_lock(&osdc->request_mutex);
+ 	req = __lookup_request(osdc, tid);
+ 	if (!req) {
+-		pr_warn("%s osd%d tid %llu unknown, skipping\n",
+-			__func__, osd->o_osd, tid);
++		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
++		     osd->o_osd, tid);
+ 		m = NULL;
+ 		*skip = 1;
+ 		goto out;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 7f00f2439770..9efbdb3ff78a 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4145,6 +4145,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
+ 
+ 		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+ 		diffs |= p->vlan_tci ^ skb->vlan_tci;
++		diffs |= skb_metadata_dst_cmp(p, skb);
+ 		if (maclen == ETH_HLEN)
+ 			diffs |= compare_ether_header(skb_mac_header(p),
+ 						      skb_mac_header(skb));
+@@ -4342,10 +4343,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
+ 		break;
+ 
+ 	case GRO_MERGED_FREE:
+-		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
++		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
++			skb_dst_drop(skb);
+ 			kmem_cache_free(skbuff_head_cache, skb);
+-		else
++		} else {
+ 			__kfree_skb(skb);
++		}
+ 		break;
+ 
+ 	case GRO_HELD:
+@@ -7125,8 +7128,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
+ 	setup(dev);
+ 
+-	if (!dev->tx_queue_len)
++	if (!dev->tx_queue_len) {
+ 		dev->priv_flags |= IFF_NO_QUEUE;
++		dev->tx_queue_len = 1;
++	}
+ 
+ 	dev->num_tx_queues = txqs;
+ 	dev->real_num_tx_queues = txqs;
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index d79699c9d1b9..12e700332010 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -208,7 +208,6 @@ ip:
+ 	case htons(ETH_P_IPV6): {
+ 		const struct ipv6hdr *iph;
+ 		struct ipv6hdr _iph;
+-		__be32 flow_label;
+ 
+ ipv6:
+ 		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
+@@ -230,8 +229,12 @@ ipv6:
+ 			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ 		}
+ 
+-		flow_label = ip6_flowlabel(iph);
+-		if (flow_label) {
++		if ((dissector_uses_key(flow_dissector,
++					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
++		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
++		    ip6_flowlabel(iph)) {
++			__be32 flow_label = ip6_flowlabel(iph);
++
+ 			if (dissector_uses_key(flow_dissector,
+ 					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
+ 				key_tags = skb_flow_dissector_target(flow_dissector,
+@@ -396,6 +399,13 @@ ip_proto_again:
+ 				goto out_bad;
+ 			proto = eth->h_proto;
+ 			nhoff += sizeof(*eth);
++
++			/* Cap headers that we access via pointers at the
++			 * end of the Ethernet header as our maximum alignment
++			 * at that point is only 2 bytes.
++			 */
++			if (NET_IP_ALIGN)
++				hlen = nhoff;
+ 		}
+ 
+ 		key_control->flags |= FLOW_DIS_ENCAPSULATION;
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 8a1741b14302..dce0acb929f1 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+ 		*fplp = fpl;
+ 		fpl->count = 0;
+ 		fpl->max = SCM_MAX_FD;
++		fpl->user = NULL;
+ 	}
+ 	fpp = &fpl->fp[fpl->count];
+ 
+@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+ 		*fpp++ = file;
+ 		fpl->count++;
+ 	}
++
++	if (!fpl->user)
++		fpl->user = get_uid(current_user());
++
+ 	return num;
+ }
+ 
+@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
+ 		scm->fp = NULL;
+ 		for (i=fpl->count-1; i>=0; i--)
+ 			fput(fpl->fp[i]);
++		free_uid(fpl->user);
+ 		kfree(fpl);
+ 	}
+ }
+@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
+ 		for (i = 0; i < fpl->count; i++)
+ 			get_file(fpl->fp[i]);
+ 		new_fpl->max = new_fpl->count;
++		new_fpl->user = get_uid(fpl->user);
+ 	}
+ 	return new_fpl;
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index b2df375ec9c2..5bf88f58bee7 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -79,6 +79,8 @@
+ 
+ struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
++int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
++EXPORT_SYMBOL(sysctl_max_skb_frags);
+ 
+ /**
+  *	skb_panic - private function for out-of-line support
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 95b6139d710c..a6beb7b6ae55 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -26,6 +26,7 @@ static int zero = 0;
+ static int one = 1;
+ static int min_sndbuf = SOCK_MIN_SNDBUF;
+ static int min_rcvbuf = SOCK_MIN_RCVBUF;
++static int max_skb_frags = MAX_SKB_FRAGS;
+ 
+ static int net_msg_warn;	/* Unused, but still a sysctl */
+ 
+@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec
+ 	},
++	{
++		.procname	= "max_skb_frags",
++		.data		= &sysctl_max_skb_frags,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &one,
++		.extra2		= &max_skb_frags,
++	},
+ 	{ }
+ };
+ 
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 5684e14932bd..902d606324a0 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -824,26 +824,26 @@ lookup:
+ 
+ 	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+ 		struct request_sock *req = inet_reqsk(sk);
+-		struct sock *nsk = NULL;
++		struct sock *nsk;
+ 
+ 		sk = req->rsk_listener;
+-		if (likely(sk->sk_state == DCCP_LISTEN)) {
+-			nsk = dccp_check_req(sk, skb, req);
+-		} else {
++		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
+ 			inet_csk_reqsk_queue_drop_and_put(sk, req);
+ 			goto lookup;
+ 		}
++		sock_hold(sk);
++		nsk = dccp_check_req(sk, skb, req);
+ 		if (!nsk) {
+ 			reqsk_put(req);
+-			goto discard_it;
++			goto discard_and_relse;
+ 		}
+ 		if (nsk == sk) {
+-			sock_hold(sk);
+ 			reqsk_put(req);
+ 		} else if (dccp_child_process(sk, nsk, skb)) {
+ 			dccp_v4_ctl_send_reset(sk, skb);
+-			goto discard_it;
++			goto discard_and_relse;
+ 		} else {
++			sock_put(sk);
+ 			return 0;
+ 		}
+ 	}
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 9c6d0508e63a..b8608b71a66d 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -691,26 +691,26 @@ lookup:
+ 
+ 	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+ 		struct request_sock *req = inet_reqsk(sk);
+-		struct sock *nsk = NULL;
++		struct sock *nsk;
+ 
+ 		sk = req->rsk_listener;
+-		if (likely(sk->sk_state == DCCP_LISTEN)) {
+-			nsk = dccp_check_req(sk, skb, req);
+-		} else {
++		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
+ 			inet_csk_reqsk_queue_drop_and_put(sk, req);
+ 			goto lookup;
+ 		}
++		sock_hold(sk);
++		nsk = dccp_check_req(sk, skb, req);
+ 		if (!nsk) {
+ 			reqsk_put(req);
+-			goto discard_it;
++			goto discard_and_relse;
+ 		}
+ 		if (nsk == sk) {
+-			sock_hold(sk);
+ 			reqsk_put(req);
+ 		} else if (dccp_child_process(sk, nsk, skb)) {
+ 			dccp_v6_ctl_send_reset(sk, skb);
+-			goto discard_it;
++			goto discard_and_relse;
+ 		} else {
++			sock_put(sk);
+ 			return 0;
+ 		}
+ 	}
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index cebd9d31e65a..f6303b17546b 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	err = EINVAL;
++	err = -EINVAL;
+ 	if (!tb[NETCONFA_IFINDEX])
+ 		goto errout;
+ 
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 46b9c887bede..64148914803a 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
+ 	reqsk_put(req);
+ }
+ 
+-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+-			      struct sock *child)
++struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
++				      struct request_sock *req,
++				      struct sock *child)
+ {
+ 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+ 
+ 	spin_lock(&queue->rskq_lock);
+ 	if (unlikely(sk->sk_state != TCP_LISTEN)) {
+ 		inet_child_forget(sk, req, child);
++		child = NULL;
+ 	} else {
+ 		req->sk = child;
+ 		req->dl_next = NULL;
+@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+ 		sk_acceptq_added(sk);
+ 	}
+ 	spin_unlock(&queue->rskq_lock);
++	return child;
+ }
+ EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
+ 
+@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+ 	if (own_req) {
+ 		inet_csk_reqsk_queue_drop(sk, req);
+ 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+-		inet_csk_reqsk_queue_add(sk, req, child);
+-		/* Warning: caller must not call reqsk_put(req);
+-		 * child stole last reference on it.
+-		 */
+-		return child;
++		if (inet_csk_reqsk_queue_add(sk, req, child))
++			return child;
+ 	}
+ 	/* Too bad, another child took ownership of the request, undo. */
+ 	bh_unlock_sock(child);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 1fe55ae81781..b8a0607dab96 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
+ 	struct ipq *qp;
+ 
+ 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
++	skb_orphan(skb);
+ 
+ 	/* Lookup (or create) queue header */
+ 	qp = ip_find(net, ip_hdr(skb), user, vif);
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 5f73a7c03e27..a50124260f5a 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
+ 		switch (cmsg->cmsg_type) {
+ 		case IP_RETOPTS:
+ 			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
++
++			/* Our caller is responsible for freeing ipc->opt */
+ 			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
+ 					     err < 40 ? err : 40);
+ 			if (err)
+diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
+index 6fb869f646bf..a04dee536b8e 100644
+--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
++++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
+@@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
+ {
+ 	int err;
+ 
+-	skb_orphan(skb);
+-
+ 	local_bh_disable();
+ 	err = ip_defrag(net, skb, user);
+ 	local_bh_enable();
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index e89094ab5ddb..aa67e0e64b69 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			return err;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 	}
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index bc35f1842512..7113bae4e6a0 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(net, msg, &ipc, false);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			goto out;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 	}
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 85f184e429c6..02c62299d717 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
+ static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
+ static int ip_rt_min_advmss __read_mostly	= 256;
+ 
++static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+ /*
+  *	Interface to generic destination cache.
+  */
+@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ 				struct fib_nh *nh = &FIB_RES_NH(res);
+ 
+ 				update_or_create_fnhe(nh, fl4->daddr, new_gw,
+-						      0, 0);
++						0, jiffies + ip_rt_gc_timeout);
+ 			}
+ 			if (kill_route)
+ 				rt->dst.obsolete = DST_OBSOLETE_KILL;
+@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
+ #endif
+ }
+ 
++static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
++{
++	struct fnhe_hash_bucket *hash;
++	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
++	u32 hval = fnhe_hashfun(daddr);
++
++	spin_lock_bh(&fnhe_lock);
++
++	hash = rcu_dereference_protected(nh->nh_exceptions,
++					 lockdep_is_held(&fnhe_lock));
++	hash += hval;
++
++	fnhe_p = &hash->chain;
++	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
++	while (fnhe) {
++		if (fnhe->fnhe_daddr == daddr) {
++			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
++				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
++			fnhe_flush_routes(fnhe);
++			kfree_rcu(fnhe, rcu);
++			break;
++		}
++		fnhe_p = &fnhe->fnhe_next;
++		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
++						 lockdep_is_held(&fnhe_lock));
++	}
++
++	spin_unlock_bh(&fnhe_lock);
++}
++
+ /* called in rcu_read_lock() section */
+ static int __mkroute_input(struct sk_buff *skb,
+ 			   const struct fib_result *res,
+@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
+ 
+ 	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
+ 	if (do_cache) {
+-		if (fnhe)
++		if (fnhe) {
+ 			rth = rcu_dereference(fnhe->fnhe_rth_input);
+-		else
+-			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
++			if (rth && rth->dst.expires &&
++			    time_after(jiffies, rth->dst.expires)) {
++				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
++				fnhe = NULL;
++			} else {
++				goto rt_cache;
++			}
++		}
++
++		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ 
++rt_cache:
+ 		if (rt_cache_valid(rth)) {
+ 			skb_dst_set_noref(skb, &rth->dst);
+ 			goto out;
+@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		struct fib_nh *nh = &FIB_RES_NH(*res);
+ 
+ 		fnhe = find_exception(nh, fl4->daddr);
+-		if (fnhe)
++		if (fnhe) {
+ 			prth = &fnhe->fnhe_rth_output;
+-		else {
+-			if (unlikely(fl4->flowi4_flags &
+-				     FLOWI_FLAG_KNOWN_NH &&
+-				     !(nh->nh_gw &&
+-				       nh->nh_scope == RT_SCOPE_LINK))) {
+-				do_cache = false;
+-				goto add;
++			rth = rcu_dereference(*prth);
++			if (rth && rth->dst.expires &&
++			    time_after(jiffies, rth->dst.expires)) {
++				ip_del_fnhe(nh, fl4->daddr);
++				fnhe = NULL;
++			} else {
++				goto rt_cache;
+ 			}
+-			prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		}
++
++		if (unlikely(fl4->flowi4_flags &
++			     FLOWI_FLAG_KNOWN_NH &&
++			     !(nh->nh_gw &&
++			       nh->nh_scope == RT_SCOPE_LINK))) {
++			do_cache = false;
++			goto add;
++		}
++		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		rth = rcu_dereference(*prth);
++
++rt_cache:
+ 		if (rt_cache_valid(rth)) {
+ 			dst_hold(&rth->dst);
+ 			return rth;
+@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
+ }
+ 
+ #ifdef CONFIG_SYSCTL
+-static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+ static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
+ static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
+ static int ip_rt_gc_elasticity __read_mostly	= 8;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index c82cca18c90f..036a76ba2ac2 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -279,6 +279,7 @@
+ 
+ #include <asm/uaccess.h>
+ #include <asm/ioctls.h>
++#include <asm/unaligned.h>
+ #include <net/busy_poll.h>
+ 
+ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
+@@ -938,7 +939,7 @@ new_segment:
+ 
+ 		i = skb_shinfo(skb)->nr_frags;
+ 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
+-		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
++		if (!can_coalesce && i >= sysctl_max_skb_frags) {
+ 			tcp_mark_push(tp, skb);
+ 			goto new_segment;
+ 		}
+@@ -1211,7 +1212,7 @@ new_segment:
+ 
+ 			if (!skb_can_coalesce(skb, i, pfrag->page,
+ 					      pfrag->offset)) {
+-				if (i == MAX_SKB_FRAGS || !sg) {
++				if (i == sysctl_max_skb_frags || !sg) {
+ 					tcp_mark_push(tp, skb);
+ 					goto new_segment;
+ 				}
+@@ -2637,6 +2638,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
+ 	const struct inet_connection_sock *icsk = inet_csk(sk);
+ 	u32 now = tcp_time_stamp;
+ 	unsigned int start;
++	u64 rate64;
+ 	u32 rate;
+ 
+ 	memset(info, 0, sizeof(*info));
+@@ -2702,15 +2704,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
+ 	info->tcpi_total_retrans = tp->total_retrans;
+ 
+ 	rate = READ_ONCE(sk->sk_pacing_rate);
+-	info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
++	rate64 = rate != ~0U ? rate : ~0ULL;
++	put_unaligned(rate64, &info->tcpi_pacing_rate);
+ 
+ 	rate = READ_ONCE(sk->sk_max_pacing_rate);
+-	info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
++	rate64 = rate != ~0U ? rate : ~0ULL;
++	put_unaligned(rate64, &info->tcpi_max_pacing_rate);
+ 
+ 	do {
+ 		start = u64_stats_fetch_begin_irq(&tp->syncp);
+-		info->tcpi_bytes_acked = tp->bytes_acked;
+-		info->tcpi_bytes_received = tp->bytes_received;
++		put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
++		put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
+ 	} while (u64_stats_fetch_retry_irq(&tp->syncp, start));
+ 	info->tcpi_segs_out = tp->segs_out;
+ 	info->tcpi_segs_in = tp->segs_in;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index d8841a2f1569..8c7e63163e92 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -312,7 +312,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
+ 
+ 
+ /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
+-void tcp_req_err(struct sock *sk, u32 seq)
++void tcp_req_err(struct sock *sk, u32 seq, bool abort)
+ {
+ 	struct request_sock *req = inet_reqsk(sk);
+ 	struct net *net = sock_net(sk);
+@@ -324,7 +324,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
+ 
+ 	if (seq != tcp_rsk(req)->snt_isn) {
+ 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+-	} else {
++	} else if (abort) {
+ 		/*
+ 		 * Still in SYN_RECV, just remove it silently.
+ 		 * There is no good way to pass the error to the newly
+@@ -384,7 +384,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ 	}
+ 	seq = ntohl(th->seq);
+ 	if (sk->sk_state == TCP_NEW_SYN_RECV)
+-		return tcp_req_err(sk, seq);
++		return tcp_req_err(sk, seq,
++				  type == ICMP_PARAMETERPROB ||
++				  type == ICMP_TIME_EXCEEDED ||
++				  (type == ICMP_DEST_UNREACH &&
++				   (code == ICMP_NET_UNREACH ||
++				    code == ICMP_HOST_UNREACH)));
+ 
+ 	bh_lock_sock(sk);
+ 	/* If too many ICMPs get dropped on busy
+@@ -705,7 +710,8 @@ release_sk1:
+    outside socket context is ugly, certainly. What can I do?
+  */
+ 
+-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
++static void tcp_v4_send_ack(struct net *net,
++			    struct sk_buff *skb, u32 seq, u32 ack,
+ 			    u32 win, u32 tsval, u32 tsecr, int oif,
+ 			    struct tcp_md5sig_key *key,
+ 			    int reply_flags, u8 tos)
+@@ -720,7 +726,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+ 			];
+ 	} rep;
+ 	struct ip_reply_arg arg;
+-	struct net *net = dev_net(skb_dst(skb)->dev);
+ 
+ 	memset(&rep.th, 0, sizeof(struct tcphdr));
+ 	memset(&arg, 0, sizeof(arg));
+@@ -782,7 +787,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ 	struct inet_timewait_sock *tw = inet_twsk(sk);
+ 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+ 
+-	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
++	tcp_v4_send_ack(sock_net(sk), skb,
++			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+ 			tcp_time_stamp + tcptw->tw_ts_offset,
+ 			tcptw->tw_ts_recent,
+@@ -801,8 +807,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+ 	 */
+-	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+-			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
++	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
++					     tcp_sk(sk)->snd_nxt;
++
++	tcp_v4_send_ack(sock_net(sk), skb, seq,
+ 			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
+ 			tcp_time_stamp,
+ 			req->ts_recent,
+@@ -1586,28 +1594,30 @@ process:
+ 
+ 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
+ 		struct request_sock *req = inet_reqsk(sk);
+-		struct sock *nsk = NULL;
++		struct sock *nsk;
+ 
+ 		sk = req->rsk_listener;
+-		if (tcp_v4_inbound_md5_hash(sk, skb))
+-			goto discard_and_relse;
+-		if (likely(sk->sk_state == TCP_LISTEN)) {
+-			nsk = tcp_check_req(sk, skb, req, false);
+-		} else {
++		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
++			reqsk_put(req);
++			goto discard_it;
++		}
++		if (unlikely(sk->sk_state != TCP_LISTEN)) {
+ 			inet_csk_reqsk_queue_drop_and_put(sk, req);
+ 			goto lookup;
+ 		}
++		sock_hold(sk);
++		nsk = tcp_check_req(sk, skb, req, false);
+ 		if (!nsk) {
+ 			reqsk_put(req);
+-			goto discard_it;
++			goto discard_and_relse;
+ 		}
+ 		if (nsk == sk) {
+-			sock_hold(sk);
+ 			reqsk_put(req);
+ 		} else if (tcp_child_process(sk, nsk, skb)) {
+ 			tcp_v4_send_reset(nsk, skb);
+-			goto discard_it;
++			goto discard_and_relse;
+ 		} else {
++			sock_put(sk);
+ 			return 0;
+ 		}
+ 	}
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index c43890848641..7f8ab46adf61 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -966,8 +966,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc,
+ 				   sk->sk_family == AF_INET6);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			return err;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 		connected = 0;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 1f21087accab..e8d3da0817d3 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -583,7 +583,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	err = EINVAL;
++	err = -EINVAL;
+ 	if (!tb[NETCONFA_IFINDEX])
+ 		goto errout;
+ 
+@@ -3506,6 +3506,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
+ {
+ 	struct inet6_dev *idev = ifp->idev;
+ 	struct net_device *dev = idev->dev;
++	bool notify = false;
+ 
+ 	addrconf_join_solict(dev, &ifp->addr);
+ 
+@@ -3551,7 +3552,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
+ 			/* Because optimistic nodes can use this address,
+ 			 * notify listeners. If DAD fails, RTM_DELADDR is sent.
+ 			 */
+-			ipv6_ifa_notify(RTM_NEWADDR, ifp);
++			notify = true;
+ 		}
+ 	}
+ 
+@@ -3559,6 +3560,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
+ out:
+ 	spin_unlock(&ifp->lock);
+ 	read_unlock_bh(&idev->lock);
++	if (notify)
++		ipv6_ifa_notify(RTM_NEWADDR, ifp);
+ }
+ 
+ static void addrconf_dad_start(struct inet6_ifaddr *ifp)
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 517c55b01ba8..428162155280 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -162,6 +162,9 @@ ipv4_connected:
+ 	fl6.fl6_dport = inet->inet_dport;
+ 	fl6.fl6_sport = inet->inet_sport;
+ 
++	if (!fl6.flowi6_oif)
++		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
++
+ 	if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
+ 		fl6.flowi6_oif = np->mcast_oif;
+ 
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index 1f9ebe3cbb4a..dc2db4f7b182 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
+ 		}
+ 		spin_lock_bh(&ip6_sk_fl_lock);
+ 		for (sflp = &np->ipv6_fl_list;
+-		     (sfl = rcu_dereference(*sflp)) != NULL;
++		     (sfl = rcu_dereference_protected(*sflp,
++						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
+ 		     sflp = &sfl->next) {
+ 			if (sfl->fl->label == freq.flr_label) {
+ 				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
+ 					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
+-				*sflp = rcu_dereference(sfl->next);
++				*sflp = sfl->next;
+ 				spin_unlock_bh(&ip6_sk_fl_lock);
+ 				fl_release(sfl->fl);
+ 				kfree_rcu(sfl, rcu);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 6473889f1736..31144c486c52 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
+ 	struct rt6_info *rt;
+ #endif
+ 	int err;
++	int flags = 0;
+ 
+ 	/* The correct way to handle this would be to do
+ 	 * ip6_route_get_saddr, and then ip6_route_output; however,
+@@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
+ 			dst_release(*dst);
+ 			*dst = NULL;
+ 		}
++
++		if (fl6->flowi6_oif)
++			flags |= RT6_LOOKUP_F_IFACE;
+ 	}
+ 
+ 	if (!*dst)
+-		*dst = ip6_route_output(net, sk, fl6);
++		*dst = ip6_route_output_flags(net, sk, fl6, flags);
+ 
+ 	err = (*dst)->error;
+ 	if (err)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 826e6aa44f8d..3f164d3aaee2 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1174,11 +1174,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
+ 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
+ }
+ 
+-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+-				    struct flowi6 *fl6)
++struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
++					 struct flowi6 *fl6, int flags)
+ {
+ 	struct dst_entry *dst;
+-	int flags = 0;
+ 	bool any_src;
+ 
+ 	dst = l3mdev_rt6_dst_by_oif(net, fl6);
+@@ -1199,7 +1198,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+ 
+ 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
+ }
+-EXPORT_SYMBOL(ip6_route_output);
++EXPORT_SYMBOL_GPL(ip6_route_output_flags);
+ 
+ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
+ {
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index bd100b47c717..b8d405623f4f 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -328,6 +328,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	struct tcp_sock *tp;
+ 	__u32 seq, snd_una;
+ 	struct sock *sk;
++	bool fatal;
+ 	int err;
+ 
+ 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
+@@ -346,8 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 		return;
+ 	}
+ 	seq = ntohl(th->seq);
++	fatal = icmpv6_err_convert(type, code, &err);
+ 	if (sk->sk_state == TCP_NEW_SYN_RECV)
+-		return tcp_req_err(sk, seq);
++		return tcp_req_err(sk, seq, fatal);
+ 
+ 	bh_lock_sock(sk);
+ 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
+@@ -401,7 +403,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 		goto out;
+ 	}
+ 
+-	icmpv6_err_convert(type, code, &err);
+ 
+ 	/* Might be for an request_sock */
+ 	switch (sk->sk_state) {
+@@ -1387,7 +1388,7 @@ process:
+ 
+ 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
+ 		struct request_sock *req = inet_reqsk(sk);
+-		struct sock *nsk = NULL;
++		struct sock *nsk;
+ 
+ 		sk = req->rsk_listener;
+ 		tcp_v6_fill_cb(skb, hdr, th);
+@@ -1395,24 +1396,24 @@ process:
+ 			reqsk_put(req);
+ 			goto discard_it;
+ 		}
+-		if (likely(sk->sk_state == TCP_LISTEN)) {
+-			nsk = tcp_check_req(sk, skb, req, false);
+-		} else {
++		if (unlikely(sk->sk_state != TCP_LISTEN)) {
+ 			inet_csk_reqsk_queue_drop_and_put(sk, req);
+ 			goto lookup;
+ 		}
++		sock_hold(sk);
++		nsk = tcp_check_req(sk, skb, req, false);
+ 		if (!nsk) {
+ 			reqsk_put(req);
+-			goto discard_it;
++			goto discard_and_relse;
+ 		}
+ 		if (nsk == sk) {
+-			sock_hold(sk);
+ 			reqsk_put(req);
+ 			tcp_v6_restore_cb(skb);
+ 		} else if (tcp_child_process(sk, nsk, skb)) {
+ 			tcp_v6_send_reset(nsk, skb);
+-			goto discard_it;
++			goto discard_and_relse;
+ 		} else {
++			sock_put(sk);
+ 			return 0;
+ 		}
+ 	}
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 435608c4306d..20ab7b2ec463 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
+ 	if (!addr || addr->sa_family != AF_IUCV)
+ 		return -EINVAL;
+ 
++	if (addr_len < sizeof(struct sockaddr_iucv))
++		return -EINVAL;
++
+ 	lock_sock(sk);
+ 	if (sk->sk_state != IUCV_OPEN) {
+ 		err = -EBADFD;
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index f93c5be612a7..2caaa84ce92d 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
+ 	ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
+ 				  NLM_F_ACK, tunnel, cmd);
+ 
+-	if (ret >= 0)
+-		return genlmsg_multicast_allns(family, msg, 0,	0, GFP_ATOMIC);
++	if (ret >= 0) {
++		ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++		/* We don't care if no one is listening */
++		if (ret == -ESRCH)
++			ret = 0;
++		return ret;
++	}
+ 
+ 	nlmsg_free(msg);
+ 
+@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
+ 	ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
+ 				   NLM_F_ACK, session, cmd);
+ 
+-	if (ret >= 0)
+-		return genlmsg_multicast_allns(family, msg, 0,	0, GFP_ATOMIC);
++	if (ret >= 0) {
++		ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++		/* We don't care if no one is listening */
++		if (ret == -ESRCH)
++			ret = 0;
++		return ret;
++	}
+ 
+ 	nlmsg_free(msg);
+ 
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 337bb5d78003..6a12b0f5cac8 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -1732,7 +1732,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
+ 		if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ 			continue;
+ 		sdata->u.ibss.last_scan_completed = jiffies;
+-		ieee80211_queue_work(&local->hw, &sdata->work);
+ 	}
+ 	mutex_unlock(&local->iflist_mtx);
+ }
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index fa28500f28fd..6f85b6ab8e51 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1370,17 +1370,6 @@ out:
+ 	sdata_unlock(sdata);
+ }
+ 
+-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
+-{
+-	struct ieee80211_sub_if_data *sdata;
+-
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(sdata, &local->interfaces, list)
+-		if (ieee80211_vif_is_mesh(&sdata->vif) &&
+-		    ieee80211_sdata_running(sdata))
+-			ieee80211_queue_work(&local->hw, &sdata->work);
+-	rcu_read_unlock();
+-}
+ 
+ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
+ {
+diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
+index a1596344c3ba..4a8019f79fb2 100644
+--- a/net/mac80211/mesh.h
++++ b/net/mac80211/mesh.h
+@@ -362,14 +362,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
+ 	return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
+ }
+ 
+-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
+-
+ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
+ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
+ void ieee80211s_stop(void);
+ #else
+-static inline void
+-ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
+ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
+ { return false; }
+ static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 3aa04344942b..83097c3832d1 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4003,8 +4003,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
+ 		if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
+ 			ieee80211_queue_work(&sdata->local->hw,
+ 					     &sdata->u.mgd.monitor_work);
+-		/* and do all the other regular work too */
+-		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+ 	}
+ }
+ 
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index a413e52f7691..acbe182b75d1 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+ 	bool was_scanning = local->scanning;
+ 	struct cfg80211_scan_request *scan_req;
+ 	struct ieee80211_sub_if_data *scan_sdata;
++	struct ieee80211_sub_if_data *sdata;
+ 
+ 	lockdep_assert_held(&local->mtx);
+ 
+@@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+ 
+ 	ieee80211_mlme_notify_scan_completed(local);
+ 	ieee80211_ibss_notify_scan_completed(local);
+-	ieee80211_mesh_notify_scan_completed(local);
++
++	/* Requeue all the work that might have been ignored while
++	 * the scan was in progress; if there was none this will
++	 * just be a no-op for the particular interface.
++	 */
++	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
++		if (ieee80211_sdata_running(sdata))
++			ieee80211_queue_work(&sdata->local->hw, &sdata->work);
++	}
++
+ 	if (was_scanning)
+ 		ieee80211_start_next_roc(local);
+ }
+diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
+index 1605691d9414..d933cb89efac 100644
+--- a/net/openvswitch/vport-vxlan.c
++++ b/net/openvswitch/vport-vxlan.c
+@@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
+ 	int err;
+ 	struct vxlan_config conf = {
+ 		.no_share = true,
+-		.flags = VXLAN_F_COLLECT_METADATA,
++		.flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
+ 	};
+ 
+ 	if (!options) {
+diff --git a/net/rfkill/core.c b/net/rfkill/core.c
+index f53bf3b6558b..cf5b69ab1829 100644
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -1095,17 +1095,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
+ 	return res;
+ }
+ 
+-static bool rfkill_readable(struct rfkill_data *data)
+-{
+-	bool r;
+-
+-	mutex_lock(&data->mtx);
+-	r = !list_empty(&data->events);
+-	mutex_unlock(&data->mtx);
+-
+-	return r;
+-}
+-
+ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ 			       size_t count, loff_t *pos)
+ {
+@@ -1122,8 +1111,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ 			goto out;
+ 		}
+ 		mutex_unlock(&data->mtx);
++		/* since we re-check and it just compares pointers,
++		 * using !list_empty() without locking isn't a problem
++		 */
+ 		ret = wait_event_interruptible(data->read_wait,
+-					       rfkill_readable(data));
++					       !list_empty(&data->events));
+ 		mutex_lock(&data->mtx);
+ 
+ 		if (ret)
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index b5c2cf2aa6d4..af1acf009866 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1852,6 +1852,7 @@ reset:
+ 	}
+ 
+ 	tp = old_tp;
++	protocol = tc_skb_protocol(skb);
+ 	goto reclassify;
+ #endif
+ }
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 3d9ea9a48289..8b4ff315695e 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -60,6 +60,8 @@
+ #include <net/inet_common.h>
+ #include <net/inet_ecn.h>
+ 
++#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
++
+ /* Global data structures. */
+ struct sctp_globals sctp_globals __read_mostly;
+ 
+@@ -1352,6 +1354,8 @@ static __init int sctp_init(void)
+ 	unsigned long limit;
+ 	int max_share;
+ 	int order;
++	int num_entries;
++	int max_entry_order;
+ 
+ 	sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
+ 
+@@ -1404,14 +1408,24 @@ static __init int sctp_init(void)
+ 
+ 	/* Size and allocate the association hash table.
+ 	 * The methodology is similar to that of the tcp hash tables.
++	 * Though not identical.  Start by getting a goal size
+ 	 */
+ 	if (totalram_pages >= (128 * 1024))
+ 		goal = totalram_pages >> (22 - PAGE_SHIFT);
+ 	else
+ 		goal = totalram_pages >> (24 - PAGE_SHIFT);
+ 
+-	for (order = 0; (1UL << order) < goal; order++)
+-		;
++	/* Then compute the page order for said goal */
++	order = get_order(goal);
++
++	/* Now compute the required page order for the maximum sized table we
++	 * want to create
++	 */
++	max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
++				    sizeof(struct sctp_bind_hashbucket));
++
++	/* Limit the page order by that maximum hash table size */
++	order = min(order, max_entry_order);
+ 
+ 	do {
+ 		sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
+@@ -1445,20 +1459,35 @@ static __init int sctp_init(void)
+ 		INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
+ 	}
+ 
+-	/* Allocate and initialize the SCTP port hash table.  */
++	/* Allocate and initialize the SCTP port hash table.
++	 * Note that order is initalized to start at the max sized
++	 * table we want to support.  If we can't get that many pages
++	 * reduce the order and try again
++	 */
+ 	do {
+-		sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
+-					sizeof(struct sctp_bind_hashbucket);
+-		if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
+-			continue;
+ 		sctp_port_hashtable = (struct sctp_bind_hashbucket *)
+ 			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
+ 	} while (!sctp_port_hashtable && --order > 0);
++
+ 	if (!sctp_port_hashtable) {
+ 		pr_err("Failed bind hash alloc\n");
+ 		status = -ENOMEM;
+ 		goto err_bhash_alloc;
+ 	}
++
++	/* Now compute the number of entries that will fit in the
++	 * port hash space we allocated
++	 */
++	num_entries = (1UL << order) * PAGE_SIZE /
++		      sizeof(struct sctp_bind_hashbucket);
++
++	/* And finish by rounding it down to the nearest power of two
++	 * this wastes some memory of course, but its needed because
++	 * the hash function operates based on the assumption that
++	 * that the number of entries is a power of two
++	 */
++	sctp_port_hashsize = rounddown_pow_of_two(num_entries);
++
+ 	for (i = 0; i < sctp_port_hashsize; i++) {
+ 		spin_lock_init(&sctp_port_hashtable[i].lock);
+ 		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index ef1d90fdc773..be1489fc3234 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -5542,6 +5542,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ 	struct sctp_hmac_algo_param *hmacs;
+ 	__u16 data_len = 0;
+ 	u32 num_idents;
++	int i;
+ 
+ 	if (!ep->auth_enable)
+ 		return -EACCES;
+@@ -5559,8 +5560,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ 		return -EFAULT;
+ 	if (put_user(num_idents, &p->shmac_num_idents))
+ 		return -EFAULT;
+-	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
+-		return -EFAULT;
++	for (i = 0; i < num_idents; i++) {
++		__u16 hmacid = ntohs(hmacs->hmac_ids[i]);
++
++		if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
++			return -EFAULT;
++	}
+ 	return 0;
+ }
+ 
+@@ -6640,6 +6645,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
+ 
+ 			if (cmsgs->srinfo->sinfo_flags &
+ 			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
++			      SCTP_SACK_IMMEDIATELY |
+ 			      SCTP_ABORT | SCTP_EOF))
+ 				return -EINVAL;
+ 			break;
+@@ -6663,6 +6669,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
+ 
+ 			if (cmsgs->sinfo->snd_flags &
+ 			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
++			      SCTP_SACK_IMMEDIATELY |
+ 			      SCTP_ABORT | SCTP_EOF))
+ 				return -EINVAL;
+ 			break;
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 5e4f815c2b34..21e20353178e 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
+ 	if (bp[0] == '\\' && bp[1] == 'x') {
+ 		/* HEX STRING */
+ 		bp += 2;
+-		while (len < bufsize) {
++		while (len < bufsize - 1) {
+ 			int h, l;
+ 
+ 			h = hex_to_bin(bp[0]);
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index f34e535e93bd..d5d7132ac847 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -20,6 +20,7 @@
+ #include <linux/list.h>
+ #include <linux/workqueue.h>
+ #include <linux/if_vlan.h>
++#include <linux/rtnetlink.h>
+ #include <net/ip_fib.h>
+ #include <net/switchdev.h>
+ 
+@@ -565,7 +566,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
+ }
+ EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
+ 
+-static DEFINE_MUTEX(switchdev_mutex);
+ static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
+ 
+ /**
+@@ -580,9 +580,9 @@ int register_switchdev_notifier(struct notifier_block *nb)
+ {
+ 	int err;
+ 
+-	mutex_lock(&switchdev_mutex);
++	rtnl_lock();
+ 	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
+-	mutex_unlock(&switchdev_mutex);
++	rtnl_unlock();
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(register_switchdev_notifier);
+@@ -598,9 +598,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb)
+ {
+ 	int err;
+ 
+-	mutex_lock(&switchdev_mutex);
++	rtnl_lock();
+ 	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
+-	mutex_unlock(&switchdev_mutex);
++	rtnl_unlock();
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
+@@ -614,16 +614,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
+  *	Call all network notifier blocks. This should be called by driver
+  *	when it needs to propagate hardware event.
+  *	Return values are same as for atomic_notifier_call_chain().
++ *	rtnl_lock must be held.
+  */
+ int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+ 			     struct switchdev_notifier_info *info)
+ {
+ 	int err;
+ 
++	ASSERT_RTNL();
++
+ 	info->dev = dev;
+-	mutex_lock(&switchdev_mutex);
+ 	err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
+-	mutex_unlock(&switchdev_mutex);
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
+diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
+index 9dc239dfe192..92e367a0a5ce 100644
+--- a/net/tipc/bcast.c
++++ b/net/tipc/bcast.c
+@@ -399,8 +399,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
+ 
+ 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
+-	if (!hdr)
++	if (!hdr) {
++		tipc_bcast_unlock(net);
+ 		return -EMSGSIZE;
++	}
+ 
+ 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
+ 	if (!attrs)
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 20cddec0a43c..3926b561f873 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -168,12 +168,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
+ 	skb_queue_head_init(&n_ptr->bc_entry.inputq1);
+ 	__skb_queue_head_init(&n_ptr->bc_entry.arrvq);
+ 	skb_queue_head_init(&n_ptr->bc_entry.inputq2);
+-	hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
+-	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+-		if (n_ptr->addr < temp_node->addr)
+-			break;
+-	}
+-	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
+ 	n_ptr->state = SELF_DOWN_PEER_LEAVING;
+ 	n_ptr->signature = INVALID_NODE_SIG;
+ 	n_ptr->active_links[0] = INVALID_BEARER_ID;
+@@ -193,6 +187,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
+ 	tipc_node_get(n_ptr);
+ 	setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
+ 	n_ptr->keepalive_intv = U32_MAX;
++	hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
++	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
++		if (n_ptr->addr < temp_node->addr)
++			break;
++	}
++	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
+ exit:
+ 	spin_unlock_bh(&tn->node_list_lock);
+ 	return n_ptr;
+diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
+index 350cca33ee0a..69ee2eeef968 100644
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -289,15 +289,14 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
+ 				struct sockaddr_tipc *addr, void *usr_data,
+ 				void *buf, size_t len)
+ {
+-	struct tipc_subscriber *subscriber = usr_data;
++	struct tipc_subscriber *subscrb = usr_data;
+ 	struct tipc_subscription *sub = NULL;
+ 	struct tipc_net *tn = net_generic(net, tipc_net_id);
+ 
+-	tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
+-	if (sub)
+-		tipc_nametbl_subscribe(sub);
+-	else
+-		tipc_conn_terminate(tn->topsrv, subscriber->conid);
++	if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
++		return tipc_conn_terminate(tn->topsrv, subscrb->conid);
++
++	tipc_nametbl_subscribe(sub);
+ }
+ 
+ /* Handle one request to establish a new subscriber */
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index e3f85bc8b135..898a53a562b8 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 	UNIXCB(skb).fp = NULL;
+ 
+ 	for (i = scm->fp->count-1; i >= 0; i--)
+-		unix_notinflight(scm->fp->fp[i]);
++		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+ }
+ 
+ static void unix_destruct_scm(struct sk_buff *skb)
+@@ -1561,7 +1561,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 		return -ENOMEM;
+ 
+ 	for (i = scm->fp->count - 1; i >= 0; i--)
+-		unix_inflight(scm->fp->fp[i]);
++		unix_inflight(scm->fp->user, scm->fp->fp[i]);
+ 	return max_level;
+ }
+ 
+@@ -1781,7 +1781,12 @@ restart_locked:
+ 			goto out_unlock;
+ 	}
+ 
+-	if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++	/* other == sk && unix_peer(other) != sk if
++	 * - unix_peer(sk) == NULL, destination address bound to sk
++	 * - unix_peer(sk) == sk by time of get but disconnected before lock
++	 */
++	if (other != sk &&
++	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ 		if (timeo) {
+ 			timeo = unix_wait_for_peer(other, timeo);
+ 
+@@ -2270,13 +2275,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
+ 	size_t size = state->size;
+ 	unsigned int last_len;
+ 
+-	err = -EINVAL;
+-	if (sk->sk_state != TCP_ESTABLISHED)
++	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
++		err = -EINVAL;
+ 		goto out;
++	}
+ 
+-	err = -EOPNOTSUPP;
+-	if (flags & MSG_OOB)
++	if (unlikely(flags & MSG_OOB)) {
++		err = -EOPNOTSUPP;
+ 		goto out;
++	}
+ 
+ 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
+ 	timeo = sock_rcvtimeo(sk, noblock);
+@@ -2322,9 +2329,11 @@ again:
+ 				goto unlock;
+ 
+ 			unix_state_unlock(sk);
+-			err = -EAGAIN;
+-			if (!timeo)
++			if (!timeo) {
++				err = -EAGAIN;
+ 				break;
++			}
++
+ 			mutex_unlock(&u->readlock);
+ 
+ 			timeo = unix_stream_data_wait(sk, timeo, last,
+@@ -2332,6 +2341,7 @@ again:
+ 
+ 			if (signal_pending(current)) {
+ 				err = sock_intr_errno(timeo);
++				scm_destroy(&scm);
+ 				goto out;
+ 			}
+ 
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index c512f64d5287..4d9679701a6d 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -220,7 +220,7 @@ done:
+ 	return skb->len;
+ }
+ 
+-static struct sock *unix_lookup_by_ino(int ino)
++static struct sock *unix_lookup_by_ino(unsigned int ino)
+ {
+ 	int i;
+ 	struct sock *sk;
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 8fcdc2283af5..6a0d48525fcf 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
+  * descriptor if it is for an AF_UNIX socket.
+  */
+ 
+-void unix_inflight(struct file *fp)
++void unix_inflight(struct user_struct *user, struct file *fp)
+ {
+ 	struct sock *s = unix_get_socket(fp);
+ 
+@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
+ 		}
+ 		unix_tot_inflight++;
+ 	}
+-	fp->f_cred->user->unix_inflight++;
++	user->unix_inflight++;
+ 	spin_unlock(&unix_gc_lock);
+ }
+ 
+-void unix_notinflight(struct file *fp)
++void unix_notinflight(struct user_struct *user, struct file *fp)
+ {
+ 	struct sock *s = unix_get_socket(fp);
+ 
+@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
+ 			list_del_init(&u->link);
+ 		unix_tot_inflight--;
+ 	}
+-	fp->f_cred->user->unix_inflight--;
++	user->unix_inflight--;
+ 	spin_unlock(&unix_gc_lock);
+ }
+ 
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index dacf71a43ad4..ba6c34ea5429 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -62,7 +62,7 @@ vmlinux_link()
+ 			-Wl,--start-group                                    \
+ 				 ${KBUILD_VMLINUX_MAIN}                      \
+ 			-Wl,--end-group                                      \
+-			-lutil -lrt ${1}
++			-lutil -lrt -lpthread ${1}
+ 		rm -f linux
+ 	fi
+ }
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index ff81026f6ddb..7c57c7fcf5a2 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
+  */
+ static inline unsigned int smk_ptrace_mode(unsigned int mode)
+ {
+-	switch (mode) {
+-	case PTRACE_MODE_READ:
+-		return MAY_READ;
+-	case PTRACE_MODE_ATTACH:
++	if (mode & PTRACE_MODE_ATTACH)
+ 		return MAY_READWRITE;
+-	}
++	if (mode & PTRACE_MODE_READ)
++		return MAY_READ;
+ 
+ 	return 0;
+ }
+diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
+index d3c19c970a06..cb6ed10816d4 100644
+--- a/security/yama/yama_lsm.c
++++ b/security/yama/yama_lsm.c
+@@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
+ 	int rc = 0;
+ 
+ 	/* require ptrace target be a child of ptracer on attach */
+-	if (mode == PTRACE_MODE_ATTACH) {
++	if (mode & PTRACE_MODE_ATTACH) {
+ 		switch (ptrace_scope) {
+ 		case YAMA_SCOPE_DISABLED:
+ 			/* No additional restrictions. */
+@@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
+ 		}
+ 	}
+ 
+-	if (rc) {
++	if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
+ 		printk_ratelimited(KERN_NOTICE
+ 			"ptrace of pid %d was attempted by: %s (pid %d)\n",
+ 			child->pid, current->comm, current->pid);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 2c13298e80b7..2ff692dd2c5f 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -357,7 +357,10 @@ enum {
+ 					((pci)->device == 0x0d0c) || \
+ 					((pci)->device == 0x160c))
+ 
+-#define IS_BROXTON(pci)	((pci)->device == 0x5a98)
++#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
++#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
++#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
++#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+ 
+ static char *driver_short_names[] = {
+ 	[AZX_DRIVER_ICH] = "HDA Intel",
+@@ -534,13 +537,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
+ 
+ 	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+ 		snd_hdac_set_codec_wakeup(bus, true);
+-	if (IS_BROXTON(pci)) {
++	if (IS_SKL_PLUS(pci)) {
+ 		pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+ 		val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
+ 		pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+ 	}
+ 	azx_init_chip(chip, full_reset);
+-	if (IS_BROXTON(pci)) {
++	if (IS_SKL_PLUS(pci)) {
+ 		pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+ 		val = val | INTEL_HDA_CGCTL_MISCBDCGE;
+ 		pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+@@ -549,7 +552,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
+ 		snd_hdac_set_codec_wakeup(bus, false);
+ 
+ 	/* reduce dma latency to avoid noise */
+-	if (IS_BROXTON(pci))
++	if (IS_BXT(pci))
+ 		bxt_reduce_dma_latency(chip);
+ }
+ 
+@@ -971,11 +974,6 @@ static int azx_resume(struct device *dev)
+ /* put codec down to D3 at hibernation for Intel SKL+;
+  * otherwise BIOS may still access the codec and screw up the driver
+  */
+-#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+-#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+-
+ static int azx_freeze_noirq(struct device *dev)
+ {
+ 	struct pci_dev *pci = to_pci_dev(dev);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index efd4980cffb8..72fa58dd7723 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4749,6 +4749,7 @@ enum {
+ 	ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
+ 	ALC293_FIXUP_LENOVO_SPK_NOISE,
+ 	ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
++	ALC255_FIXUP_DELL_SPK_NOISE,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5368,6 +5369,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc233_fixup_lenovo_line2_mic_hotkey,
+ 	},
++	[ALC255_FIXUP_DELL_SPK_NOISE] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_disable_aamix,
++		.chained = true,
++		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5410,6 +5417,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
++	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
+index 96234b638249..5d51d6ff08e6 100644
+--- a/tools/hv/hv_vss_daemon.c
++++ b/tools/hv/hv_vss_daemon.c
+@@ -254,7 +254,7 @@ int main(int argc, char *argv[])
+ 			syslog(LOG_ERR, "Illegal op:%d\n", op);
+ 		}
+ 		vss_msg->error = error;
+-		len = write(vss_fd, &error, sizeof(struct hv_vss_msg));
++		len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
+ 		if (len != sizeof(struct hv_vss_msg)) {
+ 			syslog(LOG_ERR, "write failed; error: %d %s", errno,
+ 			       strerror(errno));
+diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
+index 2d9d8306dbd3..4a3a72cb5805 100644
+--- a/tools/perf/util/stat.c
++++ b/tools/perf/util/stat.c
+@@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
+ 	int i, ret;
+ 
+ 	aggr->val = aggr->ena = aggr->run = 0;
+-	init_stats(ps->res_stats);
+ 
+ 	if (counter->per_pkg)
+ 		zero_per_pkg(counter);
+diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
+index 77edcdcc016b..057278448515 100755
+--- a/tools/testing/selftests/efivarfs/efivarfs.sh
++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
+@@ -88,7 +88,11 @@ test_delete()
+ 		exit 1
+ 	fi
+ 
+-	rm $file
++	rm $file 2>/dev/null
++	if [ $? -ne 0 ]; then
++		chattr -i $file
++		rm $file
++	fi
+ 
+ 	if [ -e $file ]; then
+ 		echo "$file couldn't be deleted" >&2
+@@ -111,6 +115,7 @@ test_zero_size_delete()
+ 		exit 1
+ 	fi
+ 
++	chattr -i $file
+ 	printf "$attrs" > $file
+ 
+ 	if [ -e $file ]; then
+@@ -141,7 +146,11 @@ test_valid_filenames()
+ 			echo "$file could not be created" >&2
+ 			ret=1
+ 		else
+-			rm $file
++			rm $file 2>/dev/null
++			if [ $? -ne 0 ]; then
++				chattr -i $file
++				rm $file
++			fi
+ 		fi
+ 	done
+ 
+@@ -174,7 +183,11 @@ test_invalid_filenames()
+ 
+ 		if [ -e $file ]; then
+ 			echo "Creating $file should have failed" >&2
+-			rm $file
++			rm $file 2>/dev/null
++			if [ $? -ne 0 ]; then
++				chattr -i $file
++				rm $file
++			fi
+ 			ret=1
+ 		fi
+ 	done
+diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
+index 8c0764407b3c..4af74f733036 100644
+--- a/tools/testing/selftests/efivarfs/open-unlink.c
++++ b/tools/testing/selftests/efivarfs/open-unlink.c
+@@ -1,10 +1,68 @@
++#include <errno.h>
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <stdlib.h>
+ #include <unistd.h>
++#include <sys/ioctl.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
++#include <linux/fs.h>
++
++static int set_immutable(const char *path, int immutable)
++{
++	unsigned int flags;
++	int fd;
++	int rc;
++	int error;
++
++	fd = open(path, O_RDONLY);
++	if (fd < 0)
++		return fd;
++
++	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
++	if (rc < 0) {
++		error = errno;
++		close(fd);
++		errno = error;
++		return rc;
++	}
++
++	if (immutable)
++		flags |= FS_IMMUTABLE_FL;
++	else
++		flags &= ~FS_IMMUTABLE_FL;
++
++	rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
++	error = errno;
++	close(fd);
++	errno = error;
++	return rc;
++}
++
++static int get_immutable(const char *path)
++{
++	unsigned int flags;
++	int fd;
++	int rc;
++	int error;
++
++	fd = open(path, O_RDONLY);
++	if (fd < 0)
++		return fd;
++
++	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
++	if (rc < 0) {
++		error = errno;
++		close(fd);
++		errno = error;
++		return rc;
++	}
++	close(fd);
++	if (flags & FS_IMMUTABLE_FL)
++		return 1;
++	return 0;
++}
+ 
+ int main(int argc, char **argv)
+ {
+@@ -27,7 +85,7 @@ int main(int argc, char **argv)
+ 	buf[4] = 0;
+ 
+ 	/* create a test variable */
+-	fd = open(path, O_WRONLY | O_CREAT);
++	fd = open(path, O_WRONLY | O_CREAT, 0600);
+ 	if (fd < 0) {
+ 		perror("open(O_WRONLY)");
+ 		return EXIT_FAILURE;
+@@ -41,6 +99,18 @@ int main(int argc, char **argv)
+ 
+ 	close(fd);
+ 
++	rc = get_immutable(path);
++	if (rc < 0) {
++		perror("ioctl(FS_IOC_GETFLAGS)");
++		return EXIT_FAILURE;
++	} else if (rc) {
++		rc = set_immutable(path, 0);
++		if (rc < 0) {
++			perror("ioctl(FS_IOC_SETFLAGS)");
++			return EXIT_FAILURE;
++		}
++	}
++
+ 	fd = open(path, O_RDONLY);
+ 	if (fd < 0) {
+ 		perror("open");
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index 7a2f449bd85d..5d10f104f3eb 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
+ {
+ 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+-
+-	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
++	int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
++	int sz = nr_longs * sizeof(unsigned long);
+ 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
+ 	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
+ 	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index 77d42be6970e..4f70d12e392d 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -173,7 +173,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
+ 	 * do alloc nowait since if we are going to sleep anyway we
+ 	 * may as well sleep faulting in page
+ 	 */
+-	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
++	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
+ 	if (!work)
+ 		return 0;
+ 

