From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:3.18 commit in: /
Date: Sat, 7 Mar 2015 14:28:04 +0000 (UTC)
Message-ID: <1425738482.1298219e93424dd2f652e663db1e893f55d34646.mpagano@gentoo>
commit: 1298219e93424dd2f652e663db1e893f55d34646
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 7 14:28:02 2015 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 7 14:28:02 2015 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1298219e
Linux patch 3.18.9
0000_README | 4 +
1008_linux-3.18.9.patch | 6044 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6048 insertions(+)
diff --git a/0000_README b/0000_README
index 7c4ea05..8161304 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-3.18.8.patch
From: http://www.kernel.org
Desc: Linux 3.18.8
+Patch: 1008_linux-3.18.9.patch
+From: http://www.kernel.org
+Desc: Linux 3.18.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1008_linux-3.18.9.patch b/1008_linux-3.18.9.patch
new file mode 100644
index 0000000..8888040
--- /dev/null
+++ b/1008_linux-3.18.9.patch
@@ -0,0 +1,6044 @@
+diff --git a/Makefile b/Makefile
+index 0b3f8a1b3715..62b333802a0e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 18
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Diseased Newt
+
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 6b0b7f7ef783..7670f33b9ce2 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -259,7 +259,8 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+ #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+
+ #define pte_page(x) (mem_map + \
+- (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
++ (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
++ PAGE_SHIFT)))
+
+ #define mk_pte(page, pgprot) \
+ ({ \
+diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
+index 6cc25ed912ee..2c6248d9a9ef 100644
+--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
++++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
+@@ -195,6 +195,7 @@
+
+ &usb0 {
+ status = "okay";
++ dr_mode = "peripheral";
+ };
+
+ &usb1 {
+diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
+index f3bb2dd6269e..c97844cac4e7 100644
+--- a/arch/arm/boot/dts/bcm63138.dtsi
++++ b/arch/arm/boot/dts/bcm63138.dtsi
+@@ -66,8 +66,9 @@
+ reg = <0x1d000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+- cache-sets = <16>;
+- cache-size = <0x80000>;
++ cache-size = <524288>;
++ cache-sets = <1024>;
++ cache-line-size = <32>;
+ interrupts = <GIC_PPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
+index 8acf5d85c99d..f76fe94267d6 100644
+--- a/arch/arm/boot/dts/tegra20.dtsi
++++ b/arch/arm/boot/dts/tegra20.dtsi
+@@ -68,9 +68,9 @@
+ reset-names = "2d";
+ };
+
+- gr3d@54140000 {
++ gr3d@54180000 {
+ compatible = "nvidia,tegra20-gr3d";
+- reg = <0x54140000 0x00040000>;
++ reg = <0x54180000 0x00040000>;
+ clocks = <&tegra_car TEGRA20_CLK_GR3D>;
+ resets = <&tegra_car 24>;
+ reset-names = "3d";
+@@ -130,9 +130,9 @@
+ status = "disabled";
+ };
+
+- dsi@542c0000 {
++ dsi@54300000 {
+ compatible = "nvidia,tegra20-dsi";
+- reg = <0x542c0000 0x00040000>;
++ reg = <0x54300000 0x00040000>;
+ clocks = <&tegra_car TEGRA20_CLK_DSI>;
+ resets = <&tegra_car 48>;
+ reset-names = "dsi";
+diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
+index a068cb5c2ce8..c6c132acd7a6 100644
+--- a/arch/arm/mach-mvebu/system-controller.c
++++ b/arch/arm/mach-mvebu/system-controller.c
+@@ -126,7 +126,7 @@ int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev)
+ return -ENODEV;
+ }
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
+ void mvebu_armada375_smp_wa_init(void)
+ {
+ u32 dev, rev;
+diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+index 5684f112654b..4e9d2a97c2cb 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+@@ -2017,7 +2017,7 @@ static struct omap_hwmod dra7xx_uart3_hwmod = {
+ .class = &dra7xx_uart_hwmod_class,
+ .clkdm_name = "l4per_clkdm",
+ .main_clk = "uart3_gfclk_mux",
+- .flags = HWMOD_SWSUP_SIDLE_ACT,
++ .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART3_FLAGS,
+ .prcm = {
+ .omap4 = {
+ .clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET,
+diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
+index 06022b235730..89f790dda93e 100644
+--- a/arch/arm/mach-pxa/corgi.c
++++ b/arch/arm/mach-pxa/corgi.c
+@@ -26,6 +26,7 @@
+ #include <linux/i2c.h>
+ #include <linux/i2c/pxa-i2c.h>
+ #include <linux/io.h>
++#include <linux/regulator/machine.h>
+ #include <linux/spi/spi.h>
+ #include <linux/spi/ads7846.h>
+ #include <linux/spi/corgi_lcd.h>
+@@ -752,6 +753,8 @@ static void __init corgi_init(void)
+ sharpsl_nand_partitions[1].size = 53 * 1024 * 1024;
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
++
++ regulator_has_full_constraints();
+ }
+
+ static void __init fixup_corgi(struct tag *tags, char **cmdline)
+diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
+index c66ad4edc5e3..5fb41ad6e3bc 100644
+--- a/arch/arm/mach-pxa/hx4700.c
++++ b/arch/arm/mach-pxa/hx4700.c
+@@ -893,6 +893,8 @@ static void __init hx4700_init(void)
+ mdelay(10);
+ gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1);
+ mdelay(10);
++
++ regulator_has_full_constraints();
+ }
+
+ MACHINE_START(H4700, "HP iPAQ HX4700")
+diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
+index 131991629116..e81d216b05e4 100644
+--- a/arch/arm/mach-pxa/poodle.c
++++ b/arch/arm/mach-pxa/poodle.c
+@@ -25,6 +25,7 @@
+ #include <linux/gpio.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c/pxa-i2c.h>
++#include <linux/regulator/machine.h>
+ #include <linux/spi/spi.h>
+ #include <linux/spi/ads7846.h>
+ #include <linux/spi/pxa2xx_spi.h>
+@@ -455,6 +456,7 @@ static void __init poodle_init(void)
+ pxa_set_i2c_info(NULL);
+ i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
+ poodle_init_spi();
++ regulator_has_full_constraints();
+ }
+
+ static void __init fixup_poodle(struct tag *tags, char **cmdline)
+diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
+index 6645d1e31f14..34853d5dfda2 100644
+--- a/arch/arm/mach-sa1100/pm.c
++++ b/arch/arm/mach-sa1100/pm.c
+@@ -81,6 +81,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
+ /*
+ * Ensure not to come back here if it wasn't intended
+ */
++ RCSR = RCSR_SMR;
+ PSPR = 0;
+
+ /*
+diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
+index b2cfba16c4e8..18865136156f 100644
+--- a/arch/arm/mach-vexpress/Kconfig
++++ b/arch/arm/mach-vexpress/Kconfig
+@@ -75,6 +75,7 @@ config ARCH_VEXPRESS_TC2_PM
+ depends on MCPM
+ select ARM_CCI
+ select ARCH_VEXPRESS_SPC
++ select ARM_CPU_SUSPEND
+ help
+ Support for CPU and cluster power management on Versatile Express
+ with a TC2 (A15x2 A7x3) big.LITTLE core tile.
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index 1b9ad02837cf..76920d4040d4 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
+ case __SI_TIMER:
+ err |= __put_user(from->si_tid, &to->si_tid);
+ err |= __put_user(from->si_overrun, &to->si_overrun);
+- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
+- &to->si_ptr);
++ err |= __put_user(from->si_int, &to->si_int);
+ break;
+ case __SI_POLL:
+ err |= __put_user(from->si_band, &to->si_band);
+@@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
+ case __SI_MESGQ: /* But this is */
+ err |= __put_user(from->si_pid, &to->si_pid);
+ err |= __put_user(from->si_uid, &to->si_uid);
+- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
++ err |= __put_user(from->si_int, &to->si_int);
+ break;
+ default: /* this is just in case for now ... */
+ err |= __put_user(from->si_pid, &to->si_pid);
+diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
+index 881071c07942..13272fd5a5ba 100644
+--- a/arch/metag/include/asm/processor.h
++++ b/arch/metag/include/asm/processor.h
+@@ -149,8 +149,8 @@ extern void exit_thread(void);
+
+ unsigned long get_wchan(struct task_struct *p);
+
+-#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC)
+-#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0)
++#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC)
++#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0)
+
+ #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
+
+diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
+index d7557cde271a..3fff11ec7dc0 100644
+--- a/arch/mips/alchemy/common/clock.c
++++ b/arch/mips/alchemy/common/clock.c
+@@ -128,6 +128,8 @@ static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
+ t = 396000000;
+ else {
+ t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
++ if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
++ t &= 0x3f;
+ t *= parent_rate;
+ }
+
+diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
+index 6caf8766b80f..71fef0af9c9a 100644
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -304,7 +304,7 @@
+ .set push
+ .set noat
+ SET_HARDFLOAT
+- add $1, \base, \off
++ addu $1, \base, \off
+ .word LDD_MSA_INSN | (\wd << 6)
+ .set pop
+ .endm
+@@ -313,7 +313,7 @@
+ .set push
+ .set noat
+ SET_HARDFLOAT
+- add $1, \base, \off
++ addu $1, \base, \off
+ .word STD_MSA_INSN | (\wd << 6)
+ .set pop
+ .endm
+diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
+index a6c9ccb33c5c..c3f4f2d2e108 100644
+--- a/arch/mips/include/asm/cpu-info.h
++++ b/arch/mips/include/asm/cpu-info.h
+@@ -84,6 +84,11 @@ struct cpuinfo_mips {
+ * (shifted by _CACHE_SHIFT)
+ */
+ unsigned int writecombine;
++ /*
++ * Simple counter to prevent enabling HTW in nested
++ * htw_start/htw_stop calls
++ */
++ unsigned int htw_seq;
+ } __attribute__((aligned(SMP_CACHE_BYTES)));
+
+ extern struct cpuinfo_mips cpu_data[];
+diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
+index 2f82568a3ee4..bc01579a907a 100644
+--- a/arch/mips/include/asm/mmu_context.h
++++ b/arch/mips/include/asm/mmu_context.h
+@@ -25,7 +25,6 @@ do { \
+ if (cpu_has_htw) { \
+ write_c0_pwbase(pgd); \
+ back_to_back_c0_hazard(); \
+- htw_reset(); \
+ } \
+ } while (0)
+
+@@ -142,6 +141,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ unsigned long flags;
+ local_irq_save(flags);
+
++ htw_stop();
+ /* Check if our ASID is of an older version and thus invalid */
+ if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
+ get_new_mmu_context(next, cpu);
+@@ -154,6 +154,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
++ htw_start();
+
+ local_irq_restore(flags);
+ }
+@@ -180,6 +181,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
+
+ local_irq_save(flags);
+
++ htw_stop();
+ /* Unconditionally get a new ASID. */
+ get_new_mmu_context(next, cpu);
+
+@@ -189,6 +191,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
+ /* mark mmu ownership change */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
++ htw_start();
+
+ local_irq_restore(flags);
+ }
+@@ -203,6 +206,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
+ unsigned long flags;
+
+ local_irq_save(flags);
++ htw_stop();
+
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+ get_new_mmu_context(mm, cpu);
+@@ -211,6 +215,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
+ /* will get a new context next time */
+ cpu_context(cpu, mm) = 0;
+ }
++ htw_start();
+ local_irq_restore(flags);
+ }
+
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index d6d1928539b1..bc3fc4fdc9ab 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -99,29 +99,35 @@ extern void paging_init(void);
+
+ #define htw_stop() \
+ do { \
+- if (cpu_has_htw) \
+- write_c0_pwctl(read_c0_pwctl() & \
+- ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
++ unsigned long flags; \
++ \
++ if (cpu_has_htw) { \
++ local_irq_save(flags); \
++ if(!raw_current_cpu_data.htw_seq++) { \
++ write_c0_pwctl(read_c0_pwctl() & \
++ ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
++ back_to_back_c0_hazard(); \
++ } \
++ local_irq_restore(flags); \
++ } \
+ } while(0)
+
+ #define htw_start() \
+ do { \
+- if (cpu_has_htw) \
+- write_c0_pwctl(read_c0_pwctl() | \
+- (1 << MIPS_PWCTL_PWEN_SHIFT)); \
+-} while(0)
+-
+-
+-#define htw_reset() \
+-do { \
++ unsigned long flags; \
++ \
+ if (cpu_has_htw) { \
+- htw_stop(); \
+- back_to_back_c0_hazard(); \
+- htw_start(); \
+- back_to_back_c0_hazard(); \
++ local_irq_save(flags); \
++ if (!--raw_current_cpu_data.htw_seq) { \
++ write_c0_pwctl(read_c0_pwctl() | \
++ (1 << MIPS_PWCTL_PWEN_SHIFT)); \
++ back_to_back_c0_hazard(); \
++ } \
++ local_irq_restore(flags); \
+ } \
+ } while(0)
+
++
+ extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pteval);
+
+@@ -153,12 +159,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
+ {
+ pte_t null = __pte(0);
+
++ htw_stop();
+ /* Preserve global status for the pair */
+ if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
+ null.pte_low = null.pte_high = _PAGE_GLOBAL;
+
+ set_pte_at(mm, addr, ptep, null);
+- htw_reset();
++ htw_start();
+ }
+ #else
+
+@@ -188,6 +195,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
+
+ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
++ htw_stop();
+ #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
+ /* Preserve global status for the pair */
+ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+@@ -195,7 +203,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
+ else
+ #endif
+ set_pte_at(mm, addr, ptep, __pte(0));
+- htw_reset();
++ htw_start();
+ }
+ #endif
+
+diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
+index 0384b05ab5a0..55b759a0019e 100644
+--- a/arch/mips/kernel/cps-vec.S
++++ b/arch/mips/kernel/cps-vec.S
+@@ -99,11 +99,11 @@ not_nmi:
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+- addi t1, t1, 1
++ addiu t1, t1, 1
+ sllv t1, t3, t1
+ 1: /* At this point t1 == I-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+- addi t2, t2, 1
++ addiu t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+@@ -126,11 +126,11 @@ icache_done:
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+- addi t1, t1, 1
++ addiu t1, t1, 1
+ sllv t1, t3, t1
+ 1: /* At this point t1 == D-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+- addi t2, t2, 1
++ addiu t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+@@ -250,7 +250,7 @@ LEAF(mips_cps_core_init)
+ mfc0 t0, CP0_MVPCONF0
+ srl t0, t0, MVPCONF0_PVPE_SHIFT
+ andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+- addi t7, t0, 1
++ addiu t7, t0, 1
+
+ /* If there's only 1, we're done */
+ beqz t0, 2f
+@@ -280,7 +280,7 @@ LEAF(mips_cps_core_init)
+ mttc0 t0, CP0_TCHALT
+
+ /* Next VPE */
+- addi t5, t5, 1
++ addiu t5, t5, 1
+ slt t0, t5, t7
+ bnez t0, 1b
+ nop
+@@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes)
+ mfc0 t1, CP0_MVPCONF0
+ srl t1, t1, MVPCONF0_PVPE_SHIFT
+ andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+- addi t1, t1, 1
++ addiu t1, t1, 1
+
+ /* Calculate a mask for the VPE ID from EBase.CPUNum */
+ clz t1, t1
+@@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes)
+
+ /* Next VPE */
+ 2: srl t6, t6, 1
+- addi t5, t5, 1
++ addiu t5, t5, 1
+ bnez t6, 1b
+ nop
+
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index dc49cf30c2db..5d6e59f20750 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -367,8 +367,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
+ if (config3 & MIPS_CONF3_MSA)
+ c->ases |= MIPS_ASE_MSA;
+ /* Only tested on 32-bit cores */
+- if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT))
++ if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) {
++ c->htw_seq = 0;
+ c->options |= MIPS_CPU_HTW;
++ }
+
+ return config3 & MIPS_CONF_M;
+ }
+diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
+index 2607c3a4ff7e..1b2452e2be67 100644
+--- a/arch/mips/kernel/mips_ksyms.c
++++ b/arch/mips/kernel/mips_ksyms.c
+@@ -14,6 +14,8 @@
+ #include <linux/mm.h>
+ #include <asm/uaccess.h>
+ #include <asm/ftrace.h>
++#include <asm/fpu.h>
++#include <asm/msa.h>
+
+ extern void *__bzero(void *__s, size_t __count);
+ extern long __strncpy_from_kernel_nocheck_asm(char *__to,
+@@ -34,6 +36,14 @@ extern long __strnlen_user_nocheck_asm(const char *s);
+ extern long __strnlen_user_asm(const char *s);
+
+ /*
++ * Core architecture code
++ */
++EXPORT_SYMBOL_GPL(_save_fp);
++#ifdef CONFIG_CPU_HAS_MSA
++EXPORT_SYMBOL_GPL(_save_msa);
++#endif
++
++/*
+ * String functions
+ */
+ EXPORT_SYMBOL(memset);
+diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
+index d7279c03c517..4a68b176d6e4 100644
+--- a/arch/mips/kvm/locore.S
++++ b/arch/mips/kvm/locore.S
+@@ -434,7 +434,7 @@ __kvm_mips_return_to_guest:
+ /* Setup status register for running guest in UM */
+ .set at
+ or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+- and v1, v1, ~ST0_CU0
++ and v1, v1, ~(ST0_CU0 | ST0_MX)
+ .set noat
+ mtc0 v1, CP0_STATUS
+ ehb
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index e3b21e51ff7e..270bbd41769e 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -15,9 +15,11 @@
+ #include <linux/vmalloc.h>
+ #include <linux/fs.h>
+ #include <linux/bootmem.h>
++#include <asm/fpu.h>
+ #include <asm/page.h>
+ #include <asm/cacheflush.h>
+ #include <asm/mmu_context.h>
++#include <asm/pgtable.h>
+
+ #include <linux/kvm_host.h>
+
+@@ -378,6 +380,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ vcpu->mmio_needed = 0;
+ }
+
++ lose_fpu(1);
++
+ local_irq_disable();
+ /* Check if we have any exceptions/interrupts pending */
+ kvm_mips_deliver_interrupts(vcpu,
+@@ -385,8 +389,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+
+ kvm_guest_enter();
+
++ /* Disable hardware page table walking while in guest */
++ htw_stop();
++
+ r = __kvm_mips_vcpu_run(run, vcpu);
+
++ /* Re-enable HTW before enabling interrupts */
++ htw_start();
++
+ kvm_guest_exit();
+ local_irq_enable();
+
+@@ -980,9 +990,6 @@ static void kvm_mips_set_c0_status(void)
+ {
+ uint32_t status = read_c0_status();
+
+- if (cpu_has_fpu)
+- status |= (ST0_CU1);
+-
+ if (cpu_has_dsp)
+ status |= (ST0_MX);
+
+@@ -1002,6 +1009,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
++ /* re-enable HTW before enabling interrupts */
++ htw_start();
++
+ /* Set a default exit reason */
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+@@ -1136,6 +1146,9 @@ skip_emul:
+ }
+ }
+
++ /* Disable HTW before returning to guest or host */
++ htw_stop();
++
+ return ret;
+ }
+
+diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
+index ad56edc39919..e8bb33b2d3cc 100644
+--- a/arch/powerpc/sysdev/axonram.c
++++ b/arch/powerpc/sysdev/axonram.c
+@@ -156,7 +156,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
+ }
+
+ *kaddr = (void *)(bank->ph_addr + offset);
+- *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT;
++ *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+
+ return 0;
+ }
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 4fc3fed636dc..29e2e5aa2111 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -613,7 +613,7 @@ no_timer:
+ __unset_cpu_idle(vcpu);
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+- hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
++ hrtimer_cancel(&vcpu->arch.ckc_timer);
+ return 0;
+ }
+
+@@ -633,10 +633,20 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
+ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
+ {
+ struct kvm_vcpu *vcpu;
++ u64 now, sltime;
+
+ vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+- kvm_s390_vcpu_wakeup(vcpu);
++ now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
++ sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+
++ /*
++ * If the monotonic clock runs faster than the tod clock we might be
++ * woken up too early and have to go back to sleep to avoid deadlocks.
++ */
++ if (vcpu->arch.sie_block->ckc > now &&
++ hrtimer_forward_now(timer, ns_to_ktime(sltime)))
++ return HRTIMER_RESTART;
++ kvm_s390_vcpu_wakeup(vcpu);
+ return HRTIMER_NORESTART;
+ }
+
+@@ -840,6 +850,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+ list_add_tail(&inti->list, &iter->list);
+ }
+ atomic_set(&fi->active, 1);
++ if (atomic_read(&kvm->online_vcpus) == 0)
++ goto unlock_fi;
+ sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
+ if (sigcpu == KVM_MAX_VCPUS) {
+ do {
+@@ -864,6 +876,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
+ struct kvm_s390_interrupt *s390int)
+ {
+ struct kvm_s390_interrupt_info *inti;
++ int rc;
+
+ inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (!inti)
+@@ -911,7 +924,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
+ trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
+ 2);
+
+- return __inject_vm(kvm, inti);
++ rc = __inject_vm(kvm, inti);
++ if (rc)
++ kfree(inti);
++ return rc;
+ }
+
+ void kvm_s390_reinject_io_int(struct kvm *kvm,
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 55aade49b6d1..ced09d8738b4 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -662,7 +662,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+ if (rc)
+ return rc;
+ }
+- hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
++ hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
+ get_cpu_id(&vcpu->arch.cpu_id);
+ vcpu->arch.cpu_id.version = 0xff;
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 6a1a8458c042..30c0acf4ea6c 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -36,6 +36,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/aslr.o
+ $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
+
+ vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
++vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
+
+ $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+ $(call if_changed,ld)
+diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S
+index 7ff3632806b1..99494dff2113 100644
+--- a/arch/x86/boot/compressed/efi_stub_64.S
++++ b/arch/x86/boot/compressed/efi_stub_64.S
+@@ -3,28 +3,3 @@
+ #include <asm/processor-flags.h>
+
+ #include "../../platform/efi/efi_stub_64.S"
+-
+-#ifdef CONFIG_EFI_MIXED
+- .code64
+- .text
+-ENTRY(efi64_thunk)
+- push %rbp
+- push %rbx
+-
+- subq $16, %rsp
+- leaq efi_exit32(%rip), %rax
+- movl %eax, 8(%rsp)
+- leaq efi_gdt64(%rip), %rax
+- movl %eax, 4(%rsp)
+- movl %eax, 2(%rax) /* Fixup the gdt base address */
+- leaq efi32_boot_gdt(%rip), %rax
+- movl %eax, (%rsp)
+-
+- call __efi64_thunk
+-
+- addq $16, %rsp
+- pop %rbx
+- pop %rbp
+- ret
+-ENDPROC(efi64_thunk)
+-#endif /* CONFIG_EFI_MIXED */
+diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
+new file mode 100644
+index 000000000000..630384a4c14a
+--- /dev/null
++++ b/arch/x86/boot/compressed/efi_thunk_64.S
+@@ -0,0 +1,196 @@
++/*
++ * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
++ *
++ * Early support for invoking 32-bit EFI services from a 64-bit kernel.
++ *
++ * Because this thunking occurs before ExitBootServices() we have to
++ * restore the firmware's 32-bit GDT before we make EFI serivce calls,
++ * since the firmware's 32-bit IDT is still currently installed and it
++ * needs to be able to service interrupts.
++ *
++ * On the plus side, we don't have to worry about mangling 64-bit
++ * addresses into 32-bits because we're executing with an identify
++ * mapped pagetable and haven't transitioned to 64-bit virtual addresses
++ * yet.
++ */
++
++#include <linux/linkage.h>
++#include <asm/msr.h>
++#include <asm/page_types.h>
++#include <asm/processor-flags.h>
++#include <asm/segment.h>
++
++ .code64
++ .text
++ENTRY(efi64_thunk)
++ push %rbp
++ push %rbx
++
++ subq $8, %rsp
++ leaq efi_exit32(%rip), %rax
++ movl %eax, 4(%rsp)
++ leaq efi_gdt64(%rip), %rax
++ movl %eax, (%rsp)
++ movl %eax, 2(%rax) /* Fixup the gdt base address */
++
++ movl %ds, %eax
++ push %rax
++ movl %es, %eax
++ push %rax
++ movl %ss, %eax
++ push %rax
++
++ /*
++ * Convert x86-64 ABI params to i386 ABI
++ */
++ subq $32, %rsp
++ movl %esi, 0x0(%rsp)
++ movl %edx, 0x4(%rsp)
++ movl %ecx, 0x8(%rsp)
++ movq %r8, %rsi
++ movl %esi, 0xc(%rsp)
++ movq %r9, %rsi
++ movl %esi, 0x10(%rsp)
++
++ sgdt save_gdt(%rip)
++
++ leaq 1f(%rip), %rbx
++ movq %rbx, func_rt_ptr(%rip)
++
++ /*
++ * Switch to gdt with 32-bit segments. This is the firmware GDT
++ * that was installed when the kernel started executing. This
++ * pointer was saved at the EFI stub entry point in head_64.S.
++ */
++ leaq efi32_boot_gdt(%rip), %rax
++ lgdt (%rax)
++
++ pushq $__KERNEL_CS
++ leaq efi_enter32(%rip), %rax
++ pushq %rax
++ lretq
++
++1: addq $32, %rsp
++
++ lgdt save_gdt(%rip)
++
++ pop %rbx
++ movl %ebx, %ss
++ pop %rbx
++ movl %ebx, %es
++ pop %rbx
++ movl %ebx, %ds
++
++ /*
++ * Convert 32-bit status code into 64-bit.
++ */
++ test %rax, %rax
++ jz 1f
++ movl %eax, %ecx
++ andl $0x0fffffff, %ecx
++ andl $0xf0000000, %eax
++ shl $32, %rax
++ or %rcx, %rax
++1:
++ addq $8, %rsp
++ pop %rbx
++ pop %rbp
++ ret
++ENDPROC(efi64_thunk)
++
++ENTRY(efi_exit32)
++ movq func_rt_ptr(%rip), %rax
++ push %rax
++ mov %rdi, %rax
++ ret
++ENDPROC(efi_exit32)
++
++ .code32
++/*
++ * EFI service pointer must be in %edi.
++ *
++ * The stack should represent the 32-bit calling convention.
++ */
++ENTRY(efi_enter32)
++ movl $__KERNEL_DS, %eax
++ movl %eax, %ds
++ movl %eax, %es
++ movl %eax, %ss
++
++ /* Reload pgtables */
++ movl %cr3, %eax
++ movl %eax, %cr3
++
++ /* Disable paging */
++ movl %cr0, %eax
++ btrl $X86_CR0_PG_BIT, %eax
++ movl %eax, %cr0
++
++ /* Disable long mode via EFER */
++ movl $MSR_EFER, %ecx
++ rdmsr
++ btrl $_EFER_LME, %eax
++ wrmsr
++
++ call *%edi
++
++ /* We must preserve return value */
++ movl %eax, %edi
++
++ /*
++ * Some firmware will return with interrupts enabled. Be sure to
++ * disable them before we switch GDTs.
++ */
++ cli
++
++ movl 56(%esp), %eax
++ movl %eax, 2(%eax)
++ lgdtl (%eax)
++
++ movl %cr4, %eax
++ btsl $(X86_CR4_PAE_BIT), %eax
++ movl %eax, %cr4
++
++ movl %cr3, %eax
++ movl %eax, %cr3
++
++ movl $MSR_EFER, %ecx
++ rdmsr
++ btsl $_EFER_LME, %eax
++ wrmsr
++
++ xorl %eax, %eax
++ lldt %ax
++
++ movl 60(%esp), %eax
++ pushl $__KERNEL_CS
++ pushl %eax
++
++ /* Enable paging */
++ movl %cr0, %eax
++ btsl $X86_CR0_PG_BIT, %eax
++ movl %eax, %cr0
++ lret
++ENDPROC(efi_enter32)
++
++ .data
++ .balign 8
++ .global efi32_boot_gdt
++efi32_boot_gdt: .word 0
++ .quad 0
++
++save_gdt: .word 0
++ .quad 0
++func_rt_ptr: .quad 0
++
++ .global efi_gdt64
++efi_gdt64:
++ .word efi_gdt64_end - efi_gdt64
++ .long 0 /* Filled out by user */
++ .word 0
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x00af9a000000ffff /* __KERNEL_CS */
++ .quad 0x00cf92000000ffff /* __KERNEL_DS */
++ .quad 0x0080890000000000 /* TS descriptor */
++ .quad 0x0000000000000000 /* TS continued */
++efi_gdt64_end:
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index a142e77693e1..a3eadfdc3e04 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -604,18 +604,24 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
+
+ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
+ {
+- int irq;
++ int rc, irq, trigger, polarity;
+
+ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+ *irqp = gsi;
+- } else {
+- irq = mp_map_gsi_to_irq(gsi,
+- IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+- if (irq < 0)
+- return -1;
+- *irqp = irq;
++ return 0;
+ }
+- return 0;
++
++ rc = acpi_get_override_irq(gsi, &trigger, &polarity);
++ if (rc == 0) {
++ trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
++ polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
++ irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
++ if (irq >= 0) {
++ *irqp = irq;
++ return 0;
++ }
++ }
++ return -1;
+ }
+ EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+diff --git a/arch/x86/kernel/pmc_atom.c b/arch/x86/kernel/pmc_atom.c
+index 0ee5025e0fa4..8bb9a611ca23 100644
+--- a/arch/x86/kernel/pmc_atom.c
++++ b/arch/x86/kernel/pmc_atom.c
+@@ -217,6 +217,8 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
+ if (!dir)
+ return -ENOMEM;
+
++ pmc->dbgfs_dir = dir;
++
+ f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO,
+ dir, pmc, &pmc_dev_state_ops);
+ if (!f) {
+@@ -229,7 +231,7 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
+ dev_err(&pdev->dev, "sleep_state register failed\n");
+ goto err;
+ }
+- pmc->dbgfs_dir = dir;
++
+ return 0;
+ err:
+ pmc_dbgfs_unregister(pmc);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 506488cfa385..8b92cf4b165a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1237,21 +1237,22 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
+ {
+ #ifdef CONFIG_X86_64
+ bool vcpus_matched;
+- bool do_request = false;
+ struct kvm_arch *ka = &vcpu->kvm->arch;
+ struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+
+ vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
+ atomic_read(&vcpu->kvm->online_vcpus));
+
+- if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
+- if (!ka->use_master_clock)
+- do_request = 1;
+-
+- if (!vcpus_matched && ka->use_master_clock)
+- do_request = 1;
+-
+- if (do_request)
++ /*
++ * Once the masterclock is enabled, always perform request in
++ * order to update it.
++ *
++ * In order to enable masterclock, the host clocksource must be TSC
++ * and the vcpus need to have matched TSCs. When that happens,
++ * perform request to enable masterclock.
++ */
++ if (ka->use_master_clock ||
++ (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
+ kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+
+ trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
+diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
+index 207d9aef662d..448ee8912d9b 100644
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -172,7 +172,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+ */
+ if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+ return 0;
+- if (unlikely(pmd_large(pmd))) {
++ if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
+ /*
+ * NUMA hinting faults need to be handled in the GUP
+ * slowpath for accounting purposes and so that they
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index 8b977ebf9388..006cc914994b 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -66,9 +66,15 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
+ return ERR_PTR(-EINVAL);
+ }
+
++/*
++ * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
++ * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
++ * Otherwise, returns 0.
++ */
+ int pmd_huge(pmd_t pmd)
+ {
+- return !!(pmd_val(pmd) & _PAGE_PSE);
++ return !pmd_none(pmd) &&
++ (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
+ }
+
+ int pud_huge(pud_t pud)
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 919b91205cd4..df4552bd239e 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -35,12 +35,12 @@ struct va_alignment __read_mostly va_align = {
+ .flags = -1,
+ };
+
+-static unsigned int stack_maxrandom_size(void)
++static unsigned long stack_maxrandom_size(void)
+ {
+- unsigned int max = 0;
++ unsigned long max = 0;
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE)) {
+- max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
++ max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
+ }
+
+ return max;
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 093f5f4272d3..6b3cf7c4e5c2 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -452,52 +452,6 @@ int __init pci_xen_hvm_init(void)
+ }
+
+ #ifdef CONFIG_XEN_DOM0
+-static __init void xen_setup_acpi_sci(void)
+-{
+- int rc;
+- int trigger, polarity;
+- int gsi = acpi_sci_override_gsi;
+- int irq = -1;
+- int gsi_override = -1;
+-
+- if (!gsi)
+- return;
+-
+- rc = acpi_get_override_irq(gsi, &trigger, &polarity);
+- if (rc) {
+- printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
+- " sci, rc=%d\n", rc);
+- return;
+- }
+- trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+- polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+-
+- printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
+- "polarity=%d\n", gsi, trigger, polarity);
+-
+- /* Before we bind the GSI to a Linux IRQ, check whether
+- * we need to override it with bus_irq (IRQ) value. Usually for
+- * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
+- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
+- * but there are oddballs where the IRQ != GSI:
+- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
+- * which ends up being: gsi_to_irq[9] == 20
+- * (which is what acpi_gsi_to_irq ends up calling when starting the
+- * the ACPI interpreter and keels over since IRQ 9 has not been
+- * setup as we had setup IRQ 20 for it).
+- */
+- if (acpi_gsi_to_irq(gsi, &irq) == 0) {
+- /* Use the provided value if it's valid. */
+- if (irq >= 0)
+- gsi_override = irq;
+- }
+-
+- gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
+- printk(KERN_INFO "xen: acpi sci %d\n", gsi);
+-
+- return;
+-}
+-
+ int __init pci_xen_initial_domain(void)
+ {
+ int irq;
+@@ -509,7 +463,6 @@ int __init pci_xen_initial_domain(void)
+ x86_msi.msi_mask_irq = xen_nop_msi_mask_irq;
+ x86_msi.msix_mask_irq = xen_nop_msix_mask_irq;
+ #endif
+- xen_setup_acpi_sci();
+ __acpi_register_gsi = acpi_register_gsi_xen;
+ /* Pre-allocate legacy irqs */
+ for (irq = 0; irq < nr_legacy_irqs(); irq++) {
+diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
+index 5fcda7272550..86d0f9e08dd9 100644
+--- a/arch/x86/platform/efi/efi_stub_64.S
++++ b/arch/x86/platform/efi/efi_stub_64.S
+@@ -91,167 +91,6 @@ ENTRY(efi_call)
+ ret
+ ENDPROC(efi_call)
+
+-#ifdef CONFIG_EFI_MIXED
+-
+-/*
+- * We run this function from the 1:1 mapping.
+- *
+- * This function must be invoked with a 1:1 mapped stack.
+- */
+-ENTRY(__efi64_thunk)
+- movl %ds, %eax
+- push %rax
+- movl %es, %eax
+- push %rax
+- movl %ss, %eax
+- push %rax
+-
+- subq $32, %rsp
+- movl %esi, 0x0(%rsp)
+- movl %edx, 0x4(%rsp)
+- movl %ecx, 0x8(%rsp)
+- movq %r8, %rsi
+- movl %esi, 0xc(%rsp)
+- movq %r9, %rsi
+- movl %esi, 0x10(%rsp)
+-
+- sgdt save_gdt(%rip)
+-
+- leaq 1f(%rip), %rbx
+- movq %rbx, func_rt_ptr(%rip)
+-
+- /* Switch to gdt with 32-bit segments */
+- movl 64(%rsp), %eax
+- lgdt (%rax)
+-
+- leaq efi_enter32(%rip), %rax
+- pushq $__KERNEL_CS
+- pushq %rax
+- lretq
+-
+-1: addq $32, %rsp
+-
+- lgdt save_gdt(%rip)
+-
+- pop %rbx
+- movl %ebx, %ss
+- pop %rbx
+- movl %ebx, %es
+- pop %rbx
+- movl %ebx, %ds
+-
+- /*
+- * Convert 32-bit status code into 64-bit.
+- */
+- test %rax, %rax
+- jz 1f
+- movl %eax, %ecx
+- andl $0x0fffffff, %ecx
+- andl $0xf0000000, %eax
+- shl $32, %rax
+- or %rcx, %rax
+-1:
+- ret
+-ENDPROC(__efi64_thunk)
+-
+-ENTRY(efi_exit32)
+- movq func_rt_ptr(%rip), %rax
+- push %rax
+- mov %rdi, %rax
+- ret
+-ENDPROC(efi_exit32)
+-
+- .code32
+-/*
+- * EFI service pointer must be in %edi.
+- *
+- * The stack should represent the 32-bit calling convention.
+- */
+-ENTRY(efi_enter32)
+- movl $__KERNEL_DS, %eax
+- movl %eax, %ds
+- movl %eax, %es
+- movl %eax, %ss
+-
+- /* Reload pgtables */
+- movl %cr3, %eax
+- movl %eax, %cr3
+-
+- /* Disable paging */
+- movl %cr0, %eax
+- btrl $X86_CR0_PG_BIT, %eax
+- movl %eax, %cr0
+-
+- /* Disable long mode via EFER */
+- movl $MSR_EFER, %ecx
+- rdmsr
+- btrl $_EFER_LME, %eax
+- wrmsr
+-
+- call *%edi
+-
+- /* We must preserve return value */
+- movl %eax, %edi
+-
+- /*
+- * Some firmware will return with interrupts enabled. Be sure to
+- * disable them before we switch GDTs.
+- */
+- cli
+-
+- movl 68(%esp), %eax
+- movl %eax, 2(%eax)
+- lgdtl (%eax)
+-
+- movl %cr4, %eax
+- btsl $(X86_CR4_PAE_BIT), %eax
+- movl %eax, %cr4
+-
+- movl %cr3, %eax
+- movl %eax, %cr3
+-
+- movl $MSR_EFER, %ecx
+- rdmsr
+- btsl $_EFER_LME, %eax
+- wrmsr
+-
+- xorl %eax, %eax
+- lldt %ax
+-
+- movl 72(%esp), %eax
+- pushl $__KERNEL_CS
+- pushl %eax
+-
+- /* Enable paging */
+- movl %cr0, %eax
+- btsl $X86_CR0_PG_BIT, %eax
+- movl %eax, %cr0
+- lret
+-ENDPROC(efi_enter32)
+-
+- .data
+- .balign 8
+- .global efi32_boot_gdt
+-efi32_boot_gdt: .word 0
+- .quad 0
+-
+-save_gdt: .word 0
+- .quad 0
+-func_rt_ptr: .quad 0
+-
+- .global efi_gdt64
+-efi_gdt64:
+- .word efi_gdt64_end - efi_gdt64
+- .long 0 /* Filled out by user */
+- .word 0
+- .quad 0x0000000000000000 /* NULL descriptor */
+- .quad 0x00af9a000000ffff /* __KERNEL_CS */
+- .quad 0x00cf92000000ffff /* __KERNEL_DS */
+- .quad 0x0080890000000000 /* TS descriptor */
+- .quad 0x0000000000000000 /* TS continued */
+-efi_gdt64_end:
+-#endif /* CONFIG_EFI_MIXED */
+-
+ .data
+ ENTRY(efi_scratch)
+ .fill 3,8,0
+diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
+index 8806fa73e6e6..ff85d28c50f2 100644
+--- a/arch/x86/platform/efi/efi_thunk_64.S
++++ b/arch/x86/platform/efi/efi_thunk_64.S
+@@ -1,9 +1,26 @@
+ /*
+ * Copyright (C) 2014 Intel Corporation; author Matt Fleming
++ *
++ * Support for invoking 32-bit EFI runtime services from a 64-bit
++ * kernel.
++ *
++ * The below thunking functions are only used after ExitBootServices()
++ * has been called. This simplifies things considerably as compared with
++ * the early EFI thunking because we can leave all the kernel state
++ * intact (GDT, IDT, etc) and simply invoke the the 32-bit EFI runtime
++ * services from __KERNEL32_CS. This means we can continue to service
++ * interrupts across an EFI mixed mode call.
++ *
++ * We do however, need to handle the fact that we're running in a full
++ * 64-bit virtual address space. Things like the stack and instruction
++ * addresses need to be accessible by the 32-bit firmware, so we rely on
++ * using the identity mappings in the EFI page table to access the stack
++ * and kernel text (see efi_setup_page_tables()).
+ */
+
+ #include <linux/linkage.h>
+ #include <asm/page_types.h>
++#include <asm/segment.h>
+
+ .text
+ .code64
+@@ -33,14 +50,6 @@ ENTRY(efi64_thunk)
+ leaq efi_exit32(%rip), %rbx
+ subq %rax, %rbx
+ movl %ebx, 8(%rsp)
+- leaq efi_gdt64(%rip), %rbx
+- subq %rax, %rbx
+- movl %ebx, 2(%ebx)
+- movl %ebx, 4(%rsp)
+- leaq efi_gdt32(%rip), %rbx
+- subq %rax, %rbx
+- movl %ebx, 2(%ebx)
+- movl %ebx, (%rsp)
+
+ leaq __efi64_thunk(%rip), %rbx
+ subq %rax, %rbx
+@@ -52,14 +61,92 @@ ENTRY(efi64_thunk)
+ retq
+ ENDPROC(efi64_thunk)
+
+- .data
+-efi_gdt32:
+- .word efi_gdt32_end - efi_gdt32
+- .long 0 /* Filled out above */
+- .word 0
+- .quad 0x0000000000000000 /* NULL descriptor */
+- .quad 0x00cf9a000000ffff /* __KERNEL_CS */
+- .quad 0x00cf93000000ffff /* __KERNEL_DS */
+-efi_gdt32_end:
++/*
++ * We run this function from the 1:1 mapping.
++ *
++ * This function must be invoked with a 1:1 mapped stack.
++ */
++ENTRY(__efi64_thunk)
++ movl %ds, %eax
++ push %rax
++ movl %es, %eax
++ push %rax
++ movl %ss, %eax
++ push %rax
++
++ subq $32, %rsp
++ movl %esi, 0x0(%rsp)
++ movl %edx, 0x4(%rsp)
++ movl %ecx, 0x8(%rsp)
++ movq %r8, %rsi
++ movl %esi, 0xc(%rsp)
++ movq %r9, %rsi
++ movl %esi, 0x10(%rsp)
++
++ leaq 1f(%rip), %rbx
++ movq %rbx, func_rt_ptr(%rip)
++
++ /* Switch to 32-bit descriptor */
++ pushq $__KERNEL32_CS
++ leaq efi_enter32(%rip), %rax
++ pushq %rax
++ lretq
++
++1: addq $32, %rsp
++
++ pop %rbx
++ movl %ebx, %ss
++ pop %rbx
++ movl %ebx, %es
++ pop %rbx
++ movl %ebx, %ds
+
++ /*
++ * Convert 32-bit status code into 64-bit.
++ */
++ test %rax, %rax
++ jz 1f
++ movl %eax, %ecx
++ andl $0x0fffffff, %ecx
++ andl $0xf0000000, %eax
++ shl $32, %rax
++ or %rcx, %rax
++1:
++ ret
++ENDPROC(__efi64_thunk)
++
++ENTRY(efi_exit32)
++ movq func_rt_ptr(%rip), %rax
++ push %rax
++ mov %rdi, %rax
++ ret
++ENDPROC(efi_exit32)
++
++ .code32
++/*
++ * EFI service pointer must be in %edi.
++ *
++ * The stack should represent the 32-bit calling convention.
++ */
++ENTRY(efi_enter32)
++ movl $__KERNEL_DS, %eax
++ movl %eax, %ds
++ movl %eax, %es
++ movl %eax, %ss
++
++ call *%edi
++
++ /* We must preserve return value */
++ movl %eax, %edi
++
++ movl 72(%esp), %eax
++ pushl $__KERNEL_CS
++ pushl %eax
++
++ lret
++ENDPROC(efi_enter32)
++
++ .data
++ .balign 8
++func_rt_ptr: .quad 0
+ efi_saved_sp: .quad 0
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index ff18dab6b585..702ae29b8d90 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -500,6 +500,7 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
+ bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
+ if (!bt->bs) {
+ kfree(bt->map);
++ bt->map = NULL;
+ return -ENOMEM;
+ }
+
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 9273d0969ebd..5b9c6d5c3636 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -1292,6 +1292,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
+ struct blkg_rwstat rwstat = { }, tmp;
+ int i, cpu;
+
++ if (tg->stats_cpu == NULL)
++ return 0;
++
+ for_each_possible_cpu(cpu) {
+ struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
+
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 6f2751d305de..5da8e6e9ab4b 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -3590,6 +3590,11 @@ retry:
+
+ blkcg = bio_blkcg(bio);
+ cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
++ if (!cfqg) {
++ cfqq = &cfqd->oom_cfqq;
++ goto out;
++ }
++
+ cfqq = cic_to_cfqq(cic, is_sync);
+
+ /*
+@@ -3626,7 +3631,7 @@ retry:
+ } else
+ cfqq = &cfqd->oom_cfqq;
+ }
+-
++out:
+ if (new_cfqq)
+ kmem_cache_free(cfq_pool, new_cfqq);
+
+@@ -3656,12 +3661,17 @@ static struct cfq_queue *
+ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+ struct bio *bio, gfp_t gfp_mask)
+ {
+- const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+- const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
++ int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
++ int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
+ struct cfq_queue **async_cfqq = NULL;
+ struct cfq_queue *cfqq = NULL;
+
+ if (!is_sync) {
++ if (!ioprio_valid(cic->ioprio)) {
++ struct task_struct *tsk = current;
++ ioprio = task_nice_ioprio(tsk);
++ ioprio_class = task_nice_ioclass(tsk);
++ }
+ async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+ cfqq = *async_cfqq;
+ }
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 93d160661f4c..41e9c199e874 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -105,7 +105,7 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
+ }
+ }
+
+-static void byt_i2c_setup(struct lpss_private_data *pdata)
++static void lpss_deassert_reset(struct lpss_private_data *pdata)
+ {
+ unsigned int offset;
+ u32 val;
+@@ -114,9 +114,18 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
+ val = readl(pdata->mmio_base + offset);
+ val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
+ writel(val, pdata->mmio_base + offset);
++}
++
++#define LPSS_I2C_ENABLE 0x6c
++
++static void byt_i2c_setup(struct lpss_private_data *pdata)
++{
++ lpss_deassert_reset(pdata);
+
+ if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
+ pdata->fixed_clk_rate = 133000000;
++
++ writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
+ }
+
+ static struct lpss_device_desc lpt_dev_desc = {
+@@ -166,6 +175,12 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
+ .setup = byt_i2c_setup,
+ };
+
++static struct lpss_device_desc bsw_spi_dev_desc = {
++ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
++ .prv_offset = 0x400,
++ .setup = lpss_deassert_reset,
++};
++
+ #else
+
+ #define LPSS_ADDR(desc) (0UL)
+@@ -198,7 +213,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
+ /* Braswell LPSS devices */
+ { "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
+ { "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
+- { "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
++ { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
+ { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
+
+ { "INT3430", LPSS_ADDR(lpt_dev_desc) },
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 086240cd29c3..fe1678c4ff89 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -106,6 +106,7 @@ static const struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x13d3, 0x3402) },
+ { USB_DEVICE(0x13d3, 0x3408) },
++ { USB_DEVICE(0x13d3, 0x3423) },
+ { USB_DEVICE(0x13d3, 0x3432) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+@@ -158,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+@@ -170,6 +172,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ #define USB_REQ_DFU_DNLOAD 1
+ #define BULK_SIZE 4096
+ #define FW_HDR_SIZE 20
++#define TIMEGAP_USEC_MIN 50
++#define TIMEGAP_USEC_MAX 100
+
+ static int ath3k_load_firmware(struct usb_device *udev,
+ const struct firmware *firmware)
+@@ -201,6 +205,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
+ pipe = usb_sndbulkpipe(udev, 0x02);
+
+ while (count) {
++ /* workaround the compatibility issue with xHCI controller*/
++ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
++
+ size = min_t(uint, count, BULK_SIZE);
+ memcpy(send_buf, firmware->data + sent, size);
+
+@@ -298,6 +305,9 @@ static int ath3k_load_fwfile(struct usb_device *udev,
+ pipe = usb_sndbulkpipe(udev, 0x02);
+
+ while (count) {
++ /* workaround the compatibility issue with xHCI controller*/
++ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
++
+ size = min_t(uint, count, BULK_SIZE);
+ memcpy(send_buf, firmware->data + sent, size);
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 091c813df8e9..f0e2f721c8ce 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -107,15 +107,23 @@ static const struct usb_device_id btusb_table[] = {
+ { USB_DEVICE(0x0b05, 0x17cb) },
+ { USB_DEVICE(0x413c, 0x8197) },
+
++ /* Broadcom BCM20702B0 (Dynex/Insignia) */
++ { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
++
+ /* Foxconn - Hon Hai */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
+
++ /* Lite-On Technology - Broadcom based */
++ { USB_VENDOR_AND_INTERFACE_INFO(0x04ca, 0xff, 0x01, 0x01),
++ .driver_info = BTUSB_BCM_PATCHRAM },
++
+ /* Broadcom devices with vendor specific id */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
+
+ /* ASUSTek Computer - Broadcom based */
+- { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01),
++ .driver_info = BTUSB_BCM_PATCHRAM },
+
+ /* Belkin F8065bf - Broadcom based */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
+@@ -183,6 +191,7 @@ static const struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 04645c09fe5e..9cd6968e2f92 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f)
+ __u32 c = f->pool[2], d = f->pool[3];
+
+ a += b; c += d;
+- b = rol32(a, 6); d = rol32(c, 27);
++ b = rol32(b, 6); d = rol32(d, 27);
+ d ^= a; b ^= c;
+
+ a += b; c += d;
+- b = rol32(a, 16); d = rol32(c, 14);
++ b = rol32(b, 16); d = rol32(d, 14);
+ d ^= a; b ^= c;
+
+ a += b; c += d;
+- b = rol32(a, 6); d = rol32(c, 27);
++ b = rol32(b, 6); d = rol32(d, 27);
+ d ^= a; b ^= c;
+
+ a += b; c += d;
+- b = rol32(a, 16); d = rol32(c, 14);
++ b = rol32(b, 16); d = rol32(d, 14);
+ d ^= a; b ^= c;
+
+ f->pool[0] = a; f->pool[1] = b;
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 6af17002a115..cfb9089887bd 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -1122,7 +1122,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
+
+ /* Make chip available */
+ spin_lock(&driver_lock);
+- list_add_rcu(&chip->list, &tpm_chip_list);
++ list_add_tail_rcu(&chip->list, &tpm_chip_list);
+ spin_unlock(&driver_lock);
+
+ return chip;
+diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
+index 77272925dee6..503a85ae176c 100644
+--- a/drivers/char/tpm/tpm_i2c_atmel.c
++++ b/drivers/char/tpm/tpm_i2c_atmel.c
+@@ -168,6 +168,10 @@ static int i2c_atmel_probe(struct i2c_client *client,
+
+ chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
+ GFP_KERNEL);
++ if (!chip->vendor.priv) {
++ rc = -ENOMEM;
++ goto out_err;
++ }
+
+ /* Default timeouts */
+ chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
+index 7b158efd49f7..23c7b137a7fd 100644
+--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
+@@ -538,6 +538,11 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
+
+ chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
+ GFP_KERNEL);
++ if (!chip->vendor.priv) {
++ rc = -ENOMEM;
++ goto out_err;
++ }
++
+ init_waitqueue_head(&chip->vendor.read_queue);
+ init_waitqueue_head(&chip->vendor.int_queue);
+
+diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
+index 4669e3713428..7d1c540fa26a 100644
+--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
++++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
+@@ -487,7 +487,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
+ if (burstcnt < 0)
+ return burstcnt;
+ size = min_t(int, len - i - 1, burstcnt);
+- ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
++ ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size);
+ if (ret < 0)
+ goto out_err;
+
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index af74c57e5090..eff9d5870034 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -148,7 +148,8 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+ crq.len = (u16)count;
+ crq.data = ibmvtpm->rtce_dma_handle;
+
+- rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
++ cpu_to_be64(word[1]));
+ if (rc != H_SUCCESS) {
+ dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
+ rc = 0;
+@@ -186,7 +187,8 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
+ crq.valid = (u8)IBMVTPM_VALID_CMD;
+ crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
+
+- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
++ cpu_to_be64(buf[1]));
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
+@@ -212,7 +214,8 @@ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
+ crq.valid = (u8)IBMVTPM_VALID_CMD;
+ crq.msg = (u8)VTPM_GET_VERSION;
+
+- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
++ cpu_to_be64(buf[1]));
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "ibmvtpm_crq_get_version failed rc=%d\n", rc);
+@@ -307,6 +310,14 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
+ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
+ {
+ struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
++
++ /* ibmvtpm initializes at probe time, so the data we are
++ * asking for may not be set yet. Estimate that 4K required
++ * for TCE-mapped buffer in addition to CRQ.
++ */
++ if (!ibmvtpm)
++ return CRQ_RES_BUF_SIZE + PAGE_SIZE;
++
+ return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
+ }
+
+@@ -327,7 +338,8 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
+ crq.valid = (u8)IBMVTPM_VALID_CMD;
+ crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
+
+- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
++ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
++ cpu_to_be64(buf[1]));
+ if (rc != H_SUCCESS)
+ dev_err(ibmvtpm->dev,
+ "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
+@@ -472,11 +484,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+ case IBMVTPM_VALID_CMD:
+ switch (crq->msg) {
+ case VTPM_GET_RTCE_BUFFER_SIZE_RES:
+- if (crq->len <= 0) {
++ if (be16_to_cpu(crq->len) <= 0) {
+ dev_err(ibmvtpm->dev, "Invalid rtce size\n");
+ return;
+ }
+- ibmvtpm->rtce_size = crq->len;
++ ibmvtpm->rtce_size = be16_to_cpu(crq->len);
+ ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
+ GFP_KERNEL);
+ if (!ibmvtpm->rtce_buf) {
+@@ -497,11 +509,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
+
+ return;
+ case VTPM_GET_VERSION_RES:
+- ibmvtpm->vtpm_version = crq->data;
++ ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
+ return;
+ case VTPM_TPM_COMMAND_RES:
+ /* len of the data in rtce buffer */
+- ibmvtpm->res_len = crq->len;
++ ibmvtpm->res_len = be16_to_cpu(crq->len);
+ wake_up_interruptible(&ibmvtpm->wq);
+ return;
+ default:
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 2c46734b266d..51350cd0847e 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -75,6 +75,10 @@ enum tis_defaults {
+ #define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
+ #define TPM_RID(l) (0x0F04 | ((l) << 12))
+
++struct priv_data {
++ bool irq_tested;
++};
++
+ static LIST_HEAD(tis_chips);
+ static DEFINE_MUTEX(tis_lock);
+
+@@ -338,12 +342,27 @@ out_err:
+ return rc;
+ }
+
++static void disable_interrupts(struct tpm_chip *chip)
++{
++ u32 intmask;
++
++ intmask =
++ ioread32(chip->vendor.iobase +
++ TPM_INT_ENABLE(chip->vendor.locality));
++ intmask &= ~TPM_GLOBAL_INT_ENABLE;
++ iowrite32(intmask,
++ chip->vendor.iobase +
++ TPM_INT_ENABLE(chip->vendor.locality));
++ free_irq(chip->vendor.irq, chip);
++ chip->vendor.irq = 0;
++}
++
+ /*
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
++static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+ {
+ int rc;
+ u32 ordinal;
+@@ -373,6 +392,30 @@ out_err:
+ return rc;
+ }
+
++static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
++{
++ int rc, irq;
++ struct priv_data *priv = chip->vendor.priv;
++
++ if (!chip->vendor.irq || priv->irq_tested)
++ return tpm_tis_send_main(chip, buf, len);
++
++ /* Verify receipt of the expected IRQ */
++ irq = chip->vendor.irq;
++ chip->vendor.irq = 0;
++ rc = tpm_tis_send_main(chip, buf, len);
++ chip->vendor.irq = irq;
++ if (!priv->irq_tested)
++ msleep(1);
++ if (!priv->irq_tested) {
++ disable_interrupts(chip);
++ dev_err(chip->dev,
++ FW_BUG "TPM interrupt not working, polling instead\n");
++ }
++ priv->irq_tested = true;
++ return rc;
++}
++
+ struct tis_vendor_timeout_override {
+ u32 did_vid;
+ unsigned long timeout_us[4];
+@@ -505,6 +548,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+ if (interrupt == 0)
+ return IRQ_NONE;
+
++ ((struct priv_data *)chip->vendor.priv)->irq_tested = true;
+ if (interrupt & TPM_INTF_DATA_AVAIL_INT)
+ wake_up_interruptible(&chip->vendor.read_queue);
+ if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
+@@ -534,9 +578,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ u32 vendor, intfcaps, intmask;
+ int rc, i, irq_s, irq_e, probe;
+ struct tpm_chip *chip;
++ struct priv_data *priv;
+
++ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
++ if (priv == NULL)
++ return -ENOMEM;
+ if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
+ return -ENODEV;
++ chip->vendor.priv = priv;
+
+ chip->vendor.iobase = ioremap(start, len);
+ if (!chip->vendor.iobase) {
+@@ -605,19 +654,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
+ dev_dbg(dev, "\tData Avail Int Support\n");
+
+- /* get the timeouts before testing for irqs */
+- if (tpm_get_timeouts(chip)) {
+- dev_err(dev, "Could not get TPM timeouts and durations\n");
+- rc = -ENODEV;
+- goto out_err;
+- }
+-
+- if (tpm_do_selftest(chip)) {
+- dev_err(dev, "TPM self test failed\n");
+- rc = -ENODEV;
+- goto out_err;
+- }
+-
+ /* INTERRUPT Setup */
+ init_waitqueue_head(&chip->vendor.read_queue);
+ init_waitqueue_head(&chip->vendor.int_queue);
+@@ -719,6 +755,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
+ }
+ }
+
++ if (tpm_get_timeouts(chip)) {
++ dev_err(dev, "Could not get TPM timeouts and durations\n");
++ rc = -ENODEV;
++ goto out_err;
++ }
++
++ if (tpm_do_selftest(chip)) {
++ dev_err(dev, "TPM self test failed\n");
++ rc = -ENODEV;
++ goto out_err;
++ }
++
+ INIT_LIST_HEAD(&chip->vendor.list);
+ mutex_lock(&tis_lock);
+ list_add(&chip->vendor.list, &tis_chips);
+diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
+index 32a3d25795d3..68ab42356d0e 100644
+--- a/drivers/clocksource/mtk_timer.c
++++ b/drivers/clocksource/mtk_timer.c
+@@ -224,6 +224,8 @@ static void __init mtk_timer_init(struct device_node *node)
+ }
+ rate = clk_get_rate(clk);
+
++ mtk_timer_global_reset(evt);
++
+ if (request_irq(evt->dev.irq, mtk_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
+ pr_warn("failed to setup irq %d\n", evt->dev.irq);
+@@ -232,8 +234,6 @@ static void __init mtk_timer_init(struct device_node *node)
+
+ evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+
+- mtk_timer_global_reset(evt);
+-
+ /* Configure clock source */
+ mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
+ clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
+@@ -241,10 +241,11 @@ static void __init mtk_timer_init(struct device_node *node)
+
+ /* Configure clock event */
+ mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
+- mtk_timer_enable_irq(evt, GPT_CLK_EVT);
+-
+ clockevents_config_and_register(&evt->dev, rate, 0x3,
+ 0xffffffff);
++
++ mtk_timer_enable_irq(evt, GPT_CLK_EVT);
++
+ return;
+
+ err_clk_disable:
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 4473eba1d6b0..e3bf702b5588 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1409,9 +1409,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
+ unsigned long flags;
+ struct cpufreq_policy *policy;
+
+- read_lock_irqsave(&cpufreq_driver_lock, flags);
++ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
++ per_cpu(cpufreq_cpu_data, cpu) = NULL;
++ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (!policy) {
+ pr_debug("%s: No cpu_data found\n", __func__);
+@@ -1466,7 +1467,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
+ }
+ }
+
+- per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ return 0;
+ }
+
+diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
+index 2fd53eaaec20..d6d425773fa4 100644
+--- a/drivers/cpufreq/s3c2416-cpufreq.c
++++ b/drivers/cpufreq/s3c2416-cpufreq.c
+@@ -263,7 +263,7 @@ out:
+ }
+
+ #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
++static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+ {
+ int count, v, i, found;
+ struct cpufreq_frequency_table *pos;
+@@ -333,7 +333,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
+ .notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
+ };
+
+-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
++static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+ {
+ struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+ struct cpufreq_frequency_table *pos;
+diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
+index d00f1cee4509..733aa5153e74 100644
+--- a/drivers/cpufreq/s3c24xx-cpufreq.c
++++ b/drivers/cpufreq/s3c24xx-cpufreq.c
+@@ -144,11 +144,6 @@ static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
+ (cfg->info->set_fvco)(cfg);
+ }
+
+-static inline void s3c_cpufreq_resume_clocks(void)
+-{
+- cpu_cur.info->resume_clocks();
+-}
+-
+ static inline void s3c_cpufreq_updateclk(struct clk *clk,
+ unsigned int freq)
+ {
+@@ -417,9 +412,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
+
+ last_target = ~0; /* invalidate last_target setting */
+
+- /* first, find out what speed we resumed at. */
+- s3c_cpufreq_resume_clocks();
+-
+ /* whilst we will be called later on, we try and re-set the
+ * cpu frequencies as soon as possible so that we do not end
+ * up resuming devices and then immediately having to re-set
+@@ -454,7 +446,7 @@ static struct cpufreq_driver s3c24xx_driver = {
+ };
+
+
+-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
++int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+ {
+ if (!info || !info->name) {
+ printk(KERN_ERR "%s: failed to pass valid information\n",
+diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
+index 7047821a7f8a..4ab7a2156672 100644
+--- a/drivers/cpufreq/speedstep-lib.c
++++ b/drivers/cpufreq/speedstep-lib.c
+@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
+
+ pr_debug("previous speed is %u\n", prev_speed);
+
++ preempt_disable();
+ local_irq_save(flags);
+
+ /* switch to low state */
+@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
+
+ out:
+ local_irq_restore(flags);
++ preempt_enable();
++
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(speedstep_get_freqs);
+diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
+index 5fc96d5d656b..819229e824fb 100644
+--- a/drivers/cpufreq/speedstep-smi.c
++++ b/drivers/cpufreq/speedstep-smi.c
+@@ -156,6 +156,7 @@ static void speedstep_set_state(unsigned int state)
+ return;
+
+ /* Disable IRQs */
++ preempt_disable();
+ local_irq_save(flags);
+
+ command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
+@@ -166,9 +167,19 @@ static void speedstep_set_state(unsigned int state)
+
+ do {
+ if (retry) {
++ /*
++ * We need to enable interrupts, otherwise the blockage
++ * won't resolve.
++ *
++ * We disable preemption so that other processes don't
++ * run. If other processes were running, they could
++ * submit more DMA requests, making the blockage worse.
++ */
+ pr_debug("retry %u, previous result %u, waiting...\n",
+ retry, result);
++ local_irq_enable();
+ mdelay(retry * 50);
++ local_irq_disable();
+ }
+ retry++;
+ __asm__ __volatile__(
+@@ -185,6 +196,7 @@ static void speedstep_set_state(unsigned int state)
+
+ /* enable IRQs */
+ local_irq_restore(flags);
++ preempt_enable();
+
+ if (new_state == state)
+ pr_debug("change to %u MHz succeeded after %u tries "
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index bbd65149cdb2..c7236ba92b8e 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2039,14 +2039,20 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
+
+ static inline void decode_bus_error(int node_id, struct mce *m)
+ {
+- struct mem_ctl_info *mci = mcis[node_id];
+- struct amd64_pvt *pvt = mci->pvt_info;
++ struct mem_ctl_info *mci;
++ struct amd64_pvt *pvt;
+ u8 ecc_type = (m->status >> 45) & 0x3;
+ u8 xec = XEC(m->status, 0x1f);
+ u16 ec = EC(m->status);
+ u64 sys_addr;
+ struct err_info err;
+
++ mci = edac_mc_find(node_id);
++ if (!mci)
++ return;
++
++ pvt = mci->pvt_info;
++
+ /* Bail out early if this was an 'observed' error */
+ if (PP(ec) == NBSL_PP_OBS)
+ return;
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index e9bb1af67c8d..3cdaac8944eb 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -2297,7 +2297,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
+ type = IVY_BRIDGE;
+ break;
+- case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
++ case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
+ rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
+ type = SANDY_BRIDGE;
+ break;
+@@ -2306,8 +2306,11 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ type = HASWELL;
+ break;
+ }
+- if (unlikely(rc < 0))
++ if (unlikely(rc < 0)) {
++ edac_dbg(0, "couldn't get all devices for 0x%x\n", pdev->device);
+ goto fail0;
++ }
++
+ mc = 0;
+
+ list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
+@@ -2320,7 +2323,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ goto fail1;
+ }
+
+- sbridge_printk(KERN_INFO, "Driver loaded.\n");
++ sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
+
+ mutex_unlock(&sbridge_edac_lock);
+ return 0;
+diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
+index 22052d84c63b..a8c783099af9 100644
+--- a/drivers/gpio/gpio-tps65912.c
++++ b/drivers/gpio/gpio-tps65912.c
+@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
+ struct gpio_chip gpio_chip;
+ };
+
++#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
++
+ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+ {
+- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ int val;
+
+ val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
+@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+ {
+- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+
+ if (value)
+ tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
+@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+ {
+- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+
+ /* Set the initial value */
+ tps65912_gpio_set(gc, offset, value);
+@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
+
+ static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
+ {
+- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
++ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
++ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+
+ return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK);
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 08261f2b3a82..26645a847bb8 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -46,12 +46,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
+
+ ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
+ if (ret < 0) {
+- /* We've found the gpio chip, but the translation failed.
+- * Return true to stop looking and return the translation
+- * error via out_gpio
++ /* We've found a gpio chip, but the translation failed.
++ * Store translation error in out_gpio.
++ * Return false to keep looking, as more than one gpio chip
++ * could be registered per of-node.
+ */
+ gg_data->out_gpio = ERR_PTR(ret);
+- return true;
++ return false;
+ }
+
+ gg_data->out_gpio = gpiochip_get_desc(gc, ret);
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index 80e33e0abc52..6d7c9c580ceb 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ {
+ int ret, ret_size;
+- int size = ihid->bufsize;
++ int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
++
++ if (size > ihid->bufsize)
++ size = ihid->bufsize;
+
+ ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
+ if (ret != size) {
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 40b35be34f8d..2f2f38f4d83c 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
+ if (test_bit(WriteMostly, &rdev->flags)) {
+ /* Don't balance among write-mostly, just
+ * use the first as a last resort */
+- if (best_disk < 0) {
++ if (best_dist_disk < 0) {
+ if (is_badblock(rdev, this_sector, sectors,
+ &first_bad, &bad_sectors)) {
+ if (first_bad < this_sector)
+@@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
+ best_good_sectors = first_bad - this_sector;
+ } else
+ best_good_sectors = sectors;
+- best_disk = disk;
++ best_dist_disk = disk;
++ best_pending_disk = disk;
+ }
+ continue;
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index b98765f6f77f..8577cc7db47e 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3102,7 +3102,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
+ * generate correct data from the parity.
+ */
+ if (conf->max_degraded == 2 ||
+- (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
++ (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
++ s->failed == 0)) {
+ /* Calculate the real rcw later - for now make it
+ * look like rcw is cheaper
+ */
+diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
+index 1cd93be281ed..64a759c017d4 100644
+--- a/drivers/media/dvb-frontends/si2168.c
++++ b/drivers/media/dvb-frontends/si2168.c
+@@ -605,6 +605,8 @@ static const struct dvb_frontend_ops si2168_ops = {
+ .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
+ .info = {
+ .name = "Silicon Labs Si2168",
++ .symbol_rate_min = 1000000,
++ .symbol_rate_max = 7200000,
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
+index 3aac88f1d54a..736277236ba2 100644
+--- a/drivers/media/platform/Kconfig
++++ b/drivers/media/platform/Kconfig
+@@ -56,10 +56,8 @@ config VIDEO_VIU
+
+ config VIDEO_TIMBERDALE
+ tristate "Support for timberdale Video In/LogiWIN"
+- depends on VIDEO_V4L2 && I2C && DMADEVICES
+- depends on MFD_TIMBERDALE || COMPILE_TEST
+- select DMA_ENGINE
+- select TIMB_DMA
++ depends on VIDEO_V4L2 && I2C
++ depends on (MFD_TIMBERDALE && TIMB_DMA) || COMPILE_TEST
+ select VIDEO_ADV7180
+ select VIDEOBUF_DMA_CONTIG
+ ---help---
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 8d3b74c5a717..fc369b033484 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1021,16 +1021,16 @@ static ssize_t store_protocols(struct device *device,
+ goto out;
+ }
+
+- if (new_protocols == old_protocols) {
+- rc = len;
+- goto out;
++ if (new_protocols != old_protocols) {
++ *current_protocols = new_protocols;
++ IR_dprintk(1, "Protocols changed to 0x%llx\n",
++ (long long)new_protocols);
+ }
+
+- *current_protocols = new_protocols;
+- IR_dprintk(1, "Protocols changed to 0x%llx\n", (long long)new_protocols);
+-
+ /*
+- * If the protocol is changed the filter needs updating.
++ * If a protocol change was attempted the filter may need updating, even
++ * if the actual protocol mask hasn't changed (since the driver may have
++ * cleared the filter).
+ * Try setting the same filter with the new protocol (if any).
+ * Fall back to clearing the filter.
+ */
+diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+index 9f2c5459b73a..2273ce78f5c8 100644
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -344,15 +344,17 @@ static void lme2510_int_response(struct urb *lme_urb)
+
+ usb_submit_urb(lme_urb, GFP_ATOMIC);
+
+- /* interrupt urb is due every 48 msecs while streaming
+- * add 12msecs for system lag */
+- st->int_urb_due = jiffies + msecs_to_jiffies(60);
++ /* Interrupt urb is due every 48 msecs while streaming the buffer
++ * stores up to 4 periods if missed. Allow 200 msec for next interrupt.
++ */
++ st->int_urb_due = jiffies + msecs_to_jiffies(200);
+ }
+
+ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ {
+ struct dvb_usb_device *d = adap_to_d(adap);
+ struct lme2510_state *lme_int = adap_to_priv(adap);
++ struct usb_host_endpoint *ep;
+
+ lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);
+
+@@ -374,6 +376,12 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ adap,
+ 8);
+
++ /* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */
++ ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
++
++ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
++ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
++
+ lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
+diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
+index 957c7ae30efe..1ead904a7043 100644
+--- a/drivers/media/usb/em28xx/em28xx-audio.c
++++ b/drivers/media/usb/em28xx/em28xx-audio.c
+@@ -821,7 +821,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
+ if (urb_size > ep_size * npackets)
+ npackets = DIV_ROUND_UP(urb_size, ep_size);
+
+- em28xx_info("Number of URBs: %d, with %d packets and %d size",
++ em28xx_info("Number of URBs: %d, with %d packets and %d size\n",
+ num_urb, npackets, urb_size);
+
+ /* Estimate the bytes per period */
+@@ -982,7 +982,7 @@ static int em28xx_audio_fini(struct em28xx *dev)
+ return 0;
+ }
+
+- em28xx_info("Closing audio extension");
++ em28xx_info("Closing audio extension\n");
+
+ if (dev->adev.sndcard) {
+ snd_card_disconnect(dev->adev.sndcard);
+@@ -1006,7 +1006,7 @@ static int em28xx_audio_suspend(struct em28xx *dev)
+ if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
+ return 0;
+
+- em28xx_info("Suspending audio extension");
++ em28xx_info("Suspending audio extension\n");
+ em28xx_deinit_isoc_audio(dev);
+ atomic_set(&dev->adev.stream_started, 0);
+ return 0;
+@@ -1020,7 +1020,7 @@ static int em28xx_audio_resume(struct em28xx *dev)
+ if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
+ return 0;
+
+- em28xx_info("Resuming audio extension");
++ em28xx_info("Resuming audio extension\n");
+ /* Nothing to do other than schedule_work() ?? */
+ schedule_work(&dev->adev.wq_trigger);
+ return 0;
+diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
+index 901cf2b952d7..84dd4aec70fb 100644
+--- a/drivers/media/usb/em28xx/em28xx-core.c
++++ b/drivers/media/usb/em28xx/em28xx-core.c
+@@ -1122,7 +1122,7 @@ int em28xx_suspend_extension(struct em28xx *dev)
+ {
+ const struct em28xx_ops *ops = NULL;
+
+- em28xx_info("Suspending extensions");
++ em28xx_info("Suspending extensions\n");
+ mutex_lock(&em28xx_devlist_mutex);
+ list_for_each_entry(ops, &em28xx_extension_devlist, next) {
+ if (ops->suspend)
+@@ -1136,7 +1136,7 @@ int em28xx_resume_extension(struct em28xx *dev)
+ {
+ const struct em28xx_ops *ops = NULL;
+
+- em28xx_info("Resuming extensions");
++ em28xx_info("Resuming extensions\n");
+ mutex_lock(&em28xx_devlist_mutex);
+ list_for_each_entry(ops, &em28xx_extension_devlist, next) {
+ if (ops->resume)
+diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
+index 9682c52d67d1..41a6864ab8d2 100644
+--- a/drivers/media/usb/em28xx/em28xx-dvb.c
++++ b/drivers/media/usb/em28xx/em28xx-dvb.c
+@@ -1667,7 +1667,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
+ if (!dev->dvb)
+ return 0;
+
+- em28xx_info("Closing DVB extension");
++ em28xx_info("Closing DVB extension\n");
+
+ dvb = dev->dvb;
+ client = dvb->i2c_client_tuner;
+@@ -1718,17 +1718,17 @@ static int em28xx_dvb_suspend(struct em28xx *dev)
+ if (!dev->board.has_dvb)
+ return 0;
+
+- em28xx_info("Suspending DVB extension");
++ em28xx_info("Suspending DVB extension\n");
+ if (dev->dvb) {
+ struct em28xx_dvb *dvb = dev->dvb;
+
+ if (dvb->fe[0]) {
+ ret = dvb_frontend_suspend(dvb->fe[0]);
+- em28xx_info("fe0 suspend %d", ret);
++ em28xx_info("fe0 suspend %d\n", ret);
+ }
+ if (dvb->fe[1]) {
+ dvb_frontend_suspend(dvb->fe[1]);
+- em28xx_info("fe1 suspend %d", ret);
++ em28xx_info("fe1 suspend %d\n", ret);
+ }
+ }
+
+@@ -1745,18 +1745,18 @@ static int em28xx_dvb_resume(struct em28xx *dev)
+ if (!dev->board.has_dvb)
+ return 0;
+
+- em28xx_info("Resuming DVB extension");
++ em28xx_info("Resuming DVB extension\n");
+ if (dev->dvb) {
+ struct em28xx_dvb *dvb = dev->dvb;
+
+ if (dvb->fe[0]) {
+ ret = dvb_frontend_resume(dvb->fe[0]);
+- em28xx_info("fe0 resume %d", ret);
++ em28xx_info("fe0 resume %d\n", ret);
+ }
+
+ if (dvb->fe[1]) {
+ ret = dvb_frontend_resume(dvb->fe[1]);
+- em28xx_info("fe1 resume %d", ret);
++ em28xx_info("fe1 resume %d\n", ret);
+ }
+ }
+
+diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
+index 23f8f6afa2e0..b31e275faa2e 100644
+--- a/drivers/media/usb/em28xx/em28xx-input.c
++++ b/drivers/media/usb/em28xx/em28xx-input.c
+@@ -833,7 +833,7 @@ static int em28xx_ir_fini(struct em28xx *dev)
+ return 0;
+ }
+
+- em28xx_info("Closing input extension");
++ em28xx_info("Closing input extension\n");
+
+ em28xx_shutdown_buttons(dev);
+
+@@ -863,7 +863,7 @@ static int em28xx_ir_suspend(struct em28xx *dev)
+ if (dev->is_audio_only)
+ return 0;
+
+- em28xx_info("Suspending input extension");
++ em28xx_info("Suspending input extension\n");
+ if (ir)
+ cancel_delayed_work_sync(&ir->work);
+ cancel_delayed_work_sync(&dev->buttons_query_work);
+@@ -880,7 +880,7 @@ static int em28xx_ir_resume(struct em28xx *dev)
+ if (dev->is_audio_only)
+ return 0;
+
+- em28xx_info("Resuming input extension");
++ em28xx_info("Resuming input extension\n");
+ /* if suspend calls ir_raw_event_unregister(), the should call
+ ir_raw_event_register() */
+ if (ir)
+diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
+index 03d5ece0319c..e0f4be8c5a19 100644
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -1956,7 +1956,7 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
+ if (v4l2 == NULL)
+ return 0;
+
+- em28xx_info("Closing video extension");
++ em28xx_info("Closing video extension\n");
+
+ mutex_lock(&dev->lock);
+
+@@ -2005,7 +2005,7 @@ static int em28xx_v4l2_suspend(struct em28xx *dev)
+ if (!dev->has_video)
+ return 0;
+
+- em28xx_info("Suspending video extension");
++ em28xx_info("Suspending video extension\n");
+ em28xx_stop_urbs(dev);
+ return 0;
+ }
+@@ -2018,7 +2018,7 @@ static int em28xx_v4l2_resume(struct em28xx *dev)
+ if (!dev->has_video)
+ return 0;
+
+- em28xx_info("Resuming video extension");
++ em28xx_info("Resuming video extension\n");
+ /* what do we do here */
+ return 0;
+ }
+diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
+index 432aec8dd3ce..350a28af98aa 100644
+--- a/drivers/misc/mei/hw-me.c
++++ b/drivers/misc/mei/hw-me.c
+@@ -242,7 +242,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+ if ((hcsr & H_RST) == H_RST) {
+ dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+ hcsr &= ~H_RST;
+- mei_me_reg_write(hw, H_CSR, hcsr);
++ mei_hcsr_set(hw, hcsr);
+ hcsr = mei_hcsr_read(hw);
+ }
+
+@@ -335,6 +335,7 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
+ return -ETIME;
+ }
+
++ mei_me_hw_reset_release(dev);
+ dev->recvd_hw_ready = false;
+ return 0;
+ }
+@@ -729,9 +730,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+ /* check if we need to start the dev */
+ if (!mei_host_is_ready(dev)) {
+ if (mei_hw_is_ready(dev)) {
+- mei_me_hw_reset_release(dev);
+ dev_dbg(dev->dev, "we need to start the dev.\n");
+-
+ dev->recvd_hw_ready = true;
+ wake_up(&dev->wait_hw_ready);
+ } else {
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index 5036d7d39529..38251da7ba32 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -112,6 +112,38 @@ static int mv_conf_mbus_windows(struct platform_device *pdev,
+ return 0;
+ }
+
++static int armada_38x_quirks(struct platform_device *pdev,
++ struct sdhci_host *host)
++{
++ struct device_node *np = pdev->dev.of_node;
++
++ host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
++ /*
++ * According to erratum 'FE-2946959' both SDR50 and DDR50
++ * modes require specific clock adjustments in SDIO3
++ * Configuration register, if the adjustment is not done,
++ * remove them from the capabilities.
++ */
++ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
++ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
++
++ /*
++ * According to erratum 'ERR-7878951' Armada 38x SDHCI
++ * controller has different capabilities than the ones shown
++ * in its registers
++ */
++ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
++ if (of_property_read_bool(np, "no-1-8-v")) {
++ host->caps &= ~SDHCI_CAN_VDD_180;
++ host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
++ } else {
++ host->caps &= ~SDHCI_CAN_VDD_330;
++ }
++ host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_USE_SDR50_TUNING);
++
++ return 0;
++}
++
+ static void pxav3_reset(struct sdhci_host *host, u8 mask)
+ {
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+@@ -261,8 +293,8 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
+ if (!pdata)
+ return NULL;
+
+- of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
+- if (clk_delay_cycles > 0)
++ if (!of_property_read_u32(np, "mrvl,clk-delay-cycles",
++ &clk_delay_cycles))
+ pdata->clk_delay_cycles = clk_delay_cycles;
+
+ return pdata;
+@@ -295,7 +327,13 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
++ /* enable 1/8V DDR capable */
++ host->mmc->caps |= MMC_CAP_1_8V_DDR;
++
+ if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
++ ret = armada_38x_quirks(pdev, host);
++ if (ret < 0)
++ goto err_clk_get;
+ ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+ if (ret < 0)
+ goto err_mbus_win;
+@@ -314,9 +352,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
+ pltfm_host->clk = clk;
+ clk_prepare_enable(clk);
+
+- /* enable 1/8V DDR capable */
+- host->mmc->caps |= MMC_CAP_1_8V_DDR;
+-
+ match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
+ if (match) {
+ ret = mmc_of_parse(host->mmc);
+@@ -355,10 +390,11 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
+ }
+ }
+
+- pm_runtime_enable(&pdev->dev);
+- pm_runtime_get_sync(&pdev->dev);
++ pm_runtime_get_noresume(&pdev->dev);
++ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
+ pm_suspend_ignore_children(&pdev->dev, 1);
+
+ ret = sdhci_add_host(host);
+@@ -381,8 +417,8 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
+ return 0;
+
+ err_add_host:
+- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
++ pm_runtime_put_noidle(&pdev->dev);
+ err_of_parse:
+ err_cd_req:
+ clk_disable_unprepare(clk);
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index b6d2683da3a9..f71c22f8beea 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -668,9 +668,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+ mvmvif->uploaded = false;
+ mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+- /* does this make sense at all? */
+- mvmvif->color++;
+-
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
+ spin_unlock_bh(&mvm->time_event_lock);
+@@ -1014,7 +1011,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+- goto out_release;
++ goto out_remove_mac;
+
+ /* beacon filtering */
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index c6a517c771df..5928c9db49ce 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -902,6 +902,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ sta_id = ba_notif->sta_id;
+ tid = ba_notif->tid;
+
++ if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
++ tid >= IWL_MAX_TID_COUNT,
++ "sta_id %d tid %d", sta_id, tid))
++ return 0;
++
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
+index eb8e2984c5e9..62ea2b550832 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
+@@ -722,7 +722,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+- iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
++ /*
++ * Send 0 as the scd_base_addr since the device may have be reset
++ * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
++ * contain garbage.
++ */
++ iwl_pcie_tx_start(trans, 0);
+ }
+
+ /*
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index c70efb9a6e78..e25faacf58b7 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -816,11 +816,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+
+ /* get a new skb - if fail, old one will be reused */
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+- if (unlikely(!new_skb)) {
+- pr_err("Allocation of new skb failed in %s\n",
+- __func__);
++ if (unlikely(!new_skb))
+ goto no_new;
+- }
+ if (rtlpriv->use_new_trx_flow) {
+ buffer_desc =
+ &rtlpci->rx_ring[rxring_idx].buffer_desc
+diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
+index 5e832306dba9..d4567d12e07e 100644
+--- a/drivers/net/wireless/rtlwifi/pci.h
++++ b/drivers/net/wireless/rtlwifi/pci.h
+@@ -325,4 +325,11 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv,
+ writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+ }
+
++static inline u16 calc_fifo_space(u16 rp, u16 wp)
++{
++ if (rp <= wp)
++ return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp;
++ return rp - wp - 1;
++}
++
+ #endif
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+index 45c128b91f7f..c5d4b8013cde 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+@@ -666,7 +666,6 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+ struct sk_buff *skb = NULL;
+
+ u32 totalpacketlen;
+- bool rtstatus;
+ u8 u1rsvdpageloc[5] = { 0 };
+ bool b_dlok = false;
+
+@@ -728,10 +727,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+ memcpy((u8 *)skb_put(skb, totalpacketlen),
+ &reserved_page_packet, totalpacketlen);
+
+- rtstatus = rtl_cmd_send_packet(hw, skb);
+-
+- if (rtstatus)
+- b_dlok = true;
++ b_dlok = true;
+
+ if (b_dlok) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+index 1a87edca2c3f..b461b3128da5 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+@@ -85,29 +85,6 @@ static void _rtl92ee_enable_bcn_sub_func(struct ieee80211_hw *hw)
+ _rtl92ee_set_bcn_ctrl_reg(hw, 0, BIT(1));
+ }
+
+-static void _rtl92ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
+-{
+- struct rtl_priv *rtlpriv = rtl_priv(hw);
+- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+- struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+- unsigned long flags;
+-
+- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+- while (skb_queue_len(&ring->queue)) {
+- struct rtl_tx_buffer_desc *entry =
+- &ring->buffer_desc[ring->idx];
+- struct sk_buff *skb = __skb_dequeue(&ring->queue);
+-
+- pci_unmap_single(rtlpci->pdev,
+- rtlpriv->cfg->ops->get_desc(
+- (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+- skb->len, PCI_DMA_TODEVICE);
+- kfree_skb(skb);
+- ring->idx = (ring->idx + 1) % ring->entries;
+- }
+- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+-}
+-
+ static void _rtl92ee_disable_bcn_sub_func(struct ieee80211_hw *hw)
+ {
+ _rtl92ee_set_bcn_ctrl_reg(hw, BIT(1), 0);
+@@ -403,9 +380,6 @@ static void _rtl92ee_download_rsvd_page(struct ieee80211_hw *hw)
+ rtl_write_byte(rtlpriv, REG_DWBCN0_CTRL + 2,
+ bcnvalid_reg | BIT(0));
+
+- /* Return Beacon TCB */
+- _rtl92ee_return_beacon_queue_skb(hw);
+-
+ /* download rsvd page */
+ rtl92ee_set_fw_rsvdpagepkt(hw, false);
+
+@@ -1163,6 +1137,139 @@ void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw)
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+ }
+
++static bool _rtl8192ee_check_pcie_dma_hang(struct rtl_priv *rtlpriv)
++{
++ u8 tmp;
++
++ /* write reg 0x350 Bit[26]=1. Enable debug port. */
++ tmp = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3);
++ if (!(tmp & BIT(2))) {
++ rtl_write_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3,
++ tmp | BIT(2));
++ mdelay(100); /* Suggested by DD Justin_tsai. */
++ }
++
++ /* read reg 0x350 Bit[25] if 1 : RX hang
++ * read reg 0x350 Bit[24] if 1 : TX hang
++ */
++ tmp = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3);
++ if ((tmp & BIT(0)) || (tmp & BIT(1))) {
++ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
++ "CheckPcieDMAHang8192EE(): true!!\n");
++ return true;
++ }
++ return false;
++}
++
++static void _rtl8192ee_reset_pcie_interface_dma(struct rtl_priv *rtlpriv,
++ bool mac_power_on)
++{
++ u8 tmp;
++ bool release_mac_rx_pause;
++ u8 backup_pcie_dma_pause;
++
++ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
++ "ResetPcieInterfaceDMA8192EE()\n");
++
++ /* Revise Note: Follow the document "PCIe RX DMA Hang Reset Flow_v03"
++ * released by SD1 Alan.
++ */
++
++ /* 1. disable register write lock
++ * write 0x1C bit[1:0] = 2'h0
++ * write 0xCC bit[2] = 1'b1
++ */
++ tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL);
++ tmp &= ~(BIT(1) | BIT(0));
++ rtl_write_byte(rtlpriv, REG_RSV_CTRL, tmp);
++ tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
++ tmp |= BIT(2);
++ rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
++
++ /* 2. Check and pause TRX DMA
++ * write 0x284 bit[18] = 1'b1
++ * write 0x301 = 0xFF
++ */
++ tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
++ if (tmp & BIT(2)) {
++ /* Already pause before the function for another reason. */
++ release_mac_rx_pause = false;
++ } else {
++ rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, (tmp | BIT(2)));
++ release_mac_rx_pause = true;
++ }
++
++ backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 1);
++ if (backup_pcie_dma_pause != 0xFF)
++ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFF);
++
++ if (mac_power_on) {
++ /* 3. reset TRX function
++ * write 0x100 = 0x00
++ */
++ rtl_write_byte(rtlpriv, REG_CR, 0);
++ }
++
++ /* 4. Reset PCIe DMA
++ * write 0x003 bit[0] = 0
++ */
++ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
++ tmp &= ~(BIT(0));
++ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp);
++
++ /* 5. Enable PCIe DMA
++ * write 0x003 bit[0] = 1
++ */
++ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
++ tmp |= BIT(0);
++ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp);
++
++ if (mac_power_on) {
++ /* 6. enable TRX function
++ * write 0x100 = 0xFF
++ */
++ rtl_write_byte(rtlpriv, REG_CR, 0xFF);
++
++ /* We should init LLT & RQPN and
++ * prepare Tx/Rx descrptor address later
++ * because MAC function is reset.
++ */
++ }
++
++ /* 7. Restore PCIe autoload down bit
++ * write 0xF8 bit[17] = 1'b1
++ */
++ tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2);
++ tmp |= BIT(1);
++ rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2, tmp);
++
++ /* In MAC power on state, BB and RF maybe in ON state,
++ * if we release TRx DMA here
++ * it will cause packets to be started to Tx/Rx,
++ * so we release Tx/Rx DMA later.
++ */
++ if (!mac_power_on) {
++ /* 8. release TRX DMA
++ * write 0x284 bit[18] = 1'b0
++ * write 0x301 = 0x00
++ */
++ if (release_mac_rx_pause) {
++ tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
++ rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL,
++ (tmp & (~BIT(2))));
++ }
++ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1,
++ backup_pcie_dma_pause);
++ }
++
++ /* 9. lock system register
++ * write 0xCC bit[2] = 1'b0
++ */
++ tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2);
++ tmp &= ~(BIT(2));
++ rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp);
++}
++
+ int rtl92ee_hw_init(struct ieee80211_hw *hw)
+ {
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+@@ -1188,6 +1295,13 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
+ rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_92E;
+ }
+
++ if (_rtl8192ee_check_pcie_dma_hang(rtlpriv)) {
++ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "92ee dma hang!\n");
++ _rtl8192ee_reset_pcie_interface_dma(rtlpriv,
++ rtlhal->mac_func_enable);
++ rtlhal->mac_func_enable = false;
++ }
++
+ rtstatus = _rtl92ee_init_mac(hw);
+
+ rtl_write_byte(rtlpriv, 0x577, 0x03);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
+index 3f2a9596e7cd..1eaa1fab550d 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
+@@ -77,9 +77,11 @@
+ #define REG_HIMRE 0x00B8
+ #define REG_HISRE 0x00BC
+
++#define REG_PMC_DBG_CTRL2 0x00CC
+ #define REG_EFUSE_ACCESS 0x00CF
+ #define REG_HPON_FSM 0x00EC
+ #define REG_SYS_CFG1 0x00F0
++#define REG_MAC_PHY_CTRL_NORMAL 0x00F8
+ #define REG_SYS_CFG2 0x00FC
+
+ #define REG_CR 0x0100
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
+index 2fcbef1d029f..00690040be37 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
+@@ -512,6 +512,10 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr;
+ u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+
++ if (GET_RX_STATUS_DESC_RPT_SEL(pdesc) == 0)
++ status->packet_report_type = NORMAL_RX;
++ else
++ status->packet_report_type = C2H_PACKET;
+ status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
+ status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+@@ -654,14 +658,7 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
+ if (!start_rx)
+ return 0;
+
+- if ((last_read_point > (RX_DESC_NUM_92E / 2)) &&
+- (read_point <= (RX_DESC_NUM_92E / 2))) {
+- remind_cnt = RX_DESC_NUM_92E - write_point;
+- } else {
+- remind_cnt = (read_point >= write_point) ?
+- (read_point - write_point) :
+- (RX_DESC_NUM_92E - write_point + read_point);
+- }
++ remind_cnt = calc_fifo_space(read_point, write_point);
+
+ if (remind_cnt == 0)
+ return 0;
+@@ -1207,8 +1204,7 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
+ static u8 stop_report_cnt;
+ struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+
+- /*checking Read/Write Point each interrupt wastes CPU */
+- if (stop_report_cnt > 15 || !rtlpriv->link_info.busytraffic) {
++ {
+ u16 point_diff = 0;
+ u16 cur_tx_rp, cur_tx_wp;
+ u32 tmpu32 = 0;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
+index 6f9be1c7515c..8effef9b13dd 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
+@@ -542,6 +542,8 @@
+ LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+ #define GET_RX_DESC_RX_IS_QOS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
++#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
++ LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
+
+ #define GET_RX_DESC_RXMCS(__pdesc) \
+ LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 2b3c89425bb5..b720e7816a72 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -1389,7 +1389,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
+ if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
+ return -ENOMEM;
+
+- if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
++ if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device,
+ (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
+diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
+index f955edb9bea7..eb0ad530dc43 100644
+--- a/drivers/pci/rom.c
++++ b/drivers/pci/rom.c
+@@ -71,6 +71,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
+ {
+ void __iomem *image;
+ int last_image;
++ unsigned length;
+
+ image = rom;
+ do {
+@@ -93,9 +94,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
+ if (readb(pds + 3) != 'R')
+ break;
+ last_image = readb(pds + 21) & 0x80;
+- /* this length is reliable */
+- image += readw(pds + 16) * 512;
+- } while (!last_image);
++ length = readw(pds + 16);
++ image += length * 512;
++ } while (length && !last_image);
+
+ /* never return a size larger than the PCI resource window */
+ /* there are known ROMs that get the size wrong */
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index ff765d8e1a09..ce364a41842a 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -353,6 +353,7 @@ struct samsung_quirks {
+ bool broken_acpi_video;
+ bool four_kbd_backlight_levels;
+ bool enable_kbd_backlight;
++ bool use_native_backlight;
+ };
+
+ static struct samsung_quirks samsung_unknown = {};
+@@ -361,6 +362,10 @@ static struct samsung_quirks samsung_broken_acpi_video = {
+ .broken_acpi_video = true,
+ };
+
++static struct samsung_quirks samsung_use_native_backlight = {
++ .use_native_backlight = true,
++};
++
+ static struct samsung_quirks samsung_np740u3e = {
+ .four_kbd_backlight_levels = true,
+ .enable_kbd_backlight = true,
+@@ -1507,7 +1512,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N150P"),
+ },
+- .driver_data = &samsung_broken_acpi_video,
++ .driver_data = &samsung_use_native_backlight,
+ },
+ {
+ .callback = samsung_dmi_matched,
+@@ -1517,7 +1522,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
+ },
+- .driver_data = &samsung_broken_acpi_video,
++ .driver_data = &samsung_use_native_backlight,
+ },
+ {
+ .callback = samsung_dmi_matched,
+@@ -1557,7 +1562,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N250P"),
+ },
+- .driver_data = &samsung_broken_acpi_video,
++ .driver_data = &samsung_use_native_backlight,
+ },
+ {
+ .callback = samsung_dmi_matched,
+@@ -1616,6 +1621,15 @@ static int __init samsung_init(void)
+ pr_info("Disabling ACPI video driver\n");
+ acpi_video_unregister();
+ }
++
++ if (samsung->quirks->use_native_backlight) {
++ pr_info("Using native backlight driver\n");
++ /* Tell acpi-video to not handle the backlight */
++ acpi_video_dmi_promote_vendor();
++ acpi_video_unregister();
++ /* And also do not handle it ourselves */
++ samsung->handle_backlight = false;
++ }
+ #endif
+
+ ret = samsung_platform_init(samsung);
+diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c
+index de029bbc1cc1..5ccca8743ce6 100644
+--- a/drivers/power/88pm860x_charger.c
++++ b/drivers/power/88pm860x_charger.c
+@@ -711,6 +711,7 @@ static int pm860x_charger_probe(struct platform_device *pdev)
+ return 0;
+
+ out_irq:
++ power_supply_unregister(&info->usb);
+ while (--i >= 0)
+ free_irq(info->irq[i], info);
+ out:
+diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
+index ad3ff8fbfbbb..e4c95e1a6733 100644
+--- a/drivers/power/bq24190_charger.c
++++ b/drivers/power/bq24190_charger.c
+@@ -929,7 +929,7 @@ static void bq24190_charger_init(struct power_supply *charger)
+ charger->properties = bq24190_charger_properties;
+ charger->num_properties = ARRAY_SIZE(bq24190_charger_properties);
+ charger->supplied_to = bq24190_charger_supplied_to;
+- charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to);
++ charger->num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to);
+ charger->get_property = bq24190_charger_get_property;
+ charger->set_property = bq24190_charger_set_property;
+ charger->property_is_writeable = bq24190_charger_property_is_writeable;
+diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
+index 7536933d0ab9..e5deb11bf1d5 100644
+--- a/drivers/power/gpio-charger.c
++++ b/drivers/power/gpio-charger.c
+@@ -168,7 +168,7 @@ static int gpio_charger_suspend(struct device *dev)
+
+ if (device_may_wakeup(dev))
+ gpio_charger->wakeup_enabled =
+- enable_irq_wake(gpio_charger->irq);
++ !enable_irq_wake(gpio_charger->irq);
+
+ return 0;
+ }
+@@ -178,7 +178,7 @@ static int gpio_charger_resume(struct device *dev)
+ struct platform_device *pdev = to_platform_device(dev);
+ struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
+
+- if (gpio_charger->wakeup_enabled)
++ if (device_may_wakeup(dev) && gpio_charger->wakeup_enabled)
+ disable_irq_wake(gpio_charger->irq);
+ power_supply_changed(&gpio_charger->charger);
+
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 5e881e5e67b6..6e503802947a 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3556,7 +3556,6 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
+ int i;
+ u32 max_cmd;
+ u32 sge_sz;
+- u32 sgl_sz;
+ u32 total_sz;
+ u32 frame_count;
+ struct megasas_cmd *cmd;
+@@ -3575,24 +3574,23 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
+ }
+
+ /*
+- * Calculated the number of 64byte frames required for SGL
+- */
+- sgl_sz = sge_sz * instance->max_num_sge;
+- frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
+- frame_count = 15;
+-
+- /*
+- * We need one extra frame for the MFI command
++ * For MFI controllers.
++ * max_num_sge = 60
++ * max_sge_sz = 16 byte (sizeof megasas_sge_skinny)
++ * Total 960 byte (15 MFI frame of 64 byte)
++ *
++ * Fusion adapter require only 3 extra frame.
++ * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
++ * max_sge_sz = 12 byte (sizeof megasas_sge64)
++ * Total 192 byte (3 MFI frame of 64 byte)
+ */
+- frame_count++;
+-
++ frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
+ total_sz = MEGAMFI_FRAME_SIZE * frame_count;
+ /*
+ * Use DMA pool facility provided by PCI layer
+ */
+ instance->frame_dma_pool = pci_pool_create("megasas frame pool",
+- instance->pdev, total_sz, 64,
+- 0);
++ instance->pdev, total_sz, 256, 0);
+
+ if (!instance->frame_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
+index 0f66d0ef0b26..7d2d424bc8d0 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
+@@ -170,6 +170,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+ struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
+ struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
+ int i;
++ u16 ld_count;
+
+
+ struct MR_DRV_RAID_MAP_ALL *drv_map =
+@@ -189,9 +190,10 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+ fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
+ fusion->ld_map[(instance->map_id & 1)];
+ pFwRaidMap = &fw_map_old->raidMap;
++ ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
+
+ #if VD_EXT_DEBUG
+- for (i = 0; i < le16_to_cpu(pFwRaidMap->ldCount); i++) {
++ for (i = 0; i < ld_count; i++) {
+ dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
+ "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
+ instance->unique_id, i,
+@@ -203,12 +205,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+
+ memset(drv_map, 0, fusion->drv_map_sz);
+ pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+- pDrvRaidMap->ldCount = (__le16)pFwRaidMap->ldCount;
++ pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+ pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
+ for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
+ pDrvRaidMap->ldTgtIdToLd[i] =
+ (u8)pFwRaidMap->ldTgtIdToLd[i];
+- for (i = 0; i < le16_to_cpu(pDrvRaidMap->ldCount); i++) {
++ for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
++ i < MAX_LOGICAL_DRIVES_EXT; i++)
++ pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
++ for (i = 0; i < ld_count; i++) {
+ pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
+ #if VD_EXT_DEBUG
+ dev_dbg(&instance->pdev->dev,
+@@ -250,7 +255,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+ struct LD_LOAD_BALANCE_INFO *lbInfo;
+ PLD_SPAN_INFO ldSpanInfo;
+ struct MR_LD_RAID *raid;
+- int ldCount, num_lds;
++ u16 ldCount, num_lds;
+ u16 ld;
+ u32 expected_size;
+
+@@ -354,7 +359,7 @@ static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+- if (ld >= MAX_LOGICAL_DRIVES_EXT)
++ if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
+@@ -1155,7 +1160,7 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+- if (ld >= MAX_LOGICAL_DRIVES_EXT)
++ if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 9d9c27cd4687..554395634592 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -101,6 +101,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
+ {
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
++
++ instance->mask_interrupts = 0;
+ /* For Thunderbolt/Invader also clear intr on enable */
+ writel(~0, ®s->outbound_intr_status);
+ readl(®s->outbound_intr_status);
+@@ -109,7 +111,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+- instance->mask_interrupts = 0;
+ }
+
+ /**
+@@ -696,12 +697,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
+ cpu_to_le32(lower_32_bits(ioc_init_handle));
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
+
+- req_desc.Words = 0;
++ req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
++ req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
+ req_desc.MFAIo.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
+- MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+- cpu_to_le32s((u32 *)&req_desc.MFAIo);
+- req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);
++ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+ /*
+ * disable the intr before firing the init frame
+@@ -1753,9 +1753,19 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
+ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
+ goto NonFastPath;
+
++ /*
++ * For older firmware, Driver should not access ldTgtIdToLd
++ * beyond index 127 and for Extended VD firmware, ldTgtIdToLd
++ * should not go beyond 255.
++ */
++
++ if ((!fusion->fast_path_io) ||
++ (device_id >= instance->fw_supported_vd_count))
++ goto NonFastPath;
++
+ ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+- if ((ld >= instance->fw_supported_vd_count) ||
+- (!fusion->fast_path_io))
++
++ if (ld >= instance->fw_supported_vd_count)
+ goto NonFastPath;
+
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
+index 0d183d521bdd..a7f216f27ae3 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
+@@ -304,14 +304,9 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
+ * MPT RAID MFA IO Descriptor.
+ */
+ struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+-#if defined(__BIG_ENDIAN_BITFIELD)
+- u32 MessageAddress1:24; /* bits 31:8*/
+- u32 RequestFlags:8;
+-#else
+ u32 RequestFlags:8;
+- u32 MessageAddress1:24; /* bits 31:8*/
+-#endif
+- u32 MessageAddress2; /* bits 61:32 */
++ u32 MessageAddress1:24;
++ u32 MessageAddress2;
+ };
+
+ /* Default Request Descriptor */
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 60354449d9ed..843594c2583d 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1376,6 +1376,17 @@ sg_rq_end_io(struct request *rq, int uptodate)
+ }
+ /* Rely on write phase to clean out srp status values, so no "else" */
+
++ /*
++ * Free the request as soon as it is complete so that its resources
++ * can be reused without waiting for userspace to read() the
++ * result. But keep the associated bio (if any) around until
++ * blk_rq_unmap_user() can be called from user context.
++ */
++ srp->rq = NULL;
++ if (rq->cmd != rq->__cmd)
++ kfree(rq->cmd);
++ __blk_put_request(rq->q, rq);
++
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ if (unlikely(srp->orphan)) {
+ if (sfp->keep_orphan)
+@@ -1710,7 +1721,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
+ return -ENOMEM;
+ }
+
+- rq = blk_get_request(q, rw, GFP_ATOMIC);
++ /*
++ * NOTE
++ *
++ * With scsi-mq enabled, there are a fixed number of preallocated
++ * requests equal in number to shost->can_queue. If all of the
++ * preallocated requests are already in use, then using GFP_ATOMIC with
++ * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
++ * will cause blk_get_request() to sleep until an active command
++ * completes, freeing up a request. Neither option is ideal, but
++ * GFP_KERNEL is the better choice to prevent userspace from getting an
++ * unexpected EWOULDBLOCK.
++ *
++ * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
++ * does not sleep except under memory pressure.
++ */
++ rq = blk_get_request(q, rw, GFP_KERNEL);
+ if (IS_ERR(rq)) {
+ kfree(long_cmdp);
+ return PTR_ERR(rq);
+@@ -1803,10 +1829,10 @@ sg_finish_rem_req(Sg_request *srp)
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_finish_rem_req: res_used=%d\n",
+ (int) srp->res_used));
+- if (srp->rq) {
+- if (srp->bio)
+- ret = blk_rq_unmap_user(srp->bio);
++ if (srp->bio)
++ ret = blk_rq_unmap_user(srp->bio);
+
++ if (srp->rq) {
+ if (srp->rq->cmd != srp->rq->__cmd)
+ kfree(srp->rq->cmd);
+ blk_put_request(srp->rq);
+diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
+index 601e9cc61e98..bb2890e79ca0 100644
+--- a/drivers/target/iscsi/iscsi_target_tq.c
++++ b/drivers/target/iscsi/iscsi_target_tq.c
+@@ -24,36 +24,22 @@
+ #include "iscsi_target_tq.h"
+ #include "iscsi_target.h"
+
+-static LIST_HEAD(active_ts_list);
+ static LIST_HEAD(inactive_ts_list);
+-static DEFINE_SPINLOCK(active_ts_lock);
+ static DEFINE_SPINLOCK(inactive_ts_lock);
+ static DEFINE_SPINLOCK(ts_bitmap_lock);
+
+-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
+-{
+- spin_lock(&active_ts_lock);
+- list_add_tail(&ts->ts_list, &active_ts_list);
+- iscsit_global->active_ts++;
+- spin_unlock(&active_ts_lock);
+-}
+-
+ static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
+ {
++ if (!list_empty(&ts->ts_list)) {
++ WARN_ON(1);
++ return;
++ }
+ spin_lock(&inactive_ts_lock);
+ list_add_tail(&ts->ts_list, &inactive_ts_list);
+ iscsit_global->inactive_ts++;
+ spin_unlock(&inactive_ts_lock);
+ }
+
+-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
+-{
+- spin_lock(&active_ts_lock);
+- list_del(&ts->ts_list);
+- iscsit_global->active_ts--;
+- spin_unlock(&active_ts_lock);
+-}
+-
+ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
+ {
+ struct iscsi_thread_set *ts;
+@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
+
+ ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
+
+- list_del(&ts->ts_list);
++ list_del_init(&ts->ts_list);
+ iscsit_global->inactive_ts--;
+ spin_unlock(&inactive_ts_lock);
+
+@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)
+
+ void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
+ {
+- iscsi_add_ts_to_active_list(ts);
+-
+ spin_lock_bh(&ts->ts_state_lock);
+ conn->thread_set = ts;
+ ts->conn = conn;
+@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
+
+ if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+- iscsi_del_ts_from_active_list(ts);
+
+ if (!iscsit_global->in_shutdown)
+ iscsi_deallocate_extra_thread_sets();
+@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
+
+ if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+- iscsi_del_ts_from_active_list(ts);
+
+ if (!iscsit_global->in_shutdown)
+ iscsi_deallocate_extra_thread_sets();
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 7c4447a5c0f4..082304dcbe8c 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -210,6 +210,9 @@ static int pty_signal(struct tty_struct *tty, int sig)
+ unsigned long flags;
+ struct pid *pgrp;
+
++ if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP)
++ return -EINVAL;
++
+ if (tty->link) {
+ spin_lock_irqsave(&tty->link->ctrl_lock, flags);
+ pgrp = get_pid(tty->link->pgrp);
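
The check added to pty_signal() above whitelists the three job-control signals a terminal can legitimately inject. The same check as a stand-alone user-space function, for illustration only; check_pty_signal is an invented name:

#include <signal.h>
#include <stdio.h>
#include <errno.h>

static int check_pty_signal(int sig)
{
        /* Only signals a real terminal line discipline can generate pass. */
        if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP)
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("SIGINT:  %d\n", check_pty_signal(SIGINT));      /* accepted, 0 */
        printf("SIGKILL: %d\n", check_pty_signal(SIGKILL));     /* rejected, -EINVAL */
        return 0;
}
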
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index edde3eca055d..6ee5c6cefac0 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -2577,7 +2577,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
+
+ ret = atmel_init_port(port, pdev);
+ if (ret)
+- goto err;
++ goto err_clear_bit;
+
+ if (!atmel_use_pdc_rx(&port->uart)) {
+ ret = -ENOMEM;
+@@ -2626,6 +2626,8 @@ err_alloc_ring:
+ clk_put(port->clk);
+ port->clk = NULL;
+ }
++err_clear_bit:
++ clear_bit(port->uart.line, atmel_ports_in_use);
+ err:
+ return ret;
+ }
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 6dd53af546a3..eb9bc7e1dbaa 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -506,9 +506,6 @@ static inline void lpuart_prepare_rx(struct lpuart_port *sport)
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+- init_timer(&sport->lpuart_timer);
+- sport->lpuart_timer.function = lpuart_timer_func;
+- sport->lpuart_timer.data = (unsigned long)sport;
+ sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
+ add_timer(&sport->lpuart_timer);
+
+@@ -758,18 +755,18 @@ out:
+ static irqreturn_t lpuart_int(int irq, void *dev_id)
+ {
+ struct lpuart_port *sport = dev_id;
+- unsigned char sts;
++ unsigned char sts, crdma;
+
+ sts = readb(sport->port.membase + UARTSR1);
++ crdma = readb(sport->port.membase + UARTCR5);
+
+- if (sts & UARTSR1_RDRF) {
++ if (sts & UARTSR1_RDRF && !(crdma & UARTCR5_RDMAS)) {
+ if (sport->lpuart_dma_use)
+ lpuart_prepare_rx(sport);
+ else
+ lpuart_rxint(irq, dev_id);
+ }
+- if (sts & UARTSR1_TDRE &&
+- !(readb(sport->port.membase + UARTCR5) & UARTCR5_TDMAS)) {
++ if (sts & UARTSR1_TDRE && !(crdma & UARTCR5_TDMAS)) {
+ if (sport->lpuart_dma_use)
+ lpuart_pio_tx(sport);
+ else
+@@ -1106,7 +1103,10 @@ static int lpuart_startup(struct uart_port *port)
+ sport->lpuart_dma_use = false;
+ } else {
+ sport->lpuart_dma_use = true;
++ setup_timer(&sport->lpuart_timer, lpuart_timer_func,
++ (unsigned long)sport);
+ temp = readb(port->membase + UARTCR5);
++ temp &= ~UARTCR5_RDMAS;
+ writeb(temp | UARTCR5_TDMAS, port->membase + UARTCR5);
+ }
+
+@@ -1180,6 +1180,8 @@ static void lpuart_shutdown(struct uart_port *port)
+ devm_free_irq(port->dev, port->irq, sport);
+
+ if (sport->lpuart_dma_use) {
++ del_timer_sync(&sport->lpuart_timer);
++
+ lpuart_dma_tx_free(port);
+ lpuart_dma_rx_free(port);
+ }
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index b33b00b386de..53c25bca7d05 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -498,6 +498,7 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
+ #endif
+ if (DO_UPDATE(vc))
+ do_update_region(vc, (unsigned long) p, count);
++ notify_update(vc);
+ }
+
+ /* used by selection: complement pointer position */
+@@ -514,6 +515,7 @@ void complement_pos(struct vc_data *vc, int offset)
+ scr_writew(old, screenpos(vc, old_offset, 1));
+ if (DO_UPDATE(vc))
+ vc->vc_sw->con_putc(vc, old, oldy, oldx);
++ notify_update(vc);
+ }
+
+ old_offset = offset;
+@@ -531,8 +533,8 @@ void complement_pos(struct vc_data *vc, int offset)
+ oldy = (offset >> 1) / vc->vc_cols;
+ vc->vc_sw->con_putc(vc, new, oldy, oldx);
+ }
++ notify_update(vc);
+ }
+-
+ }
+
+ static void insert_char(struct vc_data *vc, unsigned int nr)
+diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
+index 684ef70dc09d..506b969ea7fd 100644
+--- a/drivers/usb/core/buffer.c
++++ b/drivers/usb/core/buffer.c
+@@ -22,17 +22,25 @@
+ */
+
+ /* FIXME tune these based on pool statistics ... */
+-static const size_t pool_max[HCD_BUFFER_POOLS] = {
+- /* platforms without dma-friendly caches might need to
+- * prevent cacheline sharing...
+- */
+- 32,
+- 128,
+- 512,
+- PAGE_SIZE / 2
+- /* bigger --> allocate pages */
++static size_t pool_max[HCD_BUFFER_POOLS] = {
++ 32, 128, 512, 2048,
+ };
+
++void __init usb_init_pool_max(void)
++{
++ /*
++ * The pool_max values must never be smaller than
++ * ARCH_KMALLOC_MINALIGN.
++ */
++ if (ARCH_KMALLOC_MINALIGN <= 32)
++ ; /* Original value is okay */
++ else if (ARCH_KMALLOC_MINALIGN <= 64)
++ pool_max[0] = 64;
++ else if (ARCH_KMALLOC_MINALIGN <= 128)
++ pool_max[0] = 0; /* Don't use this pool */
++ else
++ BUILD_BUG(); /* We don't allow this */
++}
+
+ /* SETUP primitives */
+
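
usb_init_pool_max() above adjusts the smallest DMA pool so that no pool block is smaller than the platform's minimum kmalloc alignment. A plain user-space sketch of the same selection rule; init_pool_max and the alignment value passed in are stand-ins, not the USB core API:

#include <stdio.h>

#define HCD_BUFFER_POOLS 4

static size_t pool_max[HCD_BUFFER_POOLS] = { 32, 128, 512, 2048 };

/* Bump or disable the smallest pool if the allocator's minimum
 * alignment would otherwise exceed that pool's block size. */
static void init_pool_max(size_t min_align)
{
        if (min_align <= 32)
                ;                       /* original sizes are fine */
        else if (min_align <= 64)
                pool_max[0] = 64;
        else if (min_align <= 128)
                pool_max[0] = 0;        /* don't use this pool at all */
}

int main(void)
{
        init_pool_max(64);              /* pretend the minimum alignment is 64 */
        for (int i = 0; i < HCD_BUFFER_POOLS; i++)
                printf("pool %d: %zu bytes\n", i, pool_max[i]);
        return 0;
}
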
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 9bffd26cea05..d7a6d8bc510b 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -275,21 +275,6 @@ static int usb_unbind_device(struct device *dev)
+ return 0;
+ }
+
+-/*
+- * Cancel any pending scheduled resets
+- *
+- * [see usb_queue_reset_device()]
+- *
+- * Called after unconfiguring / when releasing interfaces. See
+- * comments in __usb_queue_reset_device() regarding
+- * udev->reset_running.
+- */
+-static void usb_cancel_queued_reset(struct usb_interface *iface)
+-{
+- if (iface->reset_running == 0)
+- cancel_work_sync(&iface->reset_ws);
+-}
+-
+ /* called from driver core with dev locked */
+ static int usb_probe_interface(struct device *dev)
+ {
+@@ -380,7 +365,6 @@ static int usb_probe_interface(struct device *dev)
+ usb_set_intfdata(intf, NULL);
+ intf->needs_remote_wakeup = 0;
+ intf->condition = USB_INTERFACE_UNBOUND;
+- usb_cancel_queued_reset(intf);
+
+ /* If the LPM disable succeeded, balance the ref counts. */
+ if (!lpm_disable_error)
+@@ -425,7 +409,6 @@ static int usb_unbind_interface(struct device *dev)
+ usb_disable_interface(udev, intf, false);
+
+ driver->disconnect(intf);
+- usb_cancel_queued_reset(intf);
+
+ /* Free streams */
+ for (i = 0, j = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+@@ -1801,6 +1784,18 @@ static int autosuspend_check(struct usb_device *udev)
+ dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n");
+ return -EOPNOTSUPP;
+ }
++
++ /*
++ * If the device is a direct child of the root hub and the HCD
++ * doesn't handle wakeup requests, don't allow autosuspend when
++ * wakeup is needed.
++ */
++ if (w && udev->parent == udev->bus->root_hub &&
++ bus_to_hcd(udev->bus)->cant_recv_wakeups) {
++ dev_dbg(&udev->dev, "HCD doesn't handle wakeup requests\n");
++ return -EOPNOTSUPP;
++ }
++
+ udev->do_remote_wakeup = w;
+ return 0;
+ }
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index a6efb4184f2b..0009fc847eee 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1618,6 +1618,7 @@ static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
+ int usb_hcd_unlink_urb (struct urb *urb, int status)
+ {
+ struct usb_hcd *hcd;
++ struct usb_device *udev = urb->dev;
+ int retval = -EIDRM;
+ unsigned long flags;
+
+@@ -1629,20 +1630,19 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
+ spin_lock_irqsave(&hcd_urb_unlink_lock, flags);
+ if (atomic_read(&urb->use_count) > 0) {
+ retval = 0;
+- usb_get_dev(urb->dev);
++ usb_get_dev(udev);
+ }
+ spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags);
+ if (retval == 0) {
+ hcd = bus_to_hcd(urb->dev->bus);
+ retval = unlink1(hcd, urb, status);
+- usb_put_dev(urb->dev);
++ if (retval == 0)
++ retval = -EINPROGRESS;
++ else if (retval != -EIDRM && retval != -EBUSY)
++ dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
++ urb, retval);
++ usb_put_dev(udev);
+ }
+-
+- if (retval == 0)
+- retval = -EINPROGRESS;
+- else if (retval != -EIDRM && retval != -EBUSY)
+- dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n",
+- urb, retval);
+ return retval;
+ }
+
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index b649fef2e35d..2246954d7df3 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -5591,26 +5591,19 @@ EXPORT_SYMBOL_GPL(usb_reset_device);
+ * possible; depending on how the driver attached to each interface
+ * handles ->pre_reset(), the second reset might happen or not.
+ *
+- * - If a driver is unbound and it had a pending reset, the reset will
+- * be cancelled.
++ * - If the reset is delayed so long that the interface is unbound from
++ * its driver, the reset will be skipped.
+ *
+- * - This function can be called during .probe() or .disconnect()
+- * times. On return from .disconnect(), any pending resets will be
+- * cancelled.
+- *
+- * There is no no need to lock/unlock the @reset_ws as schedule_work()
+- * does its own.
+- *
+- * NOTE: We don't do any reference count tracking because it is not
+- * needed. The lifecycle of the work_struct is tied to the
+- * usb_interface. Before destroying the interface we cancel the
+- * work_struct, so the fact that work_struct is queued and or
+- * running means the interface (and thus, the device) exist and
+- * are referenced.
++ * - This function can be called during .probe(). It can also be called
++ * during .disconnect(), but doing so is pointless because the reset
++ * will not occur. If you really want to reset the device during
++ * .disconnect(), call usb_reset_device() directly -- but watch out
++ * for nested unbinding issues!
+ */
+ void usb_queue_reset_device(struct usb_interface *iface)
+ {
+- schedule_work(&iface->reset_ws);
++ if (schedule_work(&iface->reset_ws))
++ usb_get_intf(iface);
+ }
+ EXPORT_SYMBOL_GPL(usb_queue_reset_device);
+
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index f7b7713cfb2a..f368d2053da5 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1551,6 +1551,7 @@ static void usb_release_interface(struct device *dev)
+ altsetting_to_usb_interface_cache(intf->altsetting);
+
+ kref_put(&intfc->ref, usb_release_interface_cache);
++ usb_put_dev(interface_to_usbdev(intf));
+ kfree(intf);
+ }
+
+@@ -1626,24 +1627,6 @@ static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
+
+ /*
+ * Internal function to queue a device reset
+- *
+- * This is initialized into the workstruct in 'struct
+- * usb_device->reset_ws' that is launched by
+- * message.c:usb_set_configuration() when initializing each 'struct
+- * usb_interface'.
+- *
+- * It is safe to get the USB device without reference counts because
+- * the life cycle of @iface is bound to the life cycle of @udev. Then,
+- * this function will be ran only if @iface is alive (and before
+- * freeing it any scheduled instances of it will have been cancelled).
+- *
+- * We need to set a flag (usb_dev->reset_running) because when we call
+- * the reset, the interfaces might be unbound. The current interface
+- * cannot try to remove the queued work as it would cause a deadlock
+- * (you cannot remove your work from within your executing
+- * workqueue). This flag lets it know, so that
+- * usb_cancel_queued_reset() doesn't try to do it.
+- *
+ * See usb_queue_reset_device() for more details
+ */
+ static void __usb_queue_reset_device(struct work_struct *ws)
+@@ -1655,11 +1638,10 @@ static void __usb_queue_reset_device(struct work_struct *ws)
+
+ rc = usb_lock_device_for_reset(udev, iface);
+ if (rc >= 0) {
+- iface->reset_running = 1;
+ usb_reset_device(udev);
+- iface->reset_running = 0;
+ usb_unlock_device(udev);
+ }
++ usb_put_intf(iface); /* Undo _get_ in usb_queue_reset_device() */
+ }
+
+
+@@ -1854,6 +1836,7 @@ free_interfaces:
+ dev_set_name(&intf->dev, "%d-%s:%d.%d",
+ dev->bus->busnum, dev->devpath,
+ configuration, alt->desc.bInterfaceNumber);
++ usb_get_dev(dev);
+ }
+ kfree(new_interfaces);
+
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 2dd2362198d2..29ee9363faa5 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -1051,6 +1051,7 @@ static int __init usb_init(void)
+ pr_info("%s: USB support disabled\n", usbcore_name);
+ return 0;
+ }
++ usb_init_pool_max();
+
+ retval = usb_debugfs_init();
+ if (retval)
+diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
+index e752c3098f38..d2a856528471 100644
+--- a/drivers/usb/host/isp1760-hcd.c
++++ b/drivers/usb/host/isp1760-hcd.c
+@@ -2247,6 +2247,9 @@ struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
+ hcd->rsrc_start = res_start;
+ hcd->rsrc_len = res_len;
+
++ /* This driver doesn't support wakeup requests */
++ hcd->cant_recv_wakeups = 1;
++
+ ret = usb_add_hcd(hcd, irq, irqflags);
+ if (ret)
+ goto err_unmap;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f4c56fc1a9f6..f40c856ff758 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -56,6 +56,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
+ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
++ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
+ { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
+diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
+index f8bb36f9d9ce..bf1940706422 100644
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -105,10 +105,16 @@ static void do_suspend(void)
+
+ err = freeze_processes();
+ if (err) {
+- pr_err("%s: freeze failed %d\n", __func__, err);
++ pr_err("%s: freeze processes failed %d\n", __func__, err);
+ goto out;
+ }
+
++ err = freeze_kernel_threads();
++ if (err) {
++ pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
++ goto out_thaw;
++ }
++
+ err = dpm_suspend_start(PMSG_FREEZE);
+ if (err) {
+ pr_err("%s: dpm_suspend_start %d\n", __func__, err);
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index 3e32146472a5..d30c6b246342 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -712,12 +712,11 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
+ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+ {
+ struct vscsiif_back_ring *ring = &info->ring;
+- struct vscsiif_request *ring_req;
++ struct vscsiif_request ring_req;
+ struct vscsibk_pend *pending_req;
+ RING_IDX rc, rp;
+ int err, more_to_do;
+ uint32_t result;
+- uint8_t act;
+
+ rc = ring->req_cons;
+ rp = ring->sring->req_prod;
+@@ -738,11 +737,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+ if (!pending_req)
+ return 1;
+
+- ring_req = RING_GET_REQUEST(ring, rc);
++ ring_req = *RING_GET_REQUEST(ring, rc);
+ ring->req_cons = ++rc;
+
+- act = ring_req->act;
+- err = prepare_pending_reqs(info, ring_req, pending_req);
++ err = prepare_pending_reqs(info, &ring_req, pending_req);
+ if (err) {
+ switch (err) {
+ case -ENODEV:
+@@ -758,9 +756,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+ return 1;
+ }
+
+- switch (act) {
++ switch (ring_req.act) {
+ case VSCSIIF_ACT_SCSI_CDB:
+- if (scsiback_gnttab_data_map(ring_req, pending_req)) {
++ if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
+ scsiback_fast_flush_area(pending_req);
+ scsiback_do_resp_with_sense(NULL,
+ DRIVER_ERROR << 24, 0, pending_req);
+@@ -771,7 +769,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
+ break;
+ case VSCSIIF_ACT_SCSI_ABORT:
+ scsiback_device_action(pending_req, TMR_ABORT_TASK,
+- ring_req->ref_rqid);
++ ring_req.ref_rqid);
+ break;
+ case VSCSIIF_ACT_SCSI_RESET:
+ scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
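
The xen-scsiback change above copies the guest-visible ring slot into a local struct before it is validated and dispatched, so later uses cannot observe values the frontend rewrites in the meantime. A tiny sketch of the copy-then-use pattern; the structure and fields are invented for illustration:

#include <stdio.h>
#include <string.h>

struct ring_request {
        int act;
        int ref_rqid;
};

/* Imagine this slot lives in a page an untrusted peer can still write to. */
static struct ring_request shared_slot = { .act = 1, .ref_rqid = 42 };

int main(void)
{
        struct ring_request req;

        /* Take one snapshot; every later check and use sees the same values. */
        memcpy(&req, &shared_slot, sizeof(req));

        printf("act=%d ref_rqid=%d\n", req.act, req.ref_rqid);
        return 0;
}
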
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index d8fc0605b9d2..e1efcaa1b245 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -554,11 +554,12 @@ out:
+
+ static unsigned long randomize_stack_top(unsigned long stack_top)
+ {
+- unsigned int random_variable = 0;
++ unsigned long random_variable = 0;
+
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE)) {
+- random_variable = get_random_int() & STACK_RND_MASK;
++ random_variable = (unsigned long) get_random_int();
++ random_variable &= STACK_RND_MASK;
+ random_variable <<= PAGE_SHIFT;
+ }
+ #ifdef CONFIG_STACK_GROWSUP
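
randomize_stack_top() above is changed to mask and shift in an unsigned long; with a 32-bit intermediate, any architecture whose STACK_RND_MASK is wider than (32 - PAGE_SHIFT) bits silently loses entropy. A small demonstration of the difference, where PAGE_SHIFT of 12 and a 22-bit mask are example values:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12              /* 4 KiB pages, assumed for this example */
#define STACK_RND_MASK  0x3fffff        /* 22 random bits, as on some 64-bit arches */

int main(void)
{
        uint32_t rnd = 0x3fffff;        /* worst-case value from the RNG */

        uint32_t narrow = rnd & STACK_RND_MASK;
        narrow <<= PAGE_SHIFT;          /* 32-bit shift: result needs 34 bits, top bits are lost */

        uint64_t wide = (uint64_t)(rnd & STACK_RND_MASK);
        wide <<= PAGE_SHIFT;            /* 64-bit shift keeps the full randomization range */

        printf("32-bit result: 0x%08x\n", narrow);
        printf("64-bit result: 0x%010llx\n", (unsigned long long)wide);
        return 0;
}
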
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 150822ee0a0b..c81ce0c7c1a9 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -2609,32 +2609,23 @@ static int key_search(struct extent_buffer *b, struct btrfs_key *key,
+ return 0;
+ }
+
+-int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
++int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
+ u64 iobjectid, u64 ioff, u8 key_type,
+ struct btrfs_key *found_key)
+ {
+ int ret;
+ struct btrfs_key key;
+ struct extent_buffer *eb;
+- struct btrfs_path *path;
++
++ ASSERT(path);
+
+ key.type = key_type;
+ key.objectid = iobjectid;
+ key.offset = ioff;
+
+- if (found_path == NULL) {
+- path = btrfs_alloc_path();
+- if (!path)
+- return -ENOMEM;
+- } else
+- path = found_path;
+-
+ ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
+- if ((ret < 0) || (found_key == NULL)) {
+- if (path != found_path)
+- btrfs_free_path(path);
++ if ((ret < 0) || (found_key == NULL))
+ return ret;
+- }
+
+ eb = path->nodes[0];
+ if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 97676731190c..b1709831b1a1 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1630,6 +1630,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+ bool check_ref)
+ {
+ struct btrfs_root *root;
++ struct btrfs_path *path;
+ int ret;
+
+ if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
+@@ -1669,8 +1670,14 @@ again:
+ if (ret)
+ goto fail;
+
+- ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
++ path = btrfs_alloc_path();
++ if (!path) {
++ ret = -ENOMEM;
++ goto fail;
++ }
++ ret = btrfs_find_item(fs_info->tree_root, path, BTRFS_ORPHAN_OBJECTID,
+ location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
++ btrfs_free_path(path);
+ if (ret < 0)
+ goto fail;
+ if (ret == 0)
+@@ -2496,7 +2503,7 @@ int open_ctree(struct super_block *sb,
+ features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+
+ if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
+- printk(KERN_ERR "BTRFS: has skinny extents\n");
++ printk(KERN_INFO "BTRFS: has skinny extents\n");
+
+ /*
+ * flag our filesystem as having big metadata blocks if
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 7d96cc961663..ee1c60454a5f 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -488,8 +488,20 @@ insert:
+ src_item = (struct btrfs_inode_item *)src_ptr;
+ dst_item = (struct btrfs_inode_item *)dst_ptr;
+
+- if (btrfs_inode_generation(eb, src_item) == 0)
++ if (btrfs_inode_generation(eb, src_item) == 0) {
++ struct extent_buffer *dst_eb = path->nodes[0];
++
++ if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
++ S_ISREG(btrfs_inode_mode(dst_eb, dst_item))) {
++ struct btrfs_map_token token;
++ u64 ino_size = btrfs_inode_size(eb, src_item);
++
++ btrfs_init_map_token(&token);
++ btrfs_set_token_inode_size(dst_eb, dst_item,
++ ino_size, &token);
++ }
+ goto no_copy;
++ }
+
+ if (overwrite_root &&
+ S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
+@@ -1257,10 +1269,19 @@ static int insert_orphan_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 offset)
+ {
+ int ret;
+- ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
++ struct btrfs_path *path;
++
++ path = btrfs_alloc_path();
++ if (!path)
++ return -ENOMEM;
++
++ ret = btrfs_find_item(root, path, BTRFS_ORPHAN_OBJECTID,
+ offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
+ if (ret > 0)
+ ret = btrfs_insert_orphan_item(trans, root, offset);
++
++ btrfs_free_path(path);
++
+ return ret;
+ }
+
+@@ -3209,7 +3230,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
+ static void fill_inode_item(struct btrfs_trans_handle *trans,
+ struct extent_buffer *leaf,
+ struct btrfs_inode_item *item,
+- struct inode *inode, int log_inode_only)
++ struct inode *inode, int log_inode_only,
++ u64 logged_isize)
+ {
+ struct btrfs_map_token token;
+
+@@ -3222,7 +3244,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
+ * to say 'update this inode with these values'
+ */
+ btrfs_set_token_inode_generation(leaf, item, 0, &token);
+- btrfs_set_token_inode_size(leaf, item, 0, &token);
++ btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
+ } else {
+ btrfs_set_token_inode_generation(leaf, item,
+ BTRFS_I(inode)->generation,
+@@ -3274,7 +3296,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
+ return ret;
+ inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_item);
+- fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
++ fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
+ btrfs_release_path(path);
+ return 0;
+ }
+@@ -3283,7 +3305,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
+ struct inode *inode,
+ struct btrfs_path *dst_path,
+ struct btrfs_path *src_path, u64 *last_extent,
+- int start_slot, int nr, int inode_only)
++ int start_slot, int nr, int inode_only,
++ u64 logged_isize)
+ {
+ unsigned long src_offset;
+ unsigned long dst_offset;
+@@ -3340,7 +3363,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
+ dst_path->slots[0],
+ struct btrfs_inode_item);
+ fill_inode_item(trans, dst_path->nodes[0], inode_item,
+- inode, inode_only == LOG_INODE_EXISTS);
++ inode, inode_only == LOG_INODE_EXISTS,
++ logged_isize);
+ } else {
+ copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
+ src_offset, ins_sizes[i]);
+@@ -3886,6 +3910,33 @@ process:
+ return ret;
+ }
+
++static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
++ struct btrfs_path *path, u64 *size_ret)
++{
++ struct btrfs_key key;
++ int ret;
++
++ key.objectid = btrfs_ino(inode);
++ key.type = BTRFS_INODE_ITEM_KEY;
++ key.offset = 0;
++
++ ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
++ if (ret < 0) {
++ return ret;
++ } else if (ret > 0) {
++ *size_ret = i_size_read(inode);
++ } else {
++ struct btrfs_inode_item *item;
++
++ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
++ struct btrfs_inode_item);
++ *size_ret = btrfs_inode_size(path->nodes[0], item);
++ }
++
++ btrfs_release_path(path);
++ return 0;
++}
++
+ /* log a single inode in the tree log.
+ * At least one parent directory for this inode must exist in the tree
+ * or be logged already.
+@@ -3923,6 +3974,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ bool fast_search = false;
+ u64 ino = btrfs_ino(inode);
+ struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
++ u64 logged_isize = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+@@ -3976,6 +4028,25 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ max_key_type = BTRFS_XATTR_ITEM_KEY;
+ ret = drop_objectid_items(trans, log, path, ino, max_key_type);
+ } else {
++ if (inode_only == LOG_INODE_EXISTS) {
++ /*
++ * Make sure the new inode item we write to the log has
++ * the same isize as the current one (if it exists).
++ * This is necessary to prevent data loss after log
++ * replay, and also to prevent doing a wrong expanding
++ * truncate - for e.g. create file, write 4K into offset
++ * 0, fsync, write 4K into offset 4096, add hard link,
++ * fsync some other file (to sync log), power fail - if
++ * we use the inode's current i_size, after log replay
++ * we get a 8Kb file, with the last 4Kb extent as a hole
++ * (zeroes), as if an expanding truncate happened,
++ * instead of getting a file of 4Kb only.
++ */
++ err = logged_inode_size(log, inode, path,
++ &logged_isize);
++ if (err)
++ goto out_unlock;
++ }
+ if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags)) {
+ clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+@@ -4031,7 +4102,8 @@ again:
+ }
+
+ ret = copy_items(trans, inode, dst_path, path, &last_extent,
+- ins_start_slot, ins_nr, inode_only);
++ ins_start_slot, ins_nr, inode_only,
++ logged_isize);
+ if (ret < 0) {
+ err = ret;
+ goto out_unlock;
+@@ -4055,7 +4127,7 @@ next_slot:
+ if (ins_nr) {
+ ret = copy_items(trans, inode, dst_path, path,
+ &last_extent, ins_start_slot,
+- ins_nr, inode_only);
++ ins_nr, inode_only, logged_isize);
+ if (ret < 0) {
+ err = ret;
+ goto out_unlock;
+@@ -4076,7 +4148,8 @@ next_slot:
+ }
+ if (ins_nr) {
+ ret = copy_items(trans, inode, dst_path, path, &last_extent,
+- ins_start_slot, ins_nr, inode_only);
++ ins_start_slot, ins_nr, inode_only,
++ logged_isize);
+ if (ret < 0) {
+ err = ret;
+ goto out_unlock;
+diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
+index 7654e87b0428..9ad5ba4b299b 100644
+--- a/fs/jffs2/scan.c
++++ b/fs/jffs2/scan.c
+@@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
+ sumlen = c->sector_size - je32_to_cpu(sm->offset);
+ sumptr = buf + buf_size - sumlen;
+
++ /* sm->offset maybe wrong but MAGIC maybe right */
++ if (sumlen > c->sector_size)
++ goto full_scan;
++
+ /* Now, make sure the summary itself is available */
+ if (sumlen > buf_size) {
+ /* Need to kmalloc for this. */
+@@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
+ }
+ }
+
++full_scan:
+ buf_ofs = jeb->offset;
+
+ if (!buf_size) {
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 9106f42c472c..67bdc0bb9aea 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -65,7 +65,7 @@ static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
+ return (struct sockaddr *)&nsm->sm_addr;
+ }
+
+-static struct rpc_clnt *nsm_create(struct net *net)
++static struct rpc_clnt *nsm_create(struct net *net, const char *nodename)
+ {
+ struct sockaddr_in sin = {
+ .sin_family = AF_INET,
+@@ -77,6 +77,7 @@ static struct rpc_clnt *nsm_create(struct net *net)
+ .address = (struct sockaddr *)&sin,
+ .addrsize = sizeof(sin),
+ .servername = "rpc.statd",
++ .nodename = nodename,
+ .program = &nsm_program,
+ .version = NSM_VERSION,
+ .authflavor = RPC_AUTH_NULL,
+@@ -102,7 +103,7 @@ out:
+ return clnt;
+ }
+
+-static struct rpc_clnt *nsm_client_get(struct net *net)
++static struct rpc_clnt *nsm_client_get(struct net *net, const char *nodename)
+ {
+ struct rpc_clnt *clnt, *new;
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+@@ -111,7 +112,7 @@ static struct rpc_clnt *nsm_client_get(struct net *net)
+ if (clnt != NULL)
+ goto out;
+
+- clnt = new = nsm_create(net);
++ clnt = new = nsm_create(net, nodename);
+ if (IS_ERR(clnt))
+ goto out;
+
+@@ -190,19 +191,23 @@ int nsm_monitor(const struct nlm_host *host)
+ struct nsm_res res;
+ int status;
+ struct rpc_clnt *clnt;
++ const char *nodename = NULL;
+
+ dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);
+
+ if (nsm->sm_monitored)
+ return 0;
+
++ if (host->h_rpcclnt)
++ nodename = host->h_rpcclnt->cl_nodename;
++
+ /*
+ * Choose whether to record the caller_name or IP address of
+ * this peer in the local rpc.statd's database.
+ */
+ nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
+
+- clnt = nsm_client_get(host->net);
++ clnt = nsm_client_get(host->net, nodename);
+ if (IS_ERR(clnt)) {
+ status = PTR_ERR(clnt);
+ dprintk("lockd: failed to create NSM upcall transport, "
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index b8fb3a4ef649..351be9205bf8 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -128,22 +128,24 @@ nfs41_callback_svc(void *vrqstp)
+ if (try_to_freeze())
+ continue;
+
+- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
++ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
+ spin_lock_bh(&serv->sv_cb_lock);
+ if (!list_empty(&serv->sv_cb_list)) {
+ req = list_first_entry(&serv->sv_cb_list,
+ struct rpc_rqst, rq_bc_list);
+ list_del(&req->rq_bc_list);
+ spin_unlock_bh(&serv->sv_cb_lock);
++ finish_wait(&serv->sv_cb_waitq, &wq);
+ dprintk("Invoking bc_svc_process()\n");
+ error = bc_svc_process(serv, req, rqstp);
+ dprintk("bc_svc_process() returned w/ error code= %d\n",
+ error);
+ } else {
+ spin_unlock_bh(&serv->sv_cb_lock);
+- schedule();
++ /* schedule_timeout to game the hung task watchdog */
++ schedule_timeout(60 * HZ);
++ finish_wait(&serv->sv_cb_waitq, &wq);
+ }
+- finish_wait(&serv->sv_cb_waitq, &wq);
+ }
+ return 0;
+ }
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index f4ccfe6521ec..02f8d09e119f 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
+
+ for (i = 0; i < args->csa_nrclists; i++) {
+ status = decode_rc_list(xdr, &args->csa_rclists[i]);
+- if (status)
++ if (status) {
++ args->csa_nrclists = i;
+ goto out_free;
++ }
+ }
+ }
+ status = 0;
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 294692ff83b1..a094b0c34ac3 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -242,7 +242,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
+ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+ struct nfs_direct_req *dreq)
+ {
+- cinfo->lock = &dreq->lock;
++ cinfo->lock = &dreq->inode->i_lock;
+ cinfo->mds = &dreq->mds_cinfo;
+ cinfo->ds = &dreq->ds_cinfo;
+ cinfo->dreq = dreq;
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index efaa31c70fbe..e1acc1c4ce65 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -377,7 +377,7 @@ extern struct rpc_stat nfs_rpcstat;
+
+ extern int __init register_nfs_fs(void);
+ extern void __exit unregister_nfs_fs(void);
+-extern void nfs_sb_active(struct super_block *sb);
++extern bool nfs_sb_active(struct super_block *sb);
+ extern void nfs_sb_deactive(struct super_block *sb);
+
+ /* namespace.c */
+@@ -495,6 +495,26 @@ extern int nfs41_walk_client_list(struct nfs_client *clp,
+ struct nfs_client **result,
+ struct rpc_cred *cred);
+
++static inline struct inode *nfs_igrab_and_active(struct inode *inode)
++{
++ inode = igrab(inode);
++ if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
++ iput(inode);
++ inode = NULL;
++ }
++ return inode;
++}
++
++static inline void nfs_iput_and_deactive(struct inode *inode)
++{
++ if (inode != NULL) {
++ struct super_block *sb = inode->i_sb;
++
++ iput(inode);
++ nfs_sb_deactive(sb);
++ }
++}
++
+ /*
+ * Determine the device name as a string
+ */
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 83f3a7d7466e..cd617076aeca 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5130,9 +5130,13 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
+ static void nfs4_delegreturn_release(void *calldata)
+ {
+ struct nfs4_delegreturndata *data = calldata;
++ struct inode *inode = data->inode;
+
+- if (data->roc)
+- pnfs_roc_release(data->inode);
++ if (inode) {
++ if (data->roc)
++ pnfs_roc_release(inode);
++ nfs_iput_and_deactive(inode);
++ }
+ kfree(calldata);
+ }
+
+@@ -5189,9 +5193,9 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
+ nfs_fattr_init(data->res.fattr);
+ data->timestamp = jiffies;
+ data->rpc_status = 0;
+- data->inode = inode;
+- data->roc = list_empty(&NFS_I(inode)->open_files) ?
+- pnfs_roc(inode) : false;
++ data->inode = nfs_igrab_and_active(inode);
++ if (data->inode)
++ data->roc = nfs4_roc(inode);
+
+ task_setup_data.callback_data = data;
+ msg.rpc_argp = &data->args;
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 0a5dda4d85c2..883ee88e5f5e 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1445,19 +1445,19 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
+ {
+ u64 rd_size = req->wb_bytes;
+
+- WARN_ON_ONCE(pgio->pg_lseg != NULL);
+-
+- if (pgio->pg_dreq == NULL)
+- rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
+- else
+- rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
+-
+- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+- req->wb_context,
+- req_offset(req),
+- rd_size,
+- IOMODE_READ,
+- GFP_KERNEL);
++ if (pgio->pg_lseg == NULL) {
++ if (pgio->pg_dreq == NULL)
++ rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
++ else
++ rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
++
++ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
++ req->wb_context,
++ req_offset(req),
++ rd_size,
++ IOMODE_READ,
++ GFP_KERNEL);
++ }
+ /* If no lseg, fall back to read through mds */
+ if (pgio->pg_lseg == NULL)
+ nfs_pageio_reset_read_mds(pgio);
+@@ -1469,14 +1469,13 @@ void
+ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req, u64 wb_size)
+ {
+- WARN_ON_ONCE(pgio->pg_lseg != NULL);
+-
+- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+- req->wb_context,
+- req_offset(req),
+- wb_size,
+- IOMODE_RW,
+- GFP_NOFS);
++ if (pgio->pg_lseg == NULL)
++ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
++ req->wb_context,
++ req_offset(req),
++ wb_size,
++ IOMODE_RW,
++ GFP_NOFS);
+ /* If no lseg, fall back to write through mds */
+ if (pgio->pg_lseg == NULL)
+ nfs_pageio_reset_write_mds(pgio);
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 31a11b0e885d..368d9395d2e7 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -405,12 +405,15 @@ void __exit unregister_nfs_fs(void)
+ unregister_filesystem(&nfs_fs_type);
+ }
+
+-void nfs_sb_active(struct super_block *sb)
++bool nfs_sb_active(struct super_block *sb)
+ {
+ struct nfs_server *server = NFS_SB(sb);
+
+- if (atomic_inc_return(&server->active) == 1)
+- atomic_inc(&sb->s_active);
++ if (!atomic_inc_not_zero(&sb->s_active))
++ return false;
++ if (atomic_inc_return(&server->active) != 1)
++ atomic_dec(&sb->s_active);
++ return true;
+ }
+ EXPORT_SYMBOL_GPL(nfs_sb_active);
+
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index 10b653930ee2..465223b7592e 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -701,8 +701,8 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
+ /* We don't need the lock and we have to acquire quota file locks
+ * which will later depend on this lock */
+ mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
+- info->dqi_maxblimit = 0x7fffffffffffffffLL;
+- info->dqi_maxilimit = 0x7fffffffffffffffLL;
++ info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
++ info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
+ oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS);
+ if (!oinfo) {
+ mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 4e0388cffe3d..e8972bcddfb4 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1034,7 +1034,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ struct vm_area_struct *vma;
+ struct pagemapread *pm = walk->private;
+ spinlock_t *ptl;
+- pte_t *pte;
++ pte_t *pte, *orig_pte;
+ int err = 0;
+
+ /* find the first VMA at or above 'addr' */
+@@ -1095,15 +1095,19 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ BUG_ON(is_vm_hugetlb_page(vma));
+
+ /* Addresses in the VMA. */
+- for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
++ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
++ for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
+ pagemap_entry_t pme;
+- pte = pte_offset_map(pmd, addr);
++
+ pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+- pte_unmap(pte);
+ err = add_to_pagemap(addr, &pme, pm);
+ if (err)
+- return err;
++ break;
+ }
++ pte_unmap_unlock(orig_pte, ptl);
++
++ if (err)
++ return err;
+
+ if (addr == end)
+ break;
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 9340228aff6e..05fea2ac116c 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2380,16 +2380,6 @@ out:
+ }
+ EXPORT_SYMBOL(dquot_quota_on_mount);
+
+-static inline qsize_t qbtos(qsize_t blocks)
+-{
+- return blocks << QIF_DQBLKSIZE_BITS;
+-}
+-
+-static inline qsize_t stoqb(qsize_t space)
+-{
+- return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+-}
+-
+ /* Generic routine for getting common part of quota structure */
+ static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+ {
+@@ -2439,13 +2429,13 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
+ return -EINVAL;
+
+ if (((di->d_fieldmask & QC_SPC_SOFT) &&
+- stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
++ di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
+ ((di->d_fieldmask & QC_SPC_HARD) &&
+- stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
++ di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
+ ((di->d_fieldmask & QC_INO_SOFT) &&
+- (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
++ (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
+ ((di->d_fieldmask & QC_INO_HARD) &&
+- (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
++ (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
+ return -ERANGE;
+
+ spin_lock(&dq_data_lock);
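
The qbtos()/stoqb() helpers removed above converted between bytes and 1 KiB quota blocks (QIF_DQBLKSIZE_BITS is 10); with the limits now kept in bytes the conversion is no longer needed at this point. A stand-alone copy of the two conversions for reference, with arbitrary example values in main():

#include <stdio.h>
#include <stdint.h>

#define QIF_DQBLKSIZE_BITS      10
#define QIF_DQBLKSIZE           (1ULL << QIF_DQBLKSIZE_BITS)

static uint64_t qbtos(uint64_t blocks)  /* quota blocks -> bytes */
{
        return blocks << QIF_DQBLKSIZE_BITS;
}

static uint64_t stoqb(uint64_t space)   /* bytes -> quota blocks, rounded up */
{
        return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

int main(void)
{
        /* 1536 bytes occupy two 1 KiB quota blocks once rounded up. */
        printf("qbtos(3)    = %llu bytes\n", (unsigned long long)qbtos(3));
        printf("stoqb(1536) = %llu blocks\n", (unsigned long long)stoqb(1536));
        return 0;
}
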
+diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
+index 469c6848b322..8fe79beced5c 100644
+--- a/fs/quota/quota_v1.c
++++ b/fs/quota/quota_v1.c
+@@ -169,8 +169,8 @@ static int v1_read_file_info(struct super_block *sb, int type)
+ }
+ ret = 0;
+ /* limits are stored as unsigned 32-bit data */
+- dqopt->info[type].dqi_maxblimit = 0xffffffff;
+- dqopt->info[type].dqi_maxilimit = 0xffffffff;
++ dqopt->info[type].dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
++ dqopt->info[type].dqi_max_ino_limit = 0xffffffff;
+ dqopt->info[type].dqi_igrace =
+ dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
+ dqopt->info[type].dqi_bgrace =
+diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
+index 02751ec695c5..d1a8054bba9a 100644
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -117,12 +117,12 @@ static int v2_read_file_info(struct super_block *sb, int type)
+ qinfo = info->dqi_priv;
+ if (version == 0) {
+ /* limits are stored as unsigned 32-bit data */
+- info->dqi_maxblimit = 0xffffffff;
+- info->dqi_maxilimit = 0xffffffff;
++ info->dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
++ info->dqi_max_ino_limit = 0xffffffff;
+ } else {
+- /* used space is stored as unsigned 64-bit value */
+- info->dqi_maxblimit = 0xffffffffffffffffULL; /* 2^64-1 */
+- info->dqi_maxilimit = 0xffffffffffffffffULL;
++ /* used space is stored as unsigned 64-bit value in bytes */
++ info->dqi_max_spc_limit = 0xffffffffffffffffULL; /* 2^64-1 */
++ info->dqi_max_ino_limit = 0xffffffffffffffffULL;
+ }
+ info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
+ info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 5bc71d9a674a..7b72b7dd8906 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1288,6 +1288,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
+ struct kernel_lb_addr *iloc = &iinfo->i_location;
+ unsigned int link_count;
+ unsigned int indirections = 0;
++ int bs = inode->i_sb->s_blocksize;
+ int ret = -EIO;
+
+ reread:
+@@ -1374,38 +1375,35 @@ reread:
+ if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
+ iinfo->i_efe = 1;
+ iinfo->i_use = 0;
+- ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
++ ret = udf_alloc_i_data(inode, bs -
+ sizeof(struct extendedFileEntry));
+ if (ret)
+ goto out;
+ memcpy(iinfo->i_ext.i_data,
+ bh->b_data + sizeof(struct extendedFileEntry),
+- inode->i_sb->s_blocksize -
+- sizeof(struct extendedFileEntry));
++ bs - sizeof(struct extendedFileEntry));
+ } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
+ iinfo->i_efe = 0;
+ iinfo->i_use = 0;
+- ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+- sizeof(struct fileEntry));
++ ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
+ if (ret)
+ goto out;
+ memcpy(iinfo->i_ext.i_data,
+ bh->b_data + sizeof(struct fileEntry),
+- inode->i_sb->s_blocksize - sizeof(struct fileEntry));
++ bs - sizeof(struct fileEntry));
+ } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
+ iinfo->i_efe = 0;
+ iinfo->i_use = 1;
+ iinfo->i_lenAlloc = le32_to_cpu(
+ ((struct unallocSpaceEntry *)bh->b_data)->
+ lengthAllocDescs);
+- ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
++ ret = udf_alloc_i_data(inode, bs -
+ sizeof(struct unallocSpaceEntry));
+ if (ret)
+ goto out;
+ memcpy(iinfo->i_ext.i_data,
+ bh->b_data + sizeof(struct unallocSpaceEntry),
+- inode->i_sb->s_blocksize -
+- sizeof(struct unallocSpaceEntry));
++ bs - sizeof(struct unallocSpaceEntry));
+ return 0;
+ }
+
+@@ -1489,6 +1487,15 @@ reread:
+ }
+ inode->i_generation = iinfo->i_unique;
+
++ /*
++ * Sanity check length of allocation descriptors and extended attrs to
++ * avoid integer overflows
++ */
++ if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
++ goto out;
++ /* Now do exact checks */
++ if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
++ goto out;
+ /* Sanity checks for files in ICB so that we don't get confused later */
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ /*
+@@ -1498,8 +1505,7 @@ reread:
+ if (iinfo->i_lenAlloc != inode->i_size)
+ goto out;
+ /* File in ICB has to fit in there... */
+- if (inode->i_size > inode->i_sb->s_blocksize -
+- udf_file_entry_alloc_offset(inode))
++ if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
+ goto out;
+ }
+
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 79c981984dca..661666ecac22 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -976,7 +976,11 @@ xfs_bmap_local_to_extents(
+ *firstblock = args.fsbno;
+ bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
+
+- /* initialise the block and copy the data */
++ /*
++ * Initialise the block and copy the data
++ *
++ * Note: init_fn must set the buffer log item type correctly!
++ */
+ init_fn(tp, bp, ip, ifp);
+
+ /* account for the change in fork size and log everything */
+diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
+index 5782f037eab4..a7dce9aea5b5 100644
+--- a/fs/xfs/libxfs/xfs_symlink_remote.c
++++ b/fs/xfs/libxfs/xfs_symlink_remote.c
+@@ -180,6 +180,8 @@ xfs_symlink_local_to_remote(
+ struct xfs_mount *mp = ip->i_mount;
+ char *buf;
+
++ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
++
+ if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+ bp->b_ops = NULL;
+ memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
+diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
+index f15969543326..1a6c9b936789 100644
+--- a/fs/xfs/xfs_buf_item.c
++++ b/fs/xfs/xfs_buf_item.c
+@@ -319,6 +319,10 @@ xfs_buf_item_format(
+ ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
+ (bip->bli_flags & XFS_BLI_STALE));
++ ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
++ (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
++ && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
++
+
+ /*
+ * If it is an inode buffer, transfer the in-memory state to the
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 8ed049d1e332..3cc309a19ea8 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -2000,6 +2000,7 @@ xfs_iunlink(
+ agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
+ offset = offsetof(xfs_agi_t, agi_unlinked) +
+ (sizeof(xfs_agino_t) * bucket_index);
++ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
+ xfs_trans_log_buf(tp, agibp, offset,
+ (offset + sizeof(xfs_agino_t) - 1));
+ return 0;
+@@ -2091,6 +2092,7 @@ xfs_iunlink_remove(
+ agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
+ offset = offsetof(xfs_agi_t, agi_unlinked) +
+ (sizeof(xfs_agino_t) * bucket_index);
++ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
+ xfs_trans_log_buf(tp, agibp, offset,
+ (offset + sizeof(xfs_agino_t) - 1));
+ } else {
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index d68f23021af3..cf2bc2dd702e 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -844,6 +844,11 @@ xfs_qm_reset_dqcounts(
+ */
+ xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
+ "xfs_quotacheck");
++ /*
++ * Reset type in case we are reusing group quota file for
++ * project quotas or vice versa
++ */
++ ddq->d_flags = type;
+ ddq->d_bcount = 0;
+ ddq->d_icount = 0;
+ ddq->d_rtbcount = 0;
+diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
+index 30e8e3410955..32dfdb5d3e6b 100644
+--- a/fs/xfs/xfs_trans.c
++++ b/fs/xfs/xfs_trans.c
+@@ -474,6 +474,7 @@ xfs_trans_apply_sb_deltas(
+ whole = 1;
+ }
+
++ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
+ if (whole)
+ /*
+ * Log the whole thing, the fields are noncontiguous.
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index 1c804b057fb1..7ee1774edee5 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
+ new_dir_mask |= FS_ISDIR;
+ }
+
+- fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
+- fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
++ fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
++ fs_cookie);
++ fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
++ fs_cookie);
+
+ if (target)
+ fsnotify_link_count(target);
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 47ebb4fafd87..d77a08d25223 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1328,7 +1328,7 @@ struct nfs_commit_completion_ops {
+ };
+
+ struct nfs_commit_info {
+- spinlock_t *lock;
++ spinlock_t *lock; /* inode->i_lock */
+ struct nfs_mds_commit_info *mds;
+ struct pnfs_ds_commit_info *ds;
+ struct nfs_direct_req *dreq; /* O_DIRECT request */
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index 224fb8154f8f..8b0877faa7f5 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -211,8 +211,8 @@ struct mem_dqinfo {
+ unsigned long dqi_flags;
+ unsigned int dqi_bgrace;
+ unsigned int dqi_igrace;
+- qsize_t dqi_maxblimit;
+- qsize_t dqi_maxilimit;
++ qsize_t dqi_max_spc_limit;
++ qsize_t dqi_max_ino_limit;
+ void *dqi_priv;
+ };
+
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index 70736b98c721..b363a0f6dfe1 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -57,7 +57,7 @@ struct rpc_clnt {
+ const struct rpc_timeout *cl_timeout; /* Timeout strategy */
+
+ int cl_nodelen; /* nodename length */
+- char cl_nodename[UNX_MAXNODENAME];
++ char cl_nodename[UNX_MAXNODENAME+1];
+ struct rpc_pipe_dir_head cl_pipedir_objects;
+ struct rpc_clnt * cl_parent; /* Points to parent of clones */
+ struct rpc_rtt cl_rtt_default;
+@@ -109,6 +109,7 @@ struct rpc_create_args {
+ struct sockaddr *saddress;
+ const struct rpc_timeout *timeout;
+ const char *servername;
++ const char *nodename;
+ const struct rpc_program *program;
+ u32 prognumber; /* overrides program->number */
+ u32 version;
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 447a7e2fc19b..3827bffc11a7 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -127,10 +127,6 @@ enum usb_interface_condition {
+ * to the sysfs representation for that device.
+ * @pm_usage_cnt: PM usage counter for this interface
+ * @reset_ws: Used for scheduling resets from atomic context.
+- * @reset_running: set to 1 if the interface is currently running a
+- * queued reset so that usb_cancel_queued_reset() doesn't try to
+- * remove from the workqueue when running inside the worker
+- * thread. See __usb_queue_reset_device().
+ * @resetting_device: USB core reset the device, so use alt setting 0 as
+ * current; needs bandwidth alloc after reset.
+ *
+@@ -181,7 +177,6 @@ struct usb_interface {
+ unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
+ unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
+ unsigned needs_binding:1; /* needs delayed unbind/rebind */
+- unsigned reset_running:1;
+ unsigned resetting_device:1; /* true: bandwidth alloc after reset */
+
+ struct device dev; /* interface specific device info */
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index cd96a2bc3388..2f48e1756cbd 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -146,6 +146,8 @@ struct usb_hcd {
+ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
+ unsigned can_do_streams:1; /* HC supports streams */
+ unsigned tpl_support:1; /* OTG & EH TPL support */
++ unsigned cant_recv_wakeups:1;
++ /* wakeup requests from downstream aren't received */
+
+ unsigned int irq; /* irq allocated */
+ void __iomem *regs; /* device memory/io */
+@@ -450,6 +452,7 @@ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
+ #endif /* CONFIG_PCI */
+
+ /* pci-ish (pdev null is ok) buffer alloc/mapping support */
++void usb_init_pool_max(void);
+ int hcd_buffer_create(struct usb_hcd *hcd);
+ void hcd_buffer_destroy(struct usb_hcd *hcd);
+
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index a6fd939f202d..3ebb168b9afc 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -121,13 +121,6 @@ extern int cipso_v4_rbm_strictvalid;
+ #endif
+
+ /*
+- * Helper Functions
+- */
+-
+-#define CIPSO_V4_OPTEXIST(x) (IPCB(x)->opt.cipso != 0)
+-#define CIPSO_V4_OPTPTR(x) (skb_network_header(x) + IPCB(x)->opt.cipso)
+-
+-/*
+ * DOI List Functions
+ */
+
+@@ -190,7 +183,7 @@ static inline int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
+
+ #ifdef CONFIG_NETLABEL
+ void cipso_v4_cache_invalidate(void);
+-int cipso_v4_cache_add(const struct sk_buff *skb,
++int cipso_v4_cache_add(const unsigned char *cipso_ptr,
+ const struct netlbl_lsm_secattr *secattr);
+ #else
+ static inline void cipso_v4_cache_invalidate(void)
+@@ -198,7 +191,7 @@ static inline void cipso_v4_cache_invalidate(void)
+ return;
+ }
+
+-static inline int cipso_v4_cache_add(const struct sk_buff *skb,
++static inline int cipso_v4_cache_add(const unsigned char *cipso_ptr,
+ const struct netlbl_lsm_secattr *secattr)
+ {
+ return 0;
+@@ -211,6 +204,8 @@ static inline int cipso_v4_cache_add(const struct sk_buff *skb,
+
+ #ifdef CONFIG_NETLABEL
+ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway);
++int cipso_v4_getattr(const unsigned char *cipso,
++ struct netlbl_lsm_secattr *secattr);
+ int cipso_v4_sock_setattr(struct sock *sk,
+ const struct cipso_v4_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr);
+@@ -226,6 +221,7 @@ int cipso_v4_skbuff_setattr(struct sk_buff *skb,
+ int cipso_v4_skbuff_delattr(struct sk_buff *skb);
+ int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
+ struct netlbl_lsm_secattr *secattr);
++unsigned char *cipso_v4_optptr(const struct sk_buff *skb);
+ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option);
+ #else
+ static inline void cipso_v4_error(struct sk_buff *skb,
+@@ -235,6 +231,12 @@ static inline void cipso_v4_error(struct sk_buff *skb,
+ return;
+ }
+
++static inline int cipso_v4_getattr(const unsigned char *cipso,
++ struct netlbl_lsm_secattr *secattr)
++{
++ return -ENOSYS;
++}
++
+ static inline int cipso_v4_sock_setattr(struct sock *sk,
+ const struct cipso_v4_doi *doi_def,
+ const struct netlbl_lsm_secattr *secattr)
+@@ -282,6 +284,11 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
+ return -ENOSYS;
+ }
+
++static inline unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
++{
++ return NULL;
++}
++
+ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ unsigned char **option)
+ {
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 379650b984f8..6ffdc96059a0 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -2535,7 +2535,7 @@ static int kdb_summary(int argc, const char **argv)
+ #define K(x) ((x) << (PAGE_SHIFT - 10))
+ kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
+ "Buffers: %8lu kB\n",
+- val.totalram, val.freeram, val.bufferram);
++ K(val.totalram), K(val.freeram), K(val.bufferram));
+ return 0;
+ }
+
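
The kdb fix above wraps the struct sysinfo fields in K(), which converts page counts to kilobytes before printing. The same conversion in isolation, with a 4 KiB page size and the page count assumed for the example:

#include <stdio.h>

#define PAGE_SHIFT      12                              /* 4 KiB pages assumed */
#define K(x)            ((x) << (PAGE_SHIFT - 10))      /* pages -> kB */

int main(void)
{
        unsigned long totalram_pages = 262144;          /* 1 GiB worth of 4 KiB pages */

        /* Without K() the raw page count would be printed as if it were kB. */
        printf("MemTotal: %8lu kB\n", K(totalram_pages));
        return 0;
}
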
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 28bf91c60a0b..85fb3d632bd8 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -633,10 +633,14 @@ int ntp_validate_timex(struct timex *txc)
+ if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
+ return -EPERM;
+
+- if (txc->modes & ADJ_FREQUENCY) {
+- if (LONG_MIN / PPM_SCALE > txc->freq)
++ /*
++ * Check for potential multiplication overflows that can
++ * only happen on 64-bit systems:
++ */
++ if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
++ if (LLONG_MIN / PPM_SCALE > txc->freq)
+ return -EINVAL;
+- if (LONG_MAX / PPM_SCALE < txc->freq)
++ if (LLONG_MAX / PPM_SCALE < txc->freq)
+ return -EINVAL;
+ }
+
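
The ntp_validate_timex() hunk above rejects frequency values whose multiplication by PPM_SCALE would overflow, by dividing the long long limits first instead of multiplying and hoping. The pattern in isolation; the PPM_SCALE value below is only a placeholder and freq_in_range is an invented name:

#include <stdio.h>
#include <limits.h>

#define PPM_SCALE 65536LL       /* placeholder; the kernel derives the real value */

/* Return 0 when freq * PPM_SCALE cannot overflow a long long. */
static int freq_in_range(long long freq)
{
        if (LLONG_MIN / PPM_SCALE > freq)
                return -1;
        if (LLONG_MAX / PPM_SCALE < freq)
                return -1;
        return 0;
}

int main(void)
{
        printf("%d\n", freq_in_range(500000));          /* 0: safe to scale */
        printf("%d\n", freq_in_range(LLONG_MAX / 2));   /* -1: would overflow */
        return 0;
}

Checking against the divided limits keeps every intermediate value in range, which is why the fix compares before it ever multiplies.
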
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index a56e07c8d15b..f4fbbfcdf399 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -450,7 +450,10 @@ int ring_buffer_print_page_header(struct trace_seq *s)
+ struct rb_irq_work {
+ struct irq_work work;
+ wait_queue_head_t waiters;
++ wait_queue_head_t full_waiters;
+ bool waiters_pending;
++ bool full_waiters_pending;
++ bool wakeup_full;
+ };
+
+ /*
+@@ -532,6 +535,10 @@ static void rb_wake_up_waiters(struct irq_work *work)
+ struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
+
+ wake_up_all(&rbwork->waiters);
++ if (rbwork->wakeup_full) {
++ rbwork->wakeup_full = false;
++ wake_up_all(&rbwork->full_waiters);
++ }
+ }
+
+ /**
+@@ -556,9 +563,11 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+ * data in any cpu buffer, or a specific buffer, put the
+ * caller on the appropriate wait queue.
+ */
+- if (cpu == RING_BUFFER_ALL_CPUS)
++ if (cpu == RING_BUFFER_ALL_CPUS) {
+ work = &buffer->irq_work;
+- else {
++ /* Full only makes sense on per cpu reads */
++ full = false;
++ } else {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return -ENODEV;
+ cpu_buffer = buffer->buffers[cpu];
+@@ -567,7 +576,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+
+
+ while (true) {
+- prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
++ if (full)
++ prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
++ else
++ prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+
+ /*
+ * The events can happen in critical sections where
+@@ -589,7 +601,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+ * that is necessary is that the wake up happens after
+ * a task has been queued. It's OK for spurious wake ups.
+ */
+- work->waiters_pending = true;
++ if (full)
++ work->full_waiters_pending = true;
++ else
++ work->waiters_pending = true;
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+@@ -618,7 +633,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+ schedule();
+ }
+
+- finish_wait(&work->waiters, &wait);
++ if (full)
++ finish_wait(&work->full_waiters, &wait);
++ else
++ finish_wait(&work->waiters, &wait);
+
+ return ret;
+ }
+@@ -1233,6 +1251,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
+ init_completion(&cpu_buffer->update_done);
+ init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
+ init_waitqueue_head(&cpu_buffer->irq_work.waiters);
++ init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
+
+ bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
+ GFP_KERNEL, cpu_to_node(cpu));
+@@ -2804,6 +2823,8 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
+ static __always_inline void
+ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+ {
++ bool pagebusy;
++
+ if (buffer->irq_work.waiters_pending) {
+ buffer->irq_work.waiters_pending = false;
+ /* irq_work_queue() supplies it's own memory barriers */
+@@ -2815,6 +2836,15 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+ /* irq_work_queue() supplies it's own memory barriers */
+ irq_work_queue(&cpu_buffer->irq_work.work);
+ }
++
++ pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
++
++ if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
++ cpu_buffer->irq_work.wakeup_full = true;
++ cpu_buffer->irq_work.full_waiters_pending = false;
++ /* irq_work_queue() supplies its own memory barriers */
++ irq_work_queue(&cpu_buffer->irq_work.work);
++ }
+ }
+
+ /**
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 426962b04183..72c71345db81 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4916,7 +4916,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
+ *fpos += written;
+
+ out_unlock:
+- for (i = 0; i < nr_pages; i++){
++ for (i = nr_pages - 1; i >= 0; i--) {
+ kunmap_atomic(map_page[i]);
+ put_page(pages[i]);
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 9fd722769927..f08fec71ec5a 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3659,6 +3659,8 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ {
+ struct page *page;
+
++ if (!pmd_present(*pmd))
++ return NULL;
+ page = pte_page(*(pte_t *)pmd);
+ if (page)
+ page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index f09b6b65cf6b..9ebc394ea5e5 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -1392,8 +1392,12 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
+ * implementations are not known of and in order to not over
+ * complicate our implementation, simply pretend that we never
+ * received an IRK for such a device.
++ *
++ * The Identity Address must also be a Static Random or Public
++ * Address, which hci_is_identity_address() checks for.
+ */
+- if (!bacmp(&info->bdaddr, BDADDR_ANY)) {
++ if (!bacmp(&info->bdaddr, BDADDR_ANY) ||
++ !hci_is_identity_address(&info->bdaddr, info->addr_type)) {
+ BT_ERR("Ignoring IRK with no identity address");
+ goto distribute;
+ }
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index 6f164289bde8..b0cf1f287aed 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -1006,14 +1006,24 @@ static void put_osd(struct ceph_osd *osd)
+ */
+ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+ {
+- dout("__remove_osd %p\n", osd);
++ dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
+ WARN_ON(!list_empty(&osd->o_requests));
+ WARN_ON(!list_empty(&osd->o_linger_requests));
+
+- rb_erase(&osd->o_node, &osdc->osds);
+ list_del_init(&osd->o_osd_lru);
+- ceph_con_close(&osd->o_con);
+- put_osd(osd);
++ rb_erase(&osd->o_node, &osdc->osds);
++ RB_CLEAR_NODE(&osd->o_node);
++}
++
++static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
++{
++ dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
++
++ if (!RB_EMPTY_NODE(&osd->o_node)) {
++ ceph_con_close(&osd->o_con);
++ __remove_osd(osdc, osd);
++ put_osd(osd);
++ }
+ }
+
+ static void remove_all_osds(struct ceph_osd_client *osdc)
+@@ -1023,7 +1033,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc)
+ while (!RB_EMPTY_ROOT(&osdc->osds)) {
+ struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+ struct ceph_osd, o_node);
+- __remove_osd(osdc, osd);
++ remove_osd(osdc, osd);
+ }
+ mutex_unlock(&osdc->request_mutex);
+ }
+@@ -1064,7 +1074,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
+ list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
+ if (time_before(jiffies, osd->lru_ttl))
+ break;
+- __remove_osd(osdc, osd);
++ remove_osd(osdc, osd);
+ }
+ mutex_unlock(&osdc->request_mutex);
+ }
+@@ -1079,8 +1089,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+ dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
+ if (list_empty(&osd->o_requests) &&
+ list_empty(&osd->o_linger_requests)) {
+- __remove_osd(osdc, osd);
+-
++ remove_osd(osdc, osd);
+ return -ENODEV;
+ }
+
+@@ -1884,6 +1893,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
+ {
+ struct rb_node *p, *n;
+
++ dout("%s %p\n", __func__, osdc);
+ for (p = rb_first(&osdc->osds); p; p = n) {
+ struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
+
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 4715f25dfe03..bc7c9662a904 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -376,20 +376,18 @@ static int cipso_v4_cache_check(const unsigned char *key,
+ * negative values on failure.
+ *
+ */
+-int cipso_v4_cache_add(const struct sk_buff *skb,
++int cipso_v4_cache_add(const unsigned char *cipso_ptr,
+ const struct netlbl_lsm_secattr *secattr)
+ {
+ int ret_val = -EPERM;
+ u32 bkt;
+ struct cipso_v4_map_cache_entry *entry = NULL;
+ struct cipso_v4_map_cache_entry *old_entry = NULL;
+- unsigned char *cipso_ptr;
+ u32 cipso_ptr_len;
+
+ if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0)
+ return 0;
+
+- cipso_ptr = CIPSO_V4_OPTPTR(skb);
+ cipso_ptr_len = cipso_ptr[1];
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+@@ -1577,6 +1575,33 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
+ }
+
+ /**
++ * cipso_v4_optptr - Find the CIPSO option in the packet
++ * @skb: the packet
++ *
++ * Description:
++ * Parse the packet's IP header looking for a CIPSO option. Returns a pointer
++ * to the start of the CIPSO option on success, NULL if one is not found.
++ *
++ */
++unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
++{
++ const struct iphdr *iph = ip_hdr(skb);
++ unsigned char *optptr = (unsigned char *)&(ip_hdr(skb)[1]);
++ int optlen;
++ int taglen;
++
++ for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
++ if (optptr[0] == IPOPT_CIPSO)
++ return optptr;
++ taglen = optptr[1];
++ optlen -= taglen;
++ optptr += taglen;
++ }
++
++ return NULL;
++}
++
++/**
+ * cipso_v4_validate - Validate a CIPSO option
+ * @option: the start of the option, on error it is set to point to the error
+ *
+@@ -2117,8 +2142,8 @@ void cipso_v4_req_delattr(struct request_sock *req)
+ * on success and negative values on failure.
+ *
+ */
+-static int cipso_v4_getattr(const unsigned char *cipso,
+- struct netlbl_lsm_secattr *secattr)
++int cipso_v4_getattr(const unsigned char *cipso,
++ struct netlbl_lsm_secattr *secattr)
+ {
+ int ret_val = -ENOMSG;
+ u32 doi;
+@@ -2303,22 +2328,6 @@ int cipso_v4_skbuff_delattr(struct sk_buff *skb)
+ return 0;
+ }
+
+-/**
+- * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option
+- * @skb: the packet
+- * @secattr: the security attributes
+- *
+- * Description:
+- * Parse the given packet's CIPSO option and return the security attributes.
+- * Returns zero on success and negative values on failure.
+- *
+- */
+-int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
+- struct netlbl_lsm_secattr *secattr)
+-{
+- return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr);
+-}
+-
+ /*
+ * Setup Functions
+ */
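cipso_v4_optptr() above walks the IPv4 options area looking for the CIPSO option type. A simplified userspace sketch of that scan over a raw options buffer; the sample buffer is invented for illustration, and the sketch assumes every option carries a length byte (single-byte options such as end-of-list and no-op, plus malformed lengths, are glossed over here):

#include <stddef.h>
#include <stdio.h>

#define IPOPT_CIPSO 134	/* CIPSO IPv4 option type */

/* Scan an IPv4 options area for the CIPSO option and return a pointer to
 * it, or NULL if it is absent.  Simplified type/length/value walk. */
static const unsigned char *find_cipso(const unsigned char *opt, int optlen)
{
	while (optlen > 0) {
		if (opt[0] == IPOPT_CIPSO)
			return opt;
		optlen -= opt[1];
		opt += opt[1];
	}
	return NULL;
}

int main(void)
{
	/* Hypothetical options area: one 4-byte option of another type,
	 * followed by a minimal CIPSO option (type 134, length 6). */
	const unsigned char opts[] = { 0x94, 4, 0, 0, 134, 6, 0, 0, 0, 1 };

	printf("CIPSO %sfound\n", find_cipso(opts, sizeof(opts)) ? "" : "not ");
	return 0;
}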
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index a845cd4cf21e..28cddc85b700 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -1065,10 +1065,12 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
+ u16 family,
+ struct netlbl_lsm_secattr *secattr)
+ {
++ unsigned char *ptr;
++
+ switch (family) {
+ case AF_INET:
+- if (CIPSO_V4_OPTEXIST(skb) &&
+- cipso_v4_skbuff_getattr(skb, secattr) == 0)
++ ptr = cipso_v4_optptr(skb);
++ if (ptr && cipso_v4_getattr(ptr, secattr) == 0)
+ return 0;
+ break;
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -1094,7 +1096,7 @@ int netlbl_skbuff_getattr(const struct sk_buff *skb,
+ */
+ void netlbl_skbuff_err(struct sk_buff *skb, int error, int gateway)
+ {
+- if (CIPSO_V4_OPTEXIST(skb))
++ if (cipso_v4_optptr(skb))
+ cipso_v4_error(skb, error, gateway);
+ }
+
+@@ -1126,11 +1128,14 @@ void netlbl_cache_invalidate(void)
+ int netlbl_cache_add(const struct sk_buff *skb,
+ const struct netlbl_lsm_secattr *secattr)
+ {
++ unsigned char *ptr;
++
+ if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0)
+ return -ENOMSG;
+
+- if (CIPSO_V4_OPTEXIST(skb))
+- return cipso_v4_cache_add(skb, secattr);
++ ptr = cipso_v4_optptr(skb);
++ if (ptr)
++ return cipso_v4_cache_add(ptr, secattr);
+
+ return -ENOMSG;
+ }
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 9acd6ce88db7..ae46f0198608 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -286,10 +286,8 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
+
+ static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
+ {
+- clnt->cl_nodelen = strlen(nodename);
+- if (clnt->cl_nodelen > UNX_MAXNODENAME)
+- clnt->cl_nodelen = UNX_MAXNODENAME;
+- memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
++ clnt->cl_nodelen = strlcpy(clnt->cl_nodename,
++ nodename, sizeof(clnt->cl_nodename));
+ }
+
+ static int rpc_client_register(struct rpc_clnt *clnt,
+@@ -360,6 +358,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
+ const struct rpc_version *version;
+ struct rpc_clnt *clnt = NULL;
+ const struct rpc_timeout *timeout;
++ const char *nodename = args->nodename;
+ int err;
+
+ /* sanity check the name before trying to print it */
+@@ -415,8 +414,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
+
+ atomic_set(&clnt->cl_count, 1);
+
++ if (nodename == NULL)
++ nodename = utsname()->nodename;
+ /* save the nodename */
+- rpc_clnt_set_nodename(clnt, utsname()->nodename);
++ rpc_clnt_set_nodename(clnt, nodename);
+
+ err = rpc_client_register(clnt, args->authflavor, args->client_name);
+ if (err)
+@@ -571,6 +572,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
+ if (xprt == NULL)
+ goto out_err;
+ args->servername = xprt->servername;
++ args->nodename = clnt->cl_nodename;
+
+ new = rpc_new_client(args, xprt, clnt);
+ if (IS_ERR(new)) {
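The clnt.c change above replaces an open-coded strlen/memcpy pair with strlcpy, so the stored nodename is always NUL-terminated and truncated to the destination buffer. strlcpy is a BSD/kernel routine rather than ISO C, so the standalone sketch below carries a minimal local version; the buffer size and sample hostname are assumptions made purely to show the truncation behaviour:

#include <stdio.h>
#include <string.h>

/* Minimal local strlcpy (BSD/kernel semantics): copy at most size-1 bytes,
 * always NUL-terminate when size > 0, and return strlen(src). */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char nodename[9];	/* assumed small buffer, to force truncation */
	size_t len = my_strlcpy(nodename, "build-host.example", sizeof(nodename));

	/* The stored name is truncated and terminated; the return value is
	 * the untruncated source length. */
	printf("stored \"%s\" (source length %zu)\n", nodename, len);
	return 0;
}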
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 1891a1022c17..74b75c3ce39d 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -355,7 +355,8 @@ out:
+ return result;
+ }
+
+-static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
++static struct rpc_clnt *rpcb_create(struct net *net, const char *nodename,
++ const char *hostname,
+ struct sockaddr *srvaddr, size_t salen,
+ int proto, u32 version)
+ {
+@@ -365,6 +366,7 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
+ .address = srvaddr,
+ .addrsize = salen,
+ .servername = hostname,
++ .nodename = nodename,
+ .program = &rpcb_program,
+ .version = version,
+ .authflavor = RPC_AUTH_UNIX,
+@@ -740,7 +742,9 @@ void rpcb_getport_async(struct rpc_task *task)
+ dprintk("RPC: %5u %s: trying rpcbind version %u\n",
+ task->tk_pid, __func__, bind_version);
+
+- rpcb_clnt = rpcb_create(xprt->xprt_net, xprt->servername, sap, salen,
++ rpcb_clnt = rpcb_create(xprt->xprt_net,
++ clnt->cl_nodename,
++ xprt->servername, sap, salen,
+ xprt->prot, bind_version);
+ if (IS_ERR(rpcb_clnt)) {
+ status = PTR_ERR(rpcb_clnt);
+diff --git a/security/smack/smack.h b/security/smack/smack.h
+index b828a379377c..b48359c0da32 100644
+--- a/security/smack/smack.h
++++ b/security/smack/smack.h
+@@ -298,6 +298,16 @@ static inline struct smack_known *smk_of_task(const struct task_smack *tsp)
+ return tsp->smk_task;
+ }
+
++static inline struct smack_known *smk_of_task_struct(const struct task_struct *t)
++{
++ struct smack_known *skp;
++
++ rcu_read_lock();
++ skp = smk_of_task(__task_cred(t)->security);
++ rcu_read_unlock();
++ return skp;
++}
++
+ /*
+ * Present a pointer to the forked smack label entry in an task blob.
+ */
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index d515ec25ae9f..9d3c64af1ca9 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -43,8 +43,6 @@
+ #include <linux/binfmts.h>
+ #include "smack.h"
+
+-#define task_security(task) (task_cred_xxx((task), security))
+-
+ #define TRANS_TRUE "TRUE"
+ #define TRANS_TRUE_SIZE 4
+
+@@ -119,7 +117,7 @@ static int smk_bu_current(char *note, struct smack_known *oskp,
+ static int smk_bu_task(struct task_struct *otp, int mode, int rc)
+ {
+ struct task_smack *tsp = current_security();
+- struct task_smack *otsp = task_security(otp);
++ struct smack_known *smk_task = smk_of_task_struct(otp);
+ char acc[SMK_NUM_ACCESS_TYPE + 1];
+
+ if (rc <= 0)
+@@ -127,7 +125,7 @@ static int smk_bu_task(struct task_struct *otp, int mode, int rc)
+
+ smk_bu_mode(mode, acc);
+ pr_info("Smack Bringup: (%s %s %s) %s to %s\n",
+- tsp->smk_task->smk_known, otsp->smk_task->smk_known, acc,
++ tsp->smk_task->smk_known, smk_task->smk_known, acc,
+ current->comm, otp->comm);
+ return 0;
+ }
+@@ -344,7 +342,8 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
+ saip = &ad;
+ }
+
+- tsp = task_security(tracer);
++ rcu_read_lock();
++ tsp = __task_cred(tracer)->security;
+ tracer_known = smk_of_task(tsp);
+
+ if ((mode & PTRACE_MODE_ATTACH) &&
+@@ -364,11 +363,14 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
+ tracee_known->smk_known,
+ 0, rc, saip);
+
++ rcu_read_unlock();
+ return rc;
+ }
+
+ /* In case of rule==SMACK_PTRACE_DEFAULT or mode==PTRACE_MODE_READ */
+ rc = smk_tskacc(tsp, tracee_known, smk_ptrace_mode(mode), saip);
++
++ rcu_read_unlock();
+ return rc;
+ }
+
+@@ -395,7 +397,7 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
+ if (rc != 0)
+ return rc;
+
+- skp = smk_of_task(task_security(ctp));
++ skp = smk_of_task_struct(ctp);
+
+ rc = smk_ptrace_rule_check(current, skp, mode, __func__);
+ return rc;
+@@ -1825,7 +1827,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
+ const char *caller)
+ {
+ struct smk_audit_info ad;
+- struct smack_known *skp = smk_of_task(task_security(p));
++ struct smack_known *skp = smk_of_task_struct(p);
+ int rc;
+
+ smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
+@@ -1878,7 +1880,7 @@ static int smack_task_getsid(struct task_struct *p)
+ */
+ static void smack_task_getsecid(struct task_struct *p, u32 *secid)
+ {
+- struct smack_known *skp = smk_of_task(task_security(p));
++ struct smack_known *skp = smk_of_task_struct(p);
+
+ *secid = skp->smk_secid;
+ }
+@@ -1985,7 +1987,7 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
+ {
+ struct smk_audit_info ad;
+ struct smack_known *skp;
+- struct smack_known *tkp = smk_of_task(task_security(p));
++ struct smack_known *tkp = smk_of_task_struct(p);
+ int rc;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+@@ -2039,7 +2041,7 @@ static int smack_task_wait(struct task_struct *p)
+ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
+ {
+ struct inode_smack *isp = inode->i_security;
+- struct smack_known *skp = smk_of_task(task_security(p));
++ struct smack_known *skp = smk_of_task_struct(p);
+
+ isp->smk_inode = skp;
+ }
+@@ -3199,7 +3201,7 @@ unlockandout:
+ */
+ static int smack_getprocattr(struct task_struct *p, char *name, char **value)
+ {
+- struct smack_known *skp = smk_of_task(task_security(p));
++ struct smack_known *skp = smk_of_task_struct(p);
+ char *cp;
+ int slen;
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c879c3709eae..50762cf62b2d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4805,6 +4805,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
+ /* ALC282 */
++ SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 605d14003d25..6d36c5b78805 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -99,6 +99,7 @@ enum {
+ STAC_HP_ENVY_BASS,
+ STAC_HP_BNB13_EQ,
+ STAC_HP_ENVY_TS_BASS,
++ STAC_92HD83XXX_GPIO10_EAPD,
+ STAC_92HD83XXX_MODELS
+ };
+
+@@ -2141,6 +2142,19 @@ static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
+ spec->headset_jack = 1;
+ }
+
++static void stac92hd83xxx_fixup_gpio10_eapd(struct hda_codec *codec,
++ const struct hda_fixup *fix,
++ int action)
++{
++ struct sigmatel_spec *spec = codec->spec;
++
++ if (action != HDA_FIXUP_ACT_PRE_PROBE)
++ return;
++ spec->eapd_mask = spec->gpio_mask = spec->gpio_dir =
++ spec->gpio_data = 0x10;
++ spec->eapd_switch = 0;
++}
++
+ static const struct hda_verb hp_bnb13_eq_verbs[] = {
+ /* 44.1KHz base */
+ { 0x22, 0x7A6, 0x3E },
+@@ -2656,6 +2670,10 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
+ {}
+ },
+ },
++ [STAC_92HD83XXX_GPIO10_EAPD] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = stac92hd83xxx_fixup_gpio10_eapd,
++ },
+ };
+
+ static const struct hda_model_fixup stac92hd83xxx_models[] = {
+@@ -2861,6 +2879,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
+ "HP Mini", STAC_92HD83XXX_HP_LED),
+ SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
++ SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91,
++ "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
+ {} /* terminator */
+ };
+
+diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
+index 6abc2ac8fffb..e76857277695 100644
+--- a/sound/pci/riptide/riptide.c
++++ b/sound/pci/riptide/riptide.c
+@@ -2030,32 +2030,43 @@ snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ {
+ static int dev;
+ struct gameport *gameport;
++ int ret;
+
+ if (dev >= SNDRV_CARDS)
+ return -ENODEV;
++
+ if (!enable[dev]) {
+- dev++;
+- return -ENOENT;
++ ret = -ENOENT;
++ goto inc_dev;
+ }
+
+- if (!joystick_port[dev++])
+- return 0;
++ if (!joystick_port[dev]) {
++ ret = 0;
++ goto inc_dev;
++ }
+
+ gameport = gameport_allocate_port();
+- if (!gameport)
+- return -ENOMEM;
++ if (!gameport) {
++ ret = -ENOMEM;
++ goto inc_dev;
++ }
+ if (!request_region(joystick_port[dev], 8, "Riptide gameport")) {
+ snd_printk(KERN_WARNING
+ "Riptide: cannot grab gameport 0x%x\n",
+ joystick_port[dev]);
+ gameport_free_port(gameport);
+- return -EBUSY;
++ ret = -EBUSY;
++ goto inc_dev;
+ }
+
+ gameport->io = joystick_port[dev];
+ gameport_register_port(gameport);
+ pci_set_drvdata(pci, gameport);
+- return 0;
++
++ ret = 0;
++inc_dev:
++ dev++;
++ return ret;
+ }
+
+ static void snd_riptide_joystick_remove(struct pci_dev *pci)
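The riptide change above routes every exit path through a single label so the static device index is advanced exactly once per probe call, whatever the outcome. A small standalone sketch of that single-exit idiom; the resource handling is hypothetical and only stands in for the gameport setup the driver performs:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical probe: whatever happens, the per-call bookkeeping at the
 * single exit label (here, advancing dev) must run exactly once. */
static int fake_probe(int want_resource)
{
	static int dev;
	void *resource = NULL;
	int ret;

	if (!want_resource) {
		ret = 0;
		goto inc_dev;
	}

	resource = malloc(64);
	if (!resource) {
		ret = -1;
		goto inc_dev;
	}

	printf("probe %d acquired resource\n", dev);
	free(resource);
	ret = 0;

inc_dev:
	dev++;		/* mirrors the dev++ the patch consolidates */
	return ret;
}

int main(void)
{
	fake_probe(0);
	fake_probe(1);
	return 0;
}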
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index 52d86af3ef2d..fcf91ee0328d 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -6114,6 +6114,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
+ snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ 64, 8192);
++ snd_pcm_hw_constraint_minmax(runtime,
++ SNDRV_PCM_HW_PARAM_PERIODS,
++ 2, 2);
+ break;
+ }
+
+@@ -6188,6 +6191,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
+ snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ 64, 8192);
++ snd_pcm_hw_constraint_minmax(runtime,
++ SNDRV_PCM_HW_PARAM_PERIODS,
++ 2, 2);
+ break;
+ }
+
+diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
+index 9bd8b4f63303..7134f9ebf2f3 100644
+--- a/sound/soc/codecs/rt5670.c
++++ b/sound/soc/codecs/rt5670.c
+@@ -2439,6 +2439,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5670 = {
+ static const struct regmap_config rt5670_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
++ .use_single_rw = true,
+ .max_register = RT5670_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5670_ranges) *
+ RT5670_PR_SPACING),
+ .volatile_reg = rt5670_volatile_register,
+diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
+index 8e948c63f3d9..2b81ca418d2a 100644
+--- a/sound/soc/davinci/Kconfig
++++ b/sound/soc/davinci/Kconfig
+@@ -58,13 +58,12 @@ choice
+ depends on MACH_DAVINCI_DM365_EVM
+
+ config SND_DM365_AIC3X_CODEC
+- bool "Audio Codec - AIC3101"
++ tristate "Audio Codec - AIC3101"
+ help
+ Say Y if you want to add support for AIC3101 audio codec
+
+ config SND_DM365_VOICE_CODEC
+ tristate "Voice Codec - CQ93VC"
+- depends on SND_DAVINCI_SOC
+ select MFD_DAVINCI_VOICECODEC
+ select SND_DAVINCI_SOC_VCIF
+ select SND_SOC_CQ0093VC
+diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
+index 595eee341e90..a08a877fc10c 100644
+--- a/sound/soc/pxa/mioa701_wm9713.c
++++ b/sound/soc/pxa/mioa701_wm9713.c
+@@ -81,7 +81,7 @@ static int rear_amp_power(struct snd_soc_codec *codec, int power)
+ static int rear_amp_event(struct snd_soc_dapm_widget *widget,
+ struct snd_kcontrol *kctl, int event)
+ {
+- struct snd_soc_codec *codec = widget->codec;
++ struct snd_soc_codec *codec = widget->dapm->card->rtd[0].codec;
+
+ return rear_amp_power(codec, SND_SOC_DAPM_EVENT_ON(event));
+ }
+diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
+index 47b78b3f0325..6da965bdbc2c 100644
+--- a/tools/perf/util/cloexec.c
++++ b/tools/perf/util/cloexec.c
+@@ -25,6 +25,10 @@ static int perf_flag_probe(void)
+ if (cpu < 0)
+ cpu = 0;
+
++ /*
++ * Using -1 for the pid is a workaround to avoid gratuitous jump label
++ * changes.
++ */
+ while (1) {
+ /* check cloexec flag */
+ fd = sys_perf_event_open(&attr, pid, cpu, -1,
+@@ -47,16 +51,24 @@ static int perf_flag_probe(void)
+ err, strerror_r(err, sbuf, sizeof(sbuf)));
+
+ /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
+- fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
++ while (1) {
++ fd = sys_perf_event_open(&attr, pid, cpu, -1, 0);
++ if (fd < 0 && pid == -1 && errno == EACCES) {
++ pid = 0;
++ continue;
++ }
++ break;
++ }
+ err = errno;
+
++ if (fd >= 0)
++ close(fd);
++
+ if (WARN_ONCE(fd < 0 && err != EBUSY,
+ "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
+ err, strerror_r(err, sbuf, sizeof(sbuf))))
+ return -1;
+
+- close(fd);
+-
+ return 0;
+ }
+