From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri, 24 Jun 2016 20:40:18 +0000 (UTC)
Message-ID: <1466800804.0a21ca48b5ca15cd0f5699ea94da60754a272c0c.mpagano@gentoo>

commit:     0a21ca48b5ca15cd0f5699ea94da60754a272c0c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 24 20:40:04 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 24 20:40:04 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0a21ca48

Linux patch 4.4.14

 0000_README             |    4 +
 1013_linux-4.4.14.patch | 5210 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5214 insertions(+)
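
A minimal sketch of applying this update, assuming a vanilla 4.4.13 source tree with the linux-patches checkout alongside it (both paths below are hypothetical). The patch uses a/ b/ path prefixes, hence -p1; its first hunk bumps the Makefile SUBLEVEL from 13 to 14:

    $ cd linux-4.4.13
    $ patch -p1 < ../linux-patches/1013_linux-4.4.14.patch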

diff --git a/0000_README b/0000_README
index 7a07f5e..9f33955 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-4.4.13.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.13
 
+Patch:  1013_linux-4.4.14.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.14
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-4.4.14.patch b/1013_linux-4.4.14.patch
new file mode 100644
index 0000000..075c39a
--- /dev/null
+++ b/1013_linux-4.4.14.patch
@@ -0,0 +1,5210 @@
+diff --git a/Makefile b/Makefile
+index f4b33cdf991a..fadbb9d73c6d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index ef9119f7462e..4d9375814b53 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
+ 	if (ret)
+ 		return ret;
+ 
+-	vfp_flush_hwstate(thread);
+ 	thread->vfpstate.hard = new_vfp;
++	vfp_flush_hwstate(thread);
+ 
+ 	return 0;
+ }
+diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
+index faad6df49e5b..bc6492b9a924 100644
+--- a/arch/arm64/include/asm/elf.h
++++ b/arch/arm64/include/asm/elf.h
+@@ -156,14 +156,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ #define STACK_RND_MASK			(0x3ffff >> (PAGE_SHIFT - 12))
+ #endif
+ 
+-#ifdef CONFIG_COMPAT
+-
+ #ifdef __AARCH64EB__
+ #define COMPAT_ELF_PLATFORM		("v8b")
+ #else
+ #define COMPAT_ELF_PLATFORM		("v8l")
+ #endif
+ 
++#ifdef CONFIG_COMPAT
++
+ #define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
+ 
+ /* AArch32 registers. */
+diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
+index a5f234039616..0166cfbc866c 100644
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -22,6 +22,8 @@
+ 
+ #include <linux/bitops.h>
+ #include <linux/bug.h>
++#include <linux/compat.h>
++#include <linux/elf.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/personality.h>
+@@ -102,6 +104,7 @@ static const char *const compat_hwcap2_str[] = {
+ static int c_show(struct seq_file *m, void *v)
+ {
+ 	int i, j;
++	bool compat = personality(current->personality) == PER_LINUX32;
+ 
+ 	for_each_online_cpu(i) {
+ 		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+@@ -113,6 +116,9 @@ static int c_show(struct seq_file *m, void *v)
+ 		 * "processor".  Give glibc what it expects.
+ 		 */
+ 		seq_printf(m, "processor\t: %d\n", i);
++		if (compat)
++			seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
++				   MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
+ 
+ 		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ 			   loops_per_jiffy / (500000UL/HZ),
+@@ -125,7 +131,7 @@ static int c_show(struct seq_file *m, void *v)
+ 		 * software which does already (at least for 32-bit).
+ 		 */
+ 		seq_puts(m, "Features\t:");
+-		if (personality(current->personality) == PER_LINUX32) {
++		if (compat) {
+ #ifdef CONFIG_COMPAT
+ 			for (j = 0; compat_hwcap_str[j]; j++)
+ 				if (compat_elf_hwcap & (1 << j))
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 40f5522245a2..4c1a118c1d09 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
+ 	 * PTE_RDONLY is cleared by default in the asm below, so set it in
+ 	 * back if necessary (read-only or clean PTE).
+ 	 */
+-	if (!pte_write(entry) || !dirty)
++	if (!pte_write(entry) || !pte_sw_dirty(entry))
+ 		pte_val(entry) |= PTE_RDONLY;
+ 
+ 	/*
+diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
+index 3f832c3dd8f5..041153f5cf93 100644
+--- a/arch/mips/include/asm/processor.h
++++ b/arch/mips/include/asm/processor.h
+@@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count;
+  * User space process size: 2GB. This is hardcoded into a few places,
+  * so don't change it unless you know what you are doing.
+  */
+-#define TASK_SIZE	0x7fff8000UL
++#define TASK_SIZE	0x80000000UL
+ #endif
+ 
+ #define STACK_TOP_MAX	TASK_SIZE
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index d7c0acb35ec2..8d49614d600d 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs)
+ 		break;
+ 	}
+ 
+-	if (modify && R1(regs->iir))
++	if (ret == 0 && modify && R1(regs->iir))
+ 		regs->gr[R1(regs->iir)] = newbase;
+ 
+ 
+@@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs)
+ 
+ 	if (ret)
+ 	{
++		/*
++		 * The unaligned handler failed.
++		 * If we were called by __get_user() or __put_user() jump
++		 * to it's exception fixup handler instead of crashing.
++		 */
++		if (!user_mode(regs) && fixup_exception(regs))
++			return;
++
+ 		printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
+ 		die_if_kernel("Unaligned data reference", regs, 28);
+ 
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 2220f7a60def..070fa8552051 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -707,7 +707,7 @@
+ #define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
+ #define   MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
+ #define SPRN_MMCR1	798
+-#define SPRN_MMCR2	769
++#define SPRN_MMCR2	785
+ #define SPRN_MMCRA	0x312
+ #define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
+ #define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
+@@ -744,13 +744,13 @@
+ #define SPRN_PMC6	792
+ #define SPRN_PMC7	793
+ #define SPRN_PMC8	794
+-#define SPRN_SIAR	780
+-#define SPRN_SDAR	781
+ #define SPRN_SIER	784
+ #define   SIER_SIPR		0x2000000	/* Sampled MSR_PR */
+ #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
+ #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
+ #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
++#define SPRN_SIAR	796
++#define SPRN_SDAR	797
+ #define SPRN_TACR	888
+ #define SPRN_TCSCR	889
+ #define SPRN_CSIGR	890
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index 92dea8df6b26..e52b82b71d79 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -655,6 +655,7 @@ unsigned char ibm_architecture_vec[] = {
+ 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
+ 	W(0xffff0000), W(0x003f0000),	/* POWER7 */
+ 	W(0xffff0000), W(0x004b0000),	/* POWER8E */
++	W(0xffff0000), W(0x004c0000),   /* POWER8NVL */
+ 	W(0xffff0000), W(0x004d0000),	/* POWER8 */
+ 	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
+ 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index ac3ffd97e059..405baaf96864 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -615,29 +615,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
+ {
+ 	int config_addr;
+ 	int ret;
++	/* Waiting 0.2s maximum before skipping configuration */
++	int max_wait = 200;
+ 
+ 	/* Figure out the PE address */
+ 	config_addr = pe->config_addr;
+ 	if (pe->addr)
+ 		config_addr = pe->addr;
+ 
+-	/* Use new configure-pe function, if supported */
+-	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+-		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
+-				config_addr, BUID_HI(pe->phb->buid),
+-				BUID_LO(pe->phb->buid));
+-	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
+-		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
+-				config_addr, BUID_HI(pe->phb->buid),
+-				BUID_LO(pe->phb->buid));
+-	} else {
+-		return -EFAULT;
+-	}
++	while (max_wait > 0) {
++		/* Use new configure-pe function, if supported */
++		if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
++			ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
++					config_addr, BUID_HI(pe->phb->buid),
++					BUID_LO(pe->phb->buid));
++		} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
++			ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
++					config_addr, BUID_HI(pe->phb->buid),
++					BUID_LO(pe->phb->buid));
++		} else {
++			return -EFAULT;
++		}
+ 
+-	if (ret)
+-		pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+-			__func__, pe->phb->global_number, pe->addr, ret);
++		if (!ret)
++			return ret;
++
++		/*
++		 * If RTAS returns a delay value that's above 100ms, cut it
++		 * down to 100ms in case firmware made a mistake.  For more
++		 * on how these delay values work see rtas_busy_delay_time
++		 */
++		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
++		    ret <= RTAS_EXTENDED_DELAY_MAX)
++			ret = RTAS_EXTENDED_DELAY_MIN+2;
++
++		max_wait -= rtas_busy_delay_time(ret);
++
++		if (max_wait < 0)
++			break;
++
++		rtas_busy_delay(ret);
++	}
+ 
++	pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
++		__func__, pe->phb->global_number, pe->addr, ret);
+ 	return ret;
+ }
+ 
+diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
+index f010c93a88b1..fda605dbc1b4 100644
+--- a/arch/s390/net/bpf_jit.h
++++ b/arch/s390/net/bpf_jit.h
+@@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
+  *	      |		      |     |
+  *	      +---------------+     |
+  *	      | 8 byte skbp   |     |
+- * R15+170 -> +---------------+     |
++ * R15+176 -> +---------------+     |
+  *	      | 8 byte hlen   |     |
+  * R15+168 -> +---------------+     |
+  *	      | 4 byte align  |     |
+@@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
+ #define STK_OFF		(STK_SPACE - STK_160_UNUSED)
+ #define STK_OFF_TMP	160	/* Offset of tmp buffer on stack */
+ #define STK_OFF_HLEN	168	/* Offset of SKB header length on stack */
+-#define STK_OFF_SKBP	170	/* Offset of SKB pointer on stack */
++#define STK_OFF_SKBP	176	/* Offset of SKB pointer on stack */
+ 
+ #define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
+ #define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 9a0c4c22e536..0e2919dd8df3 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -45,7 +45,7 @@ struct bpf_jit {
+ 	int labels[1];		/* Labels for local jumps */
+ };
+ 
+-#define BPF_SIZE_MAX	0x7ffff	/* Max size for program (20 bit signed displ) */
++#define BPF_SIZE_MAX	0xffff	/* Max size for program (16 bit branches) */
+ 
+ #define SEEN_SKB	1	/* skb access */
+ #define SEEN_MEM	2	/* use mem[] for temporary storage */
+@@ -446,7 +446,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
+ 		emit_load_skb_data_hlen(jit);
+ 	if (jit->seen & SEEN_SKB_CHANGE)
+ 		/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
+-		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
++		EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
+ 			      STK_OFF_SKBP);
+ 	/* Clear A (%b0) and X (%b7) registers for converted BPF programs */
+ 	if (is_classic) {
+diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
+index 10e9dabc4c41..f0700cfeedd7 100644
+--- a/arch/sparc/include/asm/head_64.h
++++ b/arch/sparc/include/asm/head_64.h
+@@ -15,6 +15,10 @@
+ 
+ #define	PTREGS_OFF	(STACK_BIAS + STACKFRAME_SZ)
+ 
++#define	RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
++#define	RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
++#define RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
++
+ #define __CHEETAH_ID	0x003e0014
+ #define __JALAPENO_ID	0x003e0016
+ #define __SERRANO_ID	0x003e0022
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 131d36fcd07a..408b715c95a5 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -375,7 +375,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
+ #define pgprot_noncached pgprot_noncached
+ 
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-static inline pte_t pte_mkhuge(pte_t pte)
++static inline unsigned long __pte_huge_mask(void)
+ {
+ 	unsigned long mask;
+ 
+@@ -390,8 +390,19 @@ static inline pte_t pte_mkhuge(pte_t pte)
+ 	: "=r" (mask)
+ 	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
+ 
+-	return __pte(pte_val(pte) | mask);
++	return mask;
++}
++
++static inline pte_t pte_mkhuge(pte_t pte)
++{
++	return __pte(pte_val(pte) | __pte_huge_mask());
++}
++
++static inline bool is_hugetlb_pte(pte_t pte)
++{
++	return !!(pte_val(pte) & __pte_huge_mask());
+ }
++
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ static inline pmd_t pmd_mkhuge(pmd_t pmd)
+ {
+@@ -403,6 +414,11 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
+ 	return __pmd(pte_val(pte));
+ }
+ #endif
++#else
++static inline bool is_hugetlb_pte(pte_t pte)
++{
++	return false;
++}
+ #endif
+ 
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -865,6 +881,19 @@ static inline unsigned long pud_pfn(pud_t pud)
+ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+ 		   pte_t *ptep, pte_t orig, int fullmm);
+ 
++static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
++				pte_t *ptep, pte_t orig, int fullmm)
++{
++	/* It is more efficient to let flush_tlb_kernel_range()
++	 * handle init_mm tlb flushes.
++	 *
++	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
++	 *             and SUN4V pte layout, so this inline test is fine.
++	 */
++	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
++		tlb_batch_add(mm, vaddr, ptep, orig, fullmm);
++}
++
+ #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ 					    unsigned long addr,
+@@ -881,15 +910,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+ 	pte_t orig = *ptep;
+ 
+ 	*ptep = pte;
+-
+-	/* It is more efficient to let flush_tlb_kernel_range()
+-	 * handle init_mm tlb flushes.
+-	 *
+-	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+-	 *             and SUN4V pte layout, so this inline test is fine.
+-	 */
+-	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
+-		tlb_batch_add(mm, addr, ptep, orig, fullmm);
++	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm);
+ }
+ 
+ #define set_pte_at(mm,addr,ptep,pte)	\
+diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
+index dea1cfa2122b..a8e192e90700 100644
+--- a/arch/sparc/include/asm/tlbflush_64.h
++++ b/arch/sparc/include/asm/tlbflush_64.h
+@@ -8,6 +8,7 @@
+ #define TLB_BATCH_NR	192
+ 
+ struct tlb_batch {
++	bool huge;
+ 	struct mm_struct *mm;
+ 	unsigned long tlb_nr;
+ 	unsigned long active;
+@@ -16,7 +17,7 @@ struct tlb_batch {
+ 
+ void flush_tsb_kernel_range(unsigned long start, unsigned long end);
+ void flush_tsb_user(struct tlb_batch *tb);
+-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
++void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
+ 
+ /* TLB flush operations. */
+ 
+diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
+index 71b5a67522ab..781b9f1dbdc2 100644
+--- a/arch/sparc/include/asm/ttable.h
++++ b/arch/sparc/include/asm/ttable.h
+@@ -589,8 +589,8 @@ user_rtt_fill_64bit:					\
+ 	 restored;					\
+ 	nop; nop; nop; nop; nop; nop;			\
+ 	nop; nop; nop; nop; nop;			\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_dax;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_mna;		\
+ 	ba,a,pt	%xcc, user_rtt_fill_fixup;
+ 
+ 
+@@ -652,8 +652,8 @@ user_rtt_fill_32bit:					\
+ 	 restored;					\
+ 	nop; nop; nop; nop; nop;			\
+ 	nop; nop; nop;					\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_dax;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_mna;		\
+ 	ba,a,pt	%xcc, user_rtt_fill_fixup;
+ 
+ 
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index 7cf9c6ea3f1f..fdb13327fded 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
+ CFLAGS_REMOVE_pcr.o := -pg
+ endif
+ 
++obj-$(CONFIG_SPARC64)   += urtt_fill.o
+ obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
+ obj-$(CONFIG_SPARC32)   += etrap_32.o
+ obj-$(CONFIG_SPARC32)   += rtrap_32.o
+diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S
+index 4ee1ad420862..655628def68e 100644
+--- a/arch/sparc/kernel/cherrs.S
++++ b/arch/sparc/kernel/cherrs.S
+@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+ 	subcc		%g1, %g2, %g1		! Next cacheline
+ 	bge,pt		%icc, 1b
+ 	 nop
+-	ba,pt		%xcc, dcpe_icpe_tl1_common
+-	 nop
++	ba,a,pt		%xcc, dcpe_icpe_tl1_common
+ 
+ do_dcpe_tl1_fatal:
+ 	sethi		%hi(1f), %g7
+@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
+ 	mov		0x2, %o0
+ 	call		cheetah_plus_parity_error
+ 	 add		%sp, PTREGS_OFF, %o1
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_dcpe_tl1,.-do_dcpe_tl1
+ 
+ 	.globl		do_icpe_tl1
+@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+ 	subcc		%g1, %g2, %g1
+ 	bge,pt		%icc, 1b
+ 	 nop
+-	ba,pt		%xcc, dcpe_icpe_tl1_common
+-	 nop
++	ba,a,pt		%xcc, dcpe_icpe_tl1_common
+ 
+ do_icpe_tl1_fatal:
+ 	sethi		%hi(1f), %g7
+@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
+ 	mov		0x3, %o0
+ 	call		cheetah_plus_parity_error
+ 	 add		%sp, PTREGS_OFF, %o1
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_icpe_tl1,.-do_icpe_tl1
+ 	
+ 	.type		dcpe_icpe_tl1_common,#function
+@@ -456,7 +452,7 @@ __cheetah_log_error:
+ 	 cmp		%g2, 0x63
+ 	be		c_cee
+ 	 nop
+-	ba,pt		%xcc, c_deferred
++	ba,a,pt		%xcc, c_deferred
+ 	.size		__cheetah_log_error,.-__cheetah_log_error
+ 
+ 	/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
+diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
+index 33c02b15f478..a83707c83be8 100644
+--- a/arch/sparc/kernel/entry.S
++++ b/arch/sparc/kernel/entry.S
+@@ -948,7 +948,24 @@ linux_syscall_trace:
+ 	cmp	%o0, 0
+ 	bne	3f
+ 	 mov	-ENOSYS, %o0
++
++	/* Syscall tracing can modify the registers.  */
++	ld	[%sp + STACKFRAME_SZ + PT_G1], %g1
++	sethi	%hi(sys_call_table), %l7
++	ld	[%sp + STACKFRAME_SZ + PT_I0], %i0
++	or	%l7, %lo(sys_call_table), %l7
++	ld	[%sp + STACKFRAME_SZ + PT_I1], %i1
++	ld	[%sp + STACKFRAME_SZ + PT_I2], %i2
++	ld	[%sp + STACKFRAME_SZ + PT_I3], %i3
++	ld	[%sp + STACKFRAME_SZ + PT_I4], %i4
++	ld	[%sp + STACKFRAME_SZ + PT_I5], %i5
++	cmp	%g1, NR_syscalls
++	bgeu	3f
++	 mov	-ENOSYS, %o0
++
++	sll	%g1, 2, %l4
+ 	mov	%i0, %o0
++	ld	[%l7 + %l4], %l7
+ 	mov	%i1, %o1
+ 	mov	%i2, %o2
+ 	mov	%i3, %o3
+diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S
+index a6864826a4bd..336d2750fe78 100644
+--- a/arch/sparc/kernel/fpu_traps.S
++++ b/arch/sparc/kernel/fpu_traps.S
+@@ -100,8 +100,8 @@ do_fpdis:
+ 	fmuld		%f0, %f2, %f26
+ 	faddd		%f0, %f2, %f28
+ 	fmuld		%f0, %f2, %f30
+-	b,pt		%xcc, fpdis_exit
+-	 nop
++	ba,a,pt		%xcc, fpdis_exit
++
+ 2:	andcc		%g5, FPRS_DU, %g0
+ 	bne,pt		%icc, 3f
+ 	 fzero		%f32
+@@ -144,8 +144,8 @@ do_fpdis:
+ 	fmuld		%f32, %f34, %f58
+ 	faddd		%f32, %f34, %f60
+ 	fmuld		%f32, %f34, %f62
+-	ba,pt		%xcc, fpdis_exit
+-	 nop
++	ba,a,pt		%xcc, fpdis_exit
++
+ 3:	mov		SECONDARY_CONTEXT, %g3
+ 	add		%g6, TI_FPREGS, %g1
+ 
+@@ -197,8 +197,7 @@ fpdis_exit2:
+ fp_other_bounce:
+ 	call		do_fpother
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		fp_other_bounce,.-fp_other_bounce
+ 
+ 	.align		32
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index f2d30cab5b3f..51faf92ace00 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -461,9 +461,8 @@ sun4v_chip_type:
+ 	subcc	%g3, 1, %g3
+ 	bne,pt	%xcc, 41b
+ 	add	%g1, 1, %g1
+-	mov	SUN4V_CHIP_SPARC64X, %g4
+ 	ba,pt	%xcc, 5f
+-	nop
++	 mov	SUN4V_CHIP_SPARC64X, %g4
+ 
+ 49:
+ 	mov	SUN4V_CHIP_UNKNOWN, %g4
+@@ -548,8 +547,7 @@ sun4u_init:
+ 	stxa		%g0, [%g7] ASI_DMMU
+ 	membar	#Sync
+ 
+-	ba,pt		%xcc, sun4u_continue
+-	 nop
++	ba,a,pt		%xcc, sun4u_continue
+ 
+ sun4v_init:
+ 	/* Set ctx 0 */
+@@ -560,14 +558,12 @@ sun4v_init:
+ 	mov		SECONDARY_CONTEXT, %g7
+ 	stxa		%g0, [%g7] ASI_MMU
+ 	membar		#Sync
+-	ba,pt		%xcc, niagara_tlb_fixup
+-	 nop
++	ba,a,pt		%xcc, niagara_tlb_fixup
+ 
+ sun4u_continue:
+ 	BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
+ 
+-	ba,pt	%xcc, spitfire_tlb_fixup
+-	 nop
++	ba,a,pt	%xcc, spitfire_tlb_fixup
+ 
+ niagara_tlb_fixup:
+ 	mov	3, %g2		/* Set TLB type to hypervisor. */
+@@ -639,8 +635,7 @@ niagara_patch:
+ 	call	hypervisor_patch_cachetlbops
+ 	 nop
+ 
+-	ba,pt	%xcc, tlb_fixup_done
+-	 nop
++	ba,a,pt	%xcc, tlb_fixup_done
+ 
+ cheetah_tlb_fixup:
+ 	mov	2, %g2		/* Set TLB type to cheetah+. */
+@@ -659,8 +654,7 @@ cheetah_tlb_fixup:
+ 	call	cheetah_patch_cachetlbops
+ 	 nop
+ 
+-	ba,pt	%xcc, tlb_fixup_done
+-	 nop
++	ba,a,pt	%xcc, tlb_fixup_done
+ 
+ spitfire_tlb_fixup:
+ 	/* Set TLB type to spitfire. */
+@@ -782,8 +776,7 @@ setup_trap_table:
+ 	call	%o1
+ 	 add	%sp, (2047 + 128), %o0
+ 
+-	ba,pt	%xcc, 2f
+-	 nop
++	ba,a,pt	%xcc, 2f
+ 
+ 1:	sethi	%hi(sparc64_ttable_tl0), %o0
+ 	set	prom_set_trap_table_name, %g2
+@@ -822,8 +815,7 @@ setup_trap_table:
+ 
+ 	BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
+ 
+-	ba,pt	%xcc, 2f
+-	 nop
++	ba,a,pt	%xcc, 2f
+ 
+ 	/* Disable STICK_INT interrupts. */
+ 1:
+diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
+index 753b4f031bfb..34b4933900bf 100644
+--- a/arch/sparc/kernel/misctrap.S
++++ b/arch/sparc/kernel/misctrap.S
+@@ -18,8 +18,7 @@ __do_privact:
+ 109:	or		%g7, %lo(109b), %g7
+ 	call		do_privact
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__do_privact,.-__do_privact
+ 
+ 	.type		do_mna,#function
+@@ -46,8 +45,7 @@ do_mna:
+ 	mov		%l5, %o2
+ 	call		mem_address_unaligned
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_mna,.-do_mna
+ 
+ 	.type		do_lddfmna,#function
+@@ -65,8 +63,7 @@ do_lddfmna:
+ 	mov		%l5, %o2
+ 	call		handle_lddfmna
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_lddfmna,.-do_lddfmna
+ 
+ 	.type		do_stdfmna,#function
+@@ -84,8 +81,7 @@ do_stdfmna:
+ 	mov		%l5, %o2
+ 	call		handle_stdfmna
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_stdfmna,.-do_stdfmna
+ 
+ 	.type		breakpoint_trap,#function
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index badf0951d73c..9f9614df9e1e 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -994,6 +994,23 @@ void pcibios_set_master(struct pci_dev *dev)
+ 	/* No special bus mastering setup handling */
+ }
+ 
++#ifdef CONFIG_PCI_IOV
++int pcibios_add_device(struct pci_dev *dev)
++{
++	struct pci_dev *pdev;
++
++	/* Add sriov arch specific initialization here.
++	 * Copy dev_archdata from PF to VF
++	 */
++	if (dev->is_virtfn) {
++		pdev = dev->physfn;
++		memcpy(&dev->dev.archdata, &pdev->dev.archdata,
++		       sizeof(struct dev_archdata));
++	}
++	return 0;
++}
++#endif /* CONFIG_PCI_IOV */
++
+ static int __init pcibios_init(void)
+ {
+ 	pci_dfl_cache_line_size = 64 >> 2;
+diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
+index d08bdaffdbfc..216948ca4382 100644
+--- a/arch/sparc/kernel/rtrap_64.S
++++ b/arch/sparc/kernel/rtrap_64.S
+@@ -14,10 +14,6 @@
+ #include <asm/visasm.h>
+ #include <asm/processor.h>
+ 
+-#define		RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+-#define		RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+-#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+-
+ #ifdef CONFIG_CONTEXT_TRACKING
+ # define SCHEDULE_USER schedule_user
+ #else
+@@ -242,52 +238,17 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
+ 		 wrpr			%g1, %cwp
+ 		ba,a,pt			%xcc, user_rtt_fill_64bit
+ 
+-user_rtt_fill_fixup:
+-		rdpr	%cwp, %g1
+-		add	%g1, 1, %g1
+-		wrpr	%g1, 0x0, %cwp
+-
+-		rdpr	%wstate, %g2
+-		sll	%g2, 3, %g2
+-		wrpr	%g2, 0x0, %wstate
+-
+-		/* We know %canrestore and %otherwin are both zero.  */
+-
+-		sethi	%hi(sparc64_kern_pri_context), %g2
+-		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
+-		mov	PRIMARY_CONTEXT, %g1
+-
+-661:		stxa	%g2, [%g1] ASI_DMMU
+-		.section .sun4v_1insn_patch, "ax"
+-		.word	661b
+-		stxa	%g2, [%g1] ASI_MMU
+-		.previous
+-
+-		sethi	%hi(KERNBASE), %g1
+-		flush	%g1
++user_rtt_fill_fixup_dax:
++		ba,pt	%xcc, user_rtt_fill_fixup_common
++		 mov	1, %g3
+ 
+-		or	%g4, FAULT_CODE_WINFIXUP, %g4
+-		stb	%g4, [%g6 + TI_FAULT_CODE]
+-		stx	%g5, [%g6 + TI_FAULT_ADDR]
++user_rtt_fill_fixup_mna:
++		ba,pt	%xcc, user_rtt_fill_fixup_common
++		 mov	2, %g3
+ 
+-		mov	%g6, %l1
+-		wrpr	%g0, 0x0, %tl
+-
+-661:		nop
+-		.section		.sun4v_1insn_patch, "ax"
+-		.word			661b
+-		SET_GL(0)
+-		.previous
+-
+-		wrpr	%g0, RTRAP_PSTATE, %pstate
+-
+-		mov	%l1, %g6
+-		ldx	[%g6 + TI_TASK], %g4
+-		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+-		call	do_sparc64_fault
+-		 add	%sp, PTREGS_OFF, %o0
+-		ba,pt	%xcc, rtrap
+-		 nop
++user_rtt_fill_fixup:
++		ba,pt	%xcc, user_rtt_fill_fixup_common
++		 clr	%g3
+ 
+ user_rtt_pre_restore:
+ 		add			%g1, 1, %g1
+diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
+index 4eed773a7735..77655f0f0fc7 100644
+--- a/arch/sparc/kernel/signal32.c
++++ b/arch/sparc/kernel/signal32.c
+@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ 	return 0;
+ }
+ 
++/* Checks if the fp is valid.  We always build signal frames which are
++ * 16-byte aligned, therefore we can always enforce that the restore
++ * frame has that property as well.
++ */
++static bool invalid_frame_pointer(void __user *fp, int fplen)
++{
++	if ((((unsigned long) fp) & 15) ||
++	    ((unsigned long)fp) > 0x100000000ULL - fplen)
++		return true;
++	return false;
++}
++
+ void do_sigreturn32(struct pt_regs *regs)
+ {
+ 	struct signal_frame32 __user *sf;
+ 	compat_uptr_t fpu_save;
+ 	compat_uptr_t rwin_save;
+-	unsigned int psr;
++	unsigned int psr, ufp;
+ 	unsigned pc, npc;
+ 	sigset_t set;
+ 	compat_sigset_t seta;
+@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
+ 	sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+-	    (((unsigned long) sf) & 3))
++	if (invalid_frame_pointer(sf, sizeof(*sf)))
++		goto segv;
++
++	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
++		goto segv;
++
++	if (ufp & 0x7)
+ 		goto segv;
+ 
+-	if (get_user(pc, &sf->info.si_regs.pc) ||
++	if (__get_user(pc, &sf->info.si_regs.pc) ||
+ 	    __get_user(npc, &sf->info.si_regs.npc))
+ 		goto segv;
+ 
+@@ -227,7 +244,7 @@ segv:
+ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+ {
+ 	struct rt_signal_frame32 __user *sf;
+-	unsigned int psr, pc, npc;
++	unsigned int psr, pc, npc, ufp;
+ 	compat_uptr_t fpu_save;
+ 	compat_uptr_t rwin_save;
+ 	sigset_t set;
+@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+ 	sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+-	    (((unsigned long) sf) & 3))
++	if (invalid_frame_pointer(sf, sizeof(*sf)))
+ 		goto segv;
+ 
+-	if (get_user(pc, &sf->regs.pc) || 
++	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
++		goto segv;
++
++	if (ufp & 0x7)
++		goto segv;
++
++	if (__get_user(pc, &sf->regs.pc) ||
+ 	    __get_user(npc, &sf->regs.npc))
+ 		goto segv;
+ 
+@@ -307,14 +329,6 @@ segv:
+ 	force_sig(SIGSEGV, current);
+ }
+ 
+-/* Checks if the fp is valid */
+-static int invalid_frame_pointer(void __user *fp, int fplen)
+-{
+-	if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
+-		return 1;
+-	return 0;
+-}
+-
+ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ 	unsigned long sp;
+diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
+index 52aa5e4ce5e7..c3c12efe0bc0 100644
+--- a/arch/sparc/kernel/signal_32.c
++++ b/arch/sparc/kernel/signal_32.c
+@@ -60,10 +60,22 @@ struct rt_signal_frame {
+ #define SF_ALIGNEDSZ  (((sizeof(struct signal_frame) + 7) & (~7)))
+ #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+ 
++/* Checks if the fp is valid.  We always build signal frames which are
++ * 16-byte aligned, therefore we can always enforce that the restore
++ * frame has that property as well.
++ */
++static inline bool invalid_frame_pointer(void __user *fp, int fplen)
++{
++	if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
++		return true;
++
++	return false;
++}
++
+ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ {
++	unsigned long up_psr, pc, npc, ufp;
+ 	struct signal_frame __user *sf;
+-	unsigned long up_psr, pc, npc;
+ 	sigset_t set;
+ 	__siginfo_fpu_t __user *fpu_save;
+ 	__siginfo_rwin_t __user *rwin_save;
+@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
++	if (!invalid_frame_pointer(sf, sizeof(*sf)))
++		goto segv_and_exit;
++
++	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+ 		goto segv_and_exit;
+ 
+-	if (((unsigned long) sf) & 3)
++	if (ufp & 0x7)
+ 		goto segv_and_exit;
+ 
+ 	err = __get_user(pc,  &sf->info.si_regs.pc);
+@@ -127,7 +142,7 @@ segv_and_exit:
+ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ {
+ 	struct rt_signal_frame __user *sf;
+-	unsigned int psr, pc, npc;
++	unsigned int psr, pc, npc, ufp;
+ 	__siginfo_fpu_t __user *fpu_save;
+ 	__siginfo_rwin_t __user *rwin_save;
+ 	sigset_t set;
+@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ 
+ 	synchronize_user_stack();
+ 	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+-	    (((unsigned long) sf) & 0x03))
++	if (!invalid_frame_pointer(sf, sizeof(*sf)))
++		goto segv;
++
++	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
++		goto segv;
++
++	if (ufp & 0x7)
+ 		goto segv;
+ 
+ 	err = __get_user(pc, &sf->regs.pc);
+@@ -178,15 +198,6 @@ segv:
+ 	force_sig(SIGSEGV, current);
+ }
+ 
+-/* Checks if the fp is valid */
+-static inline int invalid_frame_pointer(void __user *fp, int fplen)
+-{
+-	if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
+-		return 1;
+-
+-	return 0;
+-}
+-
+ static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ 	unsigned long sp = regs->u_regs[UREG_FP];
+diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
+index d88beff47bab..5ee930c48f4c 100644
+--- a/arch/sparc/kernel/signal_64.c
++++ b/arch/sparc/kernel/signal_64.c
+@@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
+ 	unsigned char fenab;
+ 	int err;
+ 
+-	flush_user_windows();
++	synchronize_user_stack();
+ 	if (get_thread_wsaved()					||
+ 	    (((unsigned long)ucp) & (sizeof(unsigned long)-1))	||
+ 	    (!__access_ok(ucp, sizeof(*ucp))))
+@@ -234,6 +234,17 @@ do_sigsegv:
+ 	goto out;
+ }
+ 
++/* Checks if the fp is valid.  We always build rt signal frames which
++ * are 16-byte aligned, therefore we can always enforce that the
++ * restore frame has that property as well.
++ */
++static bool invalid_frame_pointer(void __user *fp)
++{
++	if (((unsigned long) fp) & 15)
++		return true;
++	return false;
++}
++
+ struct rt_signal_frame {
+ 	struct sparc_stackf	ss;
+ 	siginfo_t		info;
+@@ -246,8 +257,8 @@ struct rt_signal_frame {
+ 
+ void do_rt_sigreturn(struct pt_regs *regs)
+ {
++	unsigned long tpc, tnpc, tstate, ufp;
+ 	struct rt_signal_frame __user *sf;
+-	unsigned long tpc, tnpc, tstate;
+ 	__siginfo_fpu_t __user *fpu_save;
+ 	__siginfo_rwin_t __user *rwin_save;
+ 	sigset_t set;
+@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
+ 		(regs->u_regs [UREG_FP] + STACK_BIAS);
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (((unsigned long) sf) & 3)
++	if (invalid_frame_pointer(sf))
++		goto segv;
++
++	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ 		goto segv;
+ 
+-	err = get_user(tpc, &sf->regs.tpc);
++	if ((ufp + STACK_BIAS) & 0x7)
++		goto segv;
++
++	err = __get_user(tpc, &sf->regs.tpc);
+ 	err |= __get_user(tnpc, &sf->regs.tnpc);
+ 	if (test_thread_flag(TIF_32BIT)) {
+ 		tpc &= 0xffffffff;
+@@ -308,14 +325,6 @@ segv:
+ 	force_sig(SIGSEGV, current);
+ }
+ 
+-/* Checks if the fp is valid */
+-static int invalid_frame_pointer(void __user *fp)
+-{
+-	if (((unsigned long) fp) & 15)
+-		return 1;
+-	return 0;
+-}
+-
+ static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ 	unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
+diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
+index 0f6eebe71e6c..e5fe8cef9a69 100644
+--- a/arch/sparc/kernel/sigutil_32.c
++++ b/arch/sparc/kernel/sigutil_32.c
+@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ {
+ 	int err;
++
++	if (((unsigned long) fpu) & 3)
++		return -EFAULT;
++
+ #ifdef CONFIG_SMP
+ 	if (test_tsk_thread_flag(current, TIF_USEDFPU))
+ 		regs->psr &= ~PSR_EF;
+@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
+ 	struct thread_info *t = current_thread_info();
+ 	int i, wsaved, err;
+ 
+-	__get_user(wsaved, &rp->wsaved);
++	if (((unsigned long) rp) & 3)
++		return -EFAULT;
++
++	get_user(wsaved, &rp->wsaved);
+ 	if (wsaved > NSWINS)
+ 		return -EFAULT;
+ 
+diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
+index 387834a9c56a..36aadcbeac69 100644
+--- a/arch/sparc/kernel/sigutil_64.c
++++ b/arch/sparc/kernel/sigutil_64.c
+@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ 	unsigned long fprs;
+ 	int err;
+ 
+-	err = __get_user(fprs, &fpu->si_fprs);
++	if (((unsigned long) fpu) & 7)
++		return -EFAULT;
++
++	err = get_user(fprs, &fpu->si_fprs);
+ 	fprs_write(0);
+ 	regs->tstate &= ~TSTATE_PEF;
+ 	if (fprs & FPRS_DL)
+@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
+ 	struct thread_info *t = current_thread_info();
+ 	int i, wsaved, err;
+ 
+-	__get_user(wsaved, &rp->wsaved);
++	if (((unsigned long) rp) & 7)
++		return -EFAULT;
++
++	get_user(wsaved, &rp->wsaved);
+ 	if (wsaved > NSWINS)
+ 		return -EFAULT;
+ 
+diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
+index c357e40ffd01..4a73009f66a5 100644
+--- a/arch/sparc/kernel/spiterrs.S
++++ b/arch/sparc/kernel/spiterrs.S
+@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
+ 	ba,pt		%xcc, etraptl1
+ 	 rd		%pc, %g7
+ 
+-	ba,pt		%xcc, 2f
+-	 nop
++	ba,a,pt		%xcc, 2f
+ 
+ 1:	ba,pt		%xcc, etrap_irq
+ 	 rd		%pc, %g7
+@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
+ 	mov		%l5, %o2
+ 	call		spitfire_access_error
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_access_error,.-__spitfire_access_error
+ 
+ 	/* This is the trap handler entry point for ECC correctable
+@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
+ 	mov		%l5, %o2
+ 	call		spitfire_data_access_exception_tl1
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
+ 
+ 	.type		__spitfire_data_access_exception,#function
+@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
+ 	mov		%l5, %o2
+ 	call		spitfire_data_access_exception
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_data_access_exception,.-__spitfire_data_access_exception
+ 
+ 	.type		__spitfire_insn_access_exception_tl1,#function
+@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
+ 	mov		%l5, %o2
+ 	call		spitfire_insn_access_exception_tl1
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
+ 
+ 	.type		__spitfire_insn_access_exception,#function
+@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
+ 	mov		%l5, %o2
+ 	call		spitfire_insn_access_exception
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_insn_access_exception,.-__spitfire_insn_access_exception
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index bb0008927598..c4a1b5c40e4e 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -158,7 +158,25 @@ linux_syscall_trace32:
+ 	 add	%sp, PTREGS_OFF, %o0
+ 	brnz,pn	%o0, 3f
+ 	 mov	-ENOSYS, %o0
++
++	/* Syscall tracing can modify the registers.  */
++	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
++	sethi	%hi(sys_call_table32), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
++	or	%l7, %lo(sys_call_table32), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1
++	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
++	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
++	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
++	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
++
++	cmp	%g1, NR_syscalls
++	bgeu,pn	%xcc, 3f
++	 mov	-ENOSYS, %o0
++
++	sll	%g1, 2, %l4
+ 	srl	%i0, 0, %o0
++	lduw	[%l7 + %l4], %l7
+ 	srl	%i4, 0, %o4
+ 	srl	%i1, 0, %o1
+ 	srl	%i2, 0, %o2
+@@ -170,7 +188,25 @@ linux_syscall_trace:
+ 	 add	%sp, PTREGS_OFF, %o0
+ 	brnz,pn	%o0, 3f
+ 	 mov	-ENOSYS, %o0
++
++	/* Syscall tracing can modify the registers.  */
++	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
++	sethi	%hi(sys_call_table64), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
++	or	%l7, %lo(sys_call_table64), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1
++	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
++	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
++	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
++	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
++
++	cmp	%g1, NR_syscalls
++	bgeu,pn	%xcc, 3f
++	 mov	-ENOSYS, %o0
++
++	sll	%g1, 2, %l4
+ 	mov	%i0, %o0
++	lduw	[%l7 + %l4], %l7
+ 	mov	%i1, %o1
+ 	mov	%i2, %o2
+ 	mov	%i3, %o3
+diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
+new file mode 100644
+index 000000000000..5604a2b051d4
+--- /dev/null
++++ b/arch/sparc/kernel/urtt_fill.S
+@@ -0,0 +1,98 @@
++#include <asm/thread_info.h>
++#include <asm/trap_block.h>
++#include <asm/spitfire.h>
++#include <asm/ptrace.h>
++#include <asm/head.h>
++
++		.text
++		.align	8
++		.globl	user_rtt_fill_fixup_common
++user_rtt_fill_fixup_common:
++		rdpr	%cwp, %g1
++		add	%g1, 1, %g1
++		wrpr	%g1, 0x0, %cwp
++
++		rdpr	%wstate, %g2
++		sll	%g2, 3, %g2
++		wrpr	%g2, 0x0, %wstate
++
++		/* We know %canrestore and %otherwin are both zero.  */
++
++		sethi	%hi(sparc64_kern_pri_context), %g2
++		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
++		mov	PRIMARY_CONTEXT, %g1
++
++661:		stxa	%g2, [%g1] ASI_DMMU
++		.section .sun4v_1insn_patch, "ax"
++		.word	661b
++		stxa	%g2, [%g1] ASI_MMU
++		.previous
++
++		sethi	%hi(KERNBASE), %g1
++		flush	%g1
++
++		mov	%g4, %l4
++		mov	%g5, %l5
++		brnz,pn	%g3, 1f
++		 mov	%g3, %l3
++
++		or	%g4, FAULT_CODE_WINFIXUP, %g4
++		stb	%g4, [%g6 + TI_FAULT_CODE]
++		stx	%g5, [%g6 + TI_FAULT_ADDR]
++1:
++		mov	%g6, %l1
++		wrpr	%g0, 0x0, %tl
++
++661:		nop
++		.section		.sun4v_1insn_patch, "ax"
++		.word			661b
++		SET_GL(0)
++		.previous
++
++		wrpr	%g0, RTRAP_PSTATE, %pstate
++
++		mov	%l1, %g6
++		ldx	[%g6 + TI_TASK], %g4
++		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
++
++		brnz,pn	%l3, 1f
++		 nop
++
++		call	do_sparc64_fault
++		 add	%sp, PTREGS_OFF, %o0
++		ba,pt	%xcc, rtrap
++		 nop
++
++1:		cmp	%g3, 2
++		bne,pn	%xcc, 2f
++		 nop
++
++		sethi	%hi(tlb_type), %g1
++		lduw	[%g1 + %lo(tlb_type)], %g1
++		cmp	%g1, 3
++		bne,pt	%icc, 1f
++		 add	%sp, PTREGS_OFF, %o0
++		mov	%l4, %o2
++		call	sun4v_do_mna
++		 mov	%l5, %o1
++		ba,a,pt	%xcc, rtrap
++1:		mov	%l4, %o1
++		mov	%l5, %o2
++		call	mem_address_unaligned
++		 nop
++		ba,a,pt	%xcc, rtrap
++
++2:		sethi	%hi(tlb_type), %g1
++		mov	%l4, %o1
++		lduw	[%g1 + %lo(tlb_type)], %g1
++		mov	%l5, %o2
++		cmp	%g1, 3
++		bne,pt	%icc, 1f
++		 add	%sp, PTREGS_OFF, %o0
++		call	sun4v_data_access_exception
++		 nop
++		ba,a,pt	%xcc, rtrap
++
++1:		call	spitfire_data_access_exception
++		 nop
++		ba,a,pt	%xcc, rtrap
+diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S
+index b7f0f3f3a909..c731e8023d3e 100644
+--- a/arch/sparc/kernel/utrap.S
++++ b/arch/sparc/kernel/utrap.S
+@@ -11,8 +11,7 @@ utrap_trap:		/* %g3=handler,%g4=level */
+ 	mov		%l4, %o1
+         call		bad_trap
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 
+ invoke_utrap:
+ 	sllx		%g3, 3, %g3
+diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
+index f1a2f688b28a..4a41d412dd3d 100644
+--- a/arch/sparc/kernel/vmlinux.lds.S
++++ b/arch/sparc/kernel/vmlinux.lds.S
+@@ -33,6 +33,10 @@ ENTRY(_start)
+ jiffies = jiffies_64;
+ #endif
+ 
++#ifdef CONFIG_SPARC64
++ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
++#endif
++
+ SECTIONS
+ {
+ #ifdef CONFIG_SPARC64
+diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
+index 1e67ce958369..855019a8590e 100644
+--- a/arch/sparc/kernel/winfixup.S
++++ b/arch/sparc/kernel/winfixup.S
+@@ -32,8 +32,7 @@ fill_fixup:
+ 	 rd	%pc, %g7
+ 	call	do_sparc64_fault
+ 	 add	%sp, PTREGS_OFF, %o0
+-	ba,pt	%xcc, rtrap
+-	 nop
++	ba,a,pt	%xcc, rtrap
+ 
+ 	/* Be very careful about usage of the trap globals here.
+ 	 * You cannot touch %g5 as that has the fault information.
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 131eaf4ad7f5..364d093f46c6 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -176,17 +176,31 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ 		     pte_t *ptep, pte_t entry)
+ {
+ 	int i;
++	pte_t orig[2];
++	unsigned long nptes;
+ 
+ 	if (!pte_present(*ptep) && pte_present(entry))
+ 		mm->context.huge_pte_count++;
+ 
+ 	addr &= HPAGE_MASK;
+-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+-		set_pte_at(mm, addr, ptep, entry);
++
++	nptes = 1 << HUGETLB_PAGE_ORDER;
++	orig[0] = *ptep;
++	orig[1] = *(ptep + nptes / 2);
++	for (i = 0; i < nptes; i++) {
++		*ptep = entry;
+ 		ptep++;
+ 		addr += PAGE_SIZE;
+ 		pte_val(entry) += PAGE_SIZE;
+ 	}
++
++	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
++	addr -= REAL_HPAGE_SIZE;
++	ptep -= nptes / 2;
++	maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
++	addr -= REAL_HPAGE_SIZE;
++	ptep -= nptes / 2;
++	maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
+ }
+ 
+ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+@@ -194,19 +208,28 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ {
+ 	pte_t entry;
+ 	int i;
++	unsigned long nptes;
+ 
+ 	entry = *ptep;
+ 	if (pte_present(entry))
+ 		mm->context.huge_pte_count--;
+ 
+ 	addr &= HPAGE_MASK;
+-
+-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+-		pte_clear(mm, addr, ptep);
++	nptes = 1 << HUGETLB_PAGE_ORDER;
++	for (i = 0; i < nptes; i++) {
++		*ptep = __pte(0UL);
+ 		addr += PAGE_SIZE;
+ 		ptep++;
+ 	}
+ 
++	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
++	addr -= REAL_HPAGE_SIZE;
++	ptep -= nptes / 2;
++	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
++	addr -= REAL_HPAGE_SIZE;
++	ptep -= nptes / 2;
++	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
++
+ 	return entry;
+ }
+ 
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 3025bd57f7ab..3c4b8975fa76 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -324,18 +324,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
+ 	tsb_insert(tsb, tag, tte);
+ }
+ 
+-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-static inline bool is_hugetlb_pte(pte_t pte)
+-{
+-	if ((tlb_type == hypervisor &&
+-	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+-	    (tlb_type != hypervisor &&
+-	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
+-		return true;
+-	return false;
+-}
+-#endif
+-
+ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+ {
+ 	struct mm_struct *mm;
+@@ -1267,13 +1255,6 @@ static int __init numa_parse_mdesc(void)
+ 	int i, j, err, count;
+ 	u64 node;
+ 
+-	/* Some sane defaults for numa latency values */
+-	for (i = 0; i < MAX_NUMNODES; i++) {
+-		for (j = 0; j < MAX_NUMNODES; j++)
+-			numa_latency[i][j] = (i == j) ?
+-				LOCAL_DISTANCE : REMOTE_DISTANCE;
+-	}
+-
+ 	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+ 	if (node == MDESC_NODE_NULL) {
+ 		mdesc_release(md);
+@@ -1369,10 +1350,18 @@ static int __init numa_parse_sun4u(void)
+ 
+ static int __init bootmem_init_numa(void)
+ {
++	int i, j;
+ 	int err = -1;
+ 
+ 	numadbg("bootmem_init_numa()\n");
+ 
++	/* Some sane defaults for numa latency values */
++	for (i = 0; i < MAX_NUMNODES; i++) {
++		for (j = 0; j < MAX_NUMNODES; j++)
++			numa_latency[i][j] = (i == j) ?
++				LOCAL_DISTANCE : REMOTE_DISTANCE;
++	}
++
+ 	if (numa_enabled) {
+ 		if (tlb_type == hypervisor)
+ 			err = numa_parse_mdesc();
+@@ -2832,9 +2821,10 @@ void hugetlb_setup(struct pt_regs *regs)
+ 	 * the Data-TLB for huge pages.
+ 	 */
+ 	if (tlb_type == cheetah_plus) {
++		bool need_context_reload = false;
+ 		unsigned long ctx;
+ 
+-		spin_lock(&ctx_alloc_lock);
++		spin_lock_irq(&ctx_alloc_lock);
+ 		ctx = mm->context.sparc64_ctx_val;
+ 		ctx &= ~CTX_PGSZ_MASK;
+ 		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+@@ -2853,9 +2843,12 @@ void hugetlb_setup(struct pt_regs *regs)
+ 			 * also executing in this address space.
+ 			 */
+ 			mm->context.sparc64_ctx_val = ctx;
+-			on_each_cpu(context_reload, mm, 0);
++			need_context_reload = true;
+ 		}
+-		spin_unlock(&ctx_alloc_lock);
++		spin_unlock_irq(&ctx_alloc_lock);
++
++		if (need_context_reload)
++			on_each_cpu(context_reload, mm, 0);
+ 	}
+ }
+ #endif
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index 9df2190c097e..f81cd9736700 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -67,7 +67,7 @@ void arch_leave_lazy_mmu_mode(void)
+ }
+ 
+ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
+-			      bool exec)
++			      bool exec, bool huge)
+ {
+ 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+ 	unsigned long nr;
+@@ -84,13 +84,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
+ 	}
+ 
+ 	if (!tb->active) {
+-		flush_tsb_user_page(mm, vaddr);
++		flush_tsb_user_page(mm, vaddr, huge);
+ 		global_flush_tlb_page(mm, vaddr);
+ 		goto out;
+ 	}
+ 
+-	if (nr == 0)
++	if (nr == 0) {
+ 		tb->mm = mm;
++		tb->huge = huge;
++	}
++
++	if (tb->huge != huge) {
++		flush_tlb_pending();
++		tb->huge = huge;
++		nr = 0;
++	}
+ 
+ 	tb->vaddrs[nr] = vaddr;
+ 	tb->tlb_nr = ++nr;
+@@ -104,6 +112,8 @@ out:
+ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+ 		   pte_t *ptep, pte_t orig, int fullmm)
+ {
++	bool huge = is_hugetlb_pte(orig);
++
+ 	if (tlb_type != hypervisor &&
+ 	    pte_dirty(orig)) {
+ 		unsigned long paddr, pfn = pte_pfn(orig);
+@@ -129,7 +139,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+ 
+ no_cache_flush:
+ 	if (!fullmm)
+-		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
++		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
+ }
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -145,7 +155,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+ 		if (pte_val(*pte) & _PAGE_VALID) {
+ 			bool exec = pte_exec(*pte);
+ 
+-			tlb_batch_add_one(mm, vaddr, exec);
++			tlb_batch_add_one(mm, vaddr, exec, false);
+ 		}
+ 		pte++;
+ 		vaddr += PAGE_SIZE;
+@@ -185,8 +195,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ 			pte_t orig_pte = __pte(pmd_val(orig));
+ 			bool exec = pte_exec(orig_pte);
+ 
+-			tlb_batch_add_one(mm, addr, exec);
+-			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
++			tlb_batch_add_one(mm, addr, exec, true);
++			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
++					true);
+ 		} else {
+ 			tlb_batch_pmd_scan(mm, addr, orig);
+ 		}
+diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
+index a06576683c38..a0604a493a36 100644
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -76,14 +76,15 @@ void flush_tsb_user(struct tlb_batch *tb)
+ 
+ 	spin_lock_irqsave(&mm->context.lock, flags);
+ 
+-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+-		base = __pa(base);
+-	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+-
++	if (!tb->huge) {
++		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
++		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
++		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
++			base = __pa(base);
++		__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
++	}
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
++	if (tb->huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+ 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+ 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+ 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+@@ -94,20 +95,21 @@ void flush_tsb_user(struct tlb_batch *tb)
+ 	spin_unlock_irqrestore(&mm->context.lock, flags);
+ }
+ 
+-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
++void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge)
+ {
+ 	unsigned long nentries, base, flags;
+ 
+ 	spin_lock_irqsave(&mm->context.lock, flags);
+ 
+-	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+-	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+-		base = __pa(base);
+-	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+-
++	if (!huge) {
++		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
++		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
++		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
++			base = __pa(base);
++		__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
++	}
+ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
++	if (huge && mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+ 		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+ 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+ 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index ade185a46b1d..679302c312f8 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -109,6 +109,12 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
+ 	preempt_count_dec();
+ }
+ 
++/*
++ * In IST context, we explicitly disable preemption.  This serves two
++ * purposes: it makes it much less likely that we would accidentally
++ * schedule in IST context and it will force a warning if we somehow
++ * manage to schedule by accident.
++ */
+ void ist_enter(struct pt_regs *regs)
+ {
+ 	if (user_mode(regs)) {
+@@ -123,13 +129,7 @@ void ist_enter(struct pt_regs *regs)
+ 		rcu_nmi_enter();
+ 	}
+ 
+-	/*
+-	 * We are atomic because we're on the IST stack; or we're on
+-	 * x86_32, in which case we still shouldn't schedule; or we're
+-	 * on x86_64 and entered from user mode, in which case we're
+-	 * still atomic unless ist_begin_non_atomic is called.
+-	 */
+-	preempt_count_add(HARDIRQ_OFFSET);
++	preempt_disable();
+ 
+ 	/* This code is a bit fragile.  Test it. */
+ 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
+@@ -137,7 +137,7 @@ void ist_enter(struct pt_regs *regs)
+ 
+ void ist_exit(struct pt_regs *regs)
+ {
+-	preempt_count_sub(HARDIRQ_OFFSET);
++	preempt_enable_no_resched();
+ 
+ 	if (!user_mode(regs))
+ 		rcu_nmi_exit();
+@@ -168,7 +168,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
+ 	BUG_ON((unsigned long)(current_top_of_stack() -
+ 			       current_stack_pointer()) >= THREAD_SIZE);
+ 
+-	preempt_count_sub(HARDIRQ_OFFSET);
++	preempt_enable_no_resched();
+ }
+ 
+ /**
+@@ -178,7 +178,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
+  */
+ void ist_end_non_atomic(void)
+ {
+-	preempt_count_add(HARDIRQ_OFFSET);
++	preempt_disable();
+ }
+ 
+ static nokprobe_inline int
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 605cea75eb0d..be222666b1c2 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3014,6 +3014,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+ 	if (dbgregs->flags)
+ 		return -EINVAL;
+ 
++	if (dbgregs->dr6 & ~0xffffffffull)
++		return -EINVAL;
++	if (dbgregs->dr7 & ~0xffffffffull)
++		return -EINVAL;
++
+ 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+ 	kvm_update_dr0123(vcpu);
+ 	vcpu->arch.dr6 = dbgregs->dr6;
+diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
+index 4870f28403f5..05bfe568cd30 100644
+--- a/crypto/asymmetric_keys/Kconfig
++++ b/crypto/asymmetric_keys/Kconfig
+@@ -14,6 +14,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ 	select MPILIB
+ 	select PUBLIC_KEY_ALGO_RSA
+ 	select CRYPTO_HASH_INFO
++	select CRYPTO_AKCIPHER
+ 	help
+ 	  This option provides support for asymmetric public key type handling.
+ 	  If signature generation and/or verification are to be used,
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+index 52c7395cb8d8..0d0d4529ee36 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+ 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ 	unsigned int unit;
++	u32 unit_size;
+ 	int ret;
+ 
+ 	if (!ctx->u.aes.key_len)
+@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+ 	if (!req->info)
+ 		return -EINVAL;
+ 
+-	for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
+-		if (!(req->nbytes & (unit_size_map[unit].size - 1)))
+-			break;
++	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
++	if (req->nbytes <= unit_size_map[0].size) {
++		for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
++			if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
++				unit_size = unit_size_map[unit].value;
++				break;
++			}
++		}
++	}
+ 
+-	if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
++	if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+ 	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+ 		/* Use the fallback to process the request for any
+ 		 * unsupported unit sizes or key sizes
+@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+ 	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
+ 	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
+ 					   : CCP_AES_ACTION_DECRYPT;
+-	rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
++	rctx->cmd.u.xts.unit_size = unit_size;
+ 	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
+ 	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
+ 	rctx->cmd.u.xts.iv = &rctx->iv_sg;
+diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
+index b9178d0a3093..aa1dbeaa9b49 100644
+--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
+@@ -145,8 +145,6 @@ int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
+ void adf_disable_aer(struct adf_accel_dev *accel_dev);
+ int adf_init_aer(void);
+ void adf_exit_aer(void);
+-int adf_init_pf_wq(void);
+-void adf_exit_pf_wq(void);
+ int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+ void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+ int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+@@ -229,6 +227,8 @@ void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ 				  uint32_t vf_mask);
+ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+ 				 uint32_t vf_mask);
++int adf_init_pf_wq(void);
++void adf_exit_pf_wq(void);
+ #else
+ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+ {
+@@ -238,5 +238,14 @@ static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+ {
+ }
++
++static inline int adf_init_pf_wq(void)
++{
++	return 0;
++}
++
++static inline void adf_exit_pf_wq(void)
++{
++}
+ #endif
+ #endif
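
Moving the adf_init_pf_wq()/adf_exit_pf_wq() declarations inside the
CONFIG_PCI_IOV block and supplying inline stubs is the usual pattern for
config-gated hooks: callers stay unconditional and the disabled case
compiles to nothing. A self-contained version of the idiom (FEATURE_SRIOV
and the function bodies are illustrative):

    #include <stdio.h>

    #ifdef FEATURE_SRIOV
    int  adf_init_pf_wq(void);      /* real implementation lives elsewhere */
    void adf_exit_pf_wq(void);
    #else
    static inline int  adf_init_pf_wq(void) { return 0; } /* nothing to set up */
    static inline void adf_exit_pf_wq(void) { }           /* nothing to tear down */
    #endif

    int main(void)
    {
        /* Common code calls the hooks unconditionally. */
        if (adf_init_pf_wq() == 0)
            printf("pf workqueue ready (or compiled out)\n");
        adf_exit_pf_wq();
        return 0;
    }
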
+diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
+index 33a1f9779b86..4ea71d505bce 100644
+--- a/drivers/gpio/gpio-bcm-kona.c
++++ b/drivers/gpio/gpio-bcm-kona.c
+@@ -551,11 +551,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
+ 	/* disable interrupts and clear status */
+ 	for (i = 0; i < kona_gpio->num_bank; i++) {
+ 		/* Unlock the entire bank first */
+-		bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
++		bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
+ 		writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
+ 		writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
+ 		/* Now re-lock the bank */
+-		bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
++		bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 24c5434abd1c..a02238c85e18 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -3316,6 +3316,24 @@ int drm_mode_addfb2(struct drm_device *dev,
+ 	return 0;
+ }
+ 
++struct drm_mode_rmfb_work {
++	struct work_struct work;
++	struct list_head fbs;
++};
++
++static void drm_mode_rmfb_work_fn(struct work_struct *w)
++{
++	struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work);
++
++	while (!list_empty(&arg->fbs)) {
++		struct drm_framebuffer *fb =
++			list_first_entry(&arg->fbs, typeof(*fb), filp_head);
++
++		list_del_init(&fb->filp_head);
++		drm_framebuffer_remove(fb);
++	}
++}
++
+ /**
+  * drm_mode_rmfb - remove an FB from the configuration
+  * @dev: drm device for the ioctl
+@@ -3356,7 +3374,25 @@ int drm_mode_rmfb(struct drm_device *dev,
+ 	mutex_unlock(&dev->mode_config.fb_lock);
+ 	mutex_unlock(&file_priv->fbs_lock);
+ 
+-	drm_framebuffer_unreference(fb);
++	/*
++	 * we now own the reference that was stored in the fbs list
++	 *
++	 * drm_framebuffer_remove may fail with -EINTR on pending signals,
++	 * so run this in a separate stack as there's no way to correctly
++	 * handle this after the fb is already removed from the lookup table.
++	 */
++	if (atomic_read(&fb->refcount.refcount) > 1) {
++		struct drm_mode_rmfb_work arg;
++
++		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
++		INIT_LIST_HEAD(&arg.fbs);
++		list_add_tail(&fb->filp_head, &arg.fbs);
++
++		schedule_work(&arg.work);
++		flush_work(&arg.work);
++		destroy_work_on_stack(&arg.work);
++	} else
++		drm_framebuffer_unreference(fb);
+ 
+ 	return 0;
+ 
+@@ -3509,7 +3545,6 @@ out_err1:
+ 	return ret;
+ }
+ 
+-
+ /**
+  * drm_fb_release - remove and free the FBs on this file
+  * @priv: drm file for the ioctl
+@@ -3524,6 +3559,9 @@ out_err1:
+ void drm_fb_release(struct drm_file *priv)
+ {
+ 	struct drm_framebuffer *fb, *tfb;
++	struct drm_mode_rmfb_work arg;
++
++	INIT_LIST_HEAD(&arg.fbs);
+ 
+ 	/*
+ 	 * When the file gets released that means no one else can access the fb
+@@ -3536,10 +3574,22 @@ void drm_fb_release(struct drm_file *priv)
+ 	 * at it any more.
+ 	 */
+ 	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
+-		list_del_init(&fb->filp_head);
++		if (atomic_read(&fb->refcount.refcount) > 1) {
++			list_move_tail(&fb->filp_head, &arg.fbs);
++		} else {
++			list_del_init(&fb->filp_head);
+ 
+-		/* This drops the fpriv->fbs reference. */
+-		drm_framebuffer_unreference(fb);
++			/* This drops the fpriv->fbs reference. */
++			drm_framebuffer_unreference(fb);
++		}
++	}
++
++	if (!list_empty(&arg.fbs)) {
++		INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
++
++		schedule_work(&arg.work);
++		flush_work(&arg.work);
++		destroy_work_on_stack(&arg.work);
+ 	}
+ }
+ 
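
drm_framebuffer_remove() can fail on pending signals, so the rmfb paths
above hand the final unreference to a short-lived work item and wait for
it: the removal then runs on the worker's stack, out of reach of the ioctl
caller's signals. A userspace analogue of the schedule-on-stack-then-flush
shape, with a pthread standing in for the workqueue (toy types, not the
drm ones):

    #include <pthread.h>
    #include <stdio.h>

    struct rmfb_work {
        const char *fb_name;    /* stand-in for the framebuffer list */
    };

    static void *rmfb_work_fn(void *arg)
    {
        struct rmfb_work *w = arg;
        printf("removing %s on the worker's stack\n", w->fb_name);
        return NULL;
    }

    int main(void)
    {
        struct rmfb_work arg = { .fb_name = "fb0" }; /* lives on our stack */
        pthread_t worker;

        pthread_create(&worker, NULL, rmfb_work_fn, &arg); /* schedule_work() */
        pthread_join(worker, NULL);                        /* flush_work()    */
        return 0;
    }
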
+diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
+index 2b34622a4bfe..3920c3eb6006 100644
+--- a/drivers/net/ethernet/rocker/rocker.c
++++ b/drivers/net/ethernet/rocker/rocker.c
+@@ -4475,7 +4475,7 @@ static int rocker_port_obj_add(struct net_device *dev,
+ 		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
+ 		err = rocker_port_fib_ipv4(rocker_port, trans,
+ 					   htonl(fib4->dst), fib4->dst_len,
+-					   &fib4->fi, fib4->tb_id, 0);
++					   fib4->fi, fib4->tb_id, 0);
+ 		break;
+ 	case SWITCHDEV_OBJ_ID_PORT_FDB:
+ 		err = rocker_port_fdb_add(rocker_port, trans,
+@@ -4547,7 +4547,7 @@ static int rocker_port_obj_del(struct net_device *dev,
+ 		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
+ 		err = rocker_port_fib_ipv4(rocker_port, NULL,
+ 					   htonl(fib4->dst), fib4->dst_len,
+-					   &fib4->fi, fib4->tb_id,
++					   fib4->fi, fib4->tb_id,
+ 					   ROCKER_OP_FLAG_REMOVE);
+ 		break;
+ 	case SWITCHDEV_OBJ_ID_PORT_FDB:
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index e6a084a6be12..cbe9a330117a 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -619,6 +619,17 @@ fail:
+ 	return rc;
+ }
+ 
++static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
++{
++	struct efx_channel *channel;
++	struct efx_tx_queue *tx_queue;
++
++	/* All our existing PIO buffers went away */
++	efx_for_each_channel(channel, efx)
++		efx_for_each_channel_tx_queue(tx_queue, channel)
++			tx_queue->piobuf = NULL;
++}
++
+ #else /* !EFX_USE_PIO */
+ 
+ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
+ {
+ }
+ 
++static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
++{
++}
++
+ #endif /* EFX_USE_PIO */
+ 
+ static void efx_ef10_remove(struct efx_nic *efx)
+@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
+ 	nic_data->must_realloc_vis = true;
+ 	nic_data->must_restore_filters = true;
+ 	nic_data->must_restore_piobufs = true;
++	efx_ef10_forget_old_piobufs(efx);
+ 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ 
+ 	/* Driver-created vswitches and vports must be re-created */
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 58efdec12f30..69e31e2a68fc 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -310,15 +310,15 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 
+ 	/* Need Geneve and inner Ethernet header to be present */
+ 	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
+-		goto error;
++		goto drop;
+ 
+ 	/* Return packets with reserved bits set */
+ 	geneveh = geneve_hdr(skb);
+ 	if (unlikely(geneveh->ver != GENEVE_VER))
+-		goto error;
++		goto drop;
+ 
+ 	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+-		goto error;
++		goto drop;
+ 
+ 	opts_len = geneveh->opt_len * 4;
+ 	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
+@@ -336,10 +336,6 @@ drop:
+ 	/* Consume bad packet */
+ 	kfree_skb(skb);
+ 	return 0;
+-
+-error:
+-	/* Let the UDP layer deal with the skb */
+-	return 1;
+ }
+ 
+ static struct socket *geneve_create_sock(struct net *net, bool ipv6,
+@@ -998,6 +994,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	return geneve_xmit_skb(skb, dev, info);
+ }
+ 
++static int geneve_change_mtu(struct net_device *dev, int new_mtu)
++{
++	/* GENEVE overhead is not fixed, so we can't enforce a more
++	 * precise max MTU.
++	 */
++	if (new_mtu < 68 || new_mtu > IP_MAX_MTU)
++		return -EINVAL;
++	dev->mtu = new_mtu;
++	return 0;
++}
++
+ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
+@@ -1042,7 +1049,7 @@ static const struct net_device_ops geneve_netdev_ops = {
+ 	.ndo_stop		= geneve_stop,
+ 	.ndo_start_xmit		= geneve_xmit,
+ 	.ndo_get_stats64	= ip_tunnel_get_stats64,
+-	.ndo_change_mtu		= eth_change_mtu,
++	.ndo_change_mtu		= geneve_change_mtu,
+ 	.ndo_validate_addr	= eth_validate_addr,
+ 	.ndo_set_mac_address	= eth_mac_addr,
+ 	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,
+@@ -1349,11 +1356,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
+ 
+ 	err = geneve_configure(net, dev, &geneve_remote_unspec,
+ 			       0, 0, 0, htons(dst_port), true);
+-	if (err) {
+-		free_netdev(dev);
+-		return ERR_PTR(err);
+-	}
++	if (err)
++		goto err;
++
++	/* openvswitch users expect packet sizes to be unrestricted,
++	 * so set the largest MTU we can.
++	 */
++	err = geneve_change_mtu(dev, IP_MAX_MTU);
++	if (err)
++		goto err;
++
+ 	return dev;
++
++ err:
++	free_netdev(dev);
++	return ERR_PTR(err);
+ }
+ EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
+ 
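
Two things happen in the geneve hunk: malformed frames are now consumed
(goto drop) instead of being returned to the UDP stack, and the driver
grows its own MTU handler because eth_change_mtu() caps at 1500. The
drop-vs-pass-back convention in a toy form (struct pkt stands in for the
skb):

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { unsigned char ver; };

    /* Encap receive: return 0 when the packet was consumed (even if
     * bad), 1 to hand it back to the plain UDP path.  The hunk above
     * stops using the latter for malformed GENEVE frames. */
    static int geneve_recv(struct pkt *p)
    {
        if (p->ver != 0) {      /* reserved version bits set */
            free(p);            /* kfree_skb(): consume bad packet */
            return 0;           /* was: return 1 */
        }
        printf("good geneve frame\n");
        free(p);
        return 0;
    }

    int main(void)
    {
        struct pkt *p = malloc(sizeof(*p));
        if (!p)
            return 1;
        p->ver = 3;             /* malformed on purpose */
        return geneve_recv(p);
    }
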
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 59fefca74263..a5f392ae30d5 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -969,7 +969,7 @@ static void team_port_disable(struct team *team,
+ 			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ 			    NETIF_F_HIGHDMA | NETIF_F_LRO)
+ 
+-static void __team_compute_features(struct team *team)
++static void ___team_compute_features(struct team *team)
+ {
+ 	struct team_port *port;
+ 	u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
+@@ -993,15 +993,20 @@ static void __team_compute_features(struct team *team)
+ 	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ 	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
+ 		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
++}
+ 
++static void __team_compute_features(struct team *team)
++{
++	___team_compute_features(team);
+ 	netdev_change_features(team->dev);
+ }
+ 
+ static void team_compute_features(struct team *team)
+ {
+ 	mutex_lock(&team->lock);
+-	__team_compute_features(team);
++	___team_compute_features(team);
+ 	mutex_unlock(&team->lock);
++	netdev_change_features(team->dev);
+ }
+ 
+ static int team_port_enter(struct team *team, struct team_port *port)
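
The team fix is purely about lock ordering: ___team_compute_features()
still runs under team->lock, but netdev_change_features() is now called
after the mutex is dropped, since notifying the stack while holding a
private lock risks deadlock against callbacks that take it again. The
shape of the fix, with pthreads standing in:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t team_lock = PTHREAD_MUTEX_INITIALIZER;
    static int features;

    static void ___compute_features(void) { features = 42; } /* needs team_lock */

    static void notify_features(void)   /* may call back into us */
    {
        printf("features now %d\n", features);
    }

    static void team_compute_features(void)
    {
        pthread_mutex_lock(&team_lock);
        ___compute_features();
        pthread_mutex_unlock(&team_lock);
        notify_features();      /* moved outside the critical section */
    }

    int main(void) { team_compute_features(); return 0; }
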
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 2d186bd66d43..111b972e3053 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -567,11 +567,13 @@ static void tun_detach_all(struct net_device *dev)
+ 	for (i = 0; i < n; i++) {
+ 		tfile = rtnl_dereference(tun->tfiles[i]);
+ 		BUG_ON(!tfile);
++		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
+ 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+ 		RCU_INIT_POINTER(tfile->tun, NULL);
+ 		--tun->numqueues;
+ 	}
+ 	list_for_each_entry(tfile, &tun->disabled, next) {
++		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
+ 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+ 		RCU_INIT_POINTER(tfile->tun, NULL);
+ 	}
+@@ -627,6 +629,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
+ 			goto out;
+ 	}
+ 	tfile->queue_index = tun->numqueues;
++	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
+ 	rcu_assign_pointer(tfile->tun, tun);
+ 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+ 	tun->numqueues++;
+@@ -1408,9 +1411,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
+ 	if (!iov_iter_count(to))
+ 		return 0;
+ 
+-	if (tun->dev->reg_state != NETREG_REGISTERED)
+-		return -EIO;
+-
+ 	/* Read frames from queue */
+ 	skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
+ 				  &peeked, &off, &err);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 3c0df70e2f53..003780901628 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1254,7 +1254,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ 
+ 	/* Need Vxlan and inner Ethernet header to be present */
+ 	if (!pskb_may_pull(skb, VXLAN_HLEN))
+-		goto error;
++		goto drop;
+ 
+ 	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+ 	flags = ntohl(vxh->vx_flags);
+@@ -1344,13 +1344,7 @@ drop:
+ bad_flags:
+ 	netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+ 		   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
+-
+-error:
+-	if (tun_dst)
+-		dst_release((struct dst_entry *)tun_dst);
+-
+-	/* Return non vxlan pkt */
+-	return 1;
++	goto drop;
+ }
+ 
+ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
+@@ -2370,29 +2364,43 @@ static void vxlan_set_multicast_list(struct net_device *dev)
+ {
+ }
+ 
+-static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
++static int __vxlan_change_mtu(struct net_device *dev,
++			      struct net_device *lowerdev,
++			      struct vxlan_rdst *dst, int new_mtu, bool strict)
+ {
+-	struct vxlan_dev *vxlan = netdev_priv(dev);
+-	struct vxlan_rdst *dst = &vxlan->default_dst;
+-	struct net_device *lowerdev;
+-	int max_mtu;
++	int max_mtu = IP_MAX_MTU;
+ 
+-	lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
+-	if (lowerdev == NULL)
+-		return eth_change_mtu(dev, new_mtu);
++	if (lowerdev)
++		max_mtu = lowerdev->mtu;
+ 
+ 	if (dst->remote_ip.sa.sa_family == AF_INET6)
+-		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
++		max_mtu -= VXLAN6_HEADROOM;
+ 	else
+-		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
++		max_mtu -= VXLAN_HEADROOM;
+ 
+-	if (new_mtu < 68 || new_mtu > max_mtu)
++	if (new_mtu < 68)
+ 		return -EINVAL;
+ 
++	if (new_mtu > max_mtu) {
++		if (strict)
++			return -EINVAL;
++
++		new_mtu = max_mtu;
++	}
++
+ 	dev->mtu = new_mtu;
+ 	return 0;
+ }
+ 
++static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
++{
++	struct vxlan_dev *vxlan = netdev_priv(dev);
++	struct vxlan_rdst *dst = &vxlan->default_dst;
++	struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
++							 dst->remote_ifindex);
++	return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
++}
++
+ static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
+ 				struct ip_tunnel_info *info,
+ 				__be16 sport, __be16 dport)
+@@ -2768,6 +2776,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+ 	int err;
+ 	bool use_ipv6 = false;
+ 	__be16 default_port = vxlan->cfg.dst_port;
++	struct net_device *lowerdev = NULL;
+ 
+ 	vxlan->net = src_net;
+ 
+@@ -2788,9 +2797,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+ 	}
+ 
+ 	if (conf->remote_ifindex) {
+-		struct net_device *lowerdev
+-			 = __dev_get_by_index(src_net, conf->remote_ifindex);
+-
++		lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
+ 		dst->remote_ifindex = conf->remote_ifindex;
+ 
+ 		if (!lowerdev) {
+@@ -2814,6 +2821,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+ 		needed_headroom = lowerdev->hard_header_len;
+ 	}
+ 
++	if (conf->mtu) {
++		err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
++		if (err)
++			return err;
++	}
++
+ 	if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
+ 		needed_headroom += VXLAN6_HEADROOM;
+ 	else
+@@ -2991,6 +3004,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+ 	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
+ 		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+ 
++	if (tb[IFLA_MTU])
++		conf.mtu = nla_get_u32(tb[IFLA_MTU]);
++
+ 	err = vxlan_dev_configure(src_net, dev, &conf);
+ 	switch (err) {
+ 	case -ENODEV:
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+index 3d8019eb3d84..181b35879ebd 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+@@ -1191,9 +1191,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
+ 	const struct mtk_desc_pin *pin;
+ 
+ 	chained_irq_enter(chip, desc);
+-	for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
++	for (eint_num = 0;
++	     eint_num < pctl->devdata->ap_num;
++	     eint_num += 32, reg += 4) {
+ 		status = readl(reg);
+-		reg += 4;
+ 		while (status) {
+ 			offset = __ffs(status);
+ 			index = eint_num + offset;
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index da2e068ee47d..93cbefa75b26 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -227,6 +227,7 @@ static struct {
+ 	{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ 	{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
+ 	{"Promise", "", NULL, BLIST_SPARSELUN},
++	{"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
+ 	{"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
+ 	{"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
+ 	{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index dd8ad2a44510..cf5b99e1f12b 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -910,9 +910,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 	}
+ 
+ 	/*
+-	 * If we finished all bytes in the request we are done now.
++	 * special case: failed zero length commands always need to
++	 * drop down into the retry code. Otherwise, if we finished
++	 * all bytes in the request we are done now.
+ 	 */
+-	if (!scsi_end_request(req, error, good_bytes, 0))
++	if (!(blk_rq_bytes(req) == 0 && error) &&
++	    !scsi_end_request(req, error, good_bytes, 0))
+ 		return;
+ 
+ 	/*
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 18effa378f97..108d7d810be3 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1618,7 +1618,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
+ 	struct dentry *dentry = __d_alloc(parent->d_sb, name);
+ 	if (!dentry)
+ 		return NULL;
+-
++	dentry->d_flags |= DCACHE_RCUACCESS;
+ 	spin_lock(&parent->d_lock);
+ 	/*
+ 	 * don't need child lock because it is not subject
+@@ -2413,7 +2413,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
+ {
+ 	BUG_ON(!d_unhashed(entry));
+ 	hlist_bl_lock(b);
+-	entry->d_flags |= DCACHE_RCUACCESS;
+ 	hlist_bl_add_head_rcu(&entry->d_hash, b);
+ 	hlist_bl_unlock(b);
+ }
+@@ -2632,6 +2631,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
+ 	/* ... and switch them in the tree */
+ 	if (IS_ROOT(dentry)) {
+ 		/* splicing a tree */
++		dentry->d_flags |= DCACHE_RCUACCESS;
+ 		dentry->d_parent = target->d_parent;
+ 		target->d_parent = target;
+ 		list_del_init(&target->d_child);
+diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
+index 866bb18efefe..e818f5ac7a26 100644
+--- a/fs/ecryptfs/kthread.c
++++ b/fs/ecryptfs/kthread.c
+@@ -25,6 +25,7 @@
+ #include <linux/slab.h>
+ #include <linux/wait.h>
+ #include <linux/mount.h>
++#include <linux/file.h>
+ #include "ecryptfs_kernel.h"
+ 
+ struct ecryptfs_open_req {
+@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
+ 	flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
+ 	(*lower_file) = dentry_open(&req.path, flags, cred);
+ 	if (!IS_ERR(*lower_file))
+-		goto out;
++		goto have_file;
+ 	if ((flags & O_ACCMODE) == O_RDONLY) {
+ 		rc = PTR_ERR((*lower_file));
+ 		goto out;
+@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
+ 	mutex_unlock(&ecryptfs_kthread_ctl.mux);
+ 	wake_up(&ecryptfs_kthread_ctl.wait);
+ 	wait_for_completion(&req.done);
+-	if (IS_ERR(*lower_file))
++	if (IS_ERR(*lower_file)) {
+ 		rc = PTR_ERR(*lower_file);
++		goto out;
++	}
++have_file:
++	if ((*lower_file)->f_op->mmap == NULL) {
++		fput(*lower_file);
++		*lower_file = NULL;
++		rc = -EMEDIUMTYPE;
++	}
+ out:
+ 	return rc;
+ }
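
The ecryptfs change refuses lower files whose filesystem lacks an mmap
operation, since ecryptfs reads lower pages through it; checking the ops
table up front turns a later oops into a clean error. The same null-method
check in standalone form (the structs are toy stand-ins for
file/file_operations):

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct file_ops { int (*mmap)(void); };
    struct file     { const struct file_ops *f_op; };

    static int check_lower(const struct file *f)
    {
        if (f->f_op->mmap == NULL)
            return -EMEDIUMTYPE;    /* Linux-specific errno, as in the patch */
        return 0;
    }

    static int do_mmap(void) { return 0; }

    int main(void)
    {
        struct file good = { &(struct file_ops){ .mmap = do_mmap } };
        struct file bad  = { &(struct file_ops){ .mmap = NULL } };

        printf("good: %d, bad: %d\n", check_lower(&good), check_lower(&bad));
        return 0;
    }
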
+diff --git a/fs/proc/root.c b/fs/proc/root.c
+index 361ab4ee42fc..ec649c92d270 100644
+--- a/fs/proc/root.c
++++ b/fs/proc/root.c
+@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
+ 	if (IS_ERR(sb))
+ 		return ERR_CAST(sb);
+ 
++	/*
++	 * procfs isn't actually a stacking filesystem; however, there is
++	 * too much magic going on inside it to permit stacking things on
++	 * top of it
++	 */
++	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
++
+ 	if (!proc_parse_options(options, ns)) {
+ 		deactivate_locked_super(sb);
+ 		return ERR_PTR(-EINVAL);
+diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
+index d5d798b35c1f..e98425058f20 100644
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -301,7 +301,7 @@
+ #define ICC_SGI1R_AFFINITY_1_SHIFT	16
+ #define ICC_SGI1R_AFFINITY_1_MASK	(0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
+ #define ICC_SGI1R_SGI_ID_SHIFT		24
+-#define ICC_SGI1R_SGI_ID_MASK		(0xff << ICC_SGI1R_SGI_ID_SHIFT)
++#define ICC_SGI1R_SGI_ID_MASK		(0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
+ #define ICC_SGI1R_AFFINITY_2_SHIFT	32
+ #define ICC_SGI1R_AFFINITY_2_MASK	(0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+ #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT	40
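
The one-liner above fixes both the width and the type of the SGI ID mask:
the field is 4 bits, not 8, and masks for a 64-bit register want a 64-bit
constant so shifts past bit 31 (as in AFFINITY_2) stay well defined. A
small demo of the field-width half:

    #include <stdint.h>
    #include <stdio.h>

    #define BAD_MASK   (0xffU << 24)    /* too wide: field is only 4 bits */
    #define GOOD_MASK  (0xfULL << 24)   /* right width, 64-bit type       */

    int main(void)
    {
        printf("id 0x5:  %#llx\n",
               (unsigned long long)((0x5ULL << 24) & GOOD_MASK));

        /* An out-of-range ID under the old mask leaks into bits that
         * belong to the neighbouring field: */
        printf("id 0x15 under bad mask:  %#llx\n",
               (unsigned long long)((0x15ULL << 24) & BAD_MASK));
        printf("id 0x15 under good mask: %#llx\n",
               (unsigned long long)((0x15ULL << 24) & GOOD_MASK));
        return 0;
    }
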
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index c5577410c25d..04078e8a4803 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -239,11 +239,18 @@ void xt_unregister_match(struct xt_match *target);
+ int xt_register_matches(struct xt_match *match, unsigned int n);
+ void xt_unregister_matches(struct xt_match *match, unsigned int n);
+ 
++int xt_check_entry_offsets(const void *base, const char *elems,
++			   unsigned int target_offset,
++			   unsigned int next_offset);
++
+ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+ 		   bool inv_proto);
+ int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+ 		    bool inv_proto);
+ 
++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
++				 struct xt_counters_info *info, bool compat);
++
+ struct xt_table *xt_register_table(struct net *net,
+ 				   const struct xt_table *table,
+ 				   struct xt_table_info *bootstrap,
+@@ -478,7 +485,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+ int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+ 
+ int xt_compat_match_offset(const struct xt_match *match);
+-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ 			      unsigned int *size);
+ int xt_compat_match_to_user(const struct xt_entry_match *m,
+ 			    void __user **dstptr, unsigned int *size);
+@@ -488,6 +495,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ 				unsigned int *size);
+ int xt_compat_target_to_user(const struct xt_entry_target *t,
+ 			     void __user **dstptr, unsigned int *size);
++int xt_compat_check_entry_offsets(const void *base, const char *elems,
++				  unsigned int target_offset,
++				  unsigned int next_offset);
+ 
+ #endif /* CONFIG_COMPAT */
+ #endif /* _X_TABLES_H */
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index 62a750a6a8f8..af40bc586a1b 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -230,6 +230,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
+ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+ 		    u8 *protocol, struct flowi4 *fl4);
++int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
+ int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
+ 
+ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
+diff --git a/include/net/switchdev.h b/include/net/switchdev.h
+index 1d22ce9f352e..31d0e5143848 100644
+--- a/include/net/switchdev.h
++++ b/include/net/switchdev.h
+@@ -88,7 +88,7 @@ struct switchdev_obj_ipv4_fib {
+ 	struct switchdev_obj obj;
+ 	u32 dst;
+ 	int dst_len;
+-	struct fib_info fi;
++	struct fib_info *fi;
+ 	u8 tos;
+ 	u8 type;
+ 	u32 nlflags;
+diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
+index d5e38c73377c..e4f048ee7043 100644
+--- a/include/uapi/linux/libc-compat.h
++++ b/include/uapi/linux/libc-compat.h
+@@ -52,7 +52,7 @@
+ #if defined(__GLIBC__)
+ 
+ /* Coordinate with glibc net/if.h header. */
+-#if defined(_NET_IF_H)
++#if defined(_NET_IF_H) && defined(__USE_MISC)
+ 
+ /* GLIBC headers included first so don't define anything
+  * that would already be defined. */
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index d1a7646f79c5..cb85d228b1ac 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -358,7 +358,7 @@ static int bpf_fill_super(struct super_block *sb, void *data, int silent)
+ static struct dentry *bpf_mount(struct file_system_type *type, int flags,
+ 				const char *dev_name, void *data)
+ {
+-	return mount_ns(type, flags, current->nsproxy->mnt_ns, bpf_fill_super);
++	return mount_nodev(type, flags, data, bpf_fill_super);
+ }
+ 
+ static struct file_system_type bpf_fs_type = {
+@@ -366,7 +366,6 @@ static struct file_system_type bpf_fs_type = {
+ 	.name		= "bpf",
+ 	.mount		= bpf_mount,
+ 	.kill_sb	= kill_litter_super,
+-	.fs_flags	= FS_USERNS_MOUNT,
+ };
+ 
+ MODULE_ALIAS_FS("bpf");
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 55bebf924946..6c0cdb5a73f8 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3008,7 +3008,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
+ static inline void schedule_debug(struct task_struct *prev)
+ {
+ #ifdef CONFIG_SCHED_STACK_END_CHECK
+-	BUG_ON(task_stack_end_corrupted(prev));
++	if (task_stack_end_corrupted(prev))
++		panic("corrupted stack end detected inside scheduler\n");
+ #endif
+ 
+ 	if (unlikely(in_atomic_preempt_off())) {
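
Replacing BUG_ON() with panic() here is deliberate: once the stack end is
corrupted, taking the oops path means unwinding on the very stack that was
just overwritten, which can recurse; panic() halts immediately. A
userspace sketch of the end-of-stack canary check (the magic value mirrors
the kernel's STACK_END_MAGIC):

    #include <stdio.h>
    #include <stdlib.h>

    #define STACK_END_MAGIC 0x57AC6E9DUL

    struct task_stack {
        unsigned long end_magic;    /* canary at the far end of the stack */
        char data[1024];
    };

    static void schedule_debug(const struct task_stack *s)
    {
        if (s->end_magic != STACK_END_MAGIC) {
            fputs("corrupted stack end detected inside scheduler\n", stderr);
            abort();    /* panic(): do not try to keep running */
        }
    }

    int main(void)
    {
        struct task_stack t = { .end_magic = STACK_END_MAGIC };

        schedule_debug(&t);     /* passes */
        t.end_magic = 0;        /* simulate an overflow scribble */
        schedule_debug(&t);     /* aborts */
        return 0;
    }
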
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 6ba4dd988e2e..67648e6b2ac8 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3661,6 +3661,7 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+ 	 * ordering is imposed by list_lru_node->lock taken by
+ 	 * memcg_drain_all_list_lrus().
+ 	 */
++	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
+ 	css_for_each_descendant_pre(css, &memcg->css) {
+ 		child = mem_cgroup_from_css(css);
+ 		BUG_ON(child->kmemcg_id != kmemcg_id);
+@@ -3668,6 +3669,8 @@ static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+ 		if (!memcg->use_hierarchy)
+ 			break;
+ 	}
++	rcu_read_unlock();
++
+ 	memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
+ 
+ 	memcg_free_cache_id(kmemcg_id);
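
memcg_deactivate_kmem() can be reached from css_free without cgroup_mutex,
so the descendant walk above now sits under rcu_read_lock(). Userspace
libc has no RCU, so a reader lock stands in below; the point carried over
is to hold the read side across the whole traversal, not per element:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
    static const char *descendants[] = { "a", "a/b", "a/c" };

    static void deactivate_kmem(void)
    {
        pthread_rwlock_rdlock(&tree_lock);      /* rcu_read_lock()   */
        for (unsigned int i = 0;
             i < sizeof(descendants) / sizeof(descendants[0]); i++)
            printf("reparenting kmem caches of %s\n", descendants[i]);
        pthread_rwlock_unlock(&tree_lock);      /* rcu_read_unlock() */
    }

    int main(void) { deactivate_kmem(); return 0; }
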
+diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
+index a642bb829d09..09442e0f7f67 100644
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -278,6 +278,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
+ 	 * change from under us.
+ 	 */
+ 	list_for_each_entry(v, &vg->vlan_list, vlist) {
++		if (!br_vlan_should_use(v))
++			continue;
+ 		f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
+ 		if (f && f->is_local && !f->dst)
+ 			fdb_delete_local(br, NULL, f);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 7dc962b89fa1..3e4184088082 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1247,6 +1247,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+ 	err = ipgre_newlink(net, dev, tb, NULL);
+ 	if (err < 0)
+ 		goto out;
++
++	/* openvswitch users expect packet sizes to be unrestricted,
++	 * so set the largest MTU we can.
++	 */
++	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
++	if (err)
++		goto out;
++
+ 	return dev;
+ out:
+ 	free_netdev(dev);
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index ce30c8b72457..3310ac75e3f3 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -948,17 +948,31 @@ done:
+ }
+ EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
+ 
+-int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
++int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
+ {
+ 	struct ip_tunnel *tunnel = netdev_priv(dev);
+ 	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
++	int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
+ 
+-	if (new_mtu < 68 ||
+-	    new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
++	if (new_mtu < 68)
+ 		return -EINVAL;
++
++	if (new_mtu > max_mtu) {
++		if (strict)
++			return -EINVAL;
++
++		new_mtu = max_mtu;
++	}
++
+ 	dev->mtu = new_mtu;
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
++
++int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
++{
++	return __ip_tunnel_change_mtu(dev, new_mtu, true);
++}
+ EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
+ 
+ static void ip_tunnel_dev_free(struct net_device *dev)
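
The new __ip_tunnel_change_mtu() serves two callers with one rule: user
requests (strict) are rejected when they exceed what the underlay leaves
room for, while internal callers such as the gretap fb device are simply
clamped; 68 is the IPv4 minimum MTU. A standalone sketch of that split:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int change_mtu(int *dev_mtu, int new_mtu, int max_mtu, bool strict)
    {
        if (new_mtu < 68)               /* IPv4 minimum MTU */
            return -EINVAL;

        if (new_mtu > max_mtu) {
            if (strict)
                return -EINVAL;         /* user asked for the impossible */
            new_mtu = max_mtu;          /* internal caller: clamp quietly */
        }

        *dev_mtu = new_mtu;
        return 0;
    }

    int main(void)
    {
        int mtu = 1500;

        printf("strict 9000:  ret %d, mtu %d\n",
               change_mtu(&mtu, 9000, 1476, true), mtu);
        printf("clamped 9000: ret %d, mtu %d\n",
               change_mtu(&mtu, 9000, 1476, false), mtu);
        return 0;
    }
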
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 11dccba474b7..6e3e0e8b1ce3 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -359,11 +359,24 @@ unsigned int arpt_do_table(struct sk_buff *skb,
+ }
+ 
+ /* All zeroes == unconditional rule. */
+-static inline bool unconditional(const struct arpt_arp *arp)
++static inline bool unconditional(const struct arpt_entry *e)
+ {
+ 	static const struct arpt_arp uncond;
+ 
+-	return memcmp(arp, &uncond, sizeof(uncond)) == 0;
++	return e->target_offset == sizeof(struct arpt_entry) &&
++	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
++}
++
++static bool find_jump_target(const struct xt_table_info *t,
++			     const struct arpt_entry *target)
++{
++	struct arpt_entry *iter;
++
++	xt_entry_foreach(iter, t->entries, t->size) {
++		 if (iter == target)
++			return true;
++	}
++	return false;
+ }
+ 
+ /* Figures out from what hook each rule can be called: returns 0 if
+@@ -402,11 +415,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
+ 
+ 			/* Unconditional return/END. */
+-			if ((e->target_offset == sizeof(struct arpt_entry) &&
++			if ((unconditional(e) &&
+ 			     (strcmp(t->target.u.user.name,
+ 				     XT_STANDARD_TARGET) == 0) &&
+-			     t->verdict < 0 && unconditional(&e->arp)) ||
+-			    visited) {
++			     t->verdict < 0) || visited) {
+ 				unsigned int oldpos, size;
+ 
+ 				if ((strcmp(t->target.u.user.name,
+@@ -439,6 +451,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 				size = e->next_offset;
+ 				e = (struct arpt_entry *)
+ 					(entry0 + pos + size);
++				if (pos + size >= newinfo->size)
++					return 0;
+ 				e->counters.pcnt = pos;
+ 				pos += size;
+ 			} else {
+@@ -458,9 +472,15 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					e = (struct arpt_entry *)
++						(entry0 + newpos);
++					if (!find_jump_target(newinfo, e))
++						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
++					if (newpos >= newinfo->size)
++						return 0;
+ 				}
+ 				e = (struct arpt_entry *)
+ 					(entry0 + newpos);
+@@ -474,25 +494,6 @@ next:
+ 	return 1;
+ }
+ 
+-static inline int check_entry(const struct arpt_entry *e, const char *name)
+-{
+-	const struct xt_entry_target *t;
+-
+-	if (!arp_checkentry(&e->arp)) {
+-		duprintf("arp_tables: arp check failed %p %s.\n", e, name);
+-		return -EINVAL;
+-	}
+-
+-	if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
+-		return -EINVAL;
+-
+-	t = arpt_get_target_c(e);
+-	if (e->target_offset + t->u.target_size > e->next_offset)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+ static inline int check_target(struct arpt_entry *e, const char *name)
+ {
+ 	struct xt_entry_target *t = arpt_get_target(e);
+@@ -522,10 +523,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
+ 	struct xt_target *target;
+ 	int ret;
+ 
+-	ret = check_entry(e, name);
+-	if (ret)
+-		return ret;
+-
+ 	e->counters.pcnt = xt_percpu_counter_alloc();
+ 	if (IS_ERR_VALUE(e->counters.pcnt))
+ 		return -ENOMEM;
+@@ -557,7 +554,7 @@ static bool check_underflow(const struct arpt_entry *e)
+ 	const struct xt_entry_target *t;
+ 	unsigned int verdict;
+ 
+-	if (!unconditional(&e->arp))
++	if (!unconditional(e))
+ 		return false;
+ 	t = arpt_get_target_c(e);
+ 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
+@@ -576,9 +573,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
+ 					     unsigned int valid_hooks)
+ {
+ 	unsigned int h;
++	int err;
+ 
+ 	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p\n", e);
+ 		return -EINVAL;
+ 	}
+@@ -590,6 +589,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
++	if (!arp_checkentry(&e->arp))
++		return -EINVAL;
++
++	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
++				     e->next_offset);
++	if (err)
++		return err;
++
+ 	/* Check hooks & underflows */
+ 	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
+ 		if (!(valid_hooks & (1 << h)))
+@@ -598,9 +605,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
+ 			newinfo->hook_entry[h] = hook_entries[h];
+ 		if ((unsigned char *)e - base == underflows[h]) {
+ 			if (!check_underflow(e)) {
+-				pr_err("Underflows must be unconditional and "
+-				       "use the STANDARD target with "
+-				       "ACCEPT/DROP\n");
++				pr_debug("Underflows must be unconditional and "
++					 "use the STANDARD target with "
++					 "ACCEPT/DROP\n");
+ 				return -EINVAL;
+ 			}
+ 			newinfo->underflow[h] = underflows[h];
+@@ -691,10 +698,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
+-		duprintf("Looping hook\n");
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+ 		return -ELOOP;
+-	}
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -1125,55 +1130,17 @@ static int do_add_counters(struct net *net, const void __user *user,
+ 	unsigned int i;
+ 	struct xt_counters_info tmp;
+ 	struct xt_counters *paddc;
+-	unsigned int num_counters;
+-	const char *name;
+-	int size;
+-	void *ptmp;
+ 	struct xt_table *t;
+ 	const struct xt_table_info *private;
+ 	int ret = 0;
+ 	struct arpt_entry *iter;
+ 	unsigned int addend;
+-#ifdef CONFIG_COMPAT
+-	struct compat_xt_counters_info compat_tmp;
+-
+-	if (compat) {
+-		ptmp = &compat_tmp;
+-		size = sizeof(struct compat_xt_counters_info);
+-	} else
+-#endif
+-	{
+-		ptmp = &tmp;
+-		size = sizeof(struct xt_counters_info);
+-	}
+ 
+-	if (copy_from_user(ptmp, user, size) != 0)
+-		return -EFAULT;
+-
+-#ifdef CONFIG_COMPAT
+-	if (compat) {
+-		num_counters = compat_tmp.num_counters;
+-		name = compat_tmp.name;
+-	} else
+-#endif
+-	{
+-		num_counters = tmp.num_counters;
+-		name = tmp.name;
+-	}
++	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
++	if (IS_ERR(paddc))
++		return PTR_ERR(paddc);
+ 
+-	if (len != size + num_counters * sizeof(struct xt_counters))
+-		return -EINVAL;
+-
+-	paddc = vmalloc(len - size);
+-	if (!paddc)
+-		return -ENOMEM;
+-
+-	if (copy_from_user(paddc, user + size, len - size) != 0) {
+-		ret = -EFAULT;
+-		goto free;
+-	}
+-
+-	t = xt_find_table_lock(net, NFPROTO_ARP, name);
++	t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
+ 	if (IS_ERR_OR_NULL(t)) {
+ 		ret = t ? PTR_ERR(t) : -ENOENT;
+ 		goto free;
+@@ -1181,7 +1148,7 @@ static int do_add_counters(struct net *net, const void __user *user,
+ 
+ 	local_bh_disable();
+ 	private = t->private;
+-	if (private->number != num_counters) {
++	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+ 	}
+@@ -1208,6 +1175,18 @@ static int do_add_counters(struct net *net, const void __user *user,
+ }
+ 
+ #ifdef CONFIG_COMPAT
++struct compat_arpt_replace {
++	char				name[XT_TABLE_MAXNAMELEN];
++	u32				valid_hooks;
++	u32				num_entries;
++	u32				size;
++	u32				hook_entry[NF_ARP_NUMHOOKS];
++	u32				underflow[NF_ARP_NUMHOOKS];
++	u32				num_counters;
++	compat_uptr_t			counters;
++	struct compat_arpt_entry	entries[0];
++};
++
+ static inline void compat_release_entry(struct compat_arpt_entry *e)
+ {
+ 	struct xt_entry_target *t;
+@@ -1216,24 +1195,22 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
+ 	module_put(t->u.kernel.target->me);
+ }
+ 
+-static inline int
++static int
+ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
+ 				  struct xt_table_info *newinfo,
+ 				  unsigned int *size,
+ 				  const unsigned char *base,
+-				  const unsigned char *limit,
+-				  const unsigned int *hook_entries,
+-				  const unsigned int *underflows,
+-				  const char *name)
++				  const unsigned char *limit)
+ {
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	unsigned int entry_offset;
+-	int ret, off, h;
++	int ret, off;
+ 
+ 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
+ 	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p, limit = %p\n", e, limit);
+ 		return -EINVAL;
+ 	}
+@@ -1245,8 +1222,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* For purposes of check_entry casting the compat entry is fine */
+-	ret = check_entry((struct arpt_entry *)e, name);
++	if (!arp_checkentry(&e->arp))
++		return -EINVAL;
++
++	ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
++					    e->next_offset);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1270,17 +1250,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
+ 	if (ret)
+ 		goto release_target;
+ 
+-	/* Check hooks & underflows */
+-	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
+-		if ((unsigned char *)e - base == hook_entries[h])
+-			newinfo->hook_entry[h] = hook_entries[h];
+-		if ((unsigned char *)e - base == underflows[h])
+-			newinfo->underflow[h] = underflows[h];
+-	}
+-
+-	/* Clear counters and comefrom */
+-	memset(&e->counters, 0, sizeof(e->counters));
+-	e->comefrom = 0;
+ 	return 0;
+ 
+ release_target:
+@@ -1289,18 +1258,17 @@ out:
+ 	return ret;
+ }
+ 
+-static int
++static void
+ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
+-			    unsigned int *size, const char *name,
++			    unsigned int *size,
+ 			    struct xt_table_info *newinfo, unsigned char *base)
+ {
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	struct arpt_entry *de;
+ 	unsigned int origsize;
+-	int ret, h;
++	int h;
+ 
+-	ret = 0;
+ 	origsize = *size;
+ 	de = (struct arpt_entry *)*dstptr;
+ 	memcpy(de, e, sizeof(struct arpt_entry));
+@@ -1321,148 +1289,82 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
+ 		if ((unsigned char *)de - base < newinfo->underflow[h])
+ 			newinfo->underflow[h] -= origsize - *size;
+ 	}
+-	return ret;
+ }
+ 
+-static int translate_compat_table(const char *name,
+-				  unsigned int valid_hooks,
+-				  struct xt_table_info **pinfo,
++static int translate_compat_table(struct xt_table_info **pinfo,
+ 				  void **pentry0,
+-				  unsigned int total_size,
+-				  unsigned int number,
+-				  unsigned int *hook_entries,
+-				  unsigned int *underflows)
++				  const struct compat_arpt_replace *compatr)
+ {
+ 	unsigned int i, j;
+ 	struct xt_table_info *newinfo, *info;
+ 	void *pos, *entry0, *entry1;
+ 	struct compat_arpt_entry *iter0;
+-	struct arpt_entry *iter1;
++	struct arpt_replace repl;
+ 	unsigned int size;
+ 	int ret = 0;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+-	size = total_size;
+-	info->number = number;
+-
+-	/* Init all hooks to impossible value. */
+-	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+-		info->hook_entry[i] = 0xFFFFFFFF;
+-		info->underflow[i] = 0xFFFFFFFF;
+-	}
++	size = compatr->size;
++	info->number = compatr->num_entries;
+ 
+ 	duprintf("translate_compat_table: size %u\n", info->size);
+ 	j = 0;
+ 	xt_compat_lock(NFPROTO_ARP);
+-	xt_compat_init_offsets(NFPROTO_ARP, number);
++	xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
+ 	/* Walk through entries, checking offsets. */
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ 							entry0,
+-							entry0 + total_size,
+-							hook_entries,
+-							underflows,
+-							name);
++							entry0 + compatr->size);
+ 		if (ret != 0)
+ 			goto out_unlock;
+ 		++j;
+ 	}
+ 
+ 	ret = -EINVAL;
+-	if (j != number) {
++	if (j != compatr->num_entries) {
+ 		duprintf("translate_compat_table: %u not %u entries\n",
+-			 j, number);
++			 j, compatr->num_entries);
+ 		goto out_unlock;
+ 	}
+ 
+-	/* Check hooks all assigned */
+-	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+-		/* Only hooks which are valid */
+-		if (!(valid_hooks & (1 << i)))
+-			continue;
+-		if (info->hook_entry[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid hook entry %u %u\n",
+-				 i, hook_entries[i]);
+-			goto out_unlock;
+-		}
+-		if (info->underflow[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid underflow %u %u\n",
+-				 i, underflows[i]);
+-			goto out_unlock;
+-		}
+-	}
+-
+ 	ret = -ENOMEM;
+ 	newinfo = xt_alloc_table_info(size);
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
+-	newinfo->number = number;
++	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+ 		newinfo->hook_entry[i] = info->hook_entry[i];
+ 		newinfo->underflow[i] = info->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries;
+ 	pos = entry1;
+-	size = total_size;
+-	xt_entry_foreach(iter0, entry0, total_size) {
+-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
+-						  name, newinfo, entry1);
+-		if (ret != 0)
+-			break;
+-	}
++	size = compatr->size;
++	xt_entry_foreach(iter0, entry0, compatr->size)
++		compat_copy_entry_from_user(iter0, &pos, &size,
++					    newinfo, entry1);
++
++	/* all module references in entry0 are now gone */
++
+ 	xt_compat_flush_offsets(NFPROTO_ARP);
+ 	xt_compat_unlock(NFPROTO_ARP);
+-	if (ret)
+-		goto free_newinfo;
+ 
+-	ret = -ELOOP;
+-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
+-		goto free_newinfo;
++	memcpy(&repl, compatr, sizeof(*compatr));
+ 
+-	i = 0;
+-	xt_entry_foreach(iter1, entry1, newinfo->size) {
+-		iter1->counters.pcnt = xt_percpu_counter_alloc();
+-		if (IS_ERR_VALUE(iter1->counters.pcnt)) {
+-			ret = -ENOMEM;
+-			break;
+-		}
+-
+-		ret = check_target(iter1, name);
+-		if (ret != 0) {
+-			xt_percpu_counter_free(iter1->counters.pcnt);
+-			break;
+-		}
+-		++i;
+-		if (strcmp(arpt_get_target(iter1)->u.user.name,
+-		    XT_ERROR_TARGET) == 0)
+-			++newinfo->stacksize;
+-	}
+-	if (ret) {
+-		/*
+-		 * The first i matches need cleanup_entry (calls ->destroy)
+-		 * because they had called ->check already. The other j-i
+-		 * entries need only release.
+-		 */
+-		int skip = i;
+-		j -= i;
+-		xt_entry_foreach(iter0, entry0, newinfo->size) {
+-			if (skip-- > 0)
+-				continue;
+-			if (j-- == 0)
+-				break;
+-			compat_release_entry(iter0);
+-		}
+-		xt_entry_foreach(iter1, entry1, newinfo->size) {
+-			if (i-- == 0)
+-				break;
+-			cleanup_entry(iter1);
+-		}
+-		xt_free_table_info(newinfo);
+-		return ret;
++	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
++		repl.hook_entry[i] = newinfo->hook_entry[i];
++		repl.underflow[i] = newinfo->underflow[i];
+ 	}
+ 
++	repl.num_counters = 0;
++	repl.counters = NULL;
++	repl.size = newinfo->size;
++	ret = translate_table(newinfo, entry1, &repl);
++	if (ret)
++		goto free_newinfo;
++
+ 	*pinfo = newinfo;
+ 	*pentry0 = entry1;
+ 	xt_free_table_info(info);
+@@ -1470,31 +1372,18 @@ static int translate_compat_table(const char *name,
+ 
+ free_newinfo:
+ 	xt_free_table_info(newinfo);
+-out:
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	return ret;
++out_unlock:
++	xt_compat_flush_offsets(NFPROTO_ARP);
++	xt_compat_unlock(NFPROTO_ARP);
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		if (j-- == 0)
+ 			break;
+ 		compat_release_entry(iter0);
+ 	}
+ 	return ret;
+-out_unlock:
+-	xt_compat_flush_offsets(NFPROTO_ARP);
+-	xt_compat_unlock(NFPROTO_ARP);
+-	goto out;
+ }
+ 
+-struct compat_arpt_replace {
+-	char				name[XT_TABLE_MAXNAMELEN];
+-	u32				valid_hooks;
+-	u32				num_entries;
+-	u32				size;
+-	u32				hook_entry[NF_ARP_NUMHOOKS];
+-	u32				underflow[NF_ARP_NUMHOOKS];
+-	u32				num_counters;
+-	compat_uptr_t			counters;
+-	struct compat_arpt_entry	entries[0];
+-};
+-
+ static int compat_do_replace(struct net *net, void __user *user,
+ 			     unsigned int len)
+ {
+@@ -1527,10 +1416,7 @@ static int compat_do_replace(struct net *net, void __user *user,
+ 		goto free_newinfo;
+ 	}
+ 
+-	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
+-				     &newinfo, &loc_cpu_entry, tmp.size,
+-				     tmp.num_entries, tmp.hook_entry,
+-				     tmp.underflow);
++	ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
+ 	if (ret != 0)
+ 		goto free_newinfo;
+ 
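
The bulk of the arptables rewrite above (mirrored for iptables below)
replaces trust in user-supplied offsets with explicit validation: every
entry must lie inside the blob, and a jump is only legal if it lands
exactly on the start of an existing entry, checked by find_jump_target()'s
linear walk. The core of that check in standalone form (the one-byte entry
type is a toy stand-in for the variable-sized arpt_entry):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct entry { unsigned char next_offset; };  /* variable-sized records */

    static bool find_jump_target(const char *blob, size_t size,
                                 const char *target)
    {
        const char *p = blob;

        while (p < blob + size) {
            const struct entry *e = (const void *)p;

            if (p == target)
                return true;    /* offset hits an entry boundary */
            if (e->next_offset == 0)
                break;          /* defensive: avoid spinning forever */
            p += e->next_offset;
        }
        return false;           /* mid-entry jump: reject the ruleset */
    }

    int main(void)
    {
        char blob[8] = { 0 };
        struct entry *e0 = (void *)blob, *e1 = (void *)(blob + 4);

        e0->next_offset = 4;
        e1->next_offset = 4;

        printf("boundary jump: %d\n", find_jump_target(blob, 8, blob + 4));
        printf("mid-entry:     %d\n", find_jump_target(blob, 8, blob + 2));
        return 0;
    }
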
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index b99affad6ba1..a399c5419622 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
+ 
+ /* All zeroes == unconditional rule. */
+ /* Mildly perf critical (only if packet tracing is on) */
+-static inline bool unconditional(const struct ipt_ip *ip)
++static inline bool unconditional(const struct ipt_entry *e)
+ {
+ 	static const struct ipt_ip uncond;
+ 
+-	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
++	return e->target_offset == sizeof(struct ipt_entry) &&
++	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
+ #undef FWINV
+ }
+ 
+@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
+ 	} else if (s == e) {
+ 		(*rulenum)++;
+ 
+-		if (s->target_offset == sizeof(struct ipt_entry) &&
++		if (unconditional(s) &&
+ 		    strcmp(t->target.u.kernel.target->name,
+ 			   XT_STANDARD_TARGET) == 0 &&
+-		   t->verdict < 0 &&
+-		   unconditional(&s->ip)) {
++		   t->verdict < 0) {
+ 			/* Tail of chains: STANDARD target (return/policy) */
+ 			*comment = *chainname == hookname
+ 				? comments[NF_IP_TRACE_COMMENT_POLICY]
+@@ -443,6 +443,18 @@ ipt_do_table(struct sk_buff *skb,
+ #endif
+ }
+ 
++static bool find_jump_target(const struct xt_table_info *t,
++			     const struct ipt_entry *target)
++{
++	struct ipt_entry *iter;
++
++	xt_entry_foreach(iter, t->entries, t->size) {
++		 if (iter == target)
++			return true;
++	}
++	return false;
++}
++
+ /* Figures out from what hook each rule can be called: returns 0 if
+    there are loops.  Puts hook bitmask in comefrom. */
+ static int
+@@ -476,11 +488,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
+ 
+ 			/* Unconditional return/END. */
+-			if ((e->target_offset == sizeof(struct ipt_entry) &&
++			if ((unconditional(e) &&
+ 			     (strcmp(t->target.u.user.name,
+ 				     XT_STANDARD_TARGET) == 0) &&
+-			     t->verdict < 0 && unconditional(&e->ip)) ||
+-			    visited) {
++			     t->verdict < 0) || visited) {
+ 				unsigned int oldpos, size;
+ 
+ 				if ((strcmp(t->target.u.user.name,
+@@ -521,6 +532,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 				size = e->next_offset;
+ 				e = (struct ipt_entry *)
+ 					(entry0 + pos + size);
++				if (pos + size >= newinfo->size)
++					return 0;
+ 				e->counters.pcnt = pos;
+ 				pos += size;
+ 			} else {
+@@ -539,9 +552,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					e = (struct ipt_entry *)
++						(entry0 + newpos);
++					if (!find_jump_target(newinfo, e))
++						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
++					if (newpos >= newinfo->size)
++						return 0;
+ 				}
+ 				e = (struct ipt_entry *)
+ 					(entry0 + newpos);
+@@ -569,27 +588,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
+ }
+ 
+ static int
+-check_entry(const struct ipt_entry *e, const char *name)
+-{
+-	const struct xt_entry_target *t;
+-
+-	if (!ip_checkentry(&e->ip)) {
+-		duprintf("ip check failed %p %s.\n", e, name);
+-		return -EINVAL;
+-	}
+-
+-	if (e->target_offset + sizeof(struct xt_entry_target) >
+-	    e->next_offset)
+-		return -EINVAL;
+-
+-	t = ipt_get_target_c(e);
+-	if (e->target_offset + t->u.target_size > e->next_offset)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int
+ check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
+ {
+ 	const struct ipt_ip *ip = par->entryinfo;
+@@ -666,10 +664,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
+ 	struct xt_mtchk_param mtpar;
+ 	struct xt_entry_match *ematch;
+ 
+-	ret = check_entry(e, name);
+-	if (ret)
+-		return ret;
+-
+ 	e->counters.pcnt = xt_percpu_counter_alloc();
+ 	if (IS_ERR_VALUE(e->counters.pcnt))
+ 		return -ENOMEM;
+@@ -721,7 +715,7 @@ static bool check_underflow(const struct ipt_entry *e)
+ 	const struct xt_entry_target *t;
+ 	unsigned int verdict;
+ 
+-	if (!unconditional(&e->ip))
++	if (!unconditional(e))
+ 		return false;
+ 	t = ipt_get_target_c(e);
+ 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
+@@ -741,9 +735,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
+ 			   unsigned int valid_hooks)
+ {
+ 	unsigned int h;
++	int err;
+ 
+ 	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p\n", e);
+ 		return -EINVAL;
+ 	}
+@@ -755,6 +751,14 @@ check_entry_size_and_hooks(struct ipt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
++	if (!ip_checkentry(&e->ip))
++		return -EINVAL;
++
++	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
++				     e->next_offset);
++	if (err)
++		return err;
++
+ 	/* Check hooks & underflows */
+ 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+ 		if (!(valid_hooks & (1 << h)))
+@@ -763,9 +767,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
+ 			newinfo->hook_entry[h] = hook_entries[h];
+ 		if ((unsigned char *)e - base == underflows[h]) {
+ 			if (!check_underflow(e)) {
+-				pr_err("Underflows must be unconditional and "
+-				       "use the STANDARD target with "
+-				       "ACCEPT/DROP\n");
++				pr_debug("Underflows must be unconditional and "
++					 "use the STANDARD target with "
++					 "ACCEPT/DROP\n");
+ 				return -EINVAL;
+ 			}
+ 			newinfo->underflow[h] = underflows[h];
+@@ -1309,55 +1313,17 @@ do_add_counters(struct net *net, const void __user *user,
+ 	unsigned int i;
+ 	struct xt_counters_info tmp;
+ 	struct xt_counters *paddc;
+-	unsigned int num_counters;
+-	const char *name;
+-	int size;
+-	void *ptmp;
+ 	struct xt_table *t;
+ 	const struct xt_table_info *private;
+ 	int ret = 0;
+ 	struct ipt_entry *iter;
+ 	unsigned int addend;
+-#ifdef CONFIG_COMPAT
+-	struct compat_xt_counters_info compat_tmp;
+-
+-	if (compat) {
+-		ptmp = &compat_tmp;
+-		size = sizeof(struct compat_xt_counters_info);
+-	} else
+-#endif
+-	{
+-		ptmp = &tmp;
+-		size = sizeof(struct xt_counters_info);
+-	}
+-
+-	if (copy_from_user(ptmp, user, size) != 0)
+-		return -EFAULT;
+-
+-#ifdef CONFIG_COMPAT
+-	if (compat) {
+-		num_counters = compat_tmp.num_counters;
+-		name = compat_tmp.name;
+-	} else
+-#endif
+-	{
+-		num_counters = tmp.num_counters;
+-		name = tmp.name;
+-	}
+ 
+-	if (len != size + num_counters * sizeof(struct xt_counters))
+-		return -EINVAL;
+-
+-	paddc = vmalloc(len - size);
+-	if (!paddc)
+-		return -ENOMEM;
++	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
++	if (IS_ERR(paddc))
++		return PTR_ERR(paddc);
+ 
+-	if (copy_from_user(paddc, user + size, len - size) != 0) {
+-		ret = -EFAULT;
+-		goto free;
+-	}
+-
+-	t = xt_find_table_lock(net, AF_INET, name);
++	t = xt_find_table_lock(net, AF_INET, tmp.name);
+ 	if (IS_ERR_OR_NULL(t)) {
+ 		ret = t ? PTR_ERR(t) : -ENOENT;
+ 		goto free;
+@@ -1365,7 +1331,7 @@ do_add_counters(struct net *net, const void __user *user,
+ 
+ 	local_bh_disable();
+ 	private = t->private;
+-	if (private->number != num_counters) {
++	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+ 	}
+@@ -1444,7 +1410,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
+ 
+ static int
+ compat_find_calc_match(struct xt_entry_match *m,
+-		       const char *name,
+ 		       const struct ipt_ip *ip,
+ 		       int *size)
+ {
+@@ -1479,21 +1444,19 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 				  struct xt_table_info *newinfo,
+ 				  unsigned int *size,
+ 				  const unsigned char *base,
+-				  const unsigned char *limit,
+-				  const unsigned int *hook_entries,
+-				  const unsigned int *underflows,
+-				  const char *name)
++				  const unsigned char *limit)
+ {
+ 	struct xt_entry_match *ematch;
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	unsigned int entry_offset;
+ 	unsigned int j;
+-	int ret, off, h;
++	int ret, off;
+ 
+ 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
+ 	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p, limit = %p\n", e, limit);
+ 		return -EINVAL;
+ 	}
+@@ -1505,8 +1468,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* For purposes of check_entry casting the compat entry is fine */
+-	ret = check_entry((struct ipt_entry *)e, name);
++	if (!ip_checkentry(&e->ip))
++		return -EINVAL;
++
++	ret = xt_compat_check_entry_offsets(e, e->elems,
++					    e->target_offset, e->next_offset);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1514,7 +1480,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 	entry_offset = (void *)e - (void *)base;
+ 	j = 0;
+ 	xt_ematch_foreach(ematch, e) {
+-		ret = compat_find_calc_match(ematch, name, &e->ip, &off);
++		ret = compat_find_calc_match(ematch, &e->ip, &off);
+ 		if (ret != 0)
+ 			goto release_matches;
+ 		++j;
+@@ -1537,17 +1503,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 	if (ret)
+ 		goto out;
+ 
+-	/* Check hooks & underflows */
+-	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+-		if ((unsigned char *)e - base == hook_entries[h])
+-			newinfo->hook_entry[h] = hook_entries[h];
+-		if ((unsigned char *)e - base == underflows[h])
+-			newinfo->underflow[h] = underflows[h];
+-	}
+-
+-	/* Clear counters and comefrom */
+-	memset(&e->counters, 0, sizeof(e->counters));
+-	e->comefrom = 0;
+ 	return 0;
+ 
+ out:
+@@ -1561,19 +1516,18 @@ release_matches:
+ 	return ret;
+ }
+ 
+-static int
++static void
+ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
+-			    unsigned int *size, const char *name,
++			    unsigned int *size,
+ 			    struct xt_table_info *newinfo, unsigned char *base)
+ {
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	struct ipt_entry *de;
+ 	unsigned int origsize;
+-	int ret, h;
++	int h;
+ 	struct xt_entry_match *ematch;
+ 
+-	ret = 0;
+ 	origsize = *size;
+ 	de = (struct ipt_entry *)*dstptr;
+ 	memcpy(de, e, sizeof(struct ipt_entry));
+@@ -1582,201 +1536,105 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
+ 	*dstptr += sizeof(struct ipt_entry);
+ 	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
+ 
+-	xt_ematch_foreach(ematch, e) {
+-		ret = xt_compat_match_from_user(ematch, dstptr, size);
+-		if (ret != 0)
+-			return ret;
+-	}
++	xt_ematch_foreach(ematch, e)
++		xt_compat_match_from_user(ematch, dstptr, size);
++
+ 	de->target_offset = e->target_offset - (origsize - *size);
+ 	t = compat_ipt_get_target(e);
+ 	target = t->u.kernel.target;
+ 	xt_compat_target_from_user(t, dstptr, size);
+ 
+ 	de->next_offset = e->next_offset - (origsize - *size);
++
+ 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+ 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
+ 			newinfo->hook_entry[h] -= origsize - *size;
+ 		if ((unsigned char *)de - base < newinfo->underflow[h])
+ 			newinfo->underflow[h] -= origsize - *size;
+ 	}
+-	return ret;
+-}
+-
+-static int
+-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
+-{
+-	struct xt_entry_match *ematch;
+-	struct xt_mtchk_param mtpar;
+-	unsigned int j;
+-	int ret = 0;
+-
+-	e->counters.pcnt = xt_percpu_counter_alloc();
+-	if (IS_ERR_VALUE(e->counters.pcnt))
+-		return -ENOMEM;
+-
+-	j = 0;
+-	mtpar.net	= net;
+-	mtpar.table     = name;
+-	mtpar.entryinfo = &e->ip;
+-	mtpar.hook_mask = e->comefrom;
+-	mtpar.family    = NFPROTO_IPV4;
+-	xt_ematch_foreach(ematch, e) {
+-		ret = check_match(ematch, &mtpar);
+-		if (ret != 0)
+-			goto cleanup_matches;
+-		++j;
+-	}
+-
+-	ret = check_target(e, net, name);
+-	if (ret)
+-		goto cleanup_matches;
+-	return 0;
+-
+- cleanup_matches:
+-	xt_ematch_foreach(ematch, e) {
+-		if (j-- == 0)
+-			break;
+-		cleanup_match(ematch, net);
+-	}
+-
+-	xt_percpu_counter_free(e->counters.pcnt);
+-
+-	return ret;
+ }
+ 
+ static int
+ translate_compat_table(struct net *net,
+-		       const char *name,
+-		       unsigned int valid_hooks,
+ 		       struct xt_table_info **pinfo,
+ 		       void **pentry0,
+-		       unsigned int total_size,
+-		       unsigned int number,
+-		       unsigned int *hook_entries,
+-		       unsigned int *underflows)
++		       const struct compat_ipt_replace *compatr)
+ {
+ 	unsigned int i, j;
+ 	struct xt_table_info *newinfo, *info;
+ 	void *pos, *entry0, *entry1;
+ 	struct compat_ipt_entry *iter0;
+-	struct ipt_entry *iter1;
++	struct ipt_replace repl;
+ 	unsigned int size;
+ 	int ret;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+-	size = total_size;
+-	info->number = number;
+-
+-	/* Init all hooks to impossible value. */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		info->hook_entry[i] = 0xFFFFFFFF;
+-		info->underflow[i] = 0xFFFFFFFF;
+-	}
++	size = compatr->size;
++	info->number = compatr->num_entries;
+ 
+ 	duprintf("translate_compat_table: size %u\n", info->size);
+ 	j = 0;
+ 	xt_compat_lock(AF_INET);
+-	xt_compat_init_offsets(AF_INET, number);
++	xt_compat_init_offsets(AF_INET, compatr->num_entries);
+ 	/* Walk through entries, checking offsets. */
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ 							entry0,
+-							entry0 + total_size,
+-							hook_entries,
+-							underflows,
+-							name);
++							entry0 + compatr->size);
+ 		if (ret != 0)
+ 			goto out_unlock;
+ 		++j;
+ 	}
+ 
+ 	ret = -EINVAL;
+-	if (j != number) {
++	if (j != compatr->num_entries) {
+ 		duprintf("translate_compat_table: %u not %u entries\n",
+-			 j, number);
++			 j, compatr->num_entries);
+ 		goto out_unlock;
+ 	}
+ 
+-	/* Check hooks all assigned */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		/* Only hooks which are valid */
+-		if (!(valid_hooks & (1 << i)))
+-			continue;
+-		if (info->hook_entry[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid hook entry %u %u\n",
+-				 i, hook_entries[i]);
+-			goto out_unlock;
+-		}
+-		if (info->underflow[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid underflow %u %u\n",
+-				 i, underflows[i]);
+-			goto out_unlock;
+-		}
+-	}
+-
+ 	ret = -ENOMEM;
+ 	newinfo = xt_alloc_table_info(size);
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
+-	newinfo->number = number;
++	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		newinfo->hook_entry[i] = info->hook_entry[i];
+-		newinfo->underflow[i] = info->underflow[i];
++		newinfo->hook_entry[i] = compatr->hook_entry[i];
++		newinfo->underflow[i] = compatr->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries;
+ 	pos = entry1;
+-	size = total_size;
+-	xt_entry_foreach(iter0, entry0, total_size) {
+-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
+-						  name, newinfo, entry1);
+-		if (ret != 0)
+-			break;
+-	}
++	size = compatr->size;
++	xt_entry_foreach(iter0, entry0, compatr->size)
++		compat_copy_entry_from_user(iter0, &pos, &size,
++					    newinfo, entry1);
++
++	/* all module references in entry0 are now gone.
++	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
++	 * generated by 64bit userspace.
++	 *
++	 * Call standard translate_table() to validate all hook_entry
++	 * offsets and underflows, and to check for loops, etc.
++	 */
+ 	xt_compat_flush_offsets(AF_INET);
+ 	xt_compat_unlock(AF_INET);
+-	if (ret)
+-		goto free_newinfo;
+ 
+-	ret = -ELOOP;
+-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
+-		goto free_newinfo;
++	memcpy(&repl, compatr, sizeof(*compatr));
+ 
+-	i = 0;
+-	xt_entry_foreach(iter1, entry1, newinfo->size) {
+-		ret = compat_check_entry(iter1, net, name);
+-		if (ret != 0)
+-			break;
+-		++i;
+-		if (strcmp(ipt_get_target(iter1)->u.user.name,
+-		    XT_ERROR_TARGET) == 0)
+-			++newinfo->stacksize;
+-	}
+-	if (ret) {
+-		/*
+-		 * The first i matches need cleanup_entry (calls ->destroy)
+-		 * because they had called ->check already. The other j-i
+-		 * entries need only release.
+-		 */
+-		int skip = i;
+-		j -= i;
+-		xt_entry_foreach(iter0, entry0, newinfo->size) {
+-			if (skip-- > 0)
+-				continue;
+-			if (j-- == 0)
+-				break;
+-			compat_release_entry(iter0);
+-		}
+-		xt_entry_foreach(iter1, entry1, newinfo->size) {
+-			if (i-- == 0)
+-				break;
+-			cleanup_entry(iter1, net);
+-		}
+-		xt_free_table_info(newinfo);
+-		return ret;
++	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
++		repl.hook_entry[i] = newinfo->hook_entry[i];
++		repl.underflow[i] = newinfo->underflow[i];
+ 	}
+ 
++	repl.num_counters = 0;
++	repl.counters = NULL;
++	repl.size = newinfo->size;
++	ret = translate_table(net, newinfo, entry1, &repl);
++	if (ret)
++		goto free_newinfo;
++
+ 	*pinfo = newinfo;
+ 	*pentry0 = entry1;
+ 	xt_free_table_info(info);
+@@ -1784,17 +1642,16 @@ translate_compat_table(struct net *net,
+ 
+ free_newinfo:
+ 	xt_free_table_info(newinfo);
+-out:
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	return ret;
++out_unlock:
++	xt_compat_flush_offsets(AF_INET);
++	xt_compat_unlock(AF_INET);
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		if (j-- == 0)
+ 			break;
+ 		compat_release_entry(iter0);
+ 	}
+ 	return ret;
+-out_unlock:
+-	xt_compat_flush_offsets(AF_INET);
+-	xt_compat_unlock(AF_INET);
+-	goto out;
+ }
+ 
+ static int
+@@ -1830,10 +1687,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
+ 		goto free_newinfo;
+ 	}
+ 
+-	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
+-				     &newinfo, &loc_cpu_entry, tmp.size,
+-				     tmp.num_entries, tmp.hook_entry,
+-				     tmp.underflow);
++	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
+ 	if (ret != 0)
+ 		goto free_newinfo;
+ 
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 21fbb54f11d0..44e1632370dd 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1531,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ 
+ 		/* if we're overly short, let UDP handle it */
+ 		encap_rcv = ACCESS_ONCE(up->encap_rcv);
+-		if (skb->len > sizeof(struct udphdr) && encap_rcv) {
++		if (encap_rcv) {
+ 			int ret;
+ 
+ 			/* Verify checksum before giving to encap */
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index a175152d3e46..58900c21e4e4 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1072,17 +1072,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+ 					 const struct in6_addr *final_dst)
+ {
+ 	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
+-	int err;
+ 
+ 	dst = ip6_sk_dst_check(sk, dst, fl6);
++	if (!dst)
++		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
+ 
+-	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
+-	if (err)
+-		return ERR_PTR(err);
+-	if (final_dst)
+-		fl6->daddr = *final_dst;
+-
+-	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
++	return dst;
+ }
+ EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
+ 
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 99425cf2819b..22f39e00bef3 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset)
+ 
+ /* All zeroes == unconditional rule. */
+ /* Mildly perf critical (only if packet tracing is on) */
+-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
++static inline bool unconditional(const struct ip6t_entry *e)
+ {
+ 	static const struct ip6t_ip6 uncond;
+ 
+-	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
++	return e->target_offset == sizeof(struct ip6t_entry) &&
++	       memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
+ }
+ 
+ static inline const struct xt_entry_target *
+@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
+ 	} else if (s == e) {
+ 		(*rulenum)++;
+ 
+-		if (s->target_offset == sizeof(struct ip6t_entry) &&
++		if (unconditional(s) &&
+ 		    strcmp(t->target.u.kernel.target->name,
+ 			   XT_STANDARD_TARGET) == 0 &&
+-		    t->verdict < 0 &&
+-		    unconditional(&s->ipv6)) {
++		    t->verdict < 0) {
+ 			/* Tail of chains: STANDARD target (return/policy) */
+ 			*comment = *chainname == hookname
+ 				? comments[NF_IP6_TRACE_COMMENT_POLICY]
+@@ -455,6 +455,18 @@ ip6t_do_table(struct sk_buff *skb,
+ #endif
+ }
+ 
++static bool find_jump_target(const struct xt_table_info *t,
++			     const struct ip6t_entry *target)
++{
++	struct ip6t_entry *iter;
++
++	xt_entry_foreach(iter, t->entries, t->size) {
++		 if (iter == target)
++			return true;
++	}
++	return false;
++}
++
+ /* Figures out from what hook each rule can be called: returns 0 if
+    there are loops.  Puts hook bitmask in comefrom. */
+ static int
+@@ -488,11 +500,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
+ 
+ 			/* Unconditional return/END. */
+-			if ((e->target_offset == sizeof(struct ip6t_entry) &&
++			if ((unconditional(e) &&
+ 			     (strcmp(t->target.u.user.name,
+ 				     XT_STANDARD_TARGET) == 0) &&
+-			     t->verdict < 0 &&
+-			     unconditional(&e->ipv6)) || visited) {
++			     t->verdict < 0) || visited) {
+ 				unsigned int oldpos, size;
+ 
+ 				if ((strcmp(t->target.u.user.name,
+@@ -533,6 +544,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 				size = e->next_offset;
+ 				e = (struct ip6t_entry *)
+ 					(entry0 + pos + size);
++				if (pos + size >= newinfo->size)
++					return 0;
+ 				e->counters.pcnt = pos;
+ 				pos += size;
+ 			} else {
+@@ -551,9 +564,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					e = (struct ip6t_entry *)
++						(entry0 + newpos);
++					if (!find_jump_target(newinfo, e))
++						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
++					if (newpos >= newinfo->size)
++						return 0;
+ 				}
+ 				e = (struct ip6t_entry *)
+ 					(entry0 + newpos);
+@@ -580,27 +599,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
+ 	module_put(par.match->me);
+ }
+ 
+-static int
+-check_entry(const struct ip6t_entry *e, const char *name)
+-{
+-	const struct xt_entry_target *t;
+-
+-	if (!ip6_checkentry(&e->ipv6)) {
+-		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
+-		return -EINVAL;
+-	}
+-
+-	if (e->target_offset + sizeof(struct xt_entry_target) >
+-	    e->next_offset)
+-		return -EINVAL;
+-
+-	t = ip6t_get_target_c(e);
+-	if (e->target_offset + t->u.target_size > e->next_offset)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+ static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
+ {
+ 	const struct ip6t_ip6 *ipv6 = par->entryinfo;
+@@ -679,10 +677,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
+ 	struct xt_mtchk_param mtpar;
+ 	struct xt_entry_match *ematch;
+ 
+-	ret = check_entry(e, name);
+-	if (ret)
+-		return ret;
+-
+ 	e->counters.pcnt = xt_percpu_counter_alloc();
+ 	if (IS_ERR_VALUE(e->counters.pcnt))
+ 		return -ENOMEM;
+@@ -733,7 +727,7 @@ static bool check_underflow(const struct ip6t_entry *e)
+ 	const struct xt_entry_target *t;
+ 	unsigned int verdict;
+ 
+-	if (!unconditional(&e->ipv6))
++	if (!unconditional(e))
+ 		return false;
+ 	t = ip6t_get_target_c(e);
+ 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
+@@ -753,9 +747,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
+ 			   unsigned int valid_hooks)
+ {
+ 	unsigned int h;
++	int err;
+ 
+ 	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p\n", e);
+ 		return -EINVAL;
+ 	}
+@@ -767,6 +763,14 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
++	if (!ip6_checkentry(&e->ipv6))
++		return -EINVAL;
++
++	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
++				     e->next_offset);
++	if (err)
++		return err;
++
+ 	/* Check hooks & underflows */
+ 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+ 		if (!(valid_hooks & (1 << h)))
+@@ -775,9 +779,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
+ 			newinfo->hook_entry[h] = hook_entries[h];
+ 		if ((unsigned char *)e - base == underflows[h]) {
+ 			if (!check_underflow(e)) {
+-				pr_err("Underflows must be unconditional and "
+-				       "use the STANDARD target with "
+-				       "ACCEPT/DROP\n");
++				pr_debug("Underflows must be unconditional and "
++					 "use the STANDARD target with "
++					 "ACCEPT/DROP\n");
+ 				return -EINVAL;
+ 			}
+ 			newinfo->underflow[h] = underflows[h];
+@@ -1321,55 +1325,16 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
+ 	unsigned int i;
+ 	struct xt_counters_info tmp;
+ 	struct xt_counters *paddc;
+-	unsigned int num_counters;
+-	char *name;
+-	int size;
+-	void *ptmp;
+ 	struct xt_table *t;
+ 	const struct xt_table_info *private;
+ 	int ret = 0;
+ 	struct ip6t_entry *iter;
+ 	unsigned int addend;
+-#ifdef CONFIG_COMPAT
+-	struct compat_xt_counters_info compat_tmp;
+-
+-	if (compat) {
+-		ptmp = &compat_tmp;
+-		size = sizeof(struct compat_xt_counters_info);
+-	} else
+-#endif
+-	{
+-		ptmp = &tmp;
+-		size = sizeof(struct xt_counters_info);
+-	}
+-
+-	if (copy_from_user(ptmp, user, size) != 0)
+-		return -EFAULT;
+ 
+-#ifdef CONFIG_COMPAT
+-	if (compat) {
+-		num_counters = compat_tmp.num_counters;
+-		name = compat_tmp.name;
+-	} else
+-#endif
+-	{
+-		num_counters = tmp.num_counters;
+-		name = tmp.name;
+-	}
+-
+-	if (len != size + num_counters * sizeof(struct xt_counters))
+-		return -EINVAL;
+-
+-	paddc = vmalloc(len - size);
+-	if (!paddc)
+-		return -ENOMEM;
+-
+-	if (copy_from_user(paddc, user + size, len - size) != 0) {
+-		ret = -EFAULT;
+-		goto free;
+-	}
+-
+-	t = xt_find_table_lock(net, AF_INET6, name);
++	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
++	if (IS_ERR(paddc))
++		return PTR_ERR(paddc);
++	t = xt_find_table_lock(net, AF_INET6, tmp.name);
+ 	if (IS_ERR_OR_NULL(t)) {
+ 		ret = t ? PTR_ERR(t) : -ENOENT;
+ 		goto free;
+@@ -1377,7 +1342,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
+ 
+ 	local_bh_disable();
+ 	private = t->private;
+-	if (private->number != num_counters) {
++	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+ 	}
+@@ -1456,7 +1421,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
+ 
+ static int
+ compat_find_calc_match(struct xt_entry_match *m,
+-		       const char *name,
+ 		       const struct ip6t_ip6 *ipv6,
+ 		       int *size)
+ {
+@@ -1491,21 +1455,19 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 				  struct xt_table_info *newinfo,
+ 				  unsigned int *size,
+ 				  const unsigned char *base,
+-				  const unsigned char *limit,
+-				  const unsigned int *hook_entries,
+-				  const unsigned int *underflows,
+-				  const char *name)
++				  const unsigned char *limit)
+ {
+ 	struct xt_entry_match *ematch;
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	unsigned int entry_offset;
+ 	unsigned int j;
+-	int ret, off, h;
++	int ret, off;
+ 
+ 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
+ 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p, limit = %p\n", e, limit);
+ 		return -EINVAL;
+ 	}
+@@ -1517,8 +1479,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* For purposes of check_entry casting the compat entry is fine */
+-	ret = check_entry((struct ip6t_entry *)e, name);
++	if (!ip6_checkentry(&e->ipv6))
++		return -EINVAL;
++
++	ret = xt_compat_check_entry_offsets(e, e->elems,
++					    e->target_offset, e->next_offset);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1526,7 +1491,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 	entry_offset = (void *)e - (void *)base;
+ 	j = 0;
+ 	xt_ematch_foreach(ematch, e) {
+-		ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
++		ret = compat_find_calc_match(ematch, &e->ipv6, &off);
+ 		if (ret != 0)
+ 			goto release_matches;
+ 		++j;
+@@ -1549,17 +1514,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 	if (ret)
+ 		goto out;
+ 
+-	/* Check hooks & underflows */
+-	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+-		if ((unsigned char *)e - base == hook_entries[h])
+-			newinfo->hook_entry[h] = hook_entries[h];
+-		if ((unsigned char *)e - base == underflows[h])
+-			newinfo->underflow[h] = underflows[h];
+-	}
+-
+-	/* Clear counters and comefrom */
+-	memset(&e->counters, 0, sizeof(e->counters));
+-	e->comefrom = 0;
+ 	return 0;
+ 
+ out:
+@@ -1573,18 +1527,17 @@ release_matches:
+ 	return ret;
+ }
+ 
+-static int
++static void
+ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
+-			    unsigned int *size, const char *name,
++			    unsigned int *size,
+ 			    struct xt_table_info *newinfo, unsigned char *base)
+ {
+ 	struct xt_entry_target *t;
+ 	struct ip6t_entry *de;
+ 	unsigned int origsize;
+-	int ret, h;
++	int h;
+ 	struct xt_entry_match *ematch;
+ 
+-	ret = 0;
+ 	origsize = *size;
+ 	de = (struct ip6t_entry *)*dstptr;
+ 	memcpy(de, e, sizeof(struct ip6t_entry));
+@@ -1593,11 +1546,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
+ 	*dstptr += sizeof(struct ip6t_entry);
+ 	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
+ 
+-	xt_ematch_foreach(ematch, e) {
+-		ret = xt_compat_match_from_user(ematch, dstptr, size);
+-		if (ret != 0)
+-			return ret;
+-	}
++	xt_ematch_foreach(ematch, e)
++		xt_compat_match_from_user(ematch, dstptr, size);
++
+ 	de->target_offset = e->target_offset - (origsize - *size);
+ 	t = compat_ip6t_get_target(e);
+ 	xt_compat_target_from_user(t, dstptr, size);
+@@ -1609,183 +1560,83 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
+ 		if ((unsigned char *)de - base < newinfo->underflow[h])
+ 			newinfo->underflow[h] -= origsize - *size;
+ 	}
+-	return ret;
+-}
+-
+-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
+-			      const char *name)
+-{
+-	unsigned int j;
+-	int ret = 0;
+-	struct xt_mtchk_param mtpar;
+-	struct xt_entry_match *ematch;
+-
+-	e->counters.pcnt = xt_percpu_counter_alloc();
+-	if (IS_ERR_VALUE(e->counters.pcnt))
+-		return -ENOMEM;
+-	j = 0;
+-	mtpar.net	= net;
+-	mtpar.table     = name;
+-	mtpar.entryinfo = &e->ipv6;
+-	mtpar.hook_mask = e->comefrom;
+-	mtpar.family    = NFPROTO_IPV6;
+-	xt_ematch_foreach(ematch, e) {
+-		ret = check_match(ematch, &mtpar);
+-		if (ret != 0)
+-			goto cleanup_matches;
+-		++j;
+-	}
+-
+-	ret = check_target(e, net, name);
+-	if (ret)
+-		goto cleanup_matches;
+-	return 0;
+-
+- cleanup_matches:
+-	xt_ematch_foreach(ematch, e) {
+-		if (j-- == 0)
+-			break;
+-		cleanup_match(ematch, net);
+-	}
+-
+-	xt_percpu_counter_free(e->counters.pcnt);
+-
+-	return ret;
+ }
+ 
+ static int
+ translate_compat_table(struct net *net,
+-		       const char *name,
+-		       unsigned int valid_hooks,
+ 		       struct xt_table_info **pinfo,
+ 		       void **pentry0,
+-		       unsigned int total_size,
+-		       unsigned int number,
+-		       unsigned int *hook_entries,
+-		       unsigned int *underflows)
++		       const struct compat_ip6t_replace *compatr)
+ {
+ 	unsigned int i, j;
+ 	struct xt_table_info *newinfo, *info;
+ 	void *pos, *entry0, *entry1;
+ 	struct compat_ip6t_entry *iter0;
+-	struct ip6t_entry *iter1;
++	struct ip6t_replace repl;
+ 	unsigned int size;
+ 	int ret = 0;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+-	size = total_size;
+-	info->number = number;
+-
+-	/* Init all hooks to impossible value. */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		info->hook_entry[i] = 0xFFFFFFFF;
+-		info->underflow[i] = 0xFFFFFFFF;
+-	}
++	size = compatr->size;
++	info->number = compatr->num_entries;
+ 
+ 	duprintf("translate_compat_table: size %u\n", info->size);
+ 	j = 0;
+ 	xt_compat_lock(AF_INET6);
+-	xt_compat_init_offsets(AF_INET6, number);
++	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
+ 	/* Walk through entries, checking offsets. */
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ 							entry0,
+-							entry0 + total_size,
+-							hook_entries,
+-							underflows,
+-							name);
++							entry0 + compatr->size);
+ 		if (ret != 0)
+ 			goto out_unlock;
+ 		++j;
+ 	}
+ 
+ 	ret = -EINVAL;
+-	if (j != number) {
++	if (j != compatr->num_entries) {
+ 		duprintf("translate_compat_table: %u not %u entries\n",
+-			 j, number);
++			 j, compatr->num_entries);
+ 		goto out_unlock;
+ 	}
+ 
+-	/* Check hooks all assigned */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		/* Only hooks which are valid */
+-		if (!(valid_hooks & (1 << i)))
+-			continue;
+-		if (info->hook_entry[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid hook entry %u %u\n",
+-				 i, hook_entries[i]);
+-			goto out_unlock;
+-		}
+-		if (info->underflow[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid underflow %u %u\n",
+-				 i, underflows[i]);
+-			goto out_unlock;
+-		}
+-	}
+-
+ 	ret = -ENOMEM;
+ 	newinfo = xt_alloc_table_info(size);
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
+-	newinfo->number = number;
++	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		newinfo->hook_entry[i] = info->hook_entry[i];
+-		newinfo->underflow[i] = info->underflow[i];
++		newinfo->hook_entry[i] = compatr->hook_entry[i];
++		newinfo->underflow[i] = compatr->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries;
+ 	pos = entry1;
+-	size = total_size;
+-	xt_entry_foreach(iter0, entry0, total_size) {
+-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
+-						  name, newinfo, entry1);
+-		if (ret != 0)
+-			break;
+-	}
++	size = compatr->size;
++	xt_entry_foreach(iter0, entry0, compatr->size)
++		compat_copy_entry_from_user(iter0, &pos, &size,
++					    newinfo, entry1);
++
++	/* all module references in entry0 are now gone. */
+ 	xt_compat_flush_offsets(AF_INET6);
+ 	xt_compat_unlock(AF_INET6);
+-	if (ret)
+-		goto free_newinfo;
+ 
+-	ret = -ELOOP;
+-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
+-		goto free_newinfo;
++	memcpy(&repl, compatr, sizeof(*compatr));
+ 
+-	i = 0;
+-	xt_entry_foreach(iter1, entry1, newinfo->size) {
+-		ret = compat_check_entry(iter1, net, name);
+-		if (ret != 0)
+-			break;
+-		++i;
+-		if (strcmp(ip6t_get_target(iter1)->u.user.name,
+-		    XT_ERROR_TARGET) == 0)
+-			++newinfo->stacksize;
+-	}
+-	if (ret) {
+-		/*
+-		 * The first i matches need cleanup_entry (calls ->destroy)
+-		 * because they had called ->check already. The other j-i
+-		 * entries need only release.
+-		 */
+-		int skip = i;
+-		j -= i;
+-		xt_entry_foreach(iter0, entry0, newinfo->size) {
+-			if (skip-- > 0)
+-				continue;
+-			if (j-- == 0)
+-				break;
+-			compat_release_entry(iter0);
+-		}
+-		xt_entry_foreach(iter1, entry1, newinfo->size) {
+-			if (i-- == 0)
+-				break;
+-			cleanup_entry(iter1, net);
+-		}
+-		xt_free_table_info(newinfo);
+-		return ret;
++	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
++		repl.hook_entry[i] = newinfo->hook_entry[i];
++		repl.underflow[i] = newinfo->underflow[i];
+ 	}
+ 
++	repl.num_counters = 0;
++	repl.counters = NULL;
++	repl.size = newinfo->size;
++	ret = translate_table(net, newinfo, entry1, &repl);
++	if (ret)
++		goto free_newinfo;
++
+ 	*pinfo = newinfo;
+ 	*pentry0 = entry1;
+ 	xt_free_table_info(info);
+@@ -1793,17 +1644,16 @@ translate_compat_table(struct net *net,
+ 
+ free_newinfo:
+ 	xt_free_table_info(newinfo);
+-out:
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	return ret;
++out_unlock:
++	xt_compat_flush_offsets(AF_INET6);
++	xt_compat_unlock(AF_INET6);
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		if (j-- == 0)
+ 			break;
+ 		compat_release_entry(iter0);
+ 	}
+ 	return ret;
+-out_unlock:
+-	xt_compat_flush_offsets(AF_INET6);
+-	xt_compat_unlock(AF_INET6);
+-	goto out;
+ }
+ 
+ static int
+@@ -1839,10 +1689,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
+ 		goto free_newinfo;
+ 	}
+ 
+-	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
+-				     &newinfo, &loc_cpu_entry, tmp.size,
+-				     tmp.num_entries, tmp.hook_entry,
+-				     tmp.underflow);
++	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
+ 	if (ret != 0)
+ 		goto free_newinfo;
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index b8d405623f4f..1a1cd3938fd0 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1706,7 +1706,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
+ 	destp = ntohs(inet->inet_dport);
+ 	srcp  = ntohs(inet->inet_sport);
+ 
+-	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
++	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
++	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
++	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+ 		timer_active	= 1;
+ 		timer_expires	= icsk->icsk_timeout;
+ 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 6665e1a0bfe1..275af43306f9 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -647,7 +647,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ 
+ 		/* if we're overly short, let UDP handle it */
+ 		encap_rcv = ACCESS_ONCE(up->encap_rcv);
+-		if (skb->len > sizeof(struct udphdr) && encap_rcv) {
++		if (encap_rcv) {
+ 			int ret;
+ 
+ 			/* Verify checksum before giving to encap */
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index afca2eb4dfa7..ec17cbe8a02b 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ 	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
+ 	tunnel->encap = encap;
+ 	if (encap == L2TP_ENCAPTYPE_UDP) {
+-		struct udp_tunnel_sock_cfg udp_cfg;
++		struct udp_tunnel_sock_cfg udp_cfg = { };
+ 
+ 		udp_cfg.sk_user_data = tunnel;
+ 		udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index d4aaad747ea9..25391fb25516 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -415,6 +415,47 @@ int xt_check_match(struct xt_mtchk_param *par,
+ }
+ EXPORT_SYMBOL_GPL(xt_check_match);
+ 
++/** xt_check_entry_match - check that matches end before start of target
++ *
++ * @match: beginning of xt_entry_match
++ * @target: beginning of this rules target (alleged end of matches)
++ * @alignment: alignment requirement of match structures
++ *
++ * Validates that all matches add up to the beginning of the target,
++ * and that each match covers at least the base structure size.
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++static int xt_check_entry_match(const char *match, const char *target,
++				const size_t alignment)
++{
++	const struct xt_entry_match *pos;
++	int length = target - match;
++
++	if (length == 0) /* no matches */
++		return 0;
++
++	pos = (struct xt_entry_match *)match;
++	do {
++		if ((unsigned long)pos % alignment)
++			return -EINVAL;
++
++		if (length < (int)sizeof(struct xt_entry_match))
++			return -EINVAL;
++
++		if (pos->u.match_size < sizeof(struct xt_entry_match))
++			return -EINVAL;
++
++		if (pos->u.match_size > length)
++			return -EINVAL;
++
++		length -= pos->u.match_size;
++		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
++	} while (length > 0);
++
++	return 0;
++}
++
+ #ifdef CONFIG_COMPAT
+ int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
+ {
+@@ -484,13 +525,14 @@ int xt_compat_match_offset(const struct xt_match *match)
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_match_offset);
+ 
+-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+-			      unsigned int *size)
++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
++			       unsigned int *size)
+ {
+ 	const struct xt_match *match = m->u.kernel.match;
+ 	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
+ 	int pad, off = xt_compat_match_offset(match);
+ 	u_int16_t msize = cm->u.user.match_size;
++	char name[sizeof(m->u.user.name)];
+ 
+ 	m = *dstptr;
+ 	memcpy(m, cm, sizeof(*cm));
+@@ -504,10 +546,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ 
+ 	msize += off;
+ 	m->u.user.match_size = msize;
++	strlcpy(name, match->name, sizeof(name));
++	module_put(match->me);
++	strncpy(m->u.user.name, name, sizeof(m->u.user.name));
+ 
+ 	*size += off;
+ 	*dstptr += msize;
+-	return 0;
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
+ 
+@@ -538,8 +582,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
++
++/* non-compat version may have padding after verdict */
++struct compat_xt_standard_target {
++	struct compat_xt_entry_target t;
++	compat_uint_t verdict;
++};
++
++int xt_compat_check_entry_offsets(const void *base, const char *elems,
++				  unsigned int target_offset,
++				  unsigned int next_offset)
++{
++	long size_of_base_struct = elems - (const char *)base;
++	const struct compat_xt_entry_target *t;
++	const char *e = base;
++
++	if (target_offset < size_of_base_struct)
++		return -EINVAL;
++
++	if (target_offset + sizeof(*t) > next_offset)
++		return -EINVAL;
++
++	t = (void *)(e + target_offset);
++	if (t->u.target_size < sizeof(*t))
++		return -EINVAL;
++
++	if (target_offset + t->u.target_size > next_offset)
++		return -EINVAL;
++
++	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
++	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
++		return -EINVAL;
++
++	/* compat_xt_entry_match has less strict alignment requirements,
++	 * otherwise they are identical.  In case of padding differences
++	 * we would need to add a compat version of xt_check_entry_match.
++	 */
++	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
++
++	return xt_check_entry_match(elems, base + target_offset,
++				    __alignof__(struct compat_xt_entry_match));
++}
++EXPORT_SYMBOL(xt_compat_check_entry_offsets);
+ #endif /* CONFIG_COMPAT */
+ 
++/**
++ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
++ *
++ * @base: pointer to arp/ip/ip6t_entry
++ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
++ * @target_offset: the arp/ip/ip6_t->target_offset
++ * @next_offset: the arp/ip/ip6_t->next_offset
++ *
++ * validates that target_offset and next_offset are sane and that all
++ * match sizes (if any) align with the target offset.
++ *
++ * This function does not validate the targets or matches themselves, it
++ * only tests that all the offsets and sizes are correct, that all
++ * match structures are aligned, and that the last structure ends where
++ * the target structure begins.
++ *
++ * Also see xt_compat_check_entry_offsets for the CONFIG_COMPAT version.
++ *
++ * The arp/ip/ip6t_entry structure @base must have passed following tests:
++ * - it must point to a valid memory location
++ * - base to base + next_offset must be accessible, i.e. not exceed allocated
++ *   length.
++ *
++ * A well-formed entry looks like this:
++ *
++ * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
++ * e->elems[]-----'                              |               |
++ *                matchsize                      |               |
++ *                                matchsize      |               |
++ *                                               |               |
++ * target_offset---------------------------------'               |
++ * next_offset---------------------------------------------------'
++ *
++ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
++ *          This is where matches (if any) and the target reside.
++ * target_offset: beginning of target.
++ * next_offset: start of the next rule; also: size of this rule.
++ * Since targets have a minimum size, target_offset + minlen <= next_offset.
++ *
++ * Every match stores its size, sum of sizes must not exceed target_offset.
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++int xt_check_entry_offsets(const void *base,
++			   const char *elems,
++			   unsigned int target_offset,
++			   unsigned int next_offset)
++{
++	long size_of_base_struct = elems - (const char *)base;
++	const struct xt_entry_target *t;
++	const char *e = base;
++
++	/* target start is within the ip/ip6/arpt_entry struct */
++	if (target_offset < size_of_base_struct)
++		return -EINVAL;
++
++	if (target_offset + sizeof(*t) > next_offset)
++		return -EINVAL;
++
++	t = (void *)(e + target_offset);
++	if (t->u.target_size < sizeof(*t))
++		return -EINVAL;
++
++	if (target_offset + t->u.target_size > next_offset)
++		return -EINVAL;
++
++	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
++	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
++		return -EINVAL;
++
++	return xt_check_entry_match(elems, base + target_offset,
++				    __alignof__(struct xt_entry_match));
++}
++EXPORT_SYMBOL(xt_check_entry_offsets);
++
+ int xt_check_target(struct xt_tgchk_param *par,
+ 		    unsigned int size, u_int8_t proto, bool inv_proto)
+ {
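
The two validation helpers added above, xt_check_entry_match() and
xt_check_entry_offsets(), both rest on one invariant: each match record
declares its own size, and those sizes must tile the region between
e->elems and target_offset exactly. The user-space sketch below
illustrates just that walk; the bare 16-bit size header is a simplified
stand-in for xt_entry_match.u.match_size, and the kernel structures and
the alignment check are deliberately omitted, so this only demonstrates
the chain check, it is not the kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Each simplified match record starts with a 16-bit total-size field,
 * a stand-in for xt_entry_match.u.match_size. */
#define MATCH_HDR_LEN ((int)sizeof(uint16_t))

/* Mirror of the xt_check_entry_match walk: every record must cover at
 * least its header, and the sizes must tile [elems, elems + target_offset)
 * exactly; anything else means a malformed rule. */
static int check_match_chain(const char *elems, unsigned int target_offset)
{
	int length = (int)target_offset;
	const char *pos = elems;

	while (length > 0) {
		uint16_t msize;

		if (length < MATCH_HDR_LEN)
			return -1;			/* truncated record */
		memcpy(&msize, pos, sizeof(msize));	/* unaligned-safe read */
		if (msize < MATCH_HDR_LEN || msize > length)
			return -1;			/* bogus size field */
		length -= msize;
		pos += msize;
	}
	return 0;	/* chain ended exactly at the target */
}

int main(void)
{
	char buf[12] = { 0 };
	uint16_t a = 8, b = 4;		/* two records: 8 + 4 == 12 */

	memcpy(buf, &a, sizeof(a));
	memcpy(buf + 8, &b, sizeof(b));
	printf("well-formed: %d\n", check_match_chain(buf, sizeof(buf)));
	b = 6;				/* 8 + 6 overruns target_offset */
	memcpy(buf + 8, &b, sizeof(b));
	printf("overrun:     %d\n", check_match_chain(buf, sizeof(buf)));
	return 0;
}
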
+@@ -590,6 +751,80 @@ int xt_check_target(struct xt_tgchk_param *par,
+ }
+ EXPORT_SYMBOL_GPL(xt_check_target);
+ 
++/**
++ * xt_copy_counters_from_user - copy counters and metadata from userspace
++ *
++ * @user: src pointer to userspace memory
++ * @len: alleged size of userspace memory
++ * @info: where to store the xt_counters_info metadata
++ * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
++ *
++ * Copies counter meta data from @user and stores it in @info.
++ *
++ * vmallocs memory to hold the counters, then copies the counter data
++ * from @user to the new memory and returns a pointer to it.
++ *
++ * If @compat is true, @info gets converted automatically to the 64bit
++ * representation.
++ *
++ * The metadata associated with the counters is stored in @info.
++ *
++ * Return: a pointer that the caller has to test via IS_ERR().
++ * If IS_ERR() is false, the caller has to vfree() the pointer.
++ */
++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
++				 struct xt_counters_info *info, bool compat)
++{
++	void *mem;
++	u64 size;
++
++#ifdef CONFIG_COMPAT
++	if (compat) {
++		/* structures only differ in size due to alignment */
++		struct compat_xt_counters_info compat_tmp;
++
++		if (len <= sizeof(compat_tmp))
++			return ERR_PTR(-EINVAL);
++
++		len -= sizeof(compat_tmp);
++		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
++			return ERR_PTR(-EFAULT);
++
++		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
++		info->num_counters = compat_tmp.num_counters;
++		user += sizeof(compat_tmp);
++	} else
++#endif
++	{
++		if (len <= sizeof(*info))
++			return ERR_PTR(-EINVAL);
++
++		len -= sizeof(*info);
++		if (copy_from_user(info, user, sizeof(*info)) != 0)
++			return ERR_PTR(-EFAULT);
++
++		info->name[sizeof(info->name) - 1] = '\0';
++		user += sizeof(*info);
++	}
++
++	size = sizeof(struct xt_counters);
++	size *= info->num_counters;
++
++	if (size != (u64)len)
++		return ERR_PTR(-EINVAL);
++
++	mem = vmalloc(len);
++	if (!mem)
++		return ERR_PTR(-ENOMEM);
++
++	if (copy_from_user(mem, user, len) == 0)
++		return mem;
++
++	vfree(mem);
++	return ERR_PTR(-EFAULT);
++}
++EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
++
+ #ifdef CONFIG_COMPAT
+ int xt_compat_target_offset(const struct xt_target *target)
+ {
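
xt_copy_counters_from_user() above enforces a strict relation between
the setsockopt length and the declared counter count: len must equal
the metadata header plus num_counters fixed-size slots, nothing more
and nothing less. The sketch below shows that arithmetic from the
caller's side; counters_info and counter are simplified, hypothetical
stand-ins for the uapi xt_counters_info and xt_counters layouts, used
only to demonstrate the size check.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the uapi structures. */
struct counters_info {		/* header: table name + slot count */
	char	 name[32];
	uint32_t num_counters;
};
struct counter {		/* one per-rule packet/byte counter */
	uint64_t pcnt, bcnt;
};

/* The blob is accepted only when the length matches exactly:
 * len == sizeof(header) + num_counters * sizeof(counter), with some
 * counter payload present after the header. */
static int blob_len_ok(uint64_t len, uint32_t num_counters)
{
	uint64_t want = sizeof(struct counters_info)
		      + (uint64_t)num_counters * sizeof(struct counter);

	return len > sizeof(struct counters_info) && len == want;
}

int main(void)
{
	uint32_t rules = 100;
	uint64_t len = sizeof(struct counters_info)
		     + (uint64_t)rules * sizeof(struct counter);

	printf("%llu bytes: %s\n", (unsigned long long)len,
	       blob_len_ok(len, rules) ? "accepted" : "rejected");
	printf("%llu bytes: %s\n", (unsigned long long)(len - 1),
	       blob_len_ok(len - 1, rules) ? "accepted" : "rejected");
	return 0;
}
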
+@@ -605,6 +840,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ 	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
+ 	int pad, off = xt_compat_target_offset(target);
+ 	u_int16_t tsize = ct->u.user.target_size;
++	char name[sizeof(t->u.user.name)];
+ 
+ 	t = *dstptr;
+ 	memcpy(t, ct, sizeof(*ct));
+@@ -618,6 +854,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ 
+ 	tsize += off;
+ 	t->u.user.target_size = tsize;
++	strlcpy(name, target->name, sizeof(name));
++	module_put(target->me);
++	strncpy(t->u.user.name, name, sizeof(t->u.user.name));
+ 
+ 	*size += off;
+ 	*dstptr += tsize;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 992b35fb8615..7a5fa0c98377 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2784,6 +2784,7 @@ static int netlink_dump(struct sock *sk)
+ 	struct netlink_callback *cb;
+ 	struct sk_buff *skb = NULL;
+ 	struct nlmsghdr *nlh;
++	struct module *module;
+ 	int len, err = -ENOBUFS;
+ 	int alloc_min_size;
+ 	int alloc_size;
+@@ -2863,9 +2864,11 @@ static int netlink_dump(struct sock *sk)
+ 		cb->done(cb);
+ 
+ 	nlk->cb_running = false;
++	module = cb->module;
++	skb = cb->skb;
+ 	mutex_unlock(nlk->cb_mutex);
+-	module_put(cb->module);
+-	consume_skb(cb->skb);
++	module_put(module);
++	consume_skb(skb);
+ 	return 0;
+ 
+ errout_skb:
+diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
+index d933cb89efac..5eb7694348b5 100644
+--- a/net/openvswitch/vport-vxlan.c
++++ b/net/openvswitch/vport-vxlan.c
+@@ -91,6 +91,8 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
+ 	struct vxlan_config conf = {
+ 		.no_share = true,
+ 		.flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
++		/* Don't restrict the packets that can be sent by MTU */
++		.mtu = IP_MAX_MTU,
+ 	};
+ 
+ 	if (!options) {
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index d5d7132ac847..1b58866175e6 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -1169,6 +1169,7 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+ 		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
+ 		.dst = dst,
+ 		.dst_len = dst_len,
++		.fi = fi,
+ 		.tos = tos,
+ 		.type = type,
+ 		.nlflags = nlflags,
+@@ -1177,8 +1178,6 @@ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+ 	struct net_device *dev;
+ 	int err = 0;
+ 
+-	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
+-
+ 	/* Don't offload route if using custom ip rules or if
+ 	 * IPv4 FIB offloading has been disabled completely.
+ 	 */
+@@ -1222,6 +1221,7 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+ 		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
+ 		.dst = dst,
+ 		.dst_len = dst_len,
++		.fi = fi,
+ 		.tos = tos,
+ 		.type = type,
+ 		.nlflags = 0,
+@@ -1230,8 +1230,6 @@ int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+ 	struct net_device *dev;
+ 	int err = 0;
+ 
+-	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
+-
+ 	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
+ 		return 0;
+ 
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 1eadc95e1132..2ed732bfe94b 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -802,7 +802,7 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
+ 		goto out;
+ 
+ 	tipc_tlv_sprintf(msg->rep, "%-10u %s",
+-			 nla_get_u32(publ[TIPC_NLA_PUBL_REF]),
++			 nla_get_u32(publ[TIPC_NLA_PUBL_KEY]),
+ 			 scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
+ out:
+ 	tipc_tlv_sprintf(msg->rep, "\n");
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index e53003cf7703..9b713e0ce00d 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2814,6 +2814,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ 		if (err)
+ 			return err;
+ 
++		if (!attrs[TIPC_NLA_SOCK])
++			return -EINVAL;
++
+ 		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
+ 				       attrs[TIPC_NLA_SOCK],
+ 				       tipc_nl_sock_policy);
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index b50ee5d622e1..c753211cb83f 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -955,8 +955,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+ 			return private(dev, iwr, cmd, info, handler);
+ 	}
+ 	/* Old driver API : call driver ioctl handler */
+-	if (dev->netdev_ops->ndo_do_ioctl)
+-		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
++	if (dev->netdev_ops->ndo_do_ioctl) {
++#ifdef CONFIG_COMPAT
++		if (info->flags & IW_REQUEST_FLAG_COMPAT) {
++			int ret = 0;
++			struct iwreq iwr_lcl;
++			struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
++
++			memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
++			iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
++			iwr_lcl.u.data.length = iwp_compat->length;
++			iwr_lcl.u.data.flags = iwp_compat->flags;
++
++			ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
++
++			iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
++			iwp_compat->length = iwr_lcl.u.data.length;
++			iwp_compat->flags = iwr_lcl.u.data.flags;
++
++			return ret;
++		} else
++#endif
++			return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
++	}
+ 	return -EOPNOTSUPP;
+ }
+ 
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 411630e9c034..1475440b70aa 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -359,8 +359,11 @@ enum {
+ 
+ #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+ #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
++#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
++#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
+ #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
++#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
++			  IS_KBL(pci) || IS_KBL_LP(pci))
+ 
+ static char *driver_short_names[] = {
+ 	[AZX_DRIVER_ICH] = "HDA Intel",
+@@ -2204,6 +2207,12 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* Sunrise Point-LP */
+ 	{ PCI_DEVICE(0x8086, 0x9d70),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
++	/* Kabylake */
++	{ PCI_DEVICE(0x8086, 0xa171),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
++	/* Kabylake-LP */
++	{ PCI_DEVICE(0x8086, 0x9d71),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ 	/* Broxton-P(Apollolake) */
+ 	{ PCI_DEVICE(0x8086, 0x5a98),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d53c25e7a1c1..0fe18ede3e85 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -346,6 +346,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 	case 0x10ec0234:
+ 	case 0x10ec0274:
+ 	case 0x10ec0294:
++	case 0x10ec0700:
++	case 0x10ec0701:
++	case 0x10ec0703:
+ 		alc_update_coef_idx(codec, 0x10, 1<<15, 0);
+ 		break;
+ 	case 0x10ec0662:
+@@ -2655,6 +2658,7 @@ enum {
+ 	ALC269_TYPE_ALC256,
+ 	ALC269_TYPE_ALC225,
+ 	ALC269_TYPE_ALC294,
++	ALC269_TYPE_ALC700,
+ };
+ 
+ /*
+@@ -2686,6 +2690,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
+ 	case ALC269_TYPE_ALC256:
+ 	case ALC269_TYPE_ALC225:
+ 	case ALC269_TYPE_ALC294:
++	case ALC269_TYPE_ALC700:
+ 		ssids = alc269_ssids;
+ 		break;
+ 	default:
+@@ -3618,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
+ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ {
+ 	static struct coef_fw coef0255[] = {
+-		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
+ 		WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
+ 		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
+ 		WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
+ 		WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
+ 		{}
+ 	};
++	static struct coef_fw coef0255_1[] = {
++		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
++		{}
++	};
++	static struct coef_fw coef0256[] = {
++		WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
++		{}
++	};
+ 	static struct coef_fw coef0233[] = {
+ 		WRITE_COEF(0x1b, 0x0c0b),
+ 		WRITE_COEF(0x45, 0xc429),
+@@ -3677,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ 
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0255:
++		alc_process_coef_fw(codec, coef0255_1);
++		alc_process_coef_fw(codec, coef0255);
++		break;
+ 	case 0x10ec0256:
++		alc_process_coef_fw(codec, coef0256);
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
+ 	case 0x10ec0233:
+@@ -3896,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ 		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ 		{}
+ 	};
++	static struct coef_fw coef0256[] = {
++		WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
++		WRITE_COEF(0x1b, 0x0c6b),
++		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
++		{}
++	};
+ 	static struct coef_fw coef0233[] = {
+ 		WRITE_COEF(0x45, 0xd429),
+ 		WRITE_COEF(0x1b, 0x0c2b),
+@@ -3936,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ 
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0255:
+-	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0256:
++		alc_process_coef_fw(codec, coef0256);
++		break;
+ 	case 0x10ec0233:
+ 	case 0x10ec0283:
+ 		alc_process_coef_fw(codec, coef0233);
+@@ -3978,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ 		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ 		{}
+ 	};
++	static struct coef_fw coef0256[] = {
++		WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
++		WRITE_COEF(0x1b, 0x0c6b),
++		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
++		{}
++	};
+ 	static struct coef_fw coef0233[] = {
+ 		WRITE_COEF(0x45, 0xe429),
+ 		WRITE_COEF(0x1b, 0x0c2b),
+@@ -4018,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ 
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0255:
+-	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0256:
++		alc_process_coef_fw(codec, coef0256);
++		break;
+ 	case 0x10ec0233:
+ 	case 0x10ec0283:
+ 		alc_process_coef_fw(codec, coef0233);
+@@ -4266,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
+ static void alc255_set_default_jack_type(struct hda_codec *codec)
+ {
+ 	/* Set to iphone type */
+-	static struct coef_fw fw[] = {
++	static struct coef_fw alc255fw[] = {
+ 		WRITE_COEF(0x1b, 0x880b),
+ 		WRITE_COEF(0x45, 0xd089),
+ 		WRITE_COEF(0x1b, 0x080b),
+@@ -4274,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
+ 		WRITE_COEF(0x1b, 0x0c0b),
+ 		{}
+ 	};
+-	alc_process_coef_fw(codec, fw);
++	static struct coef_fw alc256fw[] = {
++		WRITE_COEF(0x1b, 0x884b),
++		WRITE_COEF(0x45, 0xd089),
++		WRITE_COEF(0x1b, 0x084b),
++		WRITE_COEF(0x46, 0x0004),
++		WRITE_COEF(0x1b, 0x0c4b),
++		{}
++	};
++	switch (codec->core.vendor_id) {
++	case 0x10ec0255:
++		alc_process_coef_fw(codec, alc255fw);
++		break;
++	case 0x10ec0256:
++		alc_process_coef_fw(codec, alc256fw);
++		break;
++	}
+ 	msleep(30);
+ }
+ 
+@@ -5587,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+@@ -5775,6 +5823,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x12, 0x90a60180},
+ 		{0x14, 0x90170130},
+ 		{0x21, 0x02211040}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x90a60180},
++		{0x14, 0x90170120},
++		{0x21, 0x02211030}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		{0x12, 0x90a60160},
+ 		{0x14, 0x90170120},
+@@ -6053,6 +6105,14 @@ static int patch_alc269(struct hda_codec *codec)
+ 	case 0x10ec0294:
+ 		spec->codec_variant = ALC269_TYPE_ALC294;
+ 		break;
++	case 0x10ec0700:
++	case 0x10ec0701:
++	case 0x10ec0703:
++		spec->codec_variant = ALC269_TYPE_ALC700;
++		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
++		alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
++		break;
++
+ 	}
+ 
+ 	if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
+@@ -7008,6 +7068,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
+ 	HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
+ 	HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
+ 	HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
++	HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
++	HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
++	HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
+ 	HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
+ 	HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
+ 	HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
+diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
+index f0b08a2a48ba..7d31d8c5b9ea 100644
+--- a/virt/kvm/irqchip.c
++++ b/virt/kvm/irqchip.c
+@@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
+ 
+ 	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
+ 					lockdep_is_held(&kvm->irq_lock));
+-	if (gsi < irq_rt->nr_rt_entries) {
++	if (irq_rt && gsi < irq_rt->nr_rt_entries) {
+ 		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
+ 			entries[n] = *e;
+ 			++n;


2017-02-09  8:05 Alice Ferrazzi
2017-02-04 13:47 Alice Ferrazzi
2017-02-01 12:59 Alice Ferrazzi
2017-01-26  8:24 Alice Ferrazzi
2017-01-20 12:45 Alice Ferrazzi
2017-01-15 22:57 Mike Pagano
2017-01-14 14:46 Mike Pagano
2017-01-12 12:11 Mike Pagano
2017-01-09 12:46 Mike Pagano
2017-01-06 23:13 Mike Pagano
2016-12-15 23:41 Mike Pagano
2016-12-11 15:02 Alice Ferrazzi
2016-12-09 13:57 Alice Ferrazzi
2016-12-08  0:03 Mike Pagano
2016-12-02 16:21 Mike Pagano
2016-11-26 18:51 Mike Pagano
2016-11-26 18:40 Mike Pagano
2016-11-22  0:14 Mike Pagano
2016-11-19 11:03 Mike Pagano
2016-11-15 10:05 Alice Ferrazzi
2016-11-10 18:13 Alice Ferrazzi
2016-11-01  3:14 Alice Ferrazzi
2016-10-31 14:09 Alice Ferrazzi
2016-10-28 18:27 Alice Ferrazzi
2016-10-22 13:05 Mike Pagano
2016-10-21 11:10 Mike Pagano
2016-10-16 19:25 Mike Pagano
2016-10-08 19:55 Mike Pagano
2016-09-30 19:07 Mike Pagano
2016-09-24 10:51 Mike Pagano
2016-09-16 19:10 Mike Pagano
2016-09-15 13:58 Mike Pagano
2016-09-09 19:20 Mike Pagano
2016-08-20 16:31 Mike Pagano
2016-08-17 11:48 Mike Pagano
2016-08-10 12:56 Mike Pagano
2016-07-27 19:19 Mike Pagano
2016-07-11 19:59 Mike Pagano
2016-07-02 15:30 Mike Pagano
2016-07-01  0:55 Mike Pagano
2016-06-08 13:38 Mike Pagano
2016-06-02 18:24 Mike Pagano
2016-05-19 13:00 Mike Pagano
2016-05-12  0:14 Mike Pagano
2016-05-04 23:51 Mike Pagano
2016-04-20 11:27 Mike Pagano
2016-04-12 18:59 Mike Pagano
2016-03-22 22:47 Mike Pagano
2016-03-16 19:43 Mike Pagano
2016-03-10  0:51 Mike Pagano
2016-03-04 11:15 Mike Pagano
2016-02-26  0:02 Mike Pagano
2016-02-19 23:33 Mike Pagano
2016-02-18  0:20 Mike Pagano
2016-02-01  0:19 Mike Pagano
2016-02-01  0:13 Mike Pagano
2016-01-31 23:33 Mike Pagano
2016-01-20 12:38 Mike Pagano
2016-01-10 17:19 Mike Pagano

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there (a command-line sketch of this method
  follows after this list): mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1466800804.0a21ca48b5ca15cd0f5699ea94da60754a272c0c.mpagano@gentoo \
    --to=mpagano@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.
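
As a minimal sketch of the mbox method listed above (the archive base
URL, and the choice of curl and mutt, are illustrative assumptions;
public-inbox archives conventionally serve a raw message at
<base>/<Message-ID>/raw, but verify the path for this inbox):

  # Fetch this message as a raw mbox file; replace ARCHIVE_BASE with
  # the real base URL of the archive hosting this inbox.
  curl -o reply.mbox \
    "https://ARCHIVE_BASE/1466800804.0a21ca48b5ca15cd0f5699ea94da60754a272c0c.mpagano@gentoo/raw"

  # Open the saved mbox in a mail client and reply-to-all from there.
  mutt -f reply.mbox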
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox.