public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] proj/hardened-patchset:master commit in: 3.10.9/, 3.10.7/, 3.2.50/
@ 2013-08-22 11:18 Anthony G. Basile
  0 siblings, 0 replies; 2+ messages in thread
From: Anthony G. Basile @ 2013-08-22 11:18 UTC (permalink / raw
  To: gentoo-commits

commit:     90ddae61d100a40875777992db9fa25578a547c5
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 22 11:18:30 2013 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Thu Aug 22 11:18:30 2013 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=90ddae61

Grsec/PaX: 2.9.1-{3.2.50,3.10.9}-201308202015

---
 {3.10.7 => 3.10.9}/0000_README                     |   2 +-
 ...4420_grsecurity-2.9.1-3.10.9-201308202015.patch | 768 ++++++---------------
 {3.10.7 => 3.10.9}/4425_grsec_remove_EI_PAX.patch  |   0
 .../4427_force_XATTR_PAX_tmpfs.patch               |   0
 .../4430_grsec-remove-localversion-grsec.patch     |   0
 {3.10.7 => 3.10.9}/4435_grsec-mute-warnings.patch  |   0
 .../4440_grsec-remove-protected-paths.patch        |   0
 .../4450_grsec-kconfig-default-gids.patch          |   0
 .../4465_selinux-avc_audit-log-curr_ip.patch       |   0
 {3.10.7 => 3.10.9}/4470_disable-compat_vdso.patch  |   0
 {3.10.7 => 3.10.9}/4475_emutramp_default_on.patch  |   0
 3.2.50/0000_README                                 |   2 +-
 ...420_grsecurity-2.9.1-3.2.50-201308202017.patch} |  25 +-
 13 files changed, 225 insertions(+), 572 deletions(-)

diff --git a/3.10.7/0000_README b/3.10.9/0000_README
similarity index 96%
rename from 3.10.7/0000_README
rename to 3.10.9/0000_README
index e8ef030..31b21f6 100644
--- a/3.10.7/0000_README
+++ b/3.10.9/0000_README
@@ -2,7 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-2.9.1-3.10.7-201308192211.patch
+Patch:	4420_grsecurity-2.9.1-3.10.9-201308202015.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.10.7/4420_grsecurity-2.9.1-3.10.7-201308192211.patch b/3.10.9/4420_grsecurity-2.9.1-3.10.9-201308202015.patch
similarity index 99%
rename from 3.10.7/4420_grsecurity-2.9.1-3.10.7-201308192211.patch
rename to 3.10.9/4420_grsecurity-2.9.1-3.10.9-201308202015.patch
index 73ebf27..24d81a0 100644
--- a/3.10.7/4420_grsecurity-2.9.1-3.10.7-201308192211.patch
+++ b/3.10.9/4420_grsecurity-2.9.1-3.10.9-201308202015.patch
@@ -281,7 +281,7 @@ index 2fe6e76..889ee23 100644
  
  	pcd.		[PARIDE]
 diff --git a/Makefile b/Makefile
-index 33e36ab..31f1dc8 100644
+index 4b31d62..ac99d49 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -2144,33 +2144,6 @@ index f00b569..aa5bb41 100644
  
  /*
   * Change these and you break ASM code in entry-common.S
-diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
-index bdf2b84..aa9b4ac 100644
---- a/arch/arm/include/asm/tlb.h
-+++ b/arch/arm/include/asm/tlb.h
-@@ -43,6 +43,7 @@ struct mmu_gather {
- 	struct mm_struct	*mm;
- 	unsigned int		fullmm;
- 	struct vm_area_struct	*vma;
-+	unsigned long		start, end;
- 	unsigned long		range_start;
- 	unsigned long		range_end;
- 	unsigned int		nr;
-@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
- }
- 
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
--	tlb->fullmm = fullmm;
-+	tlb->fullmm = !(start | (end+1));
-+	tlb->start = start;
-+	tlb->end = end;
- 	tlb->vma = NULL;
- 	tlb->max = ARRAY_SIZE(tlb->local);
- 	tlb->pages = tlb->local;
 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
 index 7e1f760..de33b13 100644
 --- a/arch/arm/include/asm/uaccess.h
@@ -2889,33 +2862,18 @@ index 07314af..c46655c 100644
  	flush_icache_range((uintptr_t)(addr),
  			   (uintptr_t)(addr) + size);
 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
-index d9f5cd4..e186ee1 100644
+index e19edc6..e186ee1 100644
 --- a/arch/arm/kernel/perf_event.c
 +++ b/arch/arm/kernel/perf_event.c
-@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
- static int
- armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
- {
--	int mapping = (*event_map)[config];
-+	int mapping;
-+
-+	if (config >= PERF_COUNT_HW_MAX)
-+		return -EINVAL;
-+
-+	mapping = (*event_map)[config];
- 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
- }
- 
-@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
- 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- 	struct pmu *leader_pmu = event->group_leader->pmu;
+@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+ 	int mapping;
  
-+	if (is_software_event(event))
-+		return 1;
-+
- 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
- 		return 1;
+ 	if (config >= PERF_COUNT_HW_MAX)
+-		return -ENOENT;
++		return -EINVAL;
  
+ 	mapping = (*event_map)[config];
+ 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
 index 1f2740e..b36e225 100644
 --- a/arch/arm/kernel/perf_event_cpu.c
@@ -2930,21 +2888,9 @@ index 1f2740e..b36e225 100644
  };
  
 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 5bc2615..4f1a0c2 100644
+index 5bc2615..dcd439f 100644
 --- a/arch/arm/kernel/process.c
 +++ b/arch/arm/kernel/process.c
-@@ -28,10 +28,10 @@
- #include <linux/tick.h>
- #include <linux/utsname.h>
- #include <linux/uaccess.h>
--#include <linux/random.h>
- #include <linux/hw_breakpoint.h>
- #include <linux/cpuidle.h>
- #include <linux/leds.h>
-+#include <linux/random.h>
- 
- #include <asm/cacheflush.h>
- #include <asm/idmap.h>
 @@ -223,6 +223,7 @@ void machine_power_off(void)
  
  	if (pm_power_off)
@@ -4543,33 +4489,6 @@ index ce6d763..cfea917 100644
  
  extern void *samsung_dmadev_get_ops(void);
  extern void *s3c_dma_get_ops(void);
-diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
-index 654f096..5546653 100644
---- a/arch/arm64/include/asm/tlb.h
-+++ b/arch/arm64/include/asm/tlb.h
-@@ -35,6 +35,7 @@ struct mmu_gather {
- 	struct mm_struct	*mm;
- 	unsigned int		fullmm;
- 	struct vm_area_struct	*vma;
-+	unsigned long		start, end;
- 	unsigned long		range_start;
- 	unsigned long		range_end;
- 	unsigned int		nr;
-@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
- }
- 
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
--	tlb->fullmm = fullmm;
-+	tlb->fullmm = !(start | (end+1));
-+	tlb->start = start;
-+	tlb->end = end;
- 	tlb->vma = NULL;
- 	tlb->max = ARRAY_SIZE(tlb->local);
- 	tlb->pages = tlb->local;
 diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
 index f4726dc..39ed646 100644
 --- a/arch/arm64/kernel/debug-monitors.c
@@ -4979,45 +4898,6 @@ index 54ff557..70c88b7 100644
  }
  
  static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
-index ef3a9de..bc5efc7 100644
---- a/arch/ia64/include/asm/tlb.h
-+++ b/arch/ia64/include/asm/tlb.h
-@@ -22,7 +22,7 @@
-  * unmapping a portion of the virtual address space, these hooks are called according to
-  * the following template:
-  *
-- *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
-+ *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
-  *	{
-  *	  for each vma that needs a shootdown do {
-  *	    tlb_start_vma(tlb, vma);
-@@ -58,6 +58,7 @@ struct mmu_gather {
- 	unsigned int		max;
- 	unsigned char		fullmm;		/* non-zero means full mm flush */
- 	unsigned char		need_flush;	/* really unmapped some PTEs? */
-+	unsigned long		start, end;
- 	unsigned long		start_addr;
- 	unsigned long		end_addr;
- 	struct page		**pages;
-@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
- 
- 
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
- 	tlb->max = ARRAY_SIZE(tlb->local);
- 	tlb->pages = tlb->local;
- 	tlb->nr = 0;
--	tlb->fullmm = full_mm_flush;
-+	tlb->fullmm = !(start | (end+1));
-+	tlb->start = start;
-+	tlb->end = end;
- 	tlb->start_addr = ~0UL;
- }
- 
 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
 index 449c8c0..18965fb 100644
 --- a/arch/ia64/include/asm/uaccess.h
@@ -7645,34 +7525,6 @@ index c4a93d6..4d2a9b4 100644
 +#define arch_align_stack(x) ((x) & ~0xfUL)
  
  #endif /* __ASM_EXEC_H */
-diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
-index b75d7d6..6d6d92b 100644
---- a/arch/s390/include/asm/tlb.h
-+++ b/arch/s390/include/asm/tlb.h
-@@ -32,6 +32,7 @@ struct mmu_gather {
- 	struct mm_struct *mm;
- 	struct mmu_table_batch *batch;
- 	unsigned int fullmm;
-+	unsigned long start, end;
- };
- 
- struct mmu_table_batch {
-@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
- 
- static inline void tlb_gather_mmu(struct mmu_gather *tlb,
- 				  struct mm_struct *mm,
--				  unsigned int full_mm_flush)
-+				  unsigned long start,
-+				  unsigned long end)
- {
- 	tlb->mm = mm;
--	tlb->fullmm = full_mm_flush;
-+	tlb->start = start;
-+	tlb->end = end;
-+	tlb->fullmm = !(start | (end+1));
- 	tlb->batch = NULL;
- 	if (tlb->fullmm)
- 		__tlb_flush_mm(mm);
 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
 index 9c33ed4..e40cbef 100644
 --- a/arch/s390/include/asm/uaccess.h
@@ -7941,25 +7793,6 @@ index ef9e555..331bd29 100644
  
  #define __read_mostly __attribute__((__section__(".data..read_mostly")))
  
-diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
-index e61d43d..362192e 100644
---- a/arch/sh/include/asm/tlb.h
-+++ b/arch/sh/include/asm/tlb.h
-@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
- }
- 
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
--	tlb->fullmm = full_mm_flush;
-+	tlb->start = start;
-+	tlb->end = end;
-+	tlb->fullmm = !(start | (end+1));
- 
- 	init_tlb_gather(tlb);
- }
 diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
 index 03f2b55..b0270327 100644
 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -10595,25 +10428,6 @@ index 0032f92..cd151e0 100644
  
  #ifdef CONFIG_64BIT
  #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
-diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
-index 4febacd..29b0301 100644
---- a/arch/um/include/asm/tlb.h
-+++ b/arch/um/include/asm/tlb.h
-@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
- }
- 
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
--	tlb->fullmm = full_mm_flush;
-+	tlb->start = start;
-+	tlb->end = end;
-+	tlb->fullmm = !(start | (end+1));
- 
- 	init_tlb_gather(tlb);
- }
 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
 index bbcef52..6a2a483 100644
 --- a/arch/um/kernel/process.c
@@ -15963,7 +15777,7 @@ index e642300..0ef8f31 100644
  #define pgprot_writecombine	pgprot_writecombine
  extern pgprot_t pgprot_writecombine(pgprot_t prot);
 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 22224b3..c5d8d7d 100644
+index 22224b3..b3a2f90 100644
 --- a/arch/x86/include/asm/processor.h
 +++ b/arch/x86/include/asm/processor.h
 @@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
@@ -16006,7 +15820,39 @@ index 22224b3..c5d8d7d 100644
  #endif
  #ifdef CONFIG_X86_32
  	unsigned long		ip;
-@@ -823,11 +836,18 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -552,29 +565,8 @@ static inline void load_sp0(struct tss_struct *tss,
+ extern unsigned long mmu_cr4_features;
+ extern u32 *trampoline_cr4_features;
+ 
+-static inline void set_in_cr4(unsigned long mask)
+-{
+-	unsigned long cr4;
+-
+-	mmu_cr4_features |= mask;
+-	if (trampoline_cr4_features)
+-		*trampoline_cr4_features = mmu_cr4_features;
+-	cr4 = read_cr4();
+-	cr4 |= mask;
+-	write_cr4(cr4);
+-}
+-
+-static inline void clear_in_cr4(unsigned long mask)
+-{
+-	unsigned long cr4;
+-
+-	mmu_cr4_features &= ~mask;
+-	if (trampoline_cr4_features)
+-		*trampoline_cr4_features = mmu_cr4_features;
+-	cr4 = read_cr4();
+-	cr4 &= ~mask;
+-	write_cr4(cr4);
+-}
++extern void set_in_cr4(unsigned long mask);
++extern void clear_in_cr4(unsigned long mask);
+ 
+ typedef struct {
+ 	unsigned long		seg;
+@@ -823,11 +815,18 @@ static inline void spin_lock_prefetch(const void *x)
   */
  #define TASK_SIZE		PAGE_OFFSET
  #define TASK_SIZE_MAX		TASK_SIZE
@@ -16027,7 +15873,7 @@ index 22224b3..c5d8d7d 100644
  	.vm86_info		= NULL,					  \
  	.sysenter_cs		= __KERNEL_CS,				  \
  	.io_bitmap_ptr		= NULL,					  \
-@@ -841,7 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -841,7 +840,7 @@ static inline void spin_lock_prefetch(const void *x)
   */
  #define INIT_TSS  {							  \
  	.x86_tss = {							  \
@@ -16036,7 +15882,7 @@ index 22224b3..c5d8d7d 100644
  		.ss0		= __KERNEL_DS,				  \
  		.ss1		= __KERNEL_CS,				  \
  		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
-@@ -852,11 +872,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -852,11 +851,7 @@ static inline void spin_lock_prefetch(const void *x)
  extern unsigned long thread_saved_pc(struct task_struct *tsk);
  
  #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
@@ -16049,7 +15895,7 @@ index 22224b3..c5d8d7d 100644
  
  /*
   * The below -8 is to reserve 8 bytes on top of the ring0 stack.
-@@ -871,7 +887,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -871,7 +866,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  #define task_pt_regs(task)                                             \
  ({                                                                     \
         struct pt_regs *__regs__;                                       \
@@ -16058,7 +15904,7 @@ index 22224b3..c5d8d7d 100644
         __regs__ - 1;                                                   \
  })
  
-@@ -881,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -881,13 +876,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  /*
   * User space process size. 47bits minus one guard page.
   */
@@ -16074,7 +15920,7 @@ index 22224b3..c5d8d7d 100644
  
  #define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
  					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-@@ -898,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -898,11 +893,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  #define STACK_TOP_MAX		TASK_SIZE_MAX
  
  #define INIT_THREAD  { \
@@ -16088,7 +15934,7 @@ index 22224b3..c5d8d7d 100644
  }
  
  /*
-@@ -930,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+@@ -930,6 +925,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
   */
  #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
  
@@ -16099,7 +15945,7 @@ index 22224b3..c5d8d7d 100644
  #define KSTK_EIP(task)		(task_pt_regs(task)->ip)
  
  /* Get/set a process' ability to use the timestamp counter instruction */
-@@ -942,7 +962,8 @@ extern int set_tsc_mode(unsigned int val);
+@@ -942,7 +941,8 @@ extern int set_tsc_mode(unsigned int val);
  extern u16 amd_get_nb_id(int cpu);
  
  struct aperfmperf {
@@ -16109,7 +15955,7 @@ index 22224b3..c5d8d7d 100644
  };
  
  static inline void get_aperfmperf(struct aperfmperf *am)
-@@ -970,7 +991,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
+@@ -970,7 +970,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
  	return ratio;
  }
  
@@ -16118,7 +15964,7 @@ index 22224b3..c5d8d7d 100644
  extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
  
  void default_idle(void);
-@@ -980,6 +1001,6 @@ bool xen_set_default_idle(void);
+@@ -980,6 +980,6 @@ bool xen_set_default_idle(void);
  #define xen_set_default_idle 0
  #endif
  
@@ -18612,7 +18458,7 @@ index 5013a48..0782c53 100644
  		if (c->x86_model == 3 && c->x86_mask == 0)
  			size = 64;
 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 22018f7..2ae0e75 100644
+index 22018f7..df77e23 100644
 --- a/arch/x86/kernel/cpu/common.c
 +++ b/arch/x86/kernel/cpu/common.c
 @@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
@@ -18676,7 +18522,7 @@ index 22018f7..2ae0e75 100644
  static int __init x86_xsave_setup(char *s)
  {
  	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-@@ -288,6 +234,53 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+@@ -288,6 +234,57 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
  		set_in_cr4(X86_CR4_SMAP);
  }
  
@@ -18700,7 +18546,9 @@ index 22018f7..2ae0e75 100644
 +
 +#ifdef CONFIG_PAX_MEMORY_UDEREF
 +		if (clone_pgd_mask != ~(pgdval_t)0UL) {
++			pax_open_kernel();
 +			pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++			pax_close_kernel();
 +			printk("PAX: slow and weak UDEREF enabled\n");
 +		} else
 +			printk("PAX: UDEREF disabled\n");
@@ -18713,7 +18561,9 @@ index 22018f7..2ae0e75 100644
 +	set_in_cr4(X86_CR4_PCIDE);
 +
 +#ifdef CONFIG_PAX_MEMORY_UDEREF
++	pax_open_kernel();
 +	clone_pgd_mask = ~(pgdval_t)0UL;
++	pax_close_kernel();
 +	if (pax_user_shadow_base)
 +		printk("PAX: weak UDEREF enabled\n");
 +	else {
@@ -18730,7 +18580,7 @@ index 22018f7..2ae0e75 100644
  /*
   * Some CPU features depend on higher CPUID levels, which may not always
   * be available due to CPUID level capping or broken virtualization
-@@ -386,7 +379,7 @@ void switch_to_new_gdt(int cpu)
+@@ -386,7 +383,7 @@ void switch_to_new_gdt(int cpu)
  {
  	struct desc_ptr gdt_descr;
  
@@ -18739,7 +18589,7 @@ index 22018f7..2ae0e75 100644
  	gdt_descr.size = GDT_SIZE - 1;
  	load_gdt(&gdt_descr);
  	/* Reload the per-cpu base */
-@@ -874,6 +867,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -874,6 +871,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
  	setup_smep(c);
  	setup_smap(c);
  
@@ -18750,7 +18600,7 @@ index 22018f7..2ae0e75 100644
  	/*
  	 * The vendor-specific functions might have changed features.
  	 * Now we do "generic changes."
-@@ -882,6 +879,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -882,6 +883,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
  	/* Filter out anything that depends on CPUID levels we don't have */
  	filter_cpuid_features(c, true);
  
@@ -18761,7 +18611,7 @@ index 22018f7..2ae0e75 100644
  	/* If the model name is still unset, do table lookup. */
  	if (!c->x86_model_id[0]) {
  		const char *p;
-@@ -1069,10 +1070,12 @@ static __init int setup_disablecpuid(char *arg)
+@@ -1069,10 +1074,12 @@ static __init int setup_disablecpuid(char *arg)
  }
  __setup("clearcpuid=", setup_disablecpuid);
  
@@ -18776,7 +18626,7 @@ index 22018f7..2ae0e75 100644
  
  DEFINE_PER_CPU_FIRST(union irq_stack_union,
  		     irq_stack_union) __aligned(PAGE_SIZE);
-@@ -1086,7 +1089,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
+@@ -1086,7 +1093,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
  EXPORT_PER_CPU_SYMBOL(current_task);
  
  DEFINE_PER_CPU(unsigned long, kernel_stack) =
@@ -18785,7 +18635,7 @@ index 22018f7..2ae0e75 100644
  EXPORT_PER_CPU_SYMBOL(kernel_stack);
  
  DEFINE_PER_CPU(char *, irq_stack_ptr) =
-@@ -1231,7 +1234,7 @@ void __cpuinit cpu_init(void)
+@@ -1231,7 +1238,7 @@ void __cpuinit cpu_init(void)
  	load_ucode_ap();
  
  	cpu = stack_smp_processor_id();
@@ -18794,7 +18644,7 @@ index 22018f7..2ae0e75 100644
  	oist = &per_cpu(orig_ist, cpu);
  
  #ifdef CONFIG_NUMA
-@@ -1257,7 +1260,7 @@ void __cpuinit cpu_init(void)
+@@ -1257,7 +1264,7 @@ void __cpuinit cpu_init(void)
  	switch_to_new_gdt(cpu);
  	loadsegment(fs, 0);
  
@@ -18803,7 +18653,7 @@ index 22018f7..2ae0e75 100644
  
  	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
  	syscall_init();
-@@ -1266,7 +1269,6 @@ void __cpuinit cpu_init(void)
+@@ -1266,7 +1273,6 @@ void __cpuinit cpu_init(void)
  	wrmsrl(MSR_KERNEL_GS_BASE, 0);
  	barrier();
  
@@ -18811,7 +18661,7 @@ index 22018f7..2ae0e75 100644
  	enable_x2apic();
  
  	/*
-@@ -1318,7 +1320,7 @@ void __cpuinit cpu_init(void)
+@@ -1318,7 +1324,7 @@ void __cpuinit cpu_init(void)
  {
  	int cpu = smp_processor_id();
  	struct task_struct *curr = current;
@@ -19214,7 +19064,7 @@ index a9e2207..d70c83a 100644
  
  	intel_ds_init();
 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
-index 52441a2..f94fae8 100644
+index 8aac56b..588fb13 100644
 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
 @@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
@@ -20482,7 +20332,7 @@ index 8f3e2de..6b71e39 100644
  
  /*
 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 7272089..ee191c7 100644
+index 7272089..0b74104 100644
 --- a/arch/x86/kernel/entry_64.S
 +++ b/arch/x86/kernel/entry_64.S
 @@ -59,6 +59,8 @@
@@ -20838,14 +20688,22 @@ index 7272089..ee191c7 100644
 +	SET_RDI_INTO_CR3
 +	jmp 2f
 +1:
++
 +	mov %rdi,%rbx
++
++#ifdef CONFIG_PAX_KERNEXEC
++	GET_CR0_INTO_RDI
++	btr $16,%rdi
++	jnc 3f
++	SET_RDI_INTO_CR0
++#endif
++
 +	add $__START_KERNEL_map,%rbx
 +	sub phys_base(%rip),%rbx
 +
 +#ifdef CONFIG_PARAVIRT
 +	cmpl $0, pv_info+PARAVIRT_enabled
 +	jz 1f
-+	pushq %rdi
 +	i = 0
 +	.rept USER_PGD_PTRS
 +	mov i*8(%rbx),%rsi
@@ -20854,18 +20712,10 @@ index 7272089..ee191c7 100644
 +	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
 +	i = i + 1
 +	.endr
-+	popq %rdi
 +	jmp 2f
 +1:
 +#endif
 +
-+#ifdef CONFIG_PAX_KERNEXEC
-+	GET_CR0_INTO_RDI
-+	btr $16,%rdi
-+	jnc 3f
-+	SET_RDI_INTO_CR0
-+#endif
-+
 +	i = 0
 +	.rept USER_PGD_PTRS
 +	movb $0x67,i*8(%rbx)
@@ -22497,7 +22347,7 @@ index a836860..1b5c665 100644
 -	.skip PAGE_SIZE
 +	.fill 512,8,0
 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
-index 0fa6912..37fce70 100644
+index 0fa6912..b37438b 100644
 --- a/arch/x86/kernel/i386_ksyms_32.c
 +++ b/arch/x86/kernel/i386_ksyms_32.c
 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
@@ -22513,7 +22363,7 @@ index 0fa6912..37fce70 100644
  
  EXPORT_SYMBOL(__get_user_1);
  EXPORT_SYMBOL(__get_user_2);
-@@ -37,3 +41,7 @@ EXPORT_SYMBOL(strstr);
+@@ -37,3 +41,11 @@ EXPORT_SYMBOL(strstr);
  
  EXPORT_SYMBOL(csum_partial);
  EXPORT_SYMBOL(empty_zero_page);
@@ -22521,6 +22371,10 @@ index 0fa6912..37fce70 100644
 +#ifdef CONFIG_PAX_KERNEXEC
 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
 +#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
 index f7ea30d..6318acc 100644
 --- a/arch/x86/kernel/i387.c
@@ -24272,7 +24126,7 @@ index f2bb9c9..bed145d7 100644
  
  1:
 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index 56f7fcf..3b88ad1 100644
+index 56f7fcf..2cfe4f1 100644
 --- a/arch/x86/kernel/setup.c
 +++ b/arch/x86/kernel/setup.c
 @@ -110,6 +110,7 @@
@@ -24283,7 +24137,7 @@ index 56f7fcf..3b88ad1 100644
  
  /*
   * max_low_pfn_mapped: highest direct mapped pfn under 4GB
-@@ -205,10 +206,12 @@ EXPORT_SYMBOL(boot_cpu_data);
+@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
  #endif
  
  
@@ -24298,8 +24152,46 @@ index 56f7fcf..3b88ad1 100644
 +unsigned long mmu_cr4_features __read_only;
  #endif
  
++void set_in_cr4(unsigned long mask)
++{
++	unsigned long cr4 = read_cr4();
++
++	if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
++		return;
++
++	pax_open_kernel();
++	mmu_cr4_features |= mask;
++	pax_close_kernel();
++
++	if (trampoline_cr4_features)
++		*trampoline_cr4_features = mmu_cr4_features;
++	cr4 |= mask;
++	write_cr4(cr4);
++}
++EXPORT_SYMBOL(set_in_cr4);
++
++void clear_in_cr4(unsigned long mask)
++{
++	unsigned long cr4 = read_cr4();
++
++	if (!(cr4 & mask) && cr4 == mmu_cr4_features)
++		return;
++
++	pax_open_kernel();
++	mmu_cr4_features &= ~mask;
++	pax_close_kernel();
++
++	if (trampoline_cr4_features)
++		*trampoline_cr4_features = mmu_cr4_features;
++	cr4 &= ~mask;
++	write_cr4(cr4);
++}
++EXPORT_SYMBOL(clear_in_cr4);
++
  /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
-@@ -444,7 +447,7 @@ static void __init parse_setup_data(void)
+ int bootloader_type, bootloader_version;
+ 
+@@ -444,7 +483,7 @@ static void __init parse_setup_data(void)
  
  		switch (data->type) {
  		case SETUP_E820_EXT:
@@ -24308,7 +24200,7 @@ index 56f7fcf..3b88ad1 100644
  			break;
  		case SETUP_DTB:
  			add_dtb(pa_data);
-@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
+@@ -771,7 +810,7 @@ static void __init trim_bios_range(void)
  	 * area (640->1Mb) as ram even though it is not.
  	 * take them out.
  	 */
@@ -24317,7 +24209,7 @@ index 56f7fcf..3b88ad1 100644
  
  	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
  }
-@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
+@@ -779,7 +818,7 @@ static void __init trim_bios_range(void)
  /* called before trim_bios_range() to spare extra sanitize */
  static void __init e820_add_kernel_range(void)
  {
@@ -24326,7 +24218,7 @@ index 56f7fcf..3b88ad1 100644
  	u64 size = __pa_symbol(_end) - start;
  
  	/*
-@@ -841,8 +844,12 @@ static void __init trim_low_memory_range(void)
+@@ -841,8 +880,12 @@ static void __init trim_low_memory_range(void)
  
  void __init setup_arch(char **cmdline_p)
  {
@@ -24339,7 +24231,7 @@ index 56f7fcf..3b88ad1 100644
  
  	early_reserve_initrd();
  
-@@ -934,14 +941,14 @@ void __init setup_arch(char **cmdline_p)
+@@ -934,14 +977,14 @@ void __init setup_arch(char **cmdline_p)
  
  	if (!boot_params.hdr.root_flags)
  		root_mountflags &= ~MS_RDONLY;
@@ -24842,7 +24734,7 @@ index 0000000..5877189
 +	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 +}
 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index dbded5a..ace2781 100644
+index 48f8375..ace2781 100644
 --- a/arch/x86/kernel/sys_x86_64.c
 +++ b/arch/x86/kernel/sys_x86_64.c
 @@ -81,8 +81,8 @@ out:
@@ -24860,7 +24752,7 @@ index dbded5a..ace2781 100644
  				*begin = new_begin;
  		}
  	} else {
--		*begin = TASK_UNMAPPED_BASE;
+-		*begin = mmap_legacy_base();
 +		*begin = mm->mmap_base;
  		*end = TASK_SIZE;
  	}
@@ -25621,7 +25513,7 @@ index 9a907a6..f83f921 100644
  		     (unsigned long)VSYSCALL_START);
  
 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
-index b014d94..6d6ca7b 100644
+index b014d94..e775258 100644
 --- a/arch/x86/kernel/x8664_ksyms_64.c
 +++ b/arch/x86/kernel/x8664_ksyms_64.c
 @@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
@@ -25633,6 +25525,14 @@ index b014d94..6d6ca7b 100644
  
  EXPORT_SYMBOL(copy_page);
  EXPORT_SYMBOL(clear_page);
+@@ -66,3 +64,7 @@ EXPORT_SYMBOL(empty_zero_page);
+ #ifndef CONFIG_PARAVIRT
+ EXPORT_SYMBOL(native_load_gs_index);
+ #endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
 diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
 index 45a14db..075bb9b 100644
 --- a/arch/x86/kernel/x86_init.c
@@ -30668,7 +30568,7 @@ index d87dd6d..bf3fa66 100644
  
  	pte = kmemcheck_pte_lookup(address);
 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index 845df68..1d8d29f 100644
+index c1af323..4758dad 100644
 --- a/arch/x86/mm/mmap.c
 +++ b/arch/x86/mm/mmap.c
 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
@@ -30708,8 +30608,8 @@ index 845df68..1d8d29f 100644
   * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
   * does, but not when emulating X86_32
   */
--static unsigned long mmap_legacy_base(void)
-+static unsigned long mmap_legacy_base(struct mm_struct *mm)
+-unsigned long mmap_legacy_base(void)
++unsigned long mmap_legacy_base(struct mm_struct *mm)
  {
 -	if (mmap_is_ia32())
 +	if (mmap_is_ia32()) {
@@ -30726,7 +30626,7 @@ index 845df68..1d8d29f 100644
  		return TASK_UNMAPPED_BASE + mmap_rnd();
  }
  
-@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
+@@ -113,11 +126,23 @@ unsigned long mmap_legacy_base(void)
  void arch_pick_mmap_layout(struct mm_struct *mm)
  {
  	if (mmap_is_legacy()) {
@@ -41972,19 +41872,6 @@ index f975696..4597e21 100644
  
  #ifdef CONFIG_NET_POLL_CONTROLLER
  	/*
-diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
-index 25723d8..925ab8e 100644
---- a/drivers/net/can/usb/peak_usb/pcan_usb.c
-+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
-@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
- 		if ((mc->ptr + rec_len) > mc->end)
- 			goto decode_failed;
- 
--		memcpy(cf->data, mc->ptr, rec_len);
-+		memcpy(cf->data, mc->ptr, cf->can_dlc);
- 		mc->ptr += rec_len;
- 	}
- 
 diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
 index e1d2643..7f4133b 100644
 --- a/drivers/net/ethernet/8390/ax88796.c
@@ -45297,45 +45184,25 @@ index c699a30..b90a5fd 100644
  	pDevice->apdev->netdev_ops = &apdev_netdev_ops;
  
  	pDevice->apdev->type = ARPHRD_IEEE80211;
-diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
-index d7e51e4..d07eaab 100644
---- a/drivers/staging/zcache/tmem.c
-+++ b/drivers/staging/zcache/tmem.c
-@@ -51,7 +51,7 @@
-  * A tmem host implementation must use this function to register callbacks
-  * for memory allocation.
-  */
--static struct tmem_hostops tmem_hostops;
-+static tmem_hostops_no_const tmem_hostops;
- 
- static void tmem_objnode_tree_init(void);
- 
-@@ -65,7 +65,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
-  * A tmem host implementation must use this function to register
-  * callbacks for a page-accessible memory (PAM) implementation.
-  */
--static struct tmem_pamops tmem_pamops;
-+static tmem_pamops_no_const tmem_pamops;
- 
- void tmem_register_pamops(struct tmem_pamops *m)
- {
 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
-index d128ce2..a43980c 100644
+index d128ce2..fc1f9a1 100644
 --- a/drivers/staging/zcache/tmem.h
 +++ b/drivers/staging/zcache/tmem.h
-@@ -226,6 +226,7 @@ struct tmem_pamops {
+@@ -225,7 +225,7 @@ struct tmem_pamops {
+ 	bool (*is_remote)(void *);
  	int (*replace_in_obj)(void *, struct tmem_obj *);
  #endif
- };
-+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
+-};
++} __no_const;
  extern void tmem_register_pamops(struct tmem_pamops *m);
  
  /* memory allocation methods provided by the host implementation */
-@@ -235,6 +236,7 @@ struct tmem_hostops {
+@@ -234,7 +234,7 @@ struct tmem_hostops {
+ 	void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
  	struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
  	void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
- };
-+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
+-};
++} __no_const;
  extern void tmem_register_hostops(struct tmem_hostops *m);
  
  /* core tmem accessor functions */
@@ -47004,7 +46871,7 @@ index d6bea3e..60b250e 100644
  
  /**
 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
-index 6ef94bc..1b41265 100644
+index 028fc83..65bb105 100644
 --- a/drivers/usb/wusbcore/wa-xfer.c
 +++ b/drivers/usb/wusbcore/wa-xfer.c
 @@ -296,7 +296,7 @@ out:
@@ -52866,7 +52733,7 @@ index e4141f2..d8263e8 100644
  		i += packet_length_size;
  		if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
 diff --git a/fs/exec.c b/fs/exec.c
-index ffd7a81..3c84660 100644
+index 1f44670..3c84660 100644
 --- a/fs/exec.c
 +++ b/fs/exec.c
 @@ -55,8 +55,20 @@
@@ -53073,24 +52940,6 @@ index ffd7a81..3c84660 100644
  	/*
  	 * cover the whole range: [new_start, old_end)
  	 */
-@@ -607,7 +653,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
- 		return -ENOMEM;
- 
- 	lru_add_drain();
--	tlb_gather_mmu(&tlb, mm, 0);
-+	tlb_gather_mmu(&tlb, mm, old_start, old_end);
- 	if (new_end > old_start) {
- 		/*
- 		 * when the old and new regions overlap clear from new_end.
-@@ -624,7 +670,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
- 		free_pgd_range(&tlb, old_start, old_end, new_end,
- 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
- 	}
--	tlb_finish_mmu(&tlb, new_end, old_end);
-+	tlb_finish_mmu(&tlb, old_start, old_end);
- 
- 	/*
- 	 * Shrink the vma to just the new range.  Always succeeds.
 @@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
  	stack_top = arch_align_stack(stack_top);
  	stack_top = PAGE_ALIGN(stack_top);
@@ -58496,7 +58345,7 @@ index 6b6a993..807cccc 100644
  		kfree(s);
  }
 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
-index 3e636d8..350cc48 100644
+index 65fc60a..350cc48 100644
 --- a/fs/proc/task_mmu.c
 +++ b/fs/proc/task_mmu.c
 @@ -11,12 +11,19 @@
@@ -58663,34 +58512,6 @@ index 3e636d8..350cc48 100644
  		   mss.resident >> 10,
  		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
  		   mss.shared_clean  >> 10,
-@@ -792,14 +843,14 @@ typedef struct {
- } pagemap_entry_t;
- 
- struct pagemapread {
--	int pos, len;
-+	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
- 	pagemap_entry_t *buffer;
- };
- 
- #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
- #define PAGEMAP_WALK_MASK	(PMD_MASK)
- 
--#define PM_ENTRY_BYTES      sizeof(u64)
-+#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
- #define PM_STATUS_BITS      3
- #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
- #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
-@@ -1038,8 +1089,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
- 	if (!count)
- 		goto out_task;
- 
--	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
--	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
-+	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-+	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
- 	ret = -ENOMEM;
- 	if (!pm.buffer)
- 		goto out_task;
 @@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
  	int n;
  	char buffer[50];
@@ -70603,19 +70424,6 @@ index a59ff51..2594a70 100644
  #endif /* CONFIG_MMU */
  
  #endif /* !__ASSEMBLY__ */
-diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
-index 13821c3..5672d7e 100644
---- a/include/asm-generic/tlb.h
-+++ b/include/asm-generic/tlb.h
-@@ -112,7 +112,7 @@ struct mmu_gather {
- 
- #define HAVE_GENERIC_MMU_GATHER
- 
--void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
-+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
- void tlb_flush_mmu(struct mmu_gather *tlb);
- void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
- 							unsigned long end);
 diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
 index c184aa8..d049942 100644
 --- a/include/asm-generic/uaccess.h
@@ -74681,7 +74489,7 @@ index 6dacb93..6174423 100644
  static inline void anon_vma_merge(struct vm_area_struct *vma,
  				  struct vm_area_struct *next)
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 178a8d9..450bf11 100644
+index 3aeb14b..73816a6 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -62,6 +62,7 @@ struct bio_list;
@@ -74701,10 +74509,11 @@ index 178a8d9..450bf11 100644
  extern signed long schedule_timeout_interruptible(signed long timeout);
  extern signed long schedule_timeout_killable(signed long timeout);
  extern signed long schedule_timeout_uninterruptible(signed long timeout);
-@@ -314,6 +315,19 @@ struct nsproxy;
+@@ -314,7 +315,19 @@ struct nsproxy;
  struct user_namespace;
  
  #ifdef CONFIG_MMU
+-extern unsigned long mmap_legacy_base(void);
 +
 +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
 +extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
@@ -74717,11 +74526,11 @@ index 178a8d9..450bf11 100644
 +
 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
-+
++extern unsigned long mmap_legacy_base(struct mm_struct *mm);
  extern void arch_pick_mmap_layout(struct mm_struct *mm);
  extern unsigned long
  arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-@@ -591,6 +605,17 @@ struct signal_struct {
+@@ -592,6 +605,17 @@ struct signal_struct {
  #ifdef CONFIG_TASKSTATS
  	struct taskstats *stats;
  #endif
@@ -74739,7 +74548,7 @@ index 178a8d9..450bf11 100644
  #ifdef CONFIG_AUDIT
  	unsigned audit_tty;
  	unsigned audit_tty_log_passwd;
-@@ -671,6 +696,14 @@ struct user_struct {
+@@ -672,6 +696,14 @@ struct user_struct {
  	struct key *session_keyring;	/* UID's default session keyring */
  #endif
  
@@ -74754,7 +74563,7 @@ index 178a8d9..450bf11 100644
  	/* Hash table maintenance information */
  	struct hlist_node uidhash_node;
  	kuid_t uid;
-@@ -1158,8 +1191,8 @@ struct task_struct {
+@@ -1159,8 +1191,8 @@ struct task_struct {
  	struct list_head thread_group;
  
  	struct completion *vfork_done;		/* for vfork() */
@@ -74765,7 +74574,7 @@ index 178a8d9..450bf11 100644
  
  	cputime_t utime, stime, utimescaled, stimescaled;
  	cputime_t gtime;
-@@ -1184,11 +1217,6 @@ struct task_struct {
+@@ -1185,11 +1217,6 @@ struct task_struct {
  	struct task_cputime cputime_expires;
  	struct list_head cpu_timers[3];
  
@@ -74777,7 +74586,7 @@ index 178a8d9..450bf11 100644
  	char comm[TASK_COMM_LEN]; /* executable name excluding path
  				     - access with [gs]et_task_comm (which lock
  				       it with task_lock())
-@@ -1205,6 +1233,10 @@ struct task_struct {
+@@ -1206,6 +1233,10 @@ struct task_struct {
  #endif
  /* CPU-specific state of this task */
  	struct thread_struct thread;
@@ -74788,7 +74597,7 @@ index 178a8d9..450bf11 100644
  /* filesystem information */
  	struct fs_struct *fs;
  /* open file information */
-@@ -1278,6 +1310,10 @@ struct task_struct {
+@@ -1279,6 +1310,10 @@ struct task_struct {
  	gfp_t lockdep_reclaim_gfp;
  #endif
  
@@ -74799,7 +74608,7 @@ index 178a8d9..450bf11 100644
  /* journalling filesystem info */
  	void *journal_info;
  
-@@ -1316,6 +1352,10 @@ struct task_struct {
+@@ -1317,6 +1352,10 @@ struct task_struct {
  	/* cg_list protected by css_set_lock and tsk->alloc_lock */
  	struct list_head cg_list;
  #endif
@@ -74810,7 +74619,7 @@ index 178a8d9..450bf11 100644
  #ifdef CONFIG_FUTEX
  	struct robust_list_head __user *robust_list;
  #ifdef CONFIG_COMPAT
-@@ -1416,8 +1456,76 @@ struct task_struct {
+@@ -1417,8 +1456,76 @@ struct task_struct {
  	unsigned int	sequential_io;
  	unsigned int	sequential_io_avg;
  #endif
@@ -74887,7 +74696,7 @@ index 178a8d9..450bf11 100644
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
  #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
  
-@@ -1476,7 +1584,7 @@ struct pid_namespace;
+@@ -1477,7 +1584,7 @@ struct pid_namespace;
  pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
  			struct pid_namespace *ns);
  
@@ -74896,7 +74705,7 @@ index 178a8d9..450bf11 100644
  {
  	return tsk->pid;
  }
-@@ -1919,7 +2027,9 @@ void yield(void);
+@@ -1920,7 +2027,9 @@ void yield(void);
  extern struct exec_domain	default_exec_domain;
  
  union thread_union {
@@ -74906,7 +74715,7 @@ index 178a8d9..450bf11 100644
  	unsigned long stack[THREAD_SIZE/sizeof(long)];
  };
  
-@@ -1952,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -1953,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
   */
  
  extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -74914,7 +74723,7 @@ index 178a8d9..450bf11 100644
  extern struct task_struct *find_task_by_pid_ns(pid_t nr,
  		struct pid_namespace *ns);
  
-@@ -2118,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2119,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
  extern void exit_itimers(struct signal_struct *);
  extern void flush_itimer_signals(void);
  
@@ -74923,7 +74732,7 @@ index 178a8d9..450bf11 100644
  
  extern int allow_signal(int);
  extern int disallow_signal(int);
-@@ -2309,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2310,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
  
  #endif
  
@@ -75489,7 +75298,7 @@ index a5ffd32..0935dea 100644
  extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
  				   unsigned long offset, size_t size,
 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
-index 4147d70..d356a10 100644
+index 84662ec..d8f8adb 100644
 --- a/include/linux/syscalls.h
 +++ b/include/linux/syscalls.h
 @@ -97,8 +97,12 @@ struct sigaltstack;
@@ -78773,7 +78582,7 @@ index 7bb73f9..d7978ed 100644
  {
  	struct signal_struct *sig = current->signal;
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 987b28a..11ee8a5 100644
+index ffbc090..08ceeee 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
@@ -79068,7 +78877,7 @@ index 987b28a..11ee8a5 100644
  		if (clone_flags & CLONE_VFORK) {
  			p->vfork_done = &vfork;
  			init_completion(&vfork);
-@@ -1723,7 +1785,7 @@ void __init proc_caches_init(void)
+@@ -1729,7 +1791,7 @@ void __init proc_caches_init(void)
  	mm_cachep = kmem_cache_create("mm_struct",
  			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
  			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -79077,7 +78886,7 @@ index 987b28a..11ee8a5 100644
  	mmap_init();
  	nsproxy_cache_init();
  }
-@@ -1763,7 +1825,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1769,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
  		return 0;
  
  	/* don't need lock here; in the worst case we'll do useless copy */
@@ -79086,7 +78895,7 @@ index 987b28a..11ee8a5 100644
  		return 0;
  
  	*new_fsp = copy_fs_struct(fs);
-@@ -1875,7 +1937,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1881,7 +1943,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
  			fs = current->fs;
  			spin_lock(&fs->lock);
  			current->fs = new_fs;
@@ -82096,7 +81905,7 @@ index e8b3350..d83d44e 100644
  	.priority = CPU_PRI_MIGRATION,
  };
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index c61a614..d7f3d7e 100644
+index 03b73be..9422b9f 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
 @@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
@@ -82108,7 +81917,7 @@ index c61a614..d7f3d7e 100644
  	p->mm->numa_scan_offset = 0;
  }
  
-@@ -5686,7 +5686,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
+@@ -5687,7 +5687,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
   * run_rebalance_domains is triggered when needed from the scheduler tick.
   * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
   */
@@ -84466,7 +84275,7 @@ index b32b70c..e512eb0 100644
  	set_page_address(page, (void *)vaddr);
  
 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 5cf99bf..5c01c2f 100644
+index 7c5eb85..5c01c2f 100644
 --- a/mm/hugetlb.c
 +++ b/mm/hugetlb.c
 @@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
@@ -84511,15 +84320,6 @@ index 5cf99bf..5c01c2f 100644
  	if (ret)
  		goto out;
  
-@@ -2490,7 +2494,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
- 
- 	mm = vma->vm_mm;
- 
--	tlb_gather_mmu(&tlb, mm, 0);
-+	tlb_gather_mmu(&tlb, mm, start, end);
- 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
- 	tlb_finish_mmu(&tlb, start, end);
- }
 @@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  	return 1;
  }
@@ -84872,39 +84672,10 @@ index ceb0c7f..b2b8e94 100644
  	} else {
  		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
 diff --git a/mm/memory.c b/mm/memory.c
-index 5e50800..7c0340f 100644
+index 5a35443..7c0340f 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
-@@ -211,14 +211,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
-  *	tear-down from @mm. The @fullmm argument is used when @mm is without
-  *	users and we're going to destroy the full address space (exit/execve).
-  */
--void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
-+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
- 
--	tlb->fullmm     = fullmm;
-+	/* Is it from 0 to ~0? */
-+	tlb->fullmm     = !(start | (end+1));
- 	tlb->need_flush_all = 0;
--	tlb->start	= -1UL;
--	tlb->end	= 0;
-+	tlb->start	= start;
-+	tlb->end	= end;
- 	tlb->need_flush = 0;
- 	tlb->local.next = NULL;
- 	tlb->local.nr   = 0;
-@@ -258,8 +259,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
- {
- 	struct mmu_gather_batch *batch, *next;
- 
--	tlb->start = start;
--	tlb->end   = end;
- 	tlb_flush_mmu(tlb);
- 
- 	/* keep the page table cache within bounds */
-@@ -429,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  		free_pte_range(tlb, pmd, addr);
  	} while (pmd++, addr = next, addr != end);
  
@@ -84912,7 +84683,7 @@ index 5e50800..7c0340f 100644
  	start &= PUD_MASK;
  	if (start < floor)
  		return;
-@@ -443,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -442,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  	pmd = pmd_offset(pud, start);
  	pud_clear(pud);
  	pmd_free_tlb(tlb, pmd, start);
@@ -84921,7 +84692,7 @@ index 5e50800..7c0340f 100644
  }
  
  static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -462,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -461,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
  	} while (pud++, addr = next, addr != end);
  
@@ -84929,7 +84700,7 @@ index 5e50800..7c0340f 100644
  	start &= PGDIR_MASK;
  	if (start < floor)
  		return;
-@@ -476,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -475,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  	pud = pud_offset(pgd, start);
  	pgd_clear(pgd);
  	pud_free_tlb(tlb, pud, start);
@@ -84938,65 +84709,7 @@ index 5e50800..7c0340f 100644
  }
  
  /*
-@@ -1101,7 +1106,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
- 	spinlock_t *ptl;
- 	pte_t *start_pte;
- 	pte_t *pte;
--	unsigned long range_start = addr;
- 
- again:
- 	init_rss_vec(rss);
-@@ -1204,17 +1208,25 @@ again:
- 	 * and page-free while holding it.
- 	 */
- 	if (force_flush) {
-+		unsigned long old_end;
-+
- 		force_flush = 0;
- 
--#ifdef HAVE_GENERIC_MMU_GATHER
--		tlb->start = range_start;
-+		/*
-+		 * Flush the TLB just for the previous segment,
-+		 * then update the range to be the remaining
-+		 * TLB range.
-+		 */
-+		old_end = tlb->end;
- 		tlb->end = addr;
--#endif
-+
- 		tlb_flush_mmu(tlb);
--		if (addr != end) {
--			range_start = addr;
-+
-+		tlb->start = addr;
-+		tlb->end = old_end;
-+
-+		if (addr != end)
- 			goto again;
--		}
- 	}
- 
- 	return addr;
-@@ -1399,7 +1411,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
- 	unsigned long end = start + size;
- 
- 	lru_add_drain();
--	tlb_gather_mmu(&tlb, mm, 0);
-+	tlb_gather_mmu(&tlb, mm, start, end);
- 	update_hiwater_rss(mm);
- 	mmu_notifier_invalidate_range_start(mm, start, end);
- 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
-@@ -1425,7 +1437,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
- 	unsigned long end = address + size;
- 
- 	lru_add_drain();
--	tlb_gather_mmu(&tlb, mm, 0);
-+	tlb_gather_mmu(&tlb, mm, address, end);
- 	update_hiwater_rss(mm);
- 	mmu_notifier_invalidate_range_start(mm, address, end);
- 	unmap_single_vma(&tlb, vma, address, end, details);
-@@ -1638,12 +1650,6 @@ no_page_table:
+@@ -1644,12 +1650,6 @@ no_page_table:
  	return page;
  }
  
@@ -85009,7 +84722,7 @@ index 5e50800..7c0340f 100644
  /**
   * __get_user_pages() - pin user pages in memory
   * @tsk:	task_struct of target task
-@@ -1730,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1736,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  
  	i = 0;
  
@@ -85022,7 +84735,7 @@ index 5e50800..7c0340f 100644
  		if (!vma && in_gate_area(mm, start)) {
  			unsigned long pg = start & PAGE_MASK;
  			pgd_t *pgd;
-@@ -1782,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1788,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  			goto next_page;
  		}
  
@@ -85031,7 +84744,7 @@ index 5e50800..7c0340f 100644
  		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
  		    !(vm_flags & vma->vm_flags))
  			return i ? : -EFAULT;
-@@ -1811,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1817,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  				int ret;
  				unsigned int fault_flags = 0;
  
@@ -85043,7 +84756,7 @@ index 5e50800..7c0340f 100644
  				if (foll_flags & FOLL_WRITE)
  					fault_flags |= FAULT_FLAG_WRITE;
  				if (nonblocking)
-@@ -1895,7 +1896,7 @@ next_page:
+@@ -1901,7 +1896,7 @@ next_page:
  			start += page_increm * PAGE_SIZE;
  			nr_pages -= page_increm;
  		} while (nr_pages && start < vma->vm_end);
@@ -85052,7 +84765,7 @@ index 5e50800..7c0340f 100644
  	return i;
  }
  EXPORT_SYMBOL(__get_user_pages);
-@@ -2102,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2108,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
  	page_add_file_rmap(page);
  	set_pte_at(mm, addr, pte, mk_pte(page, prot));
  
@@ -85063,7 +84776,7 @@ index 5e50800..7c0340f 100644
  	retval = 0;
  	pte_unmap_unlock(pte, ptl);
  	return retval;
-@@ -2146,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2152,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
  	if (!page_count(page))
  		return -EINVAL;
  	if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -85085,7 +84798,7 @@ index 5e50800..7c0340f 100644
  	}
  	return insert_page(vma, addr, page, vma->vm_page_prot);
  }
-@@ -2231,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2237,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
  			unsigned long pfn)
  {
  	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -85093,7 +84806,7 @@ index 5e50800..7c0340f 100644
  
  	if (addr < vma->vm_start || addr >= vma->vm_end)
  		return -EFAULT;
-@@ -2478,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2484,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
  
  	BUG_ON(pud_huge(*pud));
  
@@ -85104,7 +84817,7 @@ index 5e50800..7c0340f 100644
  	if (!pmd)
  		return -ENOMEM;
  	do {
-@@ -2498,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2504,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
  	unsigned long next;
  	int err;
  
@@ -85115,7 +84828,7 @@ index 5e50800..7c0340f 100644
  	if (!pud)
  		return -ENOMEM;
  	do {
-@@ -2586,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2592,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
  		copy_user_highpage(dst, src, va, vma);
  }
  
@@ -85302,7 +85015,7 @@ index 5e50800..7c0340f 100644
  /*
   * This routine handles present pages, when users try to write
   * to a shared page. It is done by copying the page to a new address
-@@ -2802,6 +3004,12 @@ gotten:
+@@ -2808,6 +3004,12 @@ gotten:
  	 */
  	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
  	if (likely(pte_same(*page_table, orig_pte))) {
@@ -85315,7 +85028,7 @@ index 5e50800..7c0340f 100644
  		if (old_page) {
  			if (!PageAnon(old_page)) {
  				dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2853,6 +3061,10 @@ gotten:
+@@ -2859,6 +3061,10 @@ gotten:
  			page_remove_rmap(old_page);
  		}
  
@@ -85326,7 +85039,7 @@ index 5e50800..7c0340f 100644
  		/* Free the old page.. */
  		new_page = old_page;
  		ret |= VM_FAULT_WRITE;
-@@ -3128,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3134,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	swap_free(entry);
  	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
  		try_to_free_swap(page);
@@ -85338,7 +85051,7 @@ index 5e50800..7c0340f 100644
  	unlock_page(page);
  	if (page != swapcache) {
  		/*
-@@ -3151,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3157,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(vma, address, page_table);
@@ -85350,7 +85063,7 @@ index 5e50800..7c0340f 100644
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  out:
-@@ -3170,40 +3392,6 @@ out_release:
+@@ -3176,40 +3392,6 @@ out_release:
  }
  
  /*
@@ -85391,7 +85104,7 @@ index 5e50800..7c0340f 100644
   * We enter with non-exclusive mmap_sem (to exclude vma changes,
   * but allow concurrent faults), and pte mapped but not yet locked.
   * We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3212,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3218,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  		unsigned long address, pte_t *page_table, pmd_t *pmd,
  		unsigned int flags)
  {
@@ -85424,7 +85137,7 @@ index 5e50800..7c0340f 100644
  	if (unlikely(anon_vma_prepare(vma)))
  		goto oom;
  	page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3256,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3262,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	if (!pte_none(*page_table))
  		goto release;
  
@@ -85436,7 +85149,7 @@ index 5e50800..7c0340f 100644
  	inc_mm_counter_fast(mm, MM_ANONPAGES);
  	page_add_new_anon_rmap(page, vma, address);
  setpte:
-@@ -3263,6 +3452,12 @@ setpte:
+@@ -3269,6 +3452,12 @@ setpte:
  
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(vma, address, page_table);
@@ -85449,7 +85162,7 @@ index 5e50800..7c0340f 100644
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  	return 0;
-@@ -3406,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3412,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	 */
  	/* Only go through if we didn't race with anybody else... */
  	if (likely(pte_same(*page_table, orig_pte))) {
@@ -85462,7 +85175,7 @@ index 5e50800..7c0340f 100644
  		flush_icache_page(vma, page);
  		entry = mk_pte(page, vma->vm_page_prot);
  		if (flags & FAULT_FLAG_WRITE)
-@@ -3425,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3431,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  
  		/* no need to invalidate: a not-present page won't be cached */
  		update_mmu_cache(vma, address, page_table);
@@ -85477,7 +85190,7 @@ index 5e50800..7c0340f 100644
  	} else {
  		if (cow_page)
  			mem_cgroup_uncharge_page(cow_page);
-@@ -3746,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3752,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
  		if (flags & FAULT_FLAG_WRITE)
  			flush_tlb_fix_spurious_fault(vma, address);
  	}
@@ -85490,7 +85203,7 @@ index 5e50800..7c0340f 100644
  unlock:
  	pte_unmap_unlock(pte, ptl);
  	return 0;
-@@ -3762,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3768,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	pmd_t *pmd;
  	pte_t *pte;
  
@@ -85501,7 +85214,7 @@ index 5e50800..7c0340f 100644
  	__set_current_state(TASK_RUNNING);
  
  	count_vm_event(PGFAULT);
-@@ -3773,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3779,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	if (unlikely(is_vm_hugetlb_page(vma)))
  		return hugetlb_fault(mm, vma, address, flags);
  
@@ -85536,7 +85249,7 @@ index 5e50800..7c0340f 100644
  retry:
  	pgd = pgd_offset(mm, address);
  	pud = pud_alloc(mm, pgd, address);
-@@ -3871,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3877,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  	spin_unlock(&mm->page_table_lock);
  	return 0;
  }
@@ -85560,7 +85273,7 @@ index 5e50800..7c0340f 100644
  #endif /* __PAGETABLE_PUD_FOLDED */
  
  #ifndef __PAGETABLE_PMD_FOLDED
-@@ -3901,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3907,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  	spin_unlock(&mm->page_table_lock);
  	return 0;
  }
@@ -85591,7 +85304,7 @@ index 5e50800..7c0340f 100644
  #endif /* __PAGETABLE_PMD_FOLDED */
  
  #if !defined(__HAVE_ARCH_GATE_AREA)
-@@ -3914,7 +4202,7 @@ static int __init gate_vma_init(void)
+@@ -3920,7 +4202,7 @@ static int __init gate_vma_init(void)
  	gate_vma.vm_start = FIXADDR_USER_START;
  	gate_vma.vm_end = FIXADDR_USER_END;
  	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -85600,7 +85313,7 @@ index 5e50800..7c0340f 100644
  
  	return 0;
  }
-@@ -4048,8 +4336,8 @@ out:
+@@ -4054,8 +4336,8 @@ out:
  	return ret;
  }
  
@@ -85611,7 +85324,7 @@ index 5e50800..7c0340f 100644
  {
  	resource_size_t phys_addr;
  	unsigned long prot = 0;
-@@ -4074,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+@@ -4080,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
   * Access another process' address space as given in mm.  If non-NULL, use the
   * given task for page fault accounting.
   */
@@ -85622,7 +85335,7 @@ index 5e50800..7c0340f 100644
  {
  	struct vm_area_struct *vma;
  	void *old_buf = buf;
-@@ -4083,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4089,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  	down_read(&mm->mmap_sem);
  	/* ignore errors, just check how much was successfully transferred */
  	while (len) {
@@ -85631,7 +85344,7 @@ index 5e50800..7c0340f 100644
  		void *maddr;
  		struct page *page = NULL;
  
-@@ -4142,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4148,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
   *
   * The caller must hold a reference on @mm.
   */
@@ -85642,7 +85355,7 @@ index 5e50800..7c0340f 100644
  {
  	return __access_remote_vm(NULL, mm, addr, buf, len, write);
  }
-@@ -4153,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -4159,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
   * Source/target buffer must be kernel space,
   * Do not walk the page table directly, use get_user_pages
   */
@@ -85819,7 +85532,7 @@ index 79b7cf7..9944291 100644
  	    capable(CAP_IPC_LOCK))
  		ret = do_mlockall(flags);
 diff --git a/mm/mmap.c b/mm/mmap.c
-index 7dbe397..bfb7626 100644
+index 8d25fdc..bfb7626 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
 @@ -36,6 +36,7 @@
@@ -86688,15 +86401,6 @@ index 7dbe397..bfb7626 100644
  		if (vma->vm_flags & VM_ACCOUNT)
  			nr_accounted += nrpages;
  		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2356,7 +2728,7 @@ static void unmap_region(struct mm_struct *mm,
- 	struct mmu_gather tlb;
- 
- 	lru_add_drain();
--	tlb_gather_mmu(&tlb, mm, 0);
-+	tlb_gather_mmu(&tlb, mm, start, end);
- 	update_hiwater_rss(mm);
- 	unmap_vmas(&tlb, vma, start, end);
- 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 @@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  	vma->vm_prev = NULL;
@@ -87002,15 +86706,6 @@ index 7dbe397..bfb7626 100644
  	return addr;
  }
  
-@@ -2735,7 +3232,7 @@ void exit_mmap(struct mm_struct *mm)
- 
- 	lru_add_drain();
- 	flush_cache_mm(mm);
--	tlb_gather_mmu(&tlb, mm, 1);
-+	tlb_gather_mmu(&tlb, mm, 0, -1);
- 	/* update_hiwater_rss(mm) here? but nobody should be looking */
- 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
- 	unmap_vmas(&tlb, vma, 0, -1);
 @@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
  	while (vma) {
  		if (vma->vm_flags & VM_ACCOUNT)
@@ -93669,7 +93364,7 @@ index 57ee84d..8b99cf5 100644
  			);
  
 diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
-index 1076fe1..8285fd7 100644
+index 1076fe1..f190285 100644
 --- a/net/netlink/genetlink.c
 +++ b/net/netlink/genetlink.c
 @@ -310,18 +310,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
@@ -93710,27 +93405,6 @@ index 1076fe1..8285fd7 100644
  			return 0;
  		}
  	}
-@@ -789,6 +791,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
- 	struct net *net = sock_net(skb->sk);
- 	int chains_to_skip = cb->args[0];
- 	int fams_to_skip = cb->args[1];
-+	bool need_locking = chains_to_skip || fams_to_skip;
-+
-+	if (need_locking)
-+		genl_lock();
- 
- 	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
- 		n = 0;
-@@ -810,6 +816,9 @@ errout:
- 	cb->args[0] = i;
- 	cb->args[1] = n;
- 
-+	if (need_locking)
-+		genl_unlock();
-+
- 	return skb->len;
- }
- 
 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
 index ec0c80f..41e1830 100644
 --- a/net/netrom/af_netrom.c

diff --git a/3.10.7/4425_grsec_remove_EI_PAX.patch b/3.10.9/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 3.10.7/4425_grsec_remove_EI_PAX.patch
rename to 3.10.9/4425_grsec_remove_EI_PAX.patch

diff --git a/3.10.7/4427_force_XATTR_PAX_tmpfs.patch b/3.10.9/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 3.10.7/4427_force_XATTR_PAX_tmpfs.patch
rename to 3.10.9/4427_force_XATTR_PAX_tmpfs.patch

diff --git a/3.10.7/4430_grsec-remove-localversion-grsec.patch b/3.10.9/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 3.10.7/4430_grsec-remove-localversion-grsec.patch
rename to 3.10.9/4430_grsec-remove-localversion-grsec.patch

diff --git a/3.10.7/4435_grsec-mute-warnings.patch b/3.10.9/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 3.10.7/4435_grsec-mute-warnings.patch
rename to 3.10.9/4435_grsec-mute-warnings.patch

diff --git a/3.10.7/4440_grsec-remove-protected-paths.patch b/3.10.9/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 3.10.7/4440_grsec-remove-protected-paths.patch
rename to 3.10.9/4440_grsec-remove-protected-paths.patch

diff --git a/3.10.7/4450_grsec-kconfig-default-gids.patch b/3.10.9/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 3.10.7/4450_grsec-kconfig-default-gids.patch
rename to 3.10.9/4450_grsec-kconfig-default-gids.patch

diff --git a/3.10.7/4465_selinux-avc_audit-log-curr_ip.patch b/3.10.9/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 3.10.7/4465_selinux-avc_audit-log-curr_ip.patch
rename to 3.10.9/4465_selinux-avc_audit-log-curr_ip.patch

diff --git a/3.10.7/4470_disable-compat_vdso.patch b/3.10.9/4470_disable-compat_vdso.patch
similarity index 100%
rename from 3.10.7/4470_disable-compat_vdso.patch
rename to 3.10.9/4470_disable-compat_vdso.patch

diff --git a/3.10.7/4475_emutramp_default_on.patch b/3.10.9/4475_emutramp_default_on.patch
similarity index 100%
rename from 3.10.7/4475_emutramp_default_on.patch
rename to 3.10.9/4475_emutramp_default_on.patch

diff --git a/3.2.50/0000_README b/3.2.50/0000_README
index df20efb..a654e82 100644
--- a/3.2.50/0000_README
+++ b/3.2.50/0000_README
@@ -118,7 +118,7 @@ Patch:	1049_linux-3.2.50.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.2.50
 
-Patch:	4420_grsecurity-2.9.1-3.2.50-201308181813.patch
+Patch:	4420_grsecurity-2.9.1-3.2.50-201308202017.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308181813.patch b/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308202017.patch
similarity index 99%
rename from 3.2.50/4420_grsecurity-2.9.1-3.2.50-201308181813.patch
rename to 3.2.50/4420_grsecurity-2.9.1-3.2.50-201308202017.patch
index d8e4449..01378eb 100644
--- a/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308181813.patch
+++ b/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308202017.patch
@@ -5050,7 +5050,7 @@ index 9844662..04a2a1e 100644
  	do_exit(err);
  
 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
-index 7d14bb6..1305601 100644
+index 7d14bb69..1305601 100644
 --- a/arch/powerpc/kernel/vdso.c
 +++ b/arch/powerpc/kernel/vdso.c
 @@ -35,6 +35,7 @@
@@ -92175,7 +92175,7 @@ index 3d1d55d..1ee2a18 100644
  	.exit = netlink_net_exit,
  };
 diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
-index 874f8ff..d8b8f87 100644
+index 874f8ff..339bb58 100644
 --- a/net/netlink/genetlink.c
 +++ b/net/netlink/genetlink.c
 @@ -288,18 +288,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
@@ -92216,27 +92216,6 @@ index 874f8ff..d8b8f87 100644
  			return 0;
  		}
  	}
-@@ -700,6 +702,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
- 	struct net *net = sock_net(skb->sk);
- 	int chains_to_skip = cb->args[0];
- 	int fams_to_skip = cb->args[1];
-+	bool need_locking = chains_to_skip || fams_to_skip;
-+
-+	if (need_locking)
-+		genl_lock();
- 
- 	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
- 		n = 0;
-@@ -721,6 +727,9 @@ errout:
- 	cb->args[0] = i;
- 	cb->args[1] = n;
- 
-+	if (need_locking)
-+		genl_unlock();
-+
- 	return skb->len;
- }
- 
 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
 index 3df7c5a..8f324b0 100644
 --- a/net/netrom/af_netrom.c


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* [gentoo-commits] proj/hardened-patchset:master commit in: 3.10.9/, 3.10.7/, 3.2.50/
@ 2013-08-22 12:09 Anthony G. Basile
  0 siblings, 0 replies; 2+ messages in thread
From: Anthony G. Basile @ 2013-08-22 12:09 UTC (permalink / raw
  To: gentoo-commits

commit:     6edbe713204f28f5ecd1704ff61515c5e28ea9ae
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 22 11:18:30 2013 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Thu Aug 22 12:08:43 2013 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=6edbe713

Grsec/PaX: 2.9.1-{3.2.50,3.10.9}-201308202015

---
 {3.10.7 => 3.10.9}/0000_README                     |   10 +-
 3.10.9/1007_linux-3.10.8.patch                     | 1793 ++++++++++++++++++++
 3.10.9/1008_linux-3.10.9.patch                     |   37 +
 ...4420_grsecurity-2.9.1-3.10.9-201308202015.patch |  768 +++------
 {3.10.7 => 3.10.9}/4425_grsec_remove_EI_PAX.patch  |    0
 .../4427_force_XATTR_PAX_tmpfs.patch               |    0
 .../4430_grsec-remove-localversion-grsec.patch     |    0
 {3.10.7 => 3.10.9}/4435_grsec-mute-warnings.patch  |    0
 .../4440_grsec-remove-protected-paths.patch        |    0
 .../4450_grsec-kconfig-default-gids.patch          |    0
 .../4465_selinux-avc_audit-log-curr_ip.patch       |    0
 {3.10.7 => 3.10.9}/4470_disable-compat_vdso.patch  |    0
 {3.10.7 => 3.10.9}/4475_emutramp_default_on.patch  |    0
 3.2.50/0000_README                                 |    2 +-
 ...420_grsecurity-2.9.1-3.2.50-201308202017.patch} |   25 +-
 15 files changed, 2063 insertions(+), 572 deletions(-)

diff --git a/3.10.7/0000_README b/3.10.9/0000_README
similarity index 88%
rename from 3.10.7/0000_README
rename to 3.10.9/0000_README
index e8ef030..71cd5ee 100644
--- a/3.10.7/0000_README
+++ b/3.10.9/0000_README
@@ -2,7 +2,15 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-2.9.1-3.10.7-201308192211.patch
+Patch:	1007_linux-3.10.8.patch
+From:	http://www.kernel.org
+Desc:	Linux 3.10.8
+
+Patch:	1008_linux-3.10.9.patch
+From:	http://www.kernel.org
+Desc:	Linux 3.10.9
+
+Patch:	4420_grsecurity-2.9.1-3.10.9-201308202015.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.10.9/1007_linux-3.10.8.patch b/3.10.9/1007_linux-3.10.8.patch
new file mode 100644
index 0000000..bf200d8
--- /dev/null
+++ b/3.10.9/1007_linux-3.10.8.patch
@@ -0,0 +1,1793 @@
+diff --git a/Makefile b/Makefile
+index 33e36ab..1a21612 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = TOSSUG Baby Fish
+ 
+diff --git a/arch/Kconfig b/arch/Kconfig
+index a4429bc..00e3702 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -404,6 +404,12 @@ config CLONE_BACKWARDS2
+ 	help
+ 	  Architecture has the first two arguments of clone(2) swapped.
+ 
++config CLONE_BACKWARDS3
++	bool
++	help
++	  Architecture has tls passed as the 3rd argument of clone(2),
++	  not the 5th one.
++
+ config ODD_RT_SIGACTION
+ 	bool
+ 	help
+diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
+index 18d5032..4bb08e3 100644
+--- a/arch/arm/include/asm/kvm_asm.h
++++ b/arch/arm/include/asm/kvm_asm.h
+@@ -37,16 +37,18 @@
+ #define c5_AIFSR	15	/* Auxilary Instrunction Fault Status R */
+ #define c6_DFAR		16	/* Data Fault Address Register */
+ #define c6_IFAR		17	/* Instruction Fault Address Register */
+-#define c9_L2CTLR	18	/* Cortex A15 L2 Control Register */
+-#define c10_PRRR	19	/* Primary Region Remap Register */
+-#define c10_NMRR	20	/* Normal Memory Remap Register */
+-#define c12_VBAR	21	/* Vector Base Address Register */
+-#define c13_CID		22	/* Context ID Register */
+-#define c13_TID_URW	23	/* Thread ID, User R/W */
+-#define c13_TID_URO	24	/* Thread ID, User R/O */
+-#define c13_TID_PRIV	25	/* Thread ID, Privileged */
+-#define c14_CNTKCTL	26	/* Timer Control Register (PL1) */
+-#define NR_CP15_REGS	27	/* Number of regs (incl. invalid) */
++#define c7_PAR		18	/* Physical Address Register */
++#define c7_PAR_high	19	/* PAR top 32 bits */
++#define c9_L2CTLR	20	/* Cortex A15 L2 Control Register */
++#define c10_PRRR	21	/* Primary Region Remap Register */
++#define c10_NMRR	22	/* Normal Memory Remap Register */
++#define c12_VBAR	23	/* Vector Base Address Register */
++#define c13_CID		24	/* Context ID Register */
++#define c13_TID_URW	25	/* Thread ID, User R/W */
++#define c13_TID_URO	26	/* Thread ID, User R/O */
++#define c13_TID_PRIV	27	/* Thread ID, Privileged */
++#define c14_CNTKCTL	28	/* Timer Control Register (PL1) */
++#define NR_CP15_REGS	29	/* Number of regs (incl. invalid) */
+ 
+ #define ARM_EXCEPTION_RESET	  0
+ #define ARM_EXCEPTION_UNDEFINED   1
+diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
+index bdf2b84..aa9b4ac 100644
+--- a/arch/arm/include/asm/tlb.h
++++ b/arch/arm/include/asm/tlb.h
+@@ -43,6 +43,7 @@ struct mmu_gather {
+ 	struct mm_struct	*mm;
+ 	unsigned int		fullmm;
+ 	struct vm_area_struct	*vma;
++	unsigned long		start, end;
+ 	unsigned long		range_start;
+ 	unsigned long		range_end;
+ 	unsigned int		nr;
+@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+ }
+ 
+ static inline void
+-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
++tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+ {
+ 	tlb->mm = mm;
+-	tlb->fullmm = fullmm;
++	tlb->fullmm = !(start | (end+1));
++	tlb->start = start;
++	tlb->end = end;
+ 	tlb->vma = NULL;
+ 	tlb->max = ARRAY_SIZE(tlb->local);
+ 	tlb->pages = tlb->local;
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index d9f5cd4..e19edc6 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
+ static int
+ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+ {
+-	int mapping = (*event_map)[config];
++	int mapping;
++
++	if (config >= PERF_COUNT_HW_MAX)
++		return -ENOENT;
++
++	mapping = (*event_map)[config];
+ 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+ }
+ 
+@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
+ 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ 	struct pmu *leader_pmu = event->group_leader->pmu;
+ 
++	if (is_software_event(event))
++		return 1;
++
+ 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ 		return 1;
+ 
+diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
+index 8eea97b..4a51990 100644
+--- a/arch/arm/kvm/coproc.c
++++ b/arch/arm/kvm/coproc.c
+@@ -180,6 +180,10 @@ static const struct coproc_reg cp15_regs[] = {
+ 			NULL, reset_unknown, c6_DFAR },
+ 	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
+ 			NULL, reset_unknown, c6_IFAR },
++
++	/* PAR swapped by interrupt.S */
++	{ CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
++
+ 	/*
+ 	 * DC{C,I,CI}SW operations:
+ 	 */
+diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
+index f7793df..16cd4ba 100644
+--- a/arch/arm/kvm/interrupts.S
++++ b/arch/arm/kvm/interrupts.S
+@@ -49,6 +49,7 @@ __kvm_hyp_code_start:
+ ENTRY(__kvm_tlb_flush_vmid_ipa)
+ 	push	{r2, r3}
+ 
++	dsb	ishst
+ 	add	r0, r0, #KVM_VTTBR
+ 	ldrd	r2, r3, [r0]
+ 	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
+@@ -291,6 +292,7 @@ THUMB(	orr	r2, r2, #PSR_T_BIT	)
+ 	ldr	r2, =BSYM(panic)
+ 	msr	ELR_hyp, r2
+ 	ldr	r0, =\panic_str
++	clrex				@ Clear exclusive monitor
+ 	eret
+ .endm
+ 
+@@ -414,6 +416,10 @@ guest_trap:
+ 	mrcne	p15, 4, r2, c6, c0, 4	@ HPFAR
+ 	bne	3f
+ 
++	/* Preserve PAR */
++	mrrc	p15, 0, r0, r1, c7	@ PAR
++	push	{r0, r1}
++
+ 	/* Resolve IPA using the xFAR */
+ 	mcr	p15, 0, r2, c7, c8, 0	@ ATS1CPR
+ 	isb
+@@ -424,13 +430,20 @@ guest_trap:
+ 	lsl	r2, r2, #4
+ 	orr	r2, r2, r1, lsl #24
+ 
++	/* Restore PAR */
++	pop	{r0, r1}
++	mcrr	p15, 0, r0, r1, c7	@ PAR
++
+ 3:	load_vcpu			@ Load VCPU pointer to r0
+ 	str	r2, [r0, #VCPU_HPFAR]
+ 
+ 1:	mov	r1, #ARM_EXCEPTION_HVC
+ 	b	__kvm_vcpu_return
+ 
+-4:	pop	{r0, r1, r2}		@ Failed translation, return to guest
++4:	pop	{r0, r1}		@ Failed translation, return to guest
++	mcrr	p15, 0, r0, r1, c7	@ PAR
++	clrex
++	pop	{r0, r1, r2}
+ 	eret
+ 
+ /*
+@@ -456,6 +469,7 @@ switch_to_guest_vfp:
+ 
+ 	pop	{r3-r7}
+ 	pop	{r0-r2}
++	clrex
+ 	eret
+ #endif
+ 
+diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
+index 3c8f2f0..2b44b95 100644
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -302,11 +302,14 @@ vcpu	.req	r0		@ vcpu pointer always in r0
+ 	.endif
+ 
+ 	mrc	p15, 0, r2, c14, c1, 0	@ CNTKCTL
++	mrrc	p15, 0, r4, r5, c7	@ PAR
+ 
+ 	.if \store_to_vcpu == 0
+-	push	{r2}
++	push	{r2,r4-r5}
+ 	.else
+ 	str	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
++	add	r12, vcpu, #CP15_OFFSET(c7_PAR)
++	strd	r4, r5, [r12]
+ 	.endif
+ .endm
+ 
+@@ -319,12 +322,15 @@ vcpu	.req	r0		@ vcpu pointer always in r0
+  */
+ .macro write_cp15_state read_from_vcpu
+ 	.if \read_from_vcpu == 0
+-	pop	{r2}
++	pop	{r2,r4-r5}
+ 	.else
+ 	ldr	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
++	add	r12, vcpu, #CP15_OFFSET(c7_PAR)
++	ldrd	r4, r5, [r12]
+ 	.endif
+ 
+ 	mcr	p15, 0, r2, c14, c1, 0	@ CNTKCTL
++	mcrr	p15, 0, r4, r5, c7	@ PAR
+ 
+ 	.if \read_from_vcpu == 0
+ 	pop	{r2-r12}
+diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
+index 654f096..5546653 100644
+--- a/arch/arm64/include/asm/tlb.h
++++ b/arch/arm64/include/asm/tlb.h
+@@ -35,6 +35,7 @@ struct mmu_gather {
+ 	struct mm_struct	*mm;
+ 	unsigned int		fullmm;
+ 	struct vm_area_struct	*vma;
++	unsigned long		start, end;
+ 	unsigned long		range_start;
+ 	unsigned long		range_end;
+ 	unsigned int		nr;
+@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+ }
+ 
+ static inline void
+-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
++tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+ {
+ 	tlb->mm = mm;
+-	tlb->fullmm = fullmm;
++	tlb->fullmm = !(start | (end+1));
++	tlb->start = start;
++	tlb->end = end;
+ 	tlb->vma = NULL;
+ 	tlb->max = ARRAY_SIZE(tlb->local);
+ 	tlb->pages = tlb->local;
+diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
+index ef3a9de..bc5efc7 100644
+--- a/arch/ia64/include/asm/tlb.h
++++ b/arch/ia64/include/asm/tlb.h
+@@ -22,7 +22,7 @@
+  * unmapping a portion of the virtual address space, these hooks are called according to
+  * the following template:
+  *
+- *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
++ *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
+  *	{
+  *	  for each vma that needs a shootdown do {
+  *	    tlb_start_vma(tlb, vma);
+@@ -58,6 +58,7 @@ struct mmu_gather {
+ 	unsigned int		max;
+ 	unsigned char		fullmm;		/* non-zero means full mm flush */
+ 	unsigned char		need_flush;	/* really unmapped some PTEs? */
++	unsigned long		start, end;
+ 	unsigned long		start_addr;
+ 	unsigned long		end_addr;
+ 	struct page		**pages;
+@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
+ 
+ 
+ static inline void
+-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
++tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+ {
+ 	tlb->mm = mm;
+ 	tlb->max = ARRAY_SIZE(tlb->local);
+ 	tlb->pages = tlb->local;
+ 	tlb->nr = 0;
+-	tlb->fullmm = full_mm_flush;
++	tlb->fullmm = !(start | (end+1));
++	tlb->start = start;
++	tlb->end = end;
+ 	tlb->start_addr = ~0UL;
+ }
+ 
+diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
+index 2291a7d..fa277ae 100644
+--- a/arch/m68k/emu/natfeat.c
++++ b/arch/m68k/emu/natfeat.c
+@@ -18,9 +18,11 @@
+ #include <asm/machdep.h>
+ #include <asm/natfeat.h>
+ 
++extern long nf_get_id2(const char *feature_name);
++
+ asm("\n"
+-"	.global nf_get_id,nf_call\n"
+-"nf_get_id:\n"
++"	.global nf_get_id2,nf_call\n"
++"nf_get_id2:\n"
+ "	.short	0x7300\n"
+ "	rts\n"
+ "nf_call:\n"
+@@ -29,12 +31,25 @@ asm("\n"
+ "1:	moveq.l	#0,%d0\n"
+ "	rts\n"
+ "	.section __ex_table,\"a\"\n"
+-"	.long	nf_get_id,1b\n"
++"	.long	nf_get_id2,1b\n"
+ "	.long	nf_call,1b\n"
+ "	.previous");
+-EXPORT_SYMBOL_GPL(nf_get_id);
+ EXPORT_SYMBOL_GPL(nf_call);
+ 
++long nf_get_id(const char *feature_name)
++{
++	/* feature_name may be in vmalloc()ed memory, so make a copy */
++	char name_copy[32];
++	size_t n;
++
++	n = strlcpy(name_copy, feature_name, sizeof(name_copy));
++	if (n >= sizeof(name_copy))
++		return 0;
++
++	return nf_get_id2(name_copy);
++}
++EXPORT_SYMBOL_GPL(nf_get_id);
++
+ void nfprint(const char *fmt, ...)
+ {
+ 	static char buf[256];
+diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
+index 444ea8a..ef881cf 100644
+--- a/arch/m68k/include/asm/div64.h
++++ b/arch/m68k/include/asm/div64.h
+@@ -15,16 +15,17 @@
+ 		unsigned long long n64;				\
+ 	} __n;							\
+ 	unsigned long __rem, __upper;				\
++	unsigned long __base = (base);				\
+ 								\
+ 	__n.n64 = (n);						\
+ 	if ((__upper = __n.n32[0])) {				\
+ 		asm ("divul.l %2,%1:%0"				\
+-			: "=d" (__n.n32[0]), "=d" (__upper)	\
+-			: "d" (base), "0" (__n.n32[0]));	\
++		     : "=d" (__n.n32[0]), "=d" (__upper)	\
++		     : "d" (__base), "0" (__n.n32[0]));		\
+ 	}							\
+ 	asm ("divu.l %2,%1:%0"					\
+-		: "=d" (__n.n32[1]), "=d" (__rem)		\
+-		: "d" (base), "1" (__upper), "0" (__n.n32[1]));	\
++	     : "=d" (__n.n32[1]), "=d" (__rem)			\
++	     : "d" (__base), "1" (__upper), "0" (__n.n32[1]));	\
+ 	(n) = __n.n64;						\
+ 	__rem;							\
+ })
+diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
+index d22a4ec..4fab522 100644
+--- a/arch/microblaze/Kconfig
++++ b/arch/microblaze/Kconfig
+@@ -28,7 +28,7 @@ config MICROBLAZE
+ 	select GENERIC_CLOCKEVENTS
+ 	select GENERIC_IDLE_POLL_SETUP
+ 	select MODULES_USE_ELF_RELA
+-	select CLONE_BACKWARDS
++	select CLONE_BACKWARDS3
+ 
+ config SWAP
+ 	def_bool n
+diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
+index b75d7d6..6d6d92b 100644
+--- a/arch/s390/include/asm/tlb.h
++++ b/arch/s390/include/asm/tlb.h
+@@ -32,6 +32,7 @@ struct mmu_gather {
+ 	struct mm_struct *mm;
+ 	struct mmu_table_batch *batch;
+ 	unsigned int fullmm;
++	unsigned long start, end;
+ };
+ 
+ struct mmu_table_batch {
+@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+ 
+ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
+ 				  struct mm_struct *mm,
+-				  unsigned int full_mm_flush)
++				  unsigned long start,
++				  unsigned long end)
+ {
+ 	tlb->mm = mm;
+-	tlb->fullmm = full_mm_flush;
++	tlb->start = start;
++	tlb->end = end;
++	tlb->fullmm = !(start | (end+1));
+ 	tlb->batch = NULL;
+ 	if (tlb->fullmm)
+ 		__tlb_flush_mm(mm);
+diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
+index e61d43d..362192e 100644
+--- a/arch/sh/include/asm/tlb.h
++++ b/arch/sh/include/asm/tlb.h
+@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
+ }
+ 
+ static inline void
+-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
++tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+ {
+ 	tlb->mm = mm;
+-	tlb->fullmm = full_mm_flush;
++	tlb->start = start;
++	tlb->end = end;
++	tlb->fullmm = !(start | (end+1));
+ 
+ 	init_tlb_gather(tlb);
+ }
+diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
+index 4febacd..29b0301 100644
+--- a/arch/um/include/asm/tlb.h
++++ b/arch/um/include/asm/tlb.h
+@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
+ }
+ 
+ static inline void
+-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
++tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+ {
+ 	tlb->mm = mm;
+-	tlb->fullmm = full_mm_flush;
++	tlb->start = start;
++	tlb->end = end;
++	tlb->fullmm = !(start | (end+1));
+ 
+ 	init_tlb_gather(tlb);
+ }
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+index 52441a2..8aac56b 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
+ static struct uncore_event_desc snbep_uncore_qpi_events[] = {
+ 	INTEL_UNCORE_EVENT_DESC(clockticks,       "event=0x14"),
+ 	INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
+-	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x02,umask=0x08"),
+-	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x03,umask=0x04"),
++	INTEL_UNCORE_EVENT_DESC(drs_data,         "event=0x102,umask=0x08"),
++	INTEL_UNCORE_EVENT_DESC(ncb_data,         "event=0x103,umask=0x04"),
+ 	{ /* end: all zeroes */ },
+ };
+ 
+diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
+index dbded5a..48f8375 100644
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+ 				*begin = new_begin;
+ 		}
+ 	} else {
+-		*begin = TASK_UNMAPPED_BASE;
++		*begin = mmap_legacy_base();
+ 		*end = TASK_SIZE;
+ 	}
+ }
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 845df68..c1af323 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -98,7 +98,7 @@ static unsigned long mmap_base(void)
+  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
+  * does, but not when emulating X86_32
+  */
+-static unsigned long mmap_legacy_base(void)
++unsigned long mmap_legacy_base(void)
+ {
+ 	if (mmap_is_ia32())
+ 		return TASK_UNMAPPED_BASE;
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index d5cd313..d5bbdcf 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -4347,18 +4347,28 @@ static void cfq_exit_queue(struct elevator_queue *e)
+ 	kfree(cfqd);
+ }
+ 
+-static int cfq_init_queue(struct request_queue *q)
++static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ {
+ 	struct cfq_data *cfqd;
+ 	struct blkcg_gq *blkg __maybe_unused;
+ 	int i, ret;
++	struct elevator_queue *eq;
++
++	eq = elevator_alloc(q, e);
++	if (!eq)
++		return -ENOMEM;
+ 
+ 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
+-	if (!cfqd)
++	if (!cfqd) {
++		kobject_put(&eq->kobj);
+ 		return -ENOMEM;
++	}
++	eq->elevator_data = cfqd;
+ 
+ 	cfqd->queue = q;
+-	q->elevator->elevator_data = cfqd;
++	spin_lock_irq(q->queue_lock);
++	q->elevator = eq;
++	spin_unlock_irq(q->queue_lock);
+ 
+ 	/* Init root service tree */
+ 	cfqd->grp_service_tree = CFQ_RB_ROOT;
+@@ -4433,6 +4443,7 @@ static int cfq_init_queue(struct request_queue *q)
+ 
+ out_free:
+ 	kfree(cfqd);
++	kobject_put(&eq->kobj);
+ 	return ret;
+ }
+ 
+diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
+index ba19a3a..20614a3 100644
+--- a/block/deadline-iosched.c
++++ b/block/deadline-iosched.c
+@@ -337,13 +337,21 @@ static void deadline_exit_queue(struct elevator_queue *e)
+ /*
+  * initialize elevator private data (deadline_data).
+  */
+-static int deadline_init_queue(struct request_queue *q)
++static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
+ {
+ 	struct deadline_data *dd;
++	struct elevator_queue *eq;
++
++	eq = elevator_alloc(q, e);
++	if (!eq)
++		return -ENOMEM;
+ 
+ 	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
+-	if (!dd)
++	if (!dd) {
++		kobject_put(&eq->kobj);
+ 		return -ENOMEM;
++	}
++	eq->elevator_data = dd;
+ 
+ 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
+ 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
+@@ -355,7 +363,9 @@ static int deadline_init_queue(struct request_queue *q)
+ 	dd->front_merges = 1;
+ 	dd->fifo_batch = fifo_batch;
+ 
+-	q->elevator->elevator_data = dd;
++	spin_lock_irq(q->queue_lock);
++	q->elevator = eq;
++	spin_unlock_irq(q->queue_lock);
+ 	return 0;
+ }
+ 
+diff --git a/block/elevator.c b/block/elevator.c
+index eba5b04..668394d 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -150,7 +150,7 @@ void __init load_default_elevator_module(void)
+ 
+ static struct kobj_type elv_ktype;
+ 
+-static struct elevator_queue *elevator_alloc(struct request_queue *q,
++struct elevator_queue *elevator_alloc(struct request_queue *q,
+ 				  struct elevator_type *e)
+ {
+ 	struct elevator_queue *eq;
+@@ -170,6 +170,7 @@ err:
+ 	elevator_put(e);
+ 	return NULL;
+ }
++EXPORT_SYMBOL(elevator_alloc);
+ 
+ static void elevator_release(struct kobject *kobj)
+ {
+@@ -221,16 +222,7 @@ int elevator_init(struct request_queue *q, char *name)
+ 		}
+ 	}
+ 
+-	q->elevator = elevator_alloc(q, e);
+-	if (!q->elevator)
+-		return -ENOMEM;
+-
+-	err = e->ops.elevator_init_fn(q);
+-	if (err) {
+-		kobject_put(&q->elevator->kobj);
+-		return err;
+-	}
+-
++	err = e->ops.elevator_init_fn(q, e);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(elevator_init);
+@@ -935,16 +927,9 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
+ 	spin_unlock_irq(q->queue_lock);
+ 
+ 	/* allocate, init and register new elevator */
+-	err = -ENOMEM;
+-	q->elevator = elevator_alloc(q, new_e);
+-	if (!q->elevator)
+-		goto fail_init;
+-
+-	err = new_e->ops.elevator_init_fn(q);
+-	if (err) {
+-		kobject_put(&q->elevator->kobj);
++	err = new_e->ops.elevator_init_fn(q, new_e);
++	if (err)
+ 		goto fail_init;
+-	}
+ 
+ 	if (registered) {
+ 		err = elv_register_queue(q);
+diff --git a/block/noop-iosched.c b/block/noop-iosched.c
+index 5d1bf70..3de89d4 100644
+--- a/block/noop-iosched.c
++++ b/block/noop-iosched.c
+@@ -59,16 +59,27 @@ noop_latter_request(struct request_queue *q, struct request *rq)
+ 	return list_entry(rq->queuelist.next, struct request, queuelist);
+ }
+ 
+-static int noop_init_queue(struct request_queue *q)
++static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
+ {
+ 	struct noop_data *nd;
++	struct elevator_queue *eq;
++
++	eq = elevator_alloc(q, e);
++	if (!eq)
++		return -ENOMEM;
+ 
+ 	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
+-	if (!nd)
++	if (!nd) {
++		kobject_put(&eq->kobj);
+ 		return -ENOMEM;
++	}
++	eq->elevator_data = nd;
+ 
+ 	INIT_LIST_HEAD(&nd->queue);
+-	q->elevator->elevator_data = nd;
++
++	spin_lock_irq(q->queue_lock);
++	q->elevator = eq;
++	spin_unlock_irq(q->queue_lock);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
+index 25723d8..925ab8e 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
+@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
+ 		if ((mc->ptr + rec_len) > mc->end)
+ 			goto decode_failed;
+ 
+-		memcpy(cf->data, mc->ptr, rec_len);
++		memcpy(cf->data, mc->ptr, cf->can_dlc);
+ 		mc->ptr += rec_len;
+ 	}
+ 
+diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
+index 9a95045..900f5f8 100644
+--- a/drivers/net/wireless/iwlegacy/4965-mac.c
++++ b/drivers/net/wireless/iwlegacy/4965-mac.c
+@@ -4442,12 +4442,12 @@ il4965_irq_tasklet(struct il_priv *il)
+ 		 * is killed. Hence update the killswitch state here. The
+ 		 * rfkill handler will care about restarting if needed.
+ 		 */
+-		if (!test_bit(S_ALIVE, &il->status)) {
+-			if (hw_rf_kill)
+-				set_bit(S_RFKILL, &il->status);
+-			else
+-				clear_bit(S_RFKILL, &il->status);
++		if (hw_rf_kill) {
++			set_bit(S_RFKILL, &il->status);
++		} else {
++			clear_bit(S_RFKILL, &il->status);
+ 			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
++			il_force_reset(il, true);
+ 		}
+ 
+ 		handled |= CSR_INT_BIT_RF_KILL;
+@@ -5316,6 +5316,9 @@ il4965_alive_start(struct il_priv *il)
+ 
+ 	il->active_rate = RATES_MASK;
+ 
++	il_power_update_mode(il, true);
++	D_INFO("Updated power mode\n");
++
+ 	if (il_is_associated(il)) {
+ 		struct il_rxon_cmd *active_rxon =
+ 		    (struct il_rxon_cmd *)&il->active;
+@@ -5346,9 +5349,6 @@ il4965_alive_start(struct il_priv *il)
+ 	D_INFO("ALIVE processing complete.\n");
+ 	wake_up(&il->wait_command_queue);
+ 
+-	il_power_update_mode(il, true);
+-	D_INFO("Updated power mode\n");
+-
+ 	return;
+ 
+ restart:
+diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
+index e9a3cbc..9c9ebad 100644
+--- a/drivers/net/wireless/iwlegacy/common.c
++++ b/drivers/net/wireless/iwlegacy/common.c
+@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL(il_force_reset);
+ 
+ int
+ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index a635988..5b44cd4 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x04d8, 0x000c), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+ 
++	/* CarrolTouch 4000U */
++	{ USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
++
++	/* CarrolTouch 4500U */
++	{ USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	/* Samsung Android phone modem - ID conflict with SPH-I500 */
+ 	{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index f80d033..8e3c878 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -1391,21 +1391,20 @@ iso_stream_schedule (
+ 
+ 		/* Behind the scheduling threshold? */
+ 		if (unlikely(start < next)) {
++			unsigned now2 = (now - base) & (mod - 1);
+ 
+ 			/* USB_ISO_ASAP: Round up to the first available slot */
+ 			if (urb->transfer_flags & URB_ISO_ASAP)
+ 				start += (next - start + period - 1) & -period;
+ 
+ 			/*
+-			 * Not ASAP: Use the next slot in the stream.  If
+-			 * the entire URB falls before the threshold, fail.
++			 * Not ASAP: Use the next slot in the stream,
++			 * no matter what.
+ 			 */
+-			else if (start + span - period < next) {
+-				ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n",
++			else if (start + span - period < now2) {
++				ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
+ 						urb, start + base,
+-						span - period, next + base);
+-				status = -EXDEV;
+-				goto fail;
++						span - period, now2 + base);
+ 			}
+ 		}
+ 
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index 3549d07..07fbdf0 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -2315,7 +2315,7 @@ static int keyspan_startup(struct usb_serial *serial)
+ 	if (d_details == NULL) {
+ 		dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
+ 		    __func__, le16_to_cpu(serial->dev->descriptor.idProduct));
+-		return 1;
++		return -ENODEV;
+ 	}
+ 
+ 	/* Setup private data for serial driver */
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index f27c621..5050cc8 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -90,6 +90,7 @@ struct urbtracker {
+ 	struct list_head        urblist_entry;
+ 	struct kref             ref_count;
+ 	struct urb              *urb;
++	struct usb_ctrlrequest	*setup;
+ };
+ 
+ enum mos7715_pp_modes {
+@@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref)
+ 	struct mos7715_parport *mos_parport = urbtrack->mos_parport;
+ 
+ 	usb_free_urb(urbtrack->urb);
++	kfree(urbtrack->setup);
+ 	kfree(urbtrack);
+ 	kref_put(&mos_parport->ref_count, destroy_mos_parport);
+ }
+@@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ 	struct urbtracker *urbtrack;
+ 	int ret_val;
+ 	unsigned long flags;
+-	struct usb_ctrlrequest setup;
+ 	struct usb_serial *serial = mos_parport->serial;
+ 	struct usb_device *usbdev = serial->dev;
+ 
+@@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ 		kfree(urbtrack);
+ 		return -ENOMEM;
+ 	}
+-	setup.bRequestType = (__u8)0x40;
+-	setup.bRequest = (__u8)0x0e;
+-	setup.wValue = get_reg_value(reg, dummy);
+-	setup.wIndex = get_reg_index(reg);
+-	setup.wLength = 0;
++	urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
++	if (!urbtrack->setup) {
++		usb_free_urb(urbtrack->urb);
++		kfree(urbtrack);
++		return -ENOMEM;
++	}
++	urbtrack->setup->bRequestType = (__u8)0x40;
++	urbtrack->setup->bRequest = (__u8)0x0e;
++	urbtrack->setup->wValue = get_reg_value(reg, dummy);
++	urbtrack->setup->wIndex = get_reg_index(reg);
++	urbtrack->setup->wLength = 0;
+ 	usb_fill_control_urb(urbtrack->urb, usbdev,
+ 			     usb_sndctrlpipe(usbdev, 0),
+-			     (unsigned char *)&setup,
++			     (unsigned char *)urbtrack->setup,
+ 			     NULL, 0, async_complete, urbtrack);
+ 	kref_init(&urbtrack->ref_count);
+ 	INIT_LIST_HEAD(&urbtrack->urblist_entry);
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index b92d333..2c1749d 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -2208,7 +2208,7 @@ static int mos7810_check(struct usb_serial *serial)
+ static int mos7840_probe(struct usb_serial *serial,
+ 				const struct usb_device_id *id)
+ {
+-	u16 product = serial->dev->descriptor.idProduct;
++	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ 	u8 *buf;
+ 	int device_type;
+ 
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 01f79f1..32bdd5e 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -1536,14 +1536,15 @@ static int ti_download_firmware(struct ti_device *tdev)
+ 	char buf[32];
+ 
+ 	/* try ID specific firmware first, then try generic firmware */
+-	sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
+-	    dev->descriptor.idProduct);
++	sprintf(buf, "ti_usb-v%04x-p%04x.fw",
++			le16_to_cpu(dev->descriptor.idVendor),
++			le16_to_cpu(dev->descriptor.idProduct));
+ 	status = request_firmware(&fw_p, buf, &dev->dev);
+ 
+ 	if (status != 0) {
+ 		buf[0] = '\0';
+-		if (dev->descriptor.idVendor == MTS_VENDOR_ID) {
+-			switch (dev->descriptor.idProduct) {
++		if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
++			switch (le16_to_cpu(dev->descriptor.idProduct)) {
+ 			case MTS_CDMA_PRODUCT_ID:
+ 				strcpy(buf, "mts_cdma.fw");
+ 				break;
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index ece326e..db0cf53 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb)
+ 			tty_flip_buffer_push(&port->port);
+ 		} else
+ 			dev_dbg(dev, "%s: empty read urb received\n", __func__);
+-
+-		/* Resubmit urb so we continue receiving */
+-		err = usb_submit_urb(urb, GFP_ATOMIC);
+-		if (err) {
+-			if (err != -EPERM) {
+-				dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err);
+-				/* busy also in error unless we are killed */
+-				usb_mark_last_busy(port->serial->dev);
+-			}
+-		} else {
++	}
++	/* Resubmit urb so we continue receiving */
++	err = usb_submit_urb(urb, GFP_ATOMIC);
++	if (err) {
++		if (err != -EPERM) {
++			dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
++				__func__, err);
++			/* busy also in error unless we are killed */
+ 			usb_mark_last_busy(port->serial->dev);
+ 		}
++	} else {
++		usb_mark_last_busy(port->serial->dev);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
+index 6ef94bc..028fc83 100644
+--- a/drivers/usb/wusbcore/wa-xfer.c
++++ b/drivers/usb/wusbcore/wa-xfer.c
+@@ -1110,6 +1110,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
+ 	}
+ 	spin_lock_irqsave(&xfer->lock, flags);
+ 	rpipe = xfer->ep->hcpriv;
++	if (rpipe == NULL) {
++		pr_debug("%s: xfer id 0x%08X has no RPIPE.  %s",
++			__func__, wa_xfer_id(xfer),
++			"Probably already aborted.\n" );
++		goto out_unlock;
++	}
+ 	/* Check the delayed list -> if there, release and complete */
+ 	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
+ 	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
+@@ -1493,8 +1499,7 @@ static void wa_xfer_result_cb(struct urb *urb)
+ 			break;
+ 		}
+ 		usb_status = xfer_result->bTransferStatus & 0x3f;
+-		if (usb_status == WA_XFER_STATUS_ABORTED
+-		    || usb_status == WA_XFER_STATUS_NOT_FOUND)
++		if (usb_status == WA_XFER_STATUS_NOT_FOUND)
+ 			/* taken care of already */
+ 			break;
+ 		xfer_id = xfer_result->dwTransferID;
+diff --git a/fs/exec.c b/fs/exec.c
+index ffd7a81..1f44670 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -607,7 +607,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+ 		return -ENOMEM;
+ 
+ 	lru_add_drain();
+-	tlb_gather_mmu(&tlb, mm, 0);
++	tlb_gather_mmu(&tlb, mm, old_start, old_end);
+ 	if (new_end > old_start) {
+ 		/*
+ 		 * when the old and new regions overlap clear from new_end.
+@@ -624,7 +624,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
+ 		free_pgd_range(&tlb, old_start, old_end, new_end,
+ 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+ 	}
+-	tlb_finish_mmu(&tlb, new_end, old_end);
++	tlb_finish_mmu(&tlb, old_start, old_end);
+ 
+ 	/*
+ 	 * Shrink the vma to just the new range.  Always succeeds.
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index 451eb40..1c88061 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -219,10 +219,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
+ 	set_buffer_prio(bh);
+ 	if (ext4_handle_valid(handle)) {
+ 		err = jbd2_journal_dirty_metadata(handle, bh);
+-		if (err) {
+-			/* Errors can only happen if there is a bug */
+-			handle->h_err = err;
+-			__ext4_journal_stop(where, line, handle);
++		/* Errors can only happen if there is a bug */
++		if (WARN_ON_ONCE(err)) {
++			ext4_journal_abort_handle(where, line, __func__, bh,
++						  handle, err);
+ 		}
+ 	} else {
+ 		if (inode)
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 3e636d8..65fc60a 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -792,14 +792,14 @@ typedef struct {
+ } pagemap_entry_t;
+ 
+ struct pagemapread {
+-	int pos, len;
++	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
+ 	pagemap_entry_t *buffer;
+ };
+ 
+ #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
+ #define PAGEMAP_WALK_MASK	(PMD_MASK)
+ 
+-#define PM_ENTRY_BYTES      sizeof(u64)
++#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
+ #define PM_STATUS_BITS      3
+ #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
+ #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
+@@ -1038,8 +1038,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ 	if (!count)
+ 		goto out_task;
+ 
+-	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+-	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
++	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
++	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
+ 	ret = -ENOMEM;
+ 	if (!pm.buffer)
+ 		goto out_task;
+diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
+index 13821c3..5672d7e 100644
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -112,7 +112,7 @@ struct mmu_gather {
+ 
+ #define HAVE_GENERIC_MMU_GATHER
+ 
+-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
++void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
+ void tlb_flush_mmu(struct mmu_gather *tlb);
+ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
+ 							unsigned long end);
+diff --git a/include/linux/elevator.h b/include/linux/elevator.h
+index acd0312..306dd8c 100644
+--- a/include/linux/elevator.h
++++ b/include/linux/elevator.h
+@@ -7,6 +7,7 @@
+ #ifdef CONFIG_BLOCK
+ 
+ struct io_cq;
++struct elevator_type;
+ 
+ typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
+ 				 struct bio *);
+@@ -35,7 +36,8 @@ typedef void (elevator_put_req_fn) (struct request *);
+ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
+ typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
+ 
+-typedef int (elevator_init_fn) (struct request_queue *);
++typedef int (elevator_init_fn) (struct request_queue *,
++				struct elevator_type *e);
+ typedef void (elevator_exit_fn) (struct elevator_queue *);
+ 
+ struct elevator_ops
+@@ -155,6 +157,8 @@ extern int elevator_init(struct request_queue *, char *);
+ extern void elevator_exit(struct elevator_queue *);
+ extern int elevator_change(struct request_queue *, const char *);
+ extern bool elv_rq_merge_ok(struct request *, struct bio *);
++extern struct elevator_queue *elevator_alloc(struct request_queue *,
++					struct elevator_type *);
+ 
+ /*
+  * Helper functions.
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 178a8d9..3aeb14b 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -314,6 +314,7 @@ struct nsproxy;
+ struct user_namespace;
+ 
+ #ifdef CONFIG_MMU
++extern unsigned long mmap_legacy_base(void);
+ extern void arch_pick_mmap_layout(struct mm_struct *mm);
+ extern unsigned long
+ arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 4147d70..84662ec 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
+ asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
+ 	       int __user *);
+ #else
++#ifdef CONFIG_CLONE_BACKWARDS3
++asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
++			  int __user *, int);
++#else
+ asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
+ 	       int __user *, int);
+ #endif
++#endif
+ 
+ asmlinkage long sys_execve(const char __user *filename,
+ 		const char __user *const __user *argv,
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 64b3f79..6948e94 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -1502,11 +1502,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+ {
+ 	struct cpuset *cs = cgroup_cs(cgrp);
+ 	cpuset_filetype_t type = cft->private;
+-	int retval = -ENODEV;
++	int retval = 0;
+ 
+ 	mutex_lock(&cpuset_mutex);
+-	if (!is_cpuset_online(cs))
++	if (!is_cpuset_online(cs)) {
++		retval = -ENODEV;
+ 		goto out_unlock;
++	}
+ 
+ 	switch (type) {
+ 	case FILE_CPU_EXCLUSIVE:
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 987b28a..ffbc090 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1675,6 +1675,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
+ 		 int __user *, parent_tidptr,
+ 		 int __user *, child_tidptr,
+ 		 int, tls_val)
++#elif defined(CONFIG_CLONE_BACKWARDS3)
++SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
++		int, stack_size,
++		int __user *, parent_tidptr,
++		int __user *, child_tidptr,
++		int, tls_val)
+ #else
+ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
+ 		 int __user *, parent_tidptr,
+diff --git a/kernel/power/qos.c b/kernel/power/qos.c
+index 587ddde..25cf89b 100644
+--- a/kernel/power/qos.c
++++ b/kernel/power/qos.c
+@@ -293,6 +293,15 @@ int pm_qos_request_active(struct pm_qos_request *req)
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_request_active);
+ 
++static void __pm_qos_update_request(struct pm_qos_request *req,
++			   s32 new_value)
++{
++	if (new_value != req->node.prio)
++		pm_qos_update_target(
++			pm_qos_array[req->pm_qos_class]->constraints,
++			&req->node, PM_QOS_UPDATE_REQ, new_value);
++}
++
+ /**
+  * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
+  * @work: work struct for the delayed work (timeout)
+@@ -305,7 +314,7 @@ static void pm_qos_work_fn(struct work_struct *work)
+ 						  struct pm_qos_request,
+ 						  work);
+ 
+-	pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
++	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+ }
+ 
+ /**
+@@ -365,6 +374,8 @@ void pm_qos_update_request(struct pm_qos_request *req,
+ 		pm_qos_update_target(
+ 			pm_qos_array[req->pm_qos_class]->constraints,
+ 			&req->node, PM_QOS_UPDATE_REQ, new_value);
++
++	__pm_qos_update_request(req, new_value);
+ }
+ EXPORT_SYMBOL_GPL(pm_qos_update_request);
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index c61a614..03b73be 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1984,6 +1984,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+ 	 */
+ 	update_entity_load_avg(curr, 1);
+ 	update_cfs_rq_blocked_load(cfs_rq, 1);
++	update_cfs_shares(cfs_rq);
+ 
+ #ifdef CONFIG_SCHED_HRTICK
+ 	/*
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 5cf99bf..7c5eb85 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+ 
+ 	mm = vma->vm_mm;
+ 
+-	tlb_gather_mmu(&tlb, mm, 0);
++	tlb_gather_mmu(&tlb, mm, start, end);
+ 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
+ 	tlb_finish_mmu(&tlb, start, end);
+ }
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 15b0409..82a187a 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3186,11 +3186,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
+ 	if (!s->memcg_params)
+ 		return -ENOMEM;
+ 
+-	INIT_WORK(&s->memcg_params->destroy,
+-			kmem_cache_destroy_work_func);
+ 	if (memcg) {
+ 		s->memcg_params->memcg = memcg;
+ 		s->memcg_params->root_cache = root_cache;
++		INIT_WORK(&s->memcg_params->destroy,
++				kmem_cache_destroy_work_func);
+ 	} else
+ 		s->memcg_params->is_root_cache = true;
+ 
+diff --git a/mm/memory.c b/mm/memory.c
+index 5e50800..5a35443 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -211,14 +211,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
+  *	tear-down from @mm. The @fullmm argument is used when @mm is without
+  *	users and we're going to destroy the full address space (exit/execve).
+  */
+-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
++void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+ {
+ 	tlb->mm = mm;
+ 
+-	tlb->fullmm     = fullmm;
++	/* Is it from 0 to ~0? */
++	tlb->fullmm     = !(start | (end+1));
+ 	tlb->need_flush_all = 0;
+-	tlb->start	= -1UL;
+-	tlb->end	= 0;
++	tlb->start	= start;
++	tlb->end	= end;
+ 	tlb->need_flush = 0;
+ 	tlb->local.next = NULL;
+ 	tlb->local.nr   = 0;
+@@ -258,8 +259,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
+ {
+ 	struct mmu_gather_batch *batch, *next;
+ 
+-	tlb->start = start;
+-	tlb->end   = end;
+ 	tlb_flush_mmu(tlb);
+ 
+ 	/* keep the page table cache within bounds */
+@@ -1101,7 +1100,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
+ 	spinlock_t *ptl;
+ 	pte_t *start_pte;
+ 	pte_t *pte;
+-	unsigned long range_start = addr;
+ 
+ again:
+ 	init_rss_vec(rss);
+@@ -1204,17 +1202,25 @@ again:
+ 	 * and page-free while holding it.
+ 	 */
+ 	if (force_flush) {
++		unsigned long old_end;
++
+ 		force_flush = 0;
+ 
+-#ifdef HAVE_GENERIC_MMU_GATHER
+-		tlb->start = range_start;
++		/*
++		 * Flush the TLB just for the previous segment,
++		 * then update the range to be the remaining
++		 * TLB range.
++		 */
++		old_end = tlb->end;
+ 		tlb->end = addr;
+-#endif
++
+ 		tlb_flush_mmu(tlb);
+-		if (addr != end) {
+-			range_start = addr;
++
++		tlb->start = addr;
++		tlb->end = old_end;
++
++		if (addr != end)
+ 			goto again;
+-		}
+ 	}
+ 
+ 	return addr;
+@@ -1399,7 +1405,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
+ 	unsigned long end = start + size;
+ 
+ 	lru_add_drain();
+-	tlb_gather_mmu(&tlb, mm, 0);
++	tlb_gather_mmu(&tlb, mm, start, end);
+ 	update_hiwater_rss(mm);
+ 	mmu_notifier_invalidate_range_start(mm, start, end);
+ 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+@@ -1425,7 +1431,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
+ 	unsigned long end = address + size;
+ 
+ 	lru_add_drain();
+-	tlb_gather_mmu(&tlb, mm, 0);
++	tlb_gather_mmu(&tlb, mm, address, end);
+ 	update_hiwater_rss(mm);
+ 	mmu_notifier_invalidate_range_start(mm, address, end);
+ 	unmap_single_vma(&tlb, vma, address, end, details);
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 7dbe397..8d25fdc 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2356,7 +2356,7 @@ static void unmap_region(struct mm_struct *mm,
+ 	struct mmu_gather tlb;
+ 
+ 	lru_add_drain();
+-	tlb_gather_mmu(&tlb, mm, 0);
++	tlb_gather_mmu(&tlb, mm, start, end);
+ 	update_hiwater_rss(mm);
+ 	unmap_vmas(&tlb, vma, start, end);
+ 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+@@ -2735,7 +2735,7 @@ void exit_mmap(struct mm_struct *mm)
+ 
+ 	lru_add_drain();
+ 	flush_cache_mm(mm);
+-	tlb_gather_mmu(&tlb, mm, 1);
++	tlb_gather_mmu(&tlb, mm, 0, -1);
+ 	/* update_hiwater_rss(mm) here? but nobody should be looking */
+ 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
+ 	unmap_vmas(&tlb, vma, 0, -1);
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 741448b..55a42f9 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -237,8 +237,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+ 			     struct ieee80211_channel *channel,
+ 			     const struct ieee80211_ht_operation *ht_oper,
+ 			     const struct ieee80211_vht_operation *vht_oper,
+-			     struct cfg80211_chan_def *chandef, bool verbose)
++			     struct cfg80211_chan_def *chandef, bool tracking)
+ {
++	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ 	struct cfg80211_chan_def vht_chandef;
+ 	u32 ht_cfreq, ret;
+ 
+@@ -257,7 +258,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+ 	ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
+ 						  channel->band);
+ 	/* check that channel matches the right operating channel */
+-	if (channel->center_freq != ht_cfreq) {
++	if (!tracking && channel->center_freq != ht_cfreq) {
+ 		/*
+ 		 * It's possible that some APs are confused here;
+ 		 * Netgear WNDR3700 sometimes reports 4 higher than
+@@ -265,11 +266,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+ 		 * since we look at probe response/beacon data here
+ 		 * it should be OK.
+ 		 */
+-		if (verbose)
+-			sdata_info(sdata,
+-				   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+-				   channel->center_freq, ht_cfreq,
+-				   ht_oper->primary_chan, channel->band);
++		sdata_info(sdata,
++			   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
++			   channel->center_freq, ht_cfreq,
++			   ht_oper->primary_chan, channel->band);
+ 		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+ 		goto out;
+ 	}
+@@ -323,7 +323,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+ 				channel->band);
+ 		break;
+ 	default:
+-		if (verbose)
++		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+ 			sdata_info(sdata,
+ 				   "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
+ 				   vht_oper->chan_width);
+@@ -332,7 +332,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+ 	}
+ 
+ 	if (!cfg80211_chandef_valid(&vht_chandef)) {
+-		if (verbose)
++		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+ 			sdata_info(sdata,
+ 				   "AP VHT information is invalid, disable VHT\n");
+ 		ret = IEEE80211_STA_DISABLE_VHT;
+@@ -345,7 +345,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
+ 	}
+ 
+ 	if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
+-		if (verbose)
++		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+ 			sdata_info(sdata,
+ 				   "AP VHT information doesn't match HT, disable VHT\n");
+ 		ret = IEEE80211_STA_DISABLE_VHT;
+@@ -361,18 +361,27 @@ out:
+ 	if (ret & IEEE80211_STA_DISABLE_VHT)
+ 		vht_chandef = *chandef;
+ 
++	/*
++	 * Ignore the DISABLED flag when we're already connected and only
++	 * tracking the APs beacon for bandwidth changes - otherwise we
++	 * might get disconnected here if we connect to an AP, update our
++	 * regulatory information based on the AP's country IE and the
++	 * information we have is wrong/outdated and disables the channel
++	 * that we're actually using for the connection to the AP.
++	 */
+ 	while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+-					IEEE80211_CHAN_DISABLED)) {
++					tracking ? 0 :
++						   IEEE80211_CHAN_DISABLED)) {
+ 		if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
+ 			ret = IEEE80211_STA_DISABLE_HT |
+ 			      IEEE80211_STA_DISABLE_VHT;
+-			goto out;
++			break;
+ 		}
+ 
+ 		ret |= chandef_downgrade(chandef);
+ 	}
+ 
+-	if (chandef->width != vht_chandef.width && verbose)
++	if (chandef->width != vht_chandef.width && !tracking)
+ 		sdata_info(sdata,
+ 			   "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
+ 
+@@ -412,7 +421,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
+ 
+ 	/* calculate new channel (type) based on HT/VHT operation IEs */
+ 	flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
+-					     vht_oper, &chandef, false);
++					     vht_oper, &chandef, true);
+ 
+ 	/*
+ 	 * Downgrade the new channel if we associated with restricted
+@@ -3906,7 +3915,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
+ 	ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
+ 						     cbss->channel,
+ 						     ht_oper, vht_oper,
+-						     &chandef, true);
++						     &chandef, false);
+ 
+ 	sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
+ 				      local->rx_chains);
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 1076fe1..ba6e55d 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -789,6 +789,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
+ 	struct net *net = sock_net(skb->sk);
+ 	int chains_to_skip = cb->args[0];
+ 	int fams_to_skip = cb->args[1];
++	bool need_locking = chains_to_skip || fams_to_skip;
++
++	if (need_locking)
++		genl_lock();
+ 
+ 	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
+ 		n = 0;
+@@ -810,6 +814,9 @@ errout:
+ 	cb->args[0] = i;
+ 	cb->args[1] = n;
+ 
++	if (need_locking)
++		genl_unlock();
++
+ 	return skb->len;
+ }
+ 
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 73405e0..64fcbae 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -876,6 +876,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
+ 		cfg80211_leave_mesh(rdev, dev);
+ 		break;
+ 	case NL80211_IFTYPE_AP:
++	case NL80211_IFTYPE_P2P_GO:
+ 		cfg80211_stop_ap(rdev, dev);
+ 		break;
+ 	default:
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index db8ead9..448c034 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -471,10 +471,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
+ 			goto out_unlock;
+ 		}
+ 		*rdev = wiphy_to_dev((*wdev)->wiphy);
+-		cb->args[0] = (*rdev)->wiphy_idx;
++		/* 0 is the first index - add 1 to parse only once */
++		cb->args[0] = (*rdev)->wiphy_idx + 1;
+ 		cb->args[1] = (*wdev)->identifier;
+ 	} else {
+-		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]);
++		/* subtract the 1 again here */
++		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
+ 		struct wireless_dev *tmp;
+ 
+ 		if (!wiphy) {
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 24400cf..ad22dec 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -519,7 +519,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
+ }
+ 
+ #define nid_has_mute(codec, nid, dir) \
+-	check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE)
++	check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
+ #define nid_has_volume(codec, nid, dir) \
+ 	check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
+ 
+@@ -621,7 +621,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
+ 		if (enable)
+ 			val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
+ 	}
+-	if (caps & AC_AMPCAP_MUTE) {
++	if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
+ 		if (!enable)
+ 			val |= HDA_AMP_MUTE;
+ 	}
+@@ -645,7 +645,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec,
+ {
+ 	unsigned int mask = 0xff;
+ 
+-	if (caps & AC_AMPCAP_MUTE) {
++	if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
+ 		if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
+ 			mask &= ~0x80;
+ 	}
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 051c03d..57f9f2a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1027,6 +1027,7 @@ enum {
+ 	ALC880_FIXUP_GPIO2,
+ 	ALC880_FIXUP_MEDION_RIM,
+ 	ALC880_FIXUP_LG,
++	ALC880_FIXUP_LG_LW25,
+ 	ALC880_FIXUP_W810,
+ 	ALC880_FIXUP_EAPD_COEF,
+ 	ALC880_FIXUP_TCL_S700,
+@@ -1085,6 +1086,14 @@ static const struct hda_fixup alc880_fixups[] = {
+ 			{ }
+ 		}
+ 	},
++	[ALC880_FIXUP_LG_LW25] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x1a, 0x0181344f }, /* line-in */
++			{ 0x1b, 0x0321403f }, /* headphone */
++			{ }
++		}
++	},
+ 	[ALC880_FIXUP_W810] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -1337,6 +1346,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
+ 	SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
+ 	SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
++	SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25),
+ 	SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
+ 
+ 	/* Below is the copied entries from alc880_quirks.c.
+@@ -4200,6 +4210,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
+ 	SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
++	SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+ 	SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
+index 987f728..ee25f32 100644
+--- a/sound/soc/codecs/cs42l52.c
++++ b/sound/soc/codecs/cs42l52.c
+@@ -451,7 +451,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
+ 	SOC_ENUM("Beep Pitch", beep_pitch_enum),
+ 	SOC_ENUM("Beep on Time", beep_ontime_enum),
+ 	SOC_ENUM("Beep off Time", beep_offtime_enum),
+-	SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv),
++	SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x07, 0x1f, hl_tlv),
+ 	SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1),
+ 	SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum),
+ 	SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index c7051c4..3606383 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -682,13 +682,14 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
+ 		return -EINVAL;
+ 	}
+ 
+-	path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
+-				list_sink);
+-	if (!path) {
++	if (list_empty(&w->sources)) {
+ 		dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name);
+ 		return -EINVAL;
+ 	}
+ 
++	path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
++				list_sink);
++
+ 	ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
+index 31d092d..a5432b1 100644
+--- a/sound/soc/tegra/tegra30_i2s.c
++++ b/sound/soc/tegra/tegra30_i2s.c
+@@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
+ 		reg = TEGRA30_I2S_CIF_RX_CTRL;
+ 	} else {
+ 		val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
+-		reg = TEGRA30_I2S_CIF_RX_CTRL;
++		reg = TEGRA30_I2S_CIF_TX_CTRL;
+ 	}
+ 
+ 	regmap_write(i2s->regmap, reg, val);
+diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
+index 2672242..f3dd726 100644
+--- a/sound/usb/6fire/midi.c
++++ b/sound/usb/6fire/midi.c
+@@ -19,6 +19,10 @@
+ #include "chip.h"
+ #include "comm.h"
+ 
++enum {
++	MIDI_BUFSIZE = 64
++};
++
+ static void usb6fire_midi_out_handler(struct urb *urb)
+ {
+ 	struct midi_runtime *rt = urb->context;
+@@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip)
+ 	if (!rt)
+ 		return -ENOMEM;
+ 
++	rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
++	if (!rt->out_buffer) {
++		kfree(rt);
++		return -ENOMEM;
++	}
++
+ 	rt->chip = chip;
+ 	rt->in_received = usb6fire_midi_in_received;
+ 	rt->out_buffer[0] = 0x80; /* 'send midi' command */
+@@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip)
+ 
+ 	ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
+ 	if (ret < 0) {
++		kfree(rt->out_buffer);
+ 		kfree(rt);
+ 		snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
+ 		return ret;
+@@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip)
+ 
+ void usb6fire_midi_destroy(struct sfire_chip *chip)
+ {
+-	kfree(chip->midi);
++	struct midi_runtime *rt = chip->midi;
++
++	kfree(rt->out_buffer);
++	kfree(rt);
+ 	chip->midi = NULL;
+ }
+diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
+index c321006..84851b9 100644
+--- a/sound/usb/6fire/midi.h
++++ b/sound/usb/6fire/midi.h
+@@ -16,10 +16,6 @@
+ 
+ #include "common.h"
+ 
+-enum {
+-	MIDI_BUFSIZE = 64
+-};
+-
+ struct midi_runtime {
+ 	struct sfire_chip *chip;
+ 	struct snd_rawmidi *instance;
+@@ -32,7 +28,7 @@ struct midi_runtime {
+ 	struct snd_rawmidi_substream *out;
+ 	struct urb out_urb;
+ 	u8 out_serial; /* serial number of out packet */
+-	u8 out_buffer[MIDI_BUFSIZE];
++	u8 *out_buffer;
+ 	int buffer_offset;
+ 
+ 	void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
+diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
+index 074aaf7..25f9e61 100644
+--- a/sound/usb/6fire/pcm.c
++++ b/sound/usb/6fire/pcm.c
+@@ -580,6 +580,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
+ 	urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
+ }
+ 
++static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
++{
++	int i;
++
++	for (i = 0; i < PCM_N_URBS; i++) {
++		rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
++				* PCM_MAX_PACKET_SIZE, GFP_KERNEL);
++		if (!rt->out_urbs[i].buffer)
++			return -ENOMEM;
++		rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
++				* PCM_MAX_PACKET_SIZE, GFP_KERNEL);
++		if (!rt->in_urbs[i].buffer)
++			return -ENOMEM;
++	}
++	return 0;
++}
++
++static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
++{
++	int i;
++
++	for (i = 0; i < PCM_N_URBS; i++) {
++		kfree(rt->out_urbs[i].buffer);
++		kfree(rt->in_urbs[i].buffer);
++	}
++}
++
+ int usb6fire_pcm_init(struct sfire_chip *chip)
+ {
+ 	int i;
+@@ -591,6 +618,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
+ 	if (!rt)
+ 		return -ENOMEM;
+ 
++	ret = usb6fire_pcm_buffers_init(rt);
++	if (ret) {
++		usb6fire_pcm_buffers_destroy(rt);
++		kfree(rt);
++		return ret;
++	}
++
+ 	rt->chip = chip;
+ 	rt->stream_state = STREAM_DISABLED;
+ 	rt->rate = ARRAY_SIZE(rates);
+@@ -612,6 +646,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
+ 
+ 	ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
+ 	if (ret < 0) {
++		usb6fire_pcm_buffers_destroy(rt);
+ 		kfree(rt);
+ 		snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
+ 		return ret;
+@@ -627,6 +662,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
+ 			snd_dma_continuous_data(GFP_KERNEL),
+ 			MAX_BUFSIZE, MAX_BUFSIZE);
+ 	if (ret) {
++		usb6fire_pcm_buffers_destroy(rt);
+ 		kfree(rt);
+ 		snd_printk(KERN_ERR PREFIX
+ 				"error preallocating pcm buffers.\n");
+@@ -671,6 +707,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip)
+ 
+ void usb6fire_pcm_destroy(struct sfire_chip *chip)
+ {
+-	kfree(chip->pcm);
++	struct pcm_runtime *rt = chip->pcm;
++
++	usb6fire_pcm_buffers_destroy(rt);
++	kfree(rt);
+ 	chip->pcm = NULL;
+ }
+diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
+index 9b01133..f5779d6 100644
+--- a/sound/usb/6fire/pcm.h
++++ b/sound/usb/6fire/pcm.h
+@@ -32,7 +32,7 @@ struct pcm_urb {
+ 	struct urb instance;
+ 	struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
+ 	/* END DO NOT SEPARATE */
+-	u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
++	u8 *buffer;
+ 
+ 	struct pcm_urb *peer;
+ };
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index d543808..95558ef 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
+ 	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+ 	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
++	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
+ 	case USB_ID(0x046d, 0x0991):
+ 	/* Most audio usb devices lie about volume resolution.
+ 	 * Most Logitech webcams have res = 384.

diff --git a/3.10.9/1008_linux-3.10.9.patch b/3.10.9/1008_linux-3.10.9.patch
new file mode 100644
index 0000000..e91b33a
--- /dev/null
+++ b/3.10.9/1008_linux-3.10.9.patch
@@ -0,0 +1,37 @@
+diff --git a/Makefile b/Makefile
+index 1a21612..4b31d62 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = TOSSUG Baby Fish
+ 
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index ba6e55d..1076fe1 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -789,10 +789,6 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
+ 	struct net *net = sock_net(skb->sk);
+ 	int chains_to_skip = cb->args[0];
+ 	int fams_to_skip = cb->args[1];
+-	bool need_locking = chains_to_skip || fams_to_skip;
+-
+-	if (need_locking)
+-		genl_lock();
+ 
+ 	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
+ 		n = 0;
+@@ -814,9 +810,6 @@ errout:
+ 	cb->args[0] = i;
+ 	cb->args[1] = n;
+ 
+-	if (need_locking)
+-		genl_unlock();
+-
+ 	return skb->len;
+ }
+ 

diff --git a/3.10.7/4420_grsecurity-2.9.1-3.10.7-201308192211.patch b/3.10.9/4420_grsecurity-2.9.1-3.10.9-201308202015.patch
similarity index 99%
rename from 3.10.7/4420_grsecurity-2.9.1-3.10.7-201308192211.patch
rename to 3.10.9/4420_grsecurity-2.9.1-3.10.9-201308202015.patch
index 73ebf27..24d81a0 100644
--- a/3.10.7/4420_grsecurity-2.9.1-3.10.7-201308192211.patch
+++ b/3.10.9/4420_grsecurity-2.9.1-3.10.9-201308202015.patch
@@ -281,7 +281,7 @@ index 2fe6e76..889ee23 100644
  
  	pcd.		[PARIDE]
 diff --git a/Makefile b/Makefile
-index 33e36ab..31f1dc8 100644
+index 4b31d62..ac99d49 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -2144,33 +2144,6 @@ index f00b569..aa5bb41 100644
  
  /*
   * Change these and you break ASM code in entry-common.S
-diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
-index bdf2b84..aa9b4ac 100644
---- a/arch/arm/include/asm/tlb.h
-+++ b/arch/arm/include/asm/tlb.h
-@@ -43,6 +43,7 @@ struct mmu_gather {
- 	struct mm_struct	*mm;
- 	unsigned int		fullmm;
- 	struct vm_area_struct	*vma;
-+	unsigned long		start, end;
- 	unsigned long		range_start;
- 	unsigned long		range_end;
- 	unsigned int		nr;
-@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
- }
- 
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
--	tlb->fullmm = fullmm;
-+	tlb->fullmm = !(start | (end+1));
-+	tlb->start = start;
-+	tlb->end = end;
- 	tlb->vma = NULL;
- 	tlb->max = ARRAY_SIZE(tlb->local);
- 	tlb->pages = tlb->local;
 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
 index 7e1f760..de33b13 100644
 --- a/arch/arm/include/asm/uaccess.h
@@ -2889,33 +2862,18 @@ index 07314af..c46655c 100644
  	flush_icache_range((uintptr_t)(addr),
  			   (uintptr_t)(addr) + size);
 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
-index d9f5cd4..e186ee1 100644
+index e19edc6..e186ee1 100644
 --- a/arch/arm/kernel/perf_event.c
 +++ b/arch/arm/kernel/perf_event.c
-@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
- static int
- armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
- {
--	int mapping = (*event_map)[config];
-+	int mapping;
-+
-+	if (config >= PERF_COUNT_HW_MAX)
-+		return -EINVAL;
-+
-+	mapping = (*event_map)[config];
- 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
- }
- 
-@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
- 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- 	struct pmu *leader_pmu = event->group_leader->pmu;
+@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+ 	int mapping;
  
-+	if (is_software_event(event))
-+		return 1;
-+
- 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
- 		return 1;
+ 	if (config >= PERF_COUNT_HW_MAX)
+-		return -ENOENT;
++		return -EINVAL;
  
+ 	mapping = (*event_map)[config];
+ 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
 index 1f2740e..b36e225 100644
 --- a/arch/arm/kernel/perf_event_cpu.c
@@ -2930,21 +2888,9 @@ index 1f2740e..b36e225 100644
  };
  
 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 5bc2615..4f1a0c2 100644
+index 5bc2615..dcd439f 100644
 --- a/arch/arm/kernel/process.c
 +++ b/arch/arm/kernel/process.c
-@@ -28,10 +28,10 @@
- #include <linux/tick.h>
- #include <linux/utsname.h>
- #include <linux/uaccess.h>
--#include <linux/random.h>
- #include <linux/hw_breakpoint.h>
- #include <linux/cpuidle.h>
- #include <linux/leds.h>
-+#include <linux/random.h>
- 
- #include <asm/cacheflush.h>
- #include <asm/idmap.h>
 @@ -223,6 +223,7 @@ void machine_power_off(void)
  
  	if (pm_power_off)
@@ -4543,33 +4489,6 @@ index ce6d763..cfea917 100644
  
  extern void *samsung_dmadev_get_ops(void);
  extern void *s3c_dma_get_ops(void);
-diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
-index 654f096..5546653 100644
---- a/arch/arm64/include/asm/tlb.h
-+++ b/arch/arm64/include/asm/tlb.h
-@@ -35,6 +35,7 @@ struct mmu_gather {
- 	struct mm_struct	*mm;
- 	unsigned int		fullmm;
- 	struct vm_area_struct	*vma;
-+	unsigned long		start, end;
- 	unsigned long		range_start;
- 	unsigned long		range_end;
- 	unsigned int		nr;
-@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
- }
- 
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
--	tlb->fullmm = fullmm;
-+	tlb->fullmm = !(start | (end+1));
-+	tlb->start = start;
-+	tlb->end = end;
- 	tlb->vma = NULL;
- 	tlb->max = ARRAY_SIZE(tlb->local);
- 	tlb->pages = tlb->local;
 diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
 index f4726dc..39ed646 100644
 --- a/arch/arm64/kernel/debug-monitors.c
@@ -4979,45 +4898,6 @@ index 54ff557..70c88b7 100644
  }
  
  static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
-index ef3a9de..bc5efc7 100644
---- a/arch/ia64/include/asm/tlb.h
-+++ b/arch/ia64/include/asm/tlb.h
-@@ -22,7 +22,7 @@
-  * unmapping a portion of the virtual address space, these hooks are called according to
-  * the following template:
-  *
-- *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
-+ *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
-  *	{
-  *	  for each vma that needs a shootdown do {
-  *	    tlb_start_vma(tlb, vma);
-@@ -58,6 +58,7 @@ struct mmu_gather {
- 	unsigned int		max;
- 	unsigned char		fullmm;		/* non-zero means full mm flush */
- 	unsigned char		need_flush;	/* really unmapped some PTEs? */
-+	unsigned long		start, end;
- 	unsigned long		start_addr;
- 	unsigned long		end_addr;
- 	struct page		**pages;
-@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
- 
- 
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
- 	tlb->max = ARRAY_SIZE(tlb->local);
- 	tlb->pages = tlb->local;
- 	tlb->nr = 0;
--	tlb->fullmm = full_mm_flush;
-+	tlb->fullmm = !(start | (end+1));
-+	tlb->start = start;
-+	tlb->end = end;
- 	tlb->start_addr = ~0UL;
- }
- 
 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
 index 449c8c0..18965fb 100644
 --- a/arch/ia64/include/asm/uaccess.h
@@ -7645,34 +7525,6 @@ index c4a93d6..4d2a9b4 100644
 +#define arch_align_stack(x) ((x) & ~0xfUL)
  
  #endif /* __ASM_EXEC_H */
-diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
-index b75d7d6..6d6d92b 100644
---- a/arch/s390/include/asm/tlb.h
-+++ b/arch/s390/include/asm/tlb.h
-@@ -32,6 +32,7 @@ struct mmu_gather {
- 	struct mm_struct *mm;
- 	struct mmu_table_batch *batch;
- 	unsigned int fullmm;
-+	unsigned long start, end;
- };
- 
- struct mmu_table_batch {
-@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
- 
- static inline void tlb_gather_mmu(struct mmu_gather *tlb,
- 				  struct mm_struct *mm,
--				  unsigned int full_mm_flush)
-+				  unsigned long start,
-+				  unsigned long end)
- {
- 	tlb->mm = mm;
--	tlb->fullmm = full_mm_flush;
-+	tlb->start = start;
-+	tlb->end = end;
-+	tlb->fullmm = !(start | (end+1));
- 	tlb->batch = NULL;
- 	if (tlb->fullmm)
- 		__tlb_flush_mm(mm);
 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
 index 9c33ed4..e40cbef 100644
 --- a/arch/s390/include/asm/uaccess.h
@@ -7941,25 +7793,6 @@ index ef9e555..331bd29 100644
  
  #define __read_mostly __attribute__((__section__(".data..read_mostly")))
  
-diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
-index e61d43d..362192e 100644
---- a/arch/sh/include/asm/tlb.h
-+++ b/arch/sh/include/asm/tlb.h
-@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
- }
- 
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
--	tlb->fullmm = full_mm_flush;
-+	tlb->start = start;
-+	tlb->end = end;
-+	tlb->fullmm = !(start | (end+1));
- 
- 	init_tlb_gather(tlb);
- }
 diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
 index 03f2b55..b0270327 100644
 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -10595,25 +10428,6 @@ index 0032f92..cd151e0 100644
  
  #ifdef CONFIG_64BIT
  #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
-diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
-index 4febacd..29b0301 100644
---- a/arch/um/include/asm/tlb.h
-+++ b/arch/um/include/asm/tlb.h
-@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
- }
- 
- static inline void
--tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
-+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
--	tlb->fullmm = full_mm_flush;
-+	tlb->start = start;
-+	tlb->end = end;
-+	tlb->fullmm = !(start | (end+1));
- 
- 	init_tlb_gather(tlb);
- }
 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
 index bbcef52..6a2a483 100644
 --- a/arch/um/kernel/process.c
@@ -15963,7 +15777,7 @@ index e642300..0ef8f31 100644
  #define pgprot_writecombine	pgprot_writecombine
  extern pgprot_t pgprot_writecombine(pgprot_t prot);
 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 22224b3..c5d8d7d 100644
+index 22224b3..b3a2f90 100644
 --- a/arch/x86/include/asm/processor.h
 +++ b/arch/x86/include/asm/processor.h
 @@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
@@ -16006,7 +15820,39 @@ index 22224b3..c5d8d7d 100644
  #endif
  #ifdef CONFIG_X86_32
  	unsigned long		ip;
-@@ -823,11 +836,18 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -552,29 +565,8 @@ static inline void load_sp0(struct tss_struct *tss,
+ extern unsigned long mmu_cr4_features;
+ extern u32 *trampoline_cr4_features;
+ 
+-static inline void set_in_cr4(unsigned long mask)
+-{
+-	unsigned long cr4;
+-
+-	mmu_cr4_features |= mask;
+-	if (trampoline_cr4_features)
+-		*trampoline_cr4_features = mmu_cr4_features;
+-	cr4 = read_cr4();
+-	cr4 |= mask;
+-	write_cr4(cr4);
+-}
+-
+-static inline void clear_in_cr4(unsigned long mask)
+-{
+-	unsigned long cr4;
+-
+-	mmu_cr4_features &= ~mask;
+-	if (trampoline_cr4_features)
+-		*trampoline_cr4_features = mmu_cr4_features;
+-	cr4 = read_cr4();
+-	cr4 &= ~mask;
+-	write_cr4(cr4);
+-}
++extern void set_in_cr4(unsigned long mask);
++extern void clear_in_cr4(unsigned long mask);
+ 
+ typedef struct {
+ 	unsigned long		seg;
+@@ -823,11 +815,18 @@ static inline void spin_lock_prefetch(const void *x)
   */
  #define TASK_SIZE		PAGE_OFFSET
  #define TASK_SIZE_MAX		TASK_SIZE
@@ -16027,7 +15873,7 @@ index 22224b3..c5d8d7d 100644
  	.vm86_info		= NULL,					  \
  	.sysenter_cs		= __KERNEL_CS,				  \
  	.io_bitmap_ptr		= NULL,					  \
-@@ -841,7 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -841,7 +840,7 @@ static inline void spin_lock_prefetch(const void *x)
   */
  #define INIT_TSS  {							  \
  	.x86_tss = {							  \
@@ -16036,7 +15882,7 @@ index 22224b3..c5d8d7d 100644
  		.ss0		= __KERNEL_DS,				  \
  		.ss1		= __KERNEL_CS,				  \
  		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
-@@ -852,11 +872,7 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -852,11 +851,7 @@ static inline void spin_lock_prefetch(const void *x)
  extern unsigned long thread_saved_pc(struct task_struct *tsk);
  
  #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
@@ -16049,7 +15895,7 @@ index 22224b3..c5d8d7d 100644
  
  /*
   * The below -8 is to reserve 8 bytes on top of the ring0 stack.
-@@ -871,7 +887,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -871,7 +866,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  #define task_pt_regs(task)                                             \
  ({                                                                     \
         struct pt_regs *__regs__;                                       \
@@ -16058,7 +15904,7 @@ index 22224b3..c5d8d7d 100644
         __regs__ - 1;                                                   \
  })
  
-@@ -881,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -881,13 +876,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  /*
   * User space process size. 47bits minus one guard page.
   */
@@ -16074,7 +15920,7 @@ index 22224b3..c5d8d7d 100644
  
  #define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
  					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-@@ -898,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -898,11 +893,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  #define STACK_TOP_MAX		TASK_SIZE_MAX
  
  #define INIT_THREAD  { \
@@ -16088,7 +15934,7 @@ index 22224b3..c5d8d7d 100644
  }
  
  /*
-@@ -930,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+@@ -930,6 +925,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
   */
  #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
  
@@ -16099,7 +15945,7 @@ index 22224b3..c5d8d7d 100644
  #define KSTK_EIP(task)		(task_pt_regs(task)->ip)
  
  /* Get/set a process' ability to use the timestamp counter instruction */
-@@ -942,7 +962,8 @@ extern int set_tsc_mode(unsigned int val);
+@@ -942,7 +941,8 @@ extern int set_tsc_mode(unsigned int val);
  extern u16 amd_get_nb_id(int cpu);
  
  struct aperfmperf {
@@ -16109,7 +15955,7 @@ index 22224b3..c5d8d7d 100644
  };
  
  static inline void get_aperfmperf(struct aperfmperf *am)
-@@ -970,7 +991,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
+@@ -970,7 +970,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
  	return ratio;
  }
  
@@ -16118,7 +15964,7 @@ index 22224b3..c5d8d7d 100644
  extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
  
  void default_idle(void);
-@@ -980,6 +1001,6 @@ bool xen_set_default_idle(void);
+@@ -980,6 +980,6 @@ bool xen_set_default_idle(void);
  #define xen_set_default_idle 0
  #endif
  
@@ -18612,7 +18458,7 @@ index 5013a48..0782c53 100644
  		if (c->x86_model == 3 && c->x86_mask == 0)
  			size = 64;
 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 22018f7..2ae0e75 100644
+index 22018f7..df77e23 100644
 --- a/arch/x86/kernel/cpu/common.c
 +++ b/arch/x86/kernel/cpu/common.c
 @@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
@@ -18676,7 +18522,7 @@ index 22018f7..2ae0e75 100644
  static int __init x86_xsave_setup(char *s)
  {
  	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-@@ -288,6 +234,53 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+@@ -288,6 +234,57 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
  		set_in_cr4(X86_CR4_SMAP);
  }
  
@@ -18700,7 +18546,9 @@ index 22018f7..2ae0e75 100644
 +
 +#ifdef CONFIG_PAX_MEMORY_UDEREF
 +		if (clone_pgd_mask != ~(pgdval_t)0UL) {
++			pax_open_kernel();
 +			pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++			pax_close_kernel();
 +			printk("PAX: slow and weak UDEREF enabled\n");
 +		} else
 +			printk("PAX: UDEREF disabled\n");
@@ -18713,7 +18561,9 @@ index 22018f7..2ae0e75 100644
 +	set_in_cr4(X86_CR4_PCIDE);
 +
 +#ifdef CONFIG_PAX_MEMORY_UDEREF
++	pax_open_kernel();
 +	clone_pgd_mask = ~(pgdval_t)0UL;
++	pax_close_kernel();
 +	if (pax_user_shadow_base)
 +		printk("PAX: weak UDEREF enabled\n");
 +	else {
@@ -18730,7 +18580,7 @@ index 22018f7..2ae0e75 100644
  /*
   * Some CPU features depend on higher CPUID levels, which may not always
   * be available due to CPUID level capping or broken virtualization
-@@ -386,7 +379,7 @@ void switch_to_new_gdt(int cpu)
+@@ -386,7 +383,7 @@ void switch_to_new_gdt(int cpu)
  {
  	struct desc_ptr gdt_descr;
  
@@ -18739,7 +18589,7 @@ index 22018f7..2ae0e75 100644
  	gdt_descr.size = GDT_SIZE - 1;
  	load_gdt(&gdt_descr);
  	/* Reload the per-cpu base */
-@@ -874,6 +867,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -874,6 +871,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
  	setup_smep(c);
  	setup_smap(c);
  
@@ -18750,7 +18600,7 @@ index 22018f7..2ae0e75 100644
  	/*
  	 * The vendor-specific functions might have changed features.
  	 * Now we do "generic changes."
-@@ -882,6 +879,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+@@ -882,6 +883,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
  	/* Filter out anything that depends on CPUID levels we don't have */
  	filter_cpuid_features(c, true);
  
@@ -18761,7 +18611,7 @@ index 22018f7..2ae0e75 100644
  	/* If the model name is still unset, do table lookup. */
  	if (!c->x86_model_id[0]) {
  		const char *p;
-@@ -1069,10 +1070,12 @@ static __init int setup_disablecpuid(char *arg)
+@@ -1069,10 +1074,12 @@ static __init int setup_disablecpuid(char *arg)
  }
  __setup("clearcpuid=", setup_disablecpuid);
  
@@ -18776,7 +18626,7 @@ index 22018f7..2ae0e75 100644
  
  DEFINE_PER_CPU_FIRST(union irq_stack_union,
  		     irq_stack_union) __aligned(PAGE_SIZE);
-@@ -1086,7 +1089,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
+@@ -1086,7 +1093,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
  EXPORT_PER_CPU_SYMBOL(current_task);
  
  DEFINE_PER_CPU(unsigned long, kernel_stack) =
@@ -18785,7 +18635,7 @@ index 22018f7..2ae0e75 100644
  EXPORT_PER_CPU_SYMBOL(kernel_stack);
  
  DEFINE_PER_CPU(char *, irq_stack_ptr) =
-@@ -1231,7 +1234,7 @@ void __cpuinit cpu_init(void)
+@@ -1231,7 +1238,7 @@ void __cpuinit cpu_init(void)
  	load_ucode_ap();
  
  	cpu = stack_smp_processor_id();
@@ -18794,7 +18644,7 @@ index 22018f7..2ae0e75 100644
  	oist = &per_cpu(orig_ist, cpu);
  
  #ifdef CONFIG_NUMA
-@@ -1257,7 +1260,7 @@ void __cpuinit cpu_init(void)
+@@ -1257,7 +1264,7 @@ void __cpuinit cpu_init(void)
  	switch_to_new_gdt(cpu);
  	loadsegment(fs, 0);
  
@@ -18803,7 +18653,7 @@ index 22018f7..2ae0e75 100644
  
  	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
  	syscall_init();
-@@ -1266,7 +1269,6 @@ void __cpuinit cpu_init(void)
+@@ -1266,7 +1273,6 @@ void __cpuinit cpu_init(void)
  	wrmsrl(MSR_KERNEL_GS_BASE, 0);
  	barrier();
  
@@ -18811,7 +18661,7 @@ index 22018f7..2ae0e75 100644
  	enable_x2apic();
  
  	/*
-@@ -1318,7 +1320,7 @@ void __cpuinit cpu_init(void)
+@@ -1318,7 +1324,7 @@ void __cpuinit cpu_init(void)
  {
  	int cpu = smp_processor_id();
  	struct task_struct *curr = current;
@@ -19214,7 +19064,7 @@ index a9e2207..d70c83a 100644
  
  	intel_ds_init();
 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
-index 52441a2..f94fae8 100644
+index 8aac56b..588fb13 100644
 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
 @@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
@@ -20482,7 +20332,7 @@ index 8f3e2de..6b71e39 100644
  
  /*
 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 7272089..ee191c7 100644
+index 7272089..0b74104 100644
 --- a/arch/x86/kernel/entry_64.S
 +++ b/arch/x86/kernel/entry_64.S
 @@ -59,6 +59,8 @@
@@ -20838,14 +20688,22 @@ index 7272089..ee191c7 100644
 +	SET_RDI_INTO_CR3
 +	jmp 2f
 +1:
++
 +	mov %rdi,%rbx
++
++#ifdef CONFIG_PAX_KERNEXEC
++	GET_CR0_INTO_RDI
++	btr $16,%rdi
++	jnc 3f
++	SET_RDI_INTO_CR0
++#endif
++
 +	add $__START_KERNEL_map,%rbx
 +	sub phys_base(%rip),%rbx
 +
 +#ifdef CONFIG_PARAVIRT
 +	cmpl $0, pv_info+PARAVIRT_enabled
 +	jz 1f
-+	pushq %rdi
 +	i = 0
 +	.rept USER_PGD_PTRS
 +	mov i*8(%rbx),%rsi
@@ -20854,18 +20712,10 @@ index 7272089..ee191c7 100644
 +	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
 +	i = i + 1
 +	.endr
-+	popq %rdi
 +	jmp 2f
 +1:
 +#endif
 +
-+#ifdef CONFIG_PAX_KERNEXEC
-+	GET_CR0_INTO_RDI
-+	btr $16,%rdi
-+	jnc 3f
-+	SET_RDI_INTO_CR0
-+#endif
-+
 +	i = 0
 +	.rept USER_PGD_PTRS
 +	movb $0x67,i*8(%rbx)
@@ -22497,7 +22347,7 @@ index a836860..1b5c665 100644
 -	.skip PAGE_SIZE
 +	.fill 512,8,0
 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
-index 0fa6912..37fce70 100644
+index 0fa6912..b37438b 100644
 --- a/arch/x86/kernel/i386_ksyms_32.c
 +++ b/arch/x86/kernel/i386_ksyms_32.c
 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
@@ -22513,7 +22363,7 @@ index 0fa6912..37fce70 100644
  
  EXPORT_SYMBOL(__get_user_1);
  EXPORT_SYMBOL(__get_user_2);
-@@ -37,3 +41,7 @@ EXPORT_SYMBOL(strstr);
+@@ -37,3 +41,11 @@ EXPORT_SYMBOL(strstr);
  
  EXPORT_SYMBOL(csum_partial);
  EXPORT_SYMBOL(empty_zero_page);
@@ -22521,6 +22371,10 @@ index 0fa6912..37fce70 100644
 +#ifdef CONFIG_PAX_KERNEXEC
 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
 +#endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
 index f7ea30d..6318acc 100644
 --- a/arch/x86/kernel/i387.c
@@ -24272,7 +24126,7 @@ index f2bb9c9..bed145d7 100644
  
  1:
 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index 56f7fcf..3b88ad1 100644
+index 56f7fcf..2cfe4f1 100644
 --- a/arch/x86/kernel/setup.c
 +++ b/arch/x86/kernel/setup.c
 @@ -110,6 +110,7 @@
@@ -24283,7 +24137,7 @@ index 56f7fcf..3b88ad1 100644
  
  /*
   * max_low_pfn_mapped: highest direct mapped pfn under 4GB
-@@ -205,10 +206,12 @@ EXPORT_SYMBOL(boot_cpu_data);
+@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
  #endif
  
  
@@ -24298,8 +24152,46 @@ index 56f7fcf..3b88ad1 100644
 +unsigned long mmu_cr4_features __read_only;
  #endif
  
++void set_in_cr4(unsigned long mask)
++{
++	unsigned long cr4 = read_cr4();
++
++	if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
++		return;
++
++	pax_open_kernel();
++	mmu_cr4_features |= mask;
++	pax_close_kernel();
++
++	if (trampoline_cr4_features)
++		*trampoline_cr4_features = mmu_cr4_features;
++	cr4 |= mask;
++	write_cr4(cr4);
++}
++EXPORT_SYMBOL(set_in_cr4);
++
++void clear_in_cr4(unsigned long mask)
++{
++	unsigned long cr4 = read_cr4();
++
++	if (!(cr4 & mask) && cr4 == mmu_cr4_features)
++		return;
++
++	pax_open_kernel();
++	mmu_cr4_features &= ~mask;
++	pax_close_kernel();
++
++	if (trampoline_cr4_features)
++		*trampoline_cr4_features = mmu_cr4_features;
++	cr4 &= ~mask;
++	write_cr4(cr4);
++}
++EXPORT_SYMBOL(clear_in_cr4);
++
  /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
-@@ -444,7 +447,7 @@ static void __init parse_setup_data(void)
+ int bootloader_type, bootloader_version;
+ 
+@@ -444,7 +483,7 @@ static void __init parse_setup_data(void)
  
  		switch (data->type) {
  		case SETUP_E820_EXT:
@@ -24308,7 +24200,7 @@ index 56f7fcf..3b88ad1 100644
  			break;
  		case SETUP_DTB:
  			add_dtb(pa_data);
-@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
+@@ -771,7 +810,7 @@ static void __init trim_bios_range(void)
  	 * area (640->1Mb) as ram even though it is not.
  	 * take them out.
  	 */
@@ -24317,7 +24209,7 @@ index 56f7fcf..3b88ad1 100644
  
  	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
  }
-@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
+@@ -779,7 +818,7 @@ static void __init trim_bios_range(void)
  /* called before trim_bios_range() to spare extra sanitize */
  static void __init e820_add_kernel_range(void)
  {
@@ -24326,7 +24218,7 @@ index 56f7fcf..3b88ad1 100644
  	u64 size = __pa_symbol(_end) - start;
  
  	/*
-@@ -841,8 +844,12 @@ static void __init trim_low_memory_range(void)
+@@ -841,8 +880,12 @@ static void __init trim_low_memory_range(void)
  
  void __init setup_arch(char **cmdline_p)
  {
@@ -24339,7 +24231,7 @@ index 56f7fcf..3b88ad1 100644
  
  	early_reserve_initrd();
  
-@@ -934,14 +941,14 @@ void __init setup_arch(char **cmdline_p)
+@@ -934,14 +977,14 @@ void __init setup_arch(char **cmdline_p)
  
  	if (!boot_params.hdr.root_flags)
  		root_mountflags &= ~MS_RDONLY;
@@ -24842,7 +24734,7 @@ index 0000000..5877189
 +	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 +}
 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index dbded5a..ace2781 100644
+index 48f8375..ace2781 100644
 --- a/arch/x86/kernel/sys_x86_64.c
 +++ b/arch/x86/kernel/sys_x86_64.c
 @@ -81,8 +81,8 @@ out:
@@ -24860,7 +24752,7 @@ index dbded5a..ace2781 100644
  				*begin = new_begin;
  		}
  	} else {
--		*begin = TASK_UNMAPPED_BASE;
+-		*begin = mmap_legacy_base();
 +		*begin = mm->mmap_base;
  		*end = TASK_SIZE;
  	}
@@ -25621,7 +25513,7 @@ index 9a907a6..f83f921 100644
  		     (unsigned long)VSYSCALL_START);
  
 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
-index b014d94..6d6ca7b 100644
+index b014d94..e775258 100644
 --- a/arch/x86/kernel/x8664_ksyms_64.c
 +++ b/arch/x86/kernel/x8664_ksyms_64.c
 @@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
@@ -25633,6 +25525,14 @@ index b014d94..6d6ca7b 100644
  
  EXPORT_SYMBOL(copy_page);
  EXPORT_SYMBOL(clear_page);
+@@ -66,3 +64,7 @@ EXPORT_SYMBOL(empty_zero_page);
+ #ifndef CONFIG_PARAVIRT
+ EXPORT_SYMBOL(native_load_gs_index);
+ #endif
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++EXPORT_SYMBOL(cpu_pgd);
++#endif
 diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
 index 45a14db..075bb9b 100644
 --- a/arch/x86/kernel/x86_init.c
@@ -30668,7 +30568,7 @@ index d87dd6d..bf3fa66 100644
  
  	pte = kmemcheck_pte_lookup(address);
 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
-index 845df68..1d8d29f 100644
+index c1af323..4758dad 100644
 --- a/arch/x86/mm/mmap.c
 +++ b/arch/x86/mm/mmap.c
 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
@@ -30708,8 +30608,8 @@ index 845df68..1d8d29f 100644
   * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
   * does, but not when emulating X86_32
   */
--static unsigned long mmap_legacy_base(void)
-+static unsigned long mmap_legacy_base(struct mm_struct *mm)
+-unsigned long mmap_legacy_base(void)
++unsigned long mmap_legacy_base(struct mm_struct *mm)
  {
 -	if (mmap_is_ia32())
 +	if (mmap_is_ia32()) {
@@ -30726,7 +30626,7 @@ index 845df68..1d8d29f 100644
  		return TASK_UNMAPPED_BASE + mmap_rnd();
  }
  
-@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
+@@ -113,11 +126,23 @@ unsigned long mmap_legacy_base(void)
  void arch_pick_mmap_layout(struct mm_struct *mm)
  {
  	if (mmap_is_legacy()) {
@@ -41972,19 +41872,6 @@ index f975696..4597e21 100644
  
  #ifdef CONFIG_NET_POLL_CONTROLLER
  	/*
-diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
-index 25723d8..925ab8e 100644
---- a/drivers/net/can/usb/peak_usb/pcan_usb.c
-+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
-@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
- 		if ((mc->ptr + rec_len) > mc->end)
- 			goto decode_failed;
- 
--		memcpy(cf->data, mc->ptr, rec_len);
-+		memcpy(cf->data, mc->ptr, cf->can_dlc);
- 		mc->ptr += rec_len;
- 	}
- 
 diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
 index e1d2643..7f4133b 100644
 --- a/drivers/net/ethernet/8390/ax88796.c
@@ -45297,45 +45184,25 @@ index c699a30..b90a5fd 100644
  	pDevice->apdev->netdev_ops = &apdev_netdev_ops;
  
  	pDevice->apdev->type = ARPHRD_IEEE80211;
-diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
-index d7e51e4..d07eaab 100644
---- a/drivers/staging/zcache/tmem.c
-+++ b/drivers/staging/zcache/tmem.c
-@@ -51,7 +51,7 @@
-  * A tmem host implementation must use this function to register callbacks
-  * for memory allocation.
-  */
--static struct tmem_hostops tmem_hostops;
-+static tmem_hostops_no_const tmem_hostops;
- 
- static void tmem_objnode_tree_init(void);
- 
-@@ -65,7 +65,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
-  * A tmem host implementation must use this function to register
-  * callbacks for a page-accessible memory (PAM) implementation.
-  */
--static struct tmem_pamops tmem_pamops;
-+static tmem_pamops_no_const tmem_pamops;
- 
- void tmem_register_pamops(struct tmem_pamops *m)
- {
 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
-index d128ce2..a43980c 100644
+index d128ce2..fc1f9a1 100644
 --- a/drivers/staging/zcache/tmem.h
 +++ b/drivers/staging/zcache/tmem.h
-@@ -226,6 +226,7 @@ struct tmem_pamops {
+@@ -225,7 +225,7 @@ struct tmem_pamops {
+ 	bool (*is_remote)(void *);
  	int (*replace_in_obj)(void *, struct tmem_obj *);
  #endif
- };
-+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
+-};
++} __no_const;
  extern void tmem_register_pamops(struct tmem_pamops *m);
  
  /* memory allocation methods provided by the host implementation */
-@@ -235,6 +236,7 @@ struct tmem_hostops {
+@@ -234,7 +234,7 @@ struct tmem_hostops {
+ 	void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
  	struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
  	void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
- };
-+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
+-};
++} __no_const;
  extern void tmem_register_hostops(struct tmem_hostops *m);
  
  /* core tmem accessor functions */
@@ -47004,7 +46871,7 @@ index d6bea3e..60b250e 100644
  
  /**
 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
-index 6ef94bc..1b41265 100644
+index 028fc83..65bb105 100644
 --- a/drivers/usb/wusbcore/wa-xfer.c
 +++ b/drivers/usb/wusbcore/wa-xfer.c
 @@ -296,7 +296,7 @@ out:
@@ -52866,7 +52733,7 @@ index e4141f2..d8263e8 100644
  		i += packet_length_size;
  		if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
 diff --git a/fs/exec.c b/fs/exec.c
-index ffd7a81..3c84660 100644
+index 1f44670..3c84660 100644
 --- a/fs/exec.c
 +++ b/fs/exec.c
 @@ -55,8 +55,20 @@
@@ -53073,24 +52940,6 @@ index ffd7a81..3c84660 100644
  	/*
  	 * cover the whole range: [new_start, old_end)
  	 */
-@@ -607,7 +653,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
- 		return -ENOMEM;
- 
- 	lru_add_drain();
--	tlb_gather_mmu(&tlb, mm, 0);
-+	tlb_gather_mmu(&tlb, mm, old_start, old_end);
- 	if (new_end > old_start) {
- 		/*
- 		 * when the old and new regions overlap clear from new_end.
-@@ -624,7 +670,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
- 		free_pgd_range(&tlb, old_start, old_end, new_end,
- 			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
- 	}
--	tlb_finish_mmu(&tlb, new_end, old_end);
-+	tlb_finish_mmu(&tlb, old_start, old_end);
- 
- 	/*
- 	 * Shrink the vma to just the new range.  Always succeeds.
 @@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
  	stack_top = arch_align_stack(stack_top);
  	stack_top = PAGE_ALIGN(stack_top);
@@ -58496,7 +58345,7 @@ index 6b6a993..807cccc 100644
  		kfree(s);
  }
 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
-index 3e636d8..350cc48 100644
+index 65fc60a..350cc48 100644
 --- a/fs/proc/task_mmu.c
 +++ b/fs/proc/task_mmu.c
 @@ -11,12 +11,19 @@
@@ -58663,34 +58512,6 @@ index 3e636d8..350cc48 100644
  		   mss.resident >> 10,
  		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
  		   mss.shared_clean  >> 10,
-@@ -792,14 +843,14 @@ typedef struct {
- } pagemap_entry_t;
- 
- struct pagemapread {
--	int pos, len;
-+	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
- 	pagemap_entry_t *buffer;
- };
- 
- #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
- #define PAGEMAP_WALK_MASK	(PMD_MASK)
- 
--#define PM_ENTRY_BYTES      sizeof(u64)
-+#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
- #define PM_STATUS_BITS      3
- #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
- #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
-@@ -1038,8 +1089,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
- 	if (!count)
- 		goto out_task;
- 
--	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
--	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
-+	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
-+	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
- 	ret = -ENOMEM;
- 	if (!pm.buffer)
- 		goto out_task;
 @@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
  	int n;
  	char buffer[50];
@@ -70603,19 +70424,6 @@ index a59ff51..2594a70 100644
  #endif /* CONFIG_MMU */
  
  #endif /* !__ASSEMBLY__ */
-diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
-index 13821c3..5672d7e 100644
---- a/include/asm-generic/tlb.h
-+++ b/include/asm-generic/tlb.h
-@@ -112,7 +112,7 @@ struct mmu_gather {
- 
- #define HAVE_GENERIC_MMU_GATHER
- 
--void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
-+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
- void tlb_flush_mmu(struct mmu_gather *tlb);
- void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
- 							unsigned long end);
 diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
 index c184aa8..d049942 100644
 --- a/include/asm-generic/uaccess.h
@@ -74681,7 +74489,7 @@ index 6dacb93..6174423 100644
  static inline void anon_vma_merge(struct vm_area_struct *vma,
  				  struct vm_area_struct *next)
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 178a8d9..450bf11 100644
+index 3aeb14b..73816a6 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -62,6 +62,7 @@ struct bio_list;
@@ -74701,10 +74509,11 @@ index 178a8d9..450bf11 100644
  extern signed long schedule_timeout_interruptible(signed long timeout);
  extern signed long schedule_timeout_killable(signed long timeout);
  extern signed long schedule_timeout_uninterruptible(signed long timeout);
-@@ -314,6 +315,19 @@ struct nsproxy;
+@@ -314,7 +315,19 @@ struct nsproxy;
  struct user_namespace;
  
  #ifdef CONFIG_MMU
+-extern unsigned long mmap_legacy_base(void);
 +
 +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
 +extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
@@ -74717,11 +74526,11 @@ index 178a8d9..450bf11 100644
 +
 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
-+
++extern unsigned long mmap_legacy_base(struct mm_struct *mm);
  extern void arch_pick_mmap_layout(struct mm_struct *mm);
  extern unsigned long
  arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-@@ -591,6 +605,17 @@ struct signal_struct {
+@@ -592,6 +605,17 @@ struct signal_struct {
  #ifdef CONFIG_TASKSTATS
  	struct taskstats *stats;
  #endif
@@ -74739,7 +74548,7 @@ index 178a8d9..450bf11 100644
  #ifdef CONFIG_AUDIT
  	unsigned audit_tty;
  	unsigned audit_tty_log_passwd;
-@@ -671,6 +696,14 @@ struct user_struct {
+@@ -672,6 +696,14 @@ struct user_struct {
  	struct key *session_keyring;	/* UID's default session keyring */
  #endif
  
@@ -74754,7 +74563,7 @@ index 178a8d9..450bf11 100644
  	/* Hash table maintenance information */
  	struct hlist_node uidhash_node;
  	kuid_t uid;
-@@ -1158,8 +1191,8 @@ struct task_struct {
+@@ -1159,8 +1191,8 @@ struct task_struct {
  	struct list_head thread_group;
  
  	struct completion *vfork_done;		/* for vfork() */
@@ -74765,7 +74574,7 @@ index 178a8d9..450bf11 100644
  
  	cputime_t utime, stime, utimescaled, stimescaled;
  	cputime_t gtime;
-@@ -1184,11 +1217,6 @@ struct task_struct {
+@@ -1185,11 +1217,6 @@ struct task_struct {
  	struct task_cputime cputime_expires;
  	struct list_head cpu_timers[3];
  
@@ -74777,7 +74586,7 @@ index 178a8d9..450bf11 100644
  	char comm[TASK_COMM_LEN]; /* executable name excluding path
  				     - access with [gs]et_task_comm (which lock
  				       it with task_lock())
-@@ -1205,6 +1233,10 @@ struct task_struct {
+@@ -1206,6 +1233,10 @@ struct task_struct {
  #endif
  /* CPU-specific state of this task */
  	struct thread_struct thread;
@@ -74788,7 +74597,7 @@ index 178a8d9..450bf11 100644
  /* filesystem information */
  	struct fs_struct *fs;
  /* open file information */
-@@ -1278,6 +1310,10 @@ struct task_struct {
+@@ -1279,6 +1310,10 @@ struct task_struct {
  	gfp_t lockdep_reclaim_gfp;
  #endif
  
@@ -74799,7 +74608,7 @@ index 178a8d9..450bf11 100644
  /* journalling filesystem info */
  	void *journal_info;
  
-@@ -1316,6 +1352,10 @@ struct task_struct {
+@@ -1317,6 +1352,10 @@ struct task_struct {
  	/* cg_list protected by css_set_lock and tsk->alloc_lock */
  	struct list_head cg_list;
  #endif
@@ -74810,7 +74619,7 @@ index 178a8d9..450bf11 100644
  #ifdef CONFIG_FUTEX
  	struct robust_list_head __user *robust_list;
  #ifdef CONFIG_COMPAT
-@@ -1416,8 +1456,76 @@ struct task_struct {
+@@ -1417,8 +1456,76 @@ struct task_struct {
  	unsigned int	sequential_io;
  	unsigned int	sequential_io_avg;
  #endif
@@ -74887,7 +74696,7 @@ index 178a8d9..450bf11 100644
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
  #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
  
-@@ -1476,7 +1584,7 @@ struct pid_namespace;
+@@ -1477,7 +1584,7 @@ struct pid_namespace;
  pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
  			struct pid_namespace *ns);
  
@@ -74896,7 +74705,7 @@ index 178a8d9..450bf11 100644
  {
  	return tsk->pid;
  }
-@@ -1919,7 +2027,9 @@ void yield(void);
+@@ -1920,7 +2027,9 @@ void yield(void);
  extern struct exec_domain	default_exec_domain;
  
  union thread_union {
@@ -74906,7 +74715,7 @@ index 178a8d9..450bf11 100644
  	unsigned long stack[THREAD_SIZE/sizeof(long)];
  };
  
-@@ -1952,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -1953,6 +2062,7 @@ extern struct pid_namespace init_pid_ns;
   */
  
  extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -74914,7 +74723,7 @@ index 178a8d9..450bf11 100644
  extern struct task_struct *find_task_by_pid_ns(pid_t nr,
  		struct pid_namespace *ns);
  
-@@ -2118,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2119,7 +2229,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
  extern void exit_itimers(struct signal_struct *);
  extern void flush_itimer_signals(void);
  
@@ -74923,7 +74732,7 @@ index 178a8d9..450bf11 100644
  
  extern int allow_signal(int);
  extern int disallow_signal(int);
-@@ -2309,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2310,9 +2420,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
  
  #endif
  
@@ -75489,7 +75298,7 @@ index a5ffd32..0935dea 100644
  extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
  				   unsigned long offset, size_t size,
 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
-index 4147d70..d356a10 100644
+index 84662ec..d8f8adb 100644
 --- a/include/linux/syscalls.h
 +++ b/include/linux/syscalls.h
 @@ -97,8 +97,12 @@ struct sigaltstack;
@@ -78773,7 +78582,7 @@ index 7bb73f9..d7978ed 100644
  {
  	struct signal_struct *sig = current->signal;
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 987b28a..11ee8a5 100644
+index ffbc090..08ceeee 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
@@ -79068,7 +78877,7 @@ index 987b28a..11ee8a5 100644
  		if (clone_flags & CLONE_VFORK) {
  			p->vfork_done = &vfork;
  			init_completion(&vfork);
-@@ -1723,7 +1785,7 @@ void __init proc_caches_init(void)
+@@ -1729,7 +1791,7 @@ void __init proc_caches_init(void)
  	mm_cachep = kmem_cache_create("mm_struct",
  			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
  			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -79077,7 +78886,7 @@ index 987b28a..11ee8a5 100644
  	mmap_init();
  	nsproxy_cache_init();
  }
-@@ -1763,7 +1825,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1769,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
  		return 0;
  
  	/* don't need lock here; in the worst case we'll do useless copy */
@@ -79086,7 +78895,7 @@ index 987b28a..11ee8a5 100644
  		return 0;
  
  	*new_fsp = copy_fs_struct(fs);
-@@ -1875,7 +1937,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1881,7 +1943,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
  			fs = current->fs;
  			spin_lock(&fs->lock);
  			current->fs = new_fs;
@@ -82096,7 +81905,7 @@ index e8b3350..d83d44e 100644
  	.priority = CPU_PRI_MIGRATION,
  };
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index c61a614..d7f3d7e 100644
+index 03b73be..9422b9f 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
 @@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
@@ -82108,7 +81917,7 @@ index c61a614..d7f3d7e 100644
  	p->mm->numa_scan_offset = 0;
  }
  
-@@ -5686,7 +5686,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
+@@ -5687,7 +5687,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
   * run_rebalance_domains is triggered when needed from the scheduler tick.
   * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
   */
@@ -84466,7 +84275,7 @@ index b32b70c..e512eb0 100644
  	set_page_address(page, (void *)vaddr);
  
 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
-index 5cf99bf..5c01c2f 100644
+index 7c5eb85..5c01c2f 100644
 --- a/mm/hugetlb.c
 +++ b/mm/hugetlb.c
 @@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
@@ -84511,15 +84320,6 @@ index 5cf99bf..5c01c2f 100644
  	if (ret)
  		goto out;
  
-@@ -2490,7 +2494,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
- 
- 	mm = vma->vm_mm;
- 
--	tlb_gather_mmu(&tlb, mm, 0);
-+	tlb_gather_mmu(&tlb, mm, start, end);
- 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
- 	tlb_finish_mmu(&tlb, start, end);
- }
 @@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  	return 1;
  }
@@ -84872,39 +84672,10 @@ index ceb0c7f..b2b8e94 100644
  	} else {
  		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
 diff --git a/mm/memory.c b/mm/memory.c
-index 5e50800..7c0340f 100644
+index 5a35443..7c0340f 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
-@@ -211,14 +211,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
-  *	tear-down from @mm. The @fullmm argument is used when @mm is without
-  *	users and we're going to destroy the full address space (exit/execve).
-  */
--void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
-+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
- {
- 	tlb->mm = mm;
- 
--	tlb->fullmm     = fullmm;
-+	/* Is it from 0 to ~0? */
-+	tlb->fullmm     = !(start | (end+1));
- 	tlb->need_flush_all = 0;
--	tlb->start	= -1UL;
--	tlb->end	= 0;
-+	tlb->start	= start;
-+	tlb->end	= end;
- 	tlb->need_flush = 0;
- 	tlb->local.next = NULL;
- 	tlb->local.nr   = 0;
-@@ -258,8 +259,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
- {
- 	struct mmu_gather_batch *batch, *next;
- 
--	tlb->start = start;
--	tlb->end   = end;
- 	tlb_flush_mmu(tlb);
- 
- 	/* keep the page table cache within bounds */
-@@ -429,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  		free_pte_range(tlb, pmd, addr);
  	} while (pmd++, addr = next, addr != end);
  
@@ -84912,7 +84683,7 @@ index 5e50800..7c0340f 100644
  	start &= PUD_MASK;
  	if (start < floor)
  		return;
-@@ -443,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+@@ -442,6 +443,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
  	pmd = pmd_offset(pud, start);
  	pud_clear(pud);
  	pmd_free_tlb(tlb, pmd, start);
@@ -84921,7 +84692,7 @@ index 5e50800..7c0340f 100644
  }
  
  static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -462,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -461,6 +464,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
  	} while (pud++, addr = next, addr != end);
  
@@ -84929,7 +84700,7 @@ index 5e50800..7c0340f 100644
  	start &= PGDIR_MASK;
  	if (start < floor)
  		return;
-@@ -476,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+@@ -475,6 +479,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  	pud = pud_offset(pgd, start);
  	pgd_clear(pgd);
  	pud_free_tlb(tlb, pud, start);
@@ -84938,65 +84709,7 @@ index 5e50800..7c0340f 100644
  }
  
  /*
-@@ -1101,7 +1106,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
- 	spinlock_t *ptl;
- 	pte_t *start_pte;
- 	pte_t *pte;
--	unsigned long range_start = addr;
- 
- again:
- 	init_rss_vec(rss);
-@@ -1204,17 +1208,25 @@ again:
- 	 * and page-free while holding it.
- 	 */
- 	if (force_flush) {
-+		unsigned long old_end;
-+
- 		force_flush = 0;
- 
--#ifdef HAVE_GENERIC_MMU_GATHER
--		tlb->start = range_start;
-+		/*
-+		 * Flush the TLB just for the previous segment,
-+		 * then update the range to be the remaining
-+		 * TLB range.
-+		 */
-+		old_end = tlb->end;
- 		tlb->end = addr;
--#endif
-+
- 		tlb_flush_mmu(tlb);
--		if (addr != end) {
--			range_start = addr;
-+
-+		tlb->start = addr;
-+		tlb->end = old_end;
-+
-+		if (addr != end)
- 			goto again;
--		}
- 	}
- 
- 	return addr;
-@@ -1399,7 +1411,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
- 	unsigned long end = start + size;
- 
- 	lru_add_drain();
--	tlb_gather_mmu(&tlb, mm, 0);
-+	tlb_gather_mmu(&tlb, mm, start, end);
- 	update_hiwater_rss(mm);
- 	mmu_notifier_invalidate_range_start(mm, start, end);
- 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
-@@ -1425,7 +1437,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
- 	unsigned long end = address + size;
- 
- 	lru_add_drain();
--	tlb_gather_mmu(&tlb, mm, 0);
-+	tlb_gather_mmu(&tlb, mm, address, end);
- 	update_hiwater_rss(mm);
- 	mmu_notifier_invalidate_range_start(mm, address, end);
- 	unmap_single_vma(&tlb, vma, address, end, details);
-@@ -1638,12 +1650,6 @@ no_page_table:
+@@ -1644,12 +1650,6 @@ no_page_table:
  	return page;
  }
  
@@ -85009,7 +84722,7 @@ index 5e50800..7c0340f 100644
  /**
   * __get_user_pages() - pin user pages in memory
   * @tsk:	task_struct of target task
-@@ -1730,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1736,10 +1736,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  
  	i = 0;
  
@@ -85022,7 +84735,7 @@ index 5e50800..7c0340f 100644
  		if (!vma && in_gate_area(mm, start)) {
  			unsigned long pg = start & PAGE_MASK;
  			pgd_t *pgd;
-@@ -1782,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1788,7 +1788,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  			goto next_page;
  		}
  
@@ -85031,7 +84744,7 @@ index 5e50800..7c0340f 100644
  		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
  		    !(vm_flags & vma->vm_flags))
  			return i ? : -EFAULT;
-@@ -1811,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1817,11 +1817,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  				int ret;
  				unsigned int fault_flags = 0;
  
@@ -85043,7 +84756,7 @@ index 5e50800..7c0340f 100644
  				if (foll_flags & FOLL_WRITE)
  					fault_flags |= FAULT_FLAG_WRITE;
  				if (nonblocking)
-@@ -1895,7 +1896,7 @@ next_page:
+@@ -1901,7 +1896,7 @@ next_page:
  			start += page_increm * PAGE_SIZE;
  			nr_pages -= page_increm;
  		} while (nr_pages && start < vma->vm_end);
@@ -85052,7 +84765,7 @@ index 5e50800..7c0340f 100644
  	return i;
  }
  EXPORT_SYMBOL(__get_user_pages);
-@@ -2102,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2108,6 +2103,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
  	page_add_file_rmap(page);
  	set_pte_at(mm, addr, pte, mk_pte(page, prot));
  
@@ -85063,7 +84776,7 @@ index 5e50800..7c0340f 100644
  	retval = 0;
  	pte_unmap_unlock(pte, ptl);
  	return retval;
-@@ -2146,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2152,9 +2151,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
  	if (!page_count(page))
  		return -EINVAL;
  	if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -85085,7 +84798,7 @@ index 5e50800..7c0340f 100644
  	}
  	return insert_page(vma, addr, page, vma->vm_page_prot);
  }
-@@ -2231,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2237,6 +2248,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
  			unsigned long pfn)
  {
  	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -85093,7 +84806,7 @@ index 5e50800..7c0340f 100644
  
  	if (addr < vma->vm_start || addr >= vma->vm_end)
  		return -EFAULT;
-@@ -2478,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2484,7 +2496,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
  
  	BUG_ON(pud_huge(*pud));
  
@@ -85104,7 +84817,7 @@ index 5e50800..7c0340f 100644
  	if (!pmd)
  		return -ENOMEM;
  	do {
-@@ -2498,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2504,7 +2518,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
  	unsigned long next;
  	int err;
  
@@ -85115,7 +84828,7 @@ index 5e50800..7c0340f 100644
  	if (!pud)
  		return -ENOMEM;
  	do {
-@@ -2586,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2592,6 +2608,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
  		copy_user_highpage(dst, src, va, vma);
  }
  
@@ -85302,7 +85015,7 @@ index 5e50800..7c0340f 100644
  /*
   * This routine handles present pages, when users try to write
   * to a shared page. It is done by copying the page to a new address
-@@ -2802,6 +3004,12 @@ gotten:
+@@ -2808,6 +3004,12 @@ gotten:
  	 */
  	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
  	if (likely(pte_same(*page_table, orig_pte))) {
@@ -85315,7 +85028,7 @@ index 5e50800..7c0340f 100644
  		if (old_page) {
  			if (!PageAnon(old_page)) {
  				dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2853,6 +3061,10 @@ gotten:
+@@ -2859,6 +3061,10 @@ gotten:
  			page_remove_rmap(old_page);
  		}
  
@@ -85326,7 +85039,7 @@ index 5e50800..7c0340f 100644
  		/* Free the old page.. */
  		new_page = old_page;
  		ret |= VM_FAULT_WRITE;
-@@ -3128,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3134,6 +3340,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	swap_free(entry);
  	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
  		try_to_free_swap(page);
@@ -85338,7 +85051,7 @@ index 5e50800..7c0340f 100644
  	unlock_page(page);
  	if (page != swapcache) {
  		/*
-@@ -3151,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3157,6 +3368,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(vma, address, page_table);
@@ -85350,7 +85063,7 @@ index 5e50800..7c0340f 100644
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  out:
-@@ -3170,40 +3392,6 @@ out_release:
+@@ -3176,40 +3392,6 @@ out_release:
  }
  
  /*
@@ -85391,7 +85104,7 @@ index 5e50800..7c0340f 100644
   * We enter with non-exclusive mmap_sem (to exclude vma changes,
   * but allow concurrent faults), and pte mapped but not yet locked.
   * We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3212,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3218,27 +3400,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  		unsigned long address, pte_t *page_table, pmd_t *pmd,
  		unsigned int flags)
  {
@@ -85424,7 +85137,7 @@ index 5e50800..7c0340f 100644
  	if (unlikely(anon_vma_prepare(vma)))
  		goto oom;
  	page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3256,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3262,6 +3440,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	if (!pte_none(*page_table))
  		goto release;
  
@@ -85436,7 +85149,7 @@ index 5e50800..7c0340f 100644
  	inc_mm_counter_fast(mm, MM_ANONPAGES);
  	page_add_new_anon_rmap(page, vma, address);
  setpte:
-@@ -3263,6 +3452,12 @@ setpte:
+@@ -3269,6 +3452,12 @@ setpte:
  
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(vma, address, page_table);
@@ -85449,7 +85162,7 @@ index 5e50800..7c0340f 100644
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  	return 0;
-@@ -3406,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3412,6 +3601,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	 */
  	/* Only go through if we didn't race with anybody else... */
  	if (likely(pte_same(*page_table, orig_pte))) {
@@ -85462,7 +85175,7 @@ index 5e50800..7c0340f 100644
  		flush_icache_page(vma, page);
  		entry = mk_pte(page, vma->vm_page_prot);
  		if (flags & FAULT_FLAG_WRITE)
-@@ -3425,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3431,6 +3626,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  
  		/* no need to invalidate: a not-present page won't be cached */
  		update_mmu_cache(vma, address, page_table);
@@ -85477,7 +85190,7 @@ index 5e50800..7c0340f 100644
  	} else {
  		if (cow_page)
  			mem_cgroup_uncharge_page(cow_page);
-@@ -3746,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3752,6 +3955,12 @@ int handle_pte_fault(struct mm_struct *mm,
  		if (flags & FAULT_FLAG_WRITE)
  			flush_tlb_fix_spurious_fault(vma, address);
  	}
@@ -85490,7 +85203,7 @@ index 5e50800..7c0340f 100644
  unlock:
  	pte_unmap_unlock(pte, ptl);
  	return 0;
-@@ -3762,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3768,6 +3977,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	pmd_t *pmd;
  	pte_t *pte;
  
@@ -85501,7 +85214,7 @@ index 5e50800..7c0340f 100644
  	__set_current_state(TASK_RUNNING);
  
  	count_vm_event(PGFAULT);
-@@ -3773,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3779,6 +3992,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	if (unlikely(is_vm_hugetlb_page(vma)))
  		return hugetlb_fault(mm, vma, address, flags);
  
@@ -85536,7 +85249,7 @@ index 5e50800..7c0340f 100644
  retry:
  	pgd = pgd_offset(mm, address);
  	pud = pud_alloc(mm, pgd, address);
-@@ -3871,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3877,6 +4118,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  	spin_unlock(&mm->page_table_lock);
  	return 0;
  }
@@ -85560,7 +85273,7 @@ index 5e50800..7c0340f 100644
  #endif /* __PAGETABLE_PUD_FOLDED */
  
  #ifndef __PAGETABLE_PMD_FOLDED
-@@ -3901,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3907,6 +4165,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  	spin_unlock(&mm->page_table_lock);
  	return 0;
  }
@@ -85591,7 +85304,7 @@ index 5e50800..7c0340f 100644
  #endif /* __PAGETABLE_PMD_FOLDED */
  
  #if !defined(__HAVE_ARCH_GATE_AREA)
-@@ -3914,7 +4202,7 @@ static int __init gate_vma_init(void)
+@@ -3920,7 +4202,7 @@ static int __init gate_vma_init(void)
  	gate_vma.vm_start = FIXADDR_USER_START;
  	gate_vma.vm_end = FIXADDR_USER_END;
  	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -85600,7 +85313,7 @@ index 5e50800..7c0340f 100644
  
  	return 0;
  }
-@@ -4048,8 +4336,8 @@ out:
+@@ -4054,8 +4336,8 @@ out:
  	return ret;
  }
  
@@ -85611,7 +85324,7 @@ index 5e50800..7c0340f 100644
  {
  	resource_size_t phys_addr;
  	unsigned long prot = 0;
-@@ -4074,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+@@ -4080,8 +4362,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
   * Access another process' address space as given in mm.  If non-NULL, use the
   * given task for page fault accounting.
   */
@@ -85622,7 +85335,7 @@ index 5e50800..7c0340f 100644
  {
  	struct vm_area_struct *vma;
  	void *old_buf = buf;
-@@ -4083,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4089,7 +4371,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  	down_read(&mm->mmap_sem);
  	/* ignore errors, just check how much was successfully transferred */
  	while (len) {
@@ -85631,7 +85344,7 @@ index 5e50800..7c0340f 100644
  		void *maddr;
  		struct page *page = NULL;
  
-@@ -4142,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4148,8 +4430,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
   *
   * The caller must hold a reference on @mm.
   */
@@ -85642,7 +85355,7 @@ index 5e50800..7c0340f 100644
  {
  	return __access_remote_vm(NULL, mm, addr, buf, len, write);
  }
-@@ -4153,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -4159,11 +4441,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
   * Source/target buffer must be kernel space,
   * Do not walk the page table directly, use get_user_pages
   */
@@ -85819,7 +85532,7 @@ index 79b7cf7..9944291 100644
  	    capable(CAP_IPC_LOCK))
  		ret = do_mlockall(flags);
 diff --git a/mm/mmap.c b/mm/mmap.c
-index 7dbe397..bfb7626 100644
+index 8d25fdc..bfb7626 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
 @@ -36,6 +36,7 @@
@@ -86688,15 +86401,6 @@ index 7dbe397..bfb7626 100644
  		if (vma->vm_flags & VM_ACCOUNT)
  			nr_accounted += nrpages;
  		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2356,7 +2728,7 @@ static void unmap_region(struct mm_struct *mm,
- 	struct mmu_gather tlb;
- 
- 	lru_add_drain();
--	tlb_gather_mmu(&tlb, mm, 0);
-+	tlb_gather_mmu(&tlb, mm, start, end);
- 	update_hiwater_rss(mm);
- 	unmap_vmas(&tlb, vma, start, end);
- 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 @@ -2379,6 +2751,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  	vma->vm_prev = NULL;
@@ -87002,15 +86706,6 @@ index 7dbe397..bfb7626 100644
  	return addr;
  }
  
-@@ -2735,7 +3232,7 @@ void exit_mmap(struct mm_struct *mm)
- 
- 	lru_add_drain();
- 	flush_cache_mm(mm);
--	tlb_gather_mmu(&tlb, mm, 1);
-+	tlb_gather_mmu(&tlb, mm, 0, -1);
- 	/* update_hiwater_rss(mm) here? but nobody should be looking */
- 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
- 	unmap_vmas(&tlb, vma, 0, -1);
 @@ -2750,6 +3247,7 @@ void exit_mmap(struct mm_struct *mm)
  	while (vma) {
  		if (vma->vm_flags & VM_ACCOUNT)
@@ -93669,7 +93364,7 @@ index 57ee84d..8b99cf5 100644
  			);
  
 diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
-index 1076fe1..8285fd7 100644
+index 1076fe1..f190285 100644
 --- a/net/netlink/genetlink.c
 +++ b/net/netlink/genetlink.c
 @@ -310,18 +310,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
@@ -93710,27 +93405,6 @@ index 1076fe1..8285fd7 100644
  			return 0;
  		}
  	}
-@@ -789,6 +791,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
- 	struct net *net = sock_net(skb->sk);
- 	int chains_to_skip = cb->args[0];
- 	int fams_to_skip = cb->args[1];
-+	bool need_locking = chains_to_skip || fams_to_skip;
-+
-+	if (need_locking)
-+		genl_lock();
- 
- 	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
- 		n = 0;
-@@ -810,6 +816,9 @@ errout:
- 	cb->args[0] = i;
- 	cb->args[1] = n;
- 
-+	if (need_locking)
-+		genl_unlock();
-+
- 	return skb->len;
- }
- 
 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
 index ec0c80f..41e1830 100644
 --- a/net/netrom/af_netrom.c

diff --git a/3.10.7/4425_grsec_remove_EI_PAX.patch b/3.10.9/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 3.10.7/4425_grsec_remove_EI_PAX.patch
rename to 3.10.9/4425_grsec_remove_EI_PAX.patch

diff --git a/3.10.7/4427_force_XATTR_PAX_tmpfs.patch b/3.10.9/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 3.10.7/4427_force_XATTR_PAX_tmpfs.patch
rename to 3.10.9/4427_force_XATTR_PAX_tmpfs.patch

diff --git a/3.10.7/4430_grsec-remove-localversion-grsec.patch b/3.10.9/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 3.10.7/4430_grsec-remove-localversion-grsec.patch
rename to 3.10.9/4430_grsec-remove-localversion-grsec.patch

diff --git a/3.10.7/4435_grsec-mute-warnings.patch b/3.10.9/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 3.10.7/4435_grsec-mute-warnings.patch
rename to 3.10.9/4435_grsec-mute-warnings.patch

diff --git a/3.10.7/4440_grsec-remove-protected-paths.patch b/3.10.9/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 3.10.7/4440_grsec-remove-protected-paths.patch
rename to 3.10.9/4440_grsec-remove-protected-paths.patch

diff --git a/3.10.7/4450_grsec-kconfig-default-gids.patch b/3.10.9/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 3.10.7/4450_grsec-kconfig-default-gids.patch
rename to 3.10.9/4450_grsec-kconfig-default-gids.patch

diff --git a/3.10.7/4465_selinux-avc_audit-log-curr_ip.patch b/3.10.9/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 3.10.7/4465_selinux-avc_audit-log-curr_ip.patch
rename to 3.10.9/4465_selinux-avc_audit-log-curr_ip.patch

diff --git a/3.10.7/4470_disable-compat_vdso.patch b/3.10.9/4470_disable-compat_vdso.patch
similarity index 100%
rename from 3.10.7/4470_disable-compat_vdso.patch
rename to 3.10.9/4470_disable-compat_vdso.patch

diff --git a/3.10.7/4475_emutramp_default_on.patch b/3.10.9/4475_emutramp_default_on.patch
similarity index 100%
rename from 3.10.7/4475_emutramp_default_on.patch
rename to 3.10.9/4475_emutramp_default_on.patch

diff --git a/3.2.50/0000_README b/3.2.50/0000_README
index df20efb..a654e82 100644
--- a/3.2.50/0000_README
+++ b/3.2.50/0000_README
@@ -118,7 +118,7 @@ Patch:	1049_linux-3.2.50.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.2.50
 
-Patch:	4420_grsecurity-2.9.1-3.2.50-201308181813.patch
+Patch:	4420_grsecurity-2.9.1-3.2.50-201308202017.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308181813.patch b/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308202017.patch
similarity index 99%
rename from 3.2.50/4420_grsecurity-2.9.1-3.2.50-201308181813.patch
rename to 3.2.50/4420_grsecurity-2.9.1-3.2.50-201308202017.patch
index d8e4449..01378eb 100644
--- a/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308181813.patch
+++ b/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308202017.patch
@@ -5050,7 +5050,7 @@ index 9844662..04a2a1e 100644
  	do_exit(err);
  
 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
-index 7d14bb6..1305601 100644
+index 7d14bb69..1305601 100644
 --- a/arch/powerpc/kernel/vdso.c
 +++ b/arch/powerpc/kernel/vdso.c
 @@ -35,6 +35,7 @@
@@ -92175,7 +92175,7 @@ index 3d1d55d..1ee2a18 100644
  	.exit = netlink_net_exit,
  };
 diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
-index 874f8ff..d8b8f87 100644
+index 874f8ff..339bb58 100644
 --- a/net/netlink/genetlink.c
 +++ b/net/netlink/genetlink.c
 @@ -288,18 +288,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
@@ -92216,27 +92216,6 @@ index 874f8ff..d8b8f87 100644
  			return 0;
  		}
  	}
-@@ -700,6 +702,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
- 	struct net *net = sock_net(skb->sk);
- 	int chains_to_skip = cb->args[0];
- 	int fams_to_skip = cb->args[1];
-+	bool need_locking = chains_to_skip || fams_to_skip;
-+
-+	if (need_locking)
-+		genl_lock();
- 
- 	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
- 		n = 0;
-@@ -721,6 +727,9 @@ errout:
- 	cb->args[0] = i;
- 	cb->args[1] = n;
- 
-+	if (need_locking)
-+		genl_unlock();
-+
- 	return skb->len;
- }
- 
 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
 index 3df7c5a..8f324b0 100644
 --- a/net/netrom/af_netrom.c


^ permalink raw reply related	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2013-08-22 12:09 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-08-22 11:18 [gentoo-commits] proj/hardened-patchset:master commit in: 3.10.9/, 3.10.7/, 3.2.50/ Anthony G. Basile
  -- strict thread matches above, loose matches on Subject: below --
2013-08-22 12:09 Anthony G. Basile

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox