Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.14.50/, 3.2.70/, 4.1.5/, 3.2.71/
From: Anthony G. Basile @ 2015-08-15  7:29 UTC
To: gentoo-commits

commit:     626e32c440c3ba46da9fd329862733b069cbc553
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sat Aug 15 07:32:19 2015 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sat Aug 15 07:32:19 2015 +0000
URL:        https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=626e32c4

grsecurity-{3.2.71,3.14.50,4.1.5}-201508142233

 3.14.50/0000_README                                |    2 +-
 ...4420_grsecurity-3.1-3.14.50-201508142232.patch} |   37 +-
 {3.2.70 => 3.2.71}/0000_README                     |    6 +-
 {3.2.70 => 3.2.71}/1021_linux-3.2.22.patch         |    0
 {3.2.70 => 3.2.71}/1022_linux-3.2.23.patch         |    0
 {3.2.70 => 3.2.71}/1023_linux-3.2.24.patch         |    0
 {3.2.70 => 3.2.71}/1024_linux-3.2.25.patch         |    0
 {3.2.70 => 3.2.71}/1025_linux-3.2.26.patch         |    0
 {3.2.70 => 3.2.71}/1026_linux-3.2.27.patch         |    0
 {3.2.70 => 3.2.71}/1027_linux-3.2.28.patch         |    0
 {3.2.70 => 3.2.71}/1028_linux-3.2.29.patch         |    0
 {3.2.70 => 3.2.71}/1029_linux-3.2.30.patch         |    0
 {3.2.70 => 3.2.71}/1030_linux-3.2.31.patch         |    0
 {3.2.70 => 3.2.71}/1031_linux-3.2.32.patch         |    0
 {3.2.70 => 3.2.71}/1032_linux-3.2.33.patch         |    0
 {3.2.70 => 3.2.71}/1033_linux-3.2.34.patch         |    0
 {3.2.70 => 3.2.71}/1034_linux-3.2.35.patch         |    0
 {3.2.70 => 3.2.71}/1035_linux-3.2.36.patch         |    0
 {3.2.70 => 3.2.71}/1036_linux-3.2.37.patch         |    0
 {3.2.70 => 3.2.71}/1037_linux-3.2.38.patch         |    0
 {3.2.70 => 3.2.71}/1038_linux-3.2.39.patch         |    0
 {3.2.70 => 3.2.71}/1039_linux-3.2.40.patch         |    0
 {3.2.70 => 3.2.71}/1040_linux-3.2.41.patch         |    0
 {3.2.70 => 3.2.71}/1041_linux-3.2.42.patch         |    0
 {3.2.70 => 3.2.71}/1042_linux-3.2.43.patch         |    0
 {3.2.70 => 3.2.71}/1043_linux-3.2.44.patch         |    0
 {3.2.70 => 3.2.71}/1044_linux-3.2.45.patch         |    0
 {3.2.70 => 3.2.71}/1045_linux-3.2.46.patch         |    0
 {3.2.70 => 3.2.71}/1046_linux-3.2.47.patch         |    0
 {3.2.70 => 3.2.71}/1047_linux-3.2.48.patch         |    0
 {3.2.70 => 3.2.71}/1048_linux-3.2.49.patch         |    0
 {3.2.70 => 3.2.71}/1049_linux-3.2.50.patch         |    0
 {3.2.70 => 3.2.71}/1050_linux-3.2.51.patch         |    0
 {3.2.70 => 3.2.71}/1051_linux-3.2.52.patch         |    0
 {3.2.70 => 3.2.71}/1052_linux-3.2.53.patch         |    0
 {3.2.70 => 3.2.71}/1053_linux-3.2.54.patch         |    0
 {3.2.70 => 3.2.71}/1054_linux-3.2.55.patch         |    0
 {3.2.70 => 3.2.71}/1055_linux-3.2.56.patch         |    0
 {3.2.70 => 3.2.71}/1056_linux-3.2.57.patch         |    0
 {3.2.70 => 3.2.71}/1057_linux-3.2.58.patch         |    0
 {3.2.70 => 3.2.71}/1058_linux-3.2.59.patch         |    0
 {3.2.70 => 3.2.71}/1059_linux-3.2.60.patch         |    0
 {3.2.70 => 3.2.71}/1060_linux-3.2.61.patch         |    0
 {3.2.70 => 3.2.71}/1061_linux-3.2.62.patch         |    0
 {3.2.70 => 3.2.71}/1062_linux-3.2.63.patch         |    0
 {3.2.70 => 3.2.71}/1063_linux-3.2.64.patch         |    0
 {3.2.70 => 3.2.71}/1064_linux-3.2.65.patch         |    0
 {3.2.70 => 3.2.71}/1065_linux-3.2.66.patch         |    0
 {3.2.70 => 3.2.71}/1066_linux-3.2.67.patch         |    0
 {3.2.70 => 3.2.71}/1067_linux-3.2.68.patch         |    0
 {3.2.70 => 3.2.71}/1068_linux-3.2.69.patch         |    0
 {3.2.70 => 3.2.71}/1069_linux-3.2.70.patch         |    0
 3.2.71/1070_linux-3.2.71.patch                     | 3488 ++++++++++++++++++++
 .../4420_grsecurity-3.1-3.2.71-201508142231.patch  |  345 +-
 {3.2.70 => 3.2.71}/4425_grsec_remove_EI_PAX.patch  |    0
 .../4427_force_XATTR_PAX_tmpfs.patch               |    0
 .../4430_grsec-remove-localversion-grsec.patch     |    0
 {3.2.70 => 3.2.71}/4435_grsec-mute-warnings.patch  |    0
 .../4440_grsec-remove-protected-paths.patch        |    0
 .../4450_grsec-kconfig-default-gids.patch          |    0
 .../4465_selinux-avc_audit-log-curr_ip.patch       |    0
 {3.2.70 => 3.2.71}/4470_disable-compat_vdso.patch  |    0
 {3.2.70 => 3.2.71}/4475_emutramp_default_on.patch  |    0
 4.1.5/0000_README                                  |    2 +-
 ...> 4420_grsecurity-3.1-4.1.5-201508142233.patch} |  290 +-
 65 files changed, 3837 insertions(+), 333 deletions(-)

diff --git a/3.14.50/0000_README b/3.14.50/0000_README
index 5416cb6..9ad9afc 100644
--- a/3.14.50/0000_README
+++ b/3.14.50/0000_README
@@ -6,7 +6,7 @@ Patch:	1049_linux-3.14.50.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.14.50
 
-Patch:	4420_grsecurity-3.1-3.14.50-201508102128.patch
+Patch:	4420_grsecurity-3.1-3.14.50-201508142232.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.14.50/4420_grsecurity-3.1-3.14.50-201508102128.patch b/3.14.50/4420_grsecurity-3.1-3.14.50-201508142232.patch
similarity index 99%
rename from 3.14.50/4420_grsecurity-3.1-3.14.50-201508102128.patch
rename to 3.14.50/4420_grsecurity-3.1-3.14.50-201508142232.patch
index 1086c4e..f556dbc 100644
--- a/3.14.50/4420_grsecurity-3.1-3.14.50-201508102128.patch
+++ b/3.14.50/4420_grsecurity-3.1-3.14.50-201508142232.patch
@@ -103524,10 +103524,35 @@ index 6498531..b0ff3c8 100644
  	msg_params.flg = msgflg;
  
 diff --git a/ipc/sem.c b/ipc/sem.c
-index bee5554..ec7d947 100644
+index bee5554..6cd5ac2 100644
 --- a/ipc/sem.c
 +++ b/ipc/sem.c
-@@ -561,10 +561,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
+@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
+ }
+ 
+ /*
++ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
++ * are only control barriers.
++ * The code must pair with spin_unlock(&sem->lock) or
++ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
++ *
++ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
++ */
++#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
++
++/*
+  * Wait until all currently ongoing simple ops have completed.
+  * Caller must own sem_perm.lock.
+  * New simple ops cannot start, because simple ops first check
+@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
+ 		sem = sma->sem_base + i;
+ 		spin_unlock_wait(&sem->lock);
+ 	}
++	ipc_smp_acquire__after_spin_is_unlocked();
+ }
+ 
+ /*
+@@ -561,10 +572,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
  	return 0;
  }
  
@@ -103544,7 +103569,7 @@ index bee5554..ec7d947 100644
  	struct ipc_params sem_params;
  
  	ns = current->nsproxy->ipc_ns;
-@@ -572,10 +577,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
+@@ -572,10 +588,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
  	if (nsems < 0 || nsems > ns->sc_semmsl)
  		return -EINVAL;
  
@@ -103555,7 +103580,7 @@ index bee5554..ec7d947 100644
  	sem_params.key = key;
  	sem_params.flg = semflg;
  	sem_params.u.nsems = nsems;
-@@ -1760,7 +1761,7 @@ static int get_queue_result(struct sem_queue *q)
+@@ -1760,7 +1772,7 @@ static int get_queue_result(struct sem_queue *q)
  }
  
  SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
@@ -103564,7 +103589,7 @@ index bee5554..ec7d947 100644
  {
  	int error = -EINVAL;
  	struct sem_array *sma;
-@@ -1996,7 +1997,7 @@ out_free:
+@@ -1996,7 +2008,7 @@ out_free:
  }
  
  SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
@@ -116867,7 +116892,7 @@ index f9c0980a..fcbbfeb 100644
  	tty_port_close(&dev->port, tty, filp);
  }
 diff --git a/net/bridge/br.c b/net/bridge/br.c
-index 19311aa..339d794 100644
+index 19311aaf..339d794 100644
 --- a/net/bridge/br.c
 +++ b/net/bridge/br.c
 @@ -49,6 +49,8 @@ static int __init br_init(void)

diff --git a/3.2.70/0000_README b/3.2.71/0000_README
similarity index 97%
rename from 3.2.70/0000_README
rename to 3.2.71/0000_README
index 52d8c39..aaaec69 100644
--- a/3.2.70/0000_README
+++ b/3.2.71/0000_README
@@ -198,7 +198,11 @@ Patch:	1069_linux-3.2.70.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.2.70
 
-Patch:	4420_grsecurity-3.1-3.2.70-201508102127.patch
+Patch:	1070_linux-3.2.71.patch
+From:	http://www.kernel.org
+Desc:	Linux 3.2.71
+
+Patch:	4420_grsecurity-3.1-3.2.71-201508142231.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.2.70/1021_linux-3.2.22.patch b/3.2.71/1021_linux-3.2.22.patch
similarity index 100%
rename from 3.2.70/1021_linux-3.2.22.patch
rename to 3.2.71/1021_linux-3.2.22.patch

diff --git a/3.2.70/1022_linux-3.2.23.patch b/3.2.71/1022_linux-3.2.23.patch
similarity index 100%
rename from 3.2.70/1022_linux-3.2.23.patch
rename to 3.2.71/1022_linux-3.2.23.patch

diff --git a/3.2.70/1023_linux-3.2.24.patch b/3.2.71/1023_linux-3.2.24.patch
similarity index 100%
rename from 3.2.70/1023_linux-3.2.24.patch
rename to 3.2.71/1023_linux-3.2.24.patch

diff --git a/3.2.70/1024_linux-3.2.25.patch b/3.2.71/1024_linux-3.2.25.patch
similarity index 100%
rename from 3.2.70/1024_linux-3.2.25.patch
rename to 3.2.71/1024_linux-3.2.25.patch

diff --git a/3.2.70/1025_linux-3.2.26.patch b/3.2.71/1025_linux-3.2.26.patch
similarity index 100%
rename from 3.2.70/1025_linux-3.2.26.patch
rename to 3.2.71/1025_linux-3.2.26.patch

diff --git a/3.2.70/1026_linux-3.2.27.patch b/3.2.71/1026_linux-3.2.27.patch
similarity index 100%
rename from 3.2.70/1026_linux-3.2.27.patch
rename to 3.2.71/1026_linux-3.2.27.patch

diff --git a/3.2.70/1027_linux-3.2.28.patch b/3.2.71/1027_linux-3.2.28.patch
similarity index 100%
rename from 3.2.70/1027_linux-3.2.28.patch
rename to 3.2.71/1027_linux-3.2.28.patch

diff --git a/3.2.70/1028_linux-3.2.29.patch b/3.2.71/1028_linux-3.2.29.patch
similarity index 100%
rename from 3.2.70/1028_linux-3.2.29.patch
rename to 3.2.71/1028_linux-3.2.29.patch

diff --git a/3.2.70/1029_linux-3.2.30.patch b/3.2.71/1029_linux-3.2.30.patch
similarity index 100%
rename from 3.2.70/1029_linux-3.2.30.patch
rename to 3.2.71/1029_linux-3.2.30.patch

diff --git a/3.2.70/1030_linux-3.2.31.patch b/3.2.71/1030_linux-3.2.31.patch
similarity index 100%
rename from 3.2.70/1030_linux-3.2.31.patch
rename to 3.2.71/1030_linux-3.2.31.patch

diff --git a/3.2.70/1031_linux-3.2.32.patch b/3.2.71/1031_linux-3.2.32.patch
similarity index 100%
rename from 3.2.70/1031_linux-3.2.32.patch
rename to 3.2.71/1031_linux-3.2.32.patch

diff --git a/3.2.70/1032_linux-3.2.33.patch b/3.2.71/1032_linux-3.2.33.patch
similarity index 100%
rename from 3.2.70/1032_linux-3.2.33.patch
rename to 3.2.71/1032_linux-3.2.33.patch

diff --git a/3.2.70/1033_linux-3.2.34.patch b/3.2.71/1033_linux-3.2.34.patch
similarity index 100%
rename from 3.2.70/1033_linux-3.2.34.patch
rename to 3.2.71/1033_linux-3.2.34.patch

diff --git a/3.2.70/1034_linux-3.2.35.patch b/3.2.71/1034_linux-3.2.35.patch
similarity index 100%
rename from 3.2.70/1034_linux-3.2.35.patch
rename to 3.2.71/1034_linux-3.2.35.patch

diff --git a/3.2.70/1035_linux-3.2.36.patch b/3.2.71/1035_linux-3.2.36.patch
similarity index 100%
rename from 3.2.70/1035_linux-3.2.36.patch
rename to 3.2.71/1035_linux-3.2.36.patch

diff --git a/3.2.70/1036_linux-3.2.37.patch b/3.2.71/1036_linux-3.2.37.patch
similarity index 100%
rename from 3.2.70/1036_linux-3.2.37.patch
rename to 3.2.71/1036_linux-3.2.37.patch

diff --git a/3.2.70/1037_linux-3.2.38.patch b/3.2.71/1037_linux-3.2.38.patch
similarity index 100%
rename from 3.2.70/1037_linux-3.2.38.patch
rename to 3.2.71/1037_linux-3.2.38.patch

diff --git a/3.2.70/1038_linux-3.2.39.patch b/3.2.71/1038_linux-3.2.39.patch
similarity index 100%
rename from 3.2.70/1038_linux-3.2.39.patch
rename to 3.2.71/1038_linux-3.2.39.patch

diff --git a/3.2.70/1039_linux-3.2.40.patch b/3.2.71/1039_linux-3.2.40.patch
similarity index 100%
rename from 3.2.70/1039_linux-3.2.40.patch
rename to 3.2.71/1039_linux-3.2.40.patch

diff --git a/3.2.70/1040_linux-3.2.41.patch b/3.2.71/1040_linux-3.2.41.patch
similarity index 100%
rename from 3.2.70/1040_linux-3.2.41.patch
rename to 3.2.71/1040_linux-3.2.41.patch

diff --git a/3.2.70/1041_linux-3.2.42.patch b/3.2.71/1041_linux-3.2.42.patch
similarity index 100%
rename from 3.2.70/1041_linux-3.2.42.patch
rename to 3.2.71/1041_linux-3.2.42.patch

diff --git a/3.2.70/1042_linux-3.2.43.patch b/3.2.71/1042_linux-3.2.43.patch
similarity index 100%
rename from 3.2.70/1042_linux-3.2.43.patch
rename to 3.2.71/1042_linux-3.2.43.patch

diff --git a/3.2.70/1043_linux-3.2.44.patch b/3.2.71/1043_linux-3.2.44.patch
similarity index 100%
rename from 3.2.70/1043_linux-3.2.44.patch
rename to 3.2.71/1043_linux-3.2.44.patch

diff --git a/3.2.70/1044_linux-3.2.45.patch b/3.2.71/1044_linux-3.2.45.patch
similarity index 100%
rename from 3.2.70/1044_linux-3.2.45.patch
rename to 3.2.71/1044_linux-3.2.45.patch

diff --git a/3.2.70/1045_linux-3.2.46.patch b/3.2.71/1045_linux-3.2.46.patch
similarity index 100%
rename from 3.2.70/1045_linux-3.2.46.patch
rename to 3.2.71/1045_linux-3.2.46.patch

diff --git a/3.2.70/1046_linux-3.2.47.patch b/3.2.71/1046_linux-3.2.47.patch
similarity index 100%
rename from 3.2.70/1046_linux-3.2.47.patch
rename to 3.2.71/1046_linux-3.2.47.patch

diff --git a/3.2.70/1047_linux-3.2.48.patch b/3.2.71/1047_linux-3.2.48.patch
similarity index 100%
rename from 3.2.70/1047_linux-3.2.48.patch
rename to 3.2.71/1047_linux-3.2.48.patch

diff --git a/3.2.70/1048_linux-3.2.49.patch b/3.2.71/1048_linux-3.2.49.patch
similarity index 100%
rename from 3.2.70/1048_linux-3.2.49.patch
rename to 3.2.71/1048_linux-3.2.49.patch

diff --git a/3.2.70/1049_linux-3.2.50.patch b/3.2.71/1049_linux-3.2.50.patch
similarity index 100%
rename from 3.2.70/1049_linux-3.2.50.patch
rename to 3.2.71/1049_linux-3.2.50.patch

diff --git a/3.2.70/1050_linux-3.2.51.patch b/3.2.71/1050_linux-3.2.51.patch
similarity index 100%
rename from 3.2.70/1050_linux-3.2.51.patch
rename to 3.2.71/1050_linux-3.2.51.patch

diff --git a/3.2.70/1051_linux-3.2.52.patch b/3.2.71/1051_linux-3.2.52.patch
similarity index 100%
rename from 3.2.70/1051_linux-3.2.52.patch
rename to 3.2.71/1051_linux-3.2.52.patch

diff --git a/3.2.70/1052_linux-3.2.53.patch b/3.2.71/1052_linux-3.2.53.patch
similarity index 100%
rename from 3.2.70/1052_linux-3.2.53.patch
rename to 3.2.71/1052_linux-3.2.53.patch

diff --git a/3.2.70/1053_linux-3.2.54.patch b/3.2.71/1053_linux-3.2.54.patch
similarity index 100%
rename from 3.2.70/1053_linux-3.2.54.patch
rename to 3.2.71/1053_linux-3.2.54.patch

diff --git a/3.2.70/1054_linux-3.2.55.patch b/3.2.71/1054_linux-3.2.55.patch
similarity index 100%
rename from 3.2.70/1054_linux-3.2.55.patch
rename to 3.2.71/1054_linux-3.2.55.patch

diff --git a/3.2.70/1055_linux-3.2.56.patch b/3.2.71/1055_linux-3.2.56.patch
similarity index 100%
rename from 3.2.70/1055_linux-3.2.56.patch
rename to 3.2.71/1055_linux-3.2.56.patch

diff --git a/3.2.70/1056_linux-3.2.57.patch b/3.2.71/1056_linux-3.2.57.patch
similarity index 100%
rename from 3.2.70/1056_linux-3.2.57.patch
rename to 3.2.71/1056_linux-3.2.57.patch

diff --git a/3.2.70/1057_linux-3.2.58.patch b/3.2.71/1057_linux-3.2.58.patch
similarity index 100%
rename from 3.2.70/1057_linux-3.2.58.patch
rename to 3.2.71/1057_linux-3.2.58.patch

diff --git a/3.2.70/1058_linux-3.2.59.patch b/3.2.71/1058_linux-3.2.59.patch
similarity index 100%
rename from 3.2.70/1058_linux-3.2.59.patch
rename to 3.2.71/1058_linux-3.2.59.patch

diff --git a/3.2.70/1059_linux-3.2.60.patch b/3.2.71/1059_linux-3.2.60.patch
similarity index 100%
rename from 3.2.70/1059_linux-3.2.60.patch
rename to 3.2.71/1059_linux-3.2.60.patch

diff --git a/3.2.70/1060_linux-3.2.61.patch b/3.2.71/1060_linux-3.2.61.patch
similarity index 100%
rename from 3.2.70/1060_linux-3.2.61.patch
rename to 3.2.71/1060_linux-3.2.61.patch

diff --git a/3.2.70/1061_linux-3.2.62.patch b/3.2.71/1061_linux-3.2.62.patch
similarity index 100%
rename from 3.2.70/1061_linux-3.2.62.patch
rename to 3.2.71/1061_linux-3.2.62.patch

diff --git a/3.2.70/1062_linux-3.2.63.patch b/3.2.71/1062_linux-3.2.63.patch
similarity index 100%
rename from 3.2.70/1062_linux-3.2.63.patch
rename to 3.2.71/1062_linux-3.2.63.patch

diff --git a/3.2.70/1063_linux-3.2.64.patch b/3.2.71/1063_linux-3.2.64.patch
similarity index 100%
rename from 3.2.70/1063_linux-3.2.64.patch
rename to 3.2.71/1063_linux-3.2.64.patch

diff --git a/3.2.70/1064_linux-3.2.65.patch b/3.2.71/1064_linux-3.2.65.patch
similarity index 100%
rename from 3.2.70/1064_linux-3.2.65.patch
rename to 3.2.71/1064_linux-3.2.65.patch

diff --git a/3.2.70/1065_linux-3.2.66.patch b/3.2.71/1065_linux-3.2.66.patch
similarity index 100%
rename from 3.2.70/1065_linux-3.2.66.patch
rename to 3.2.71/1065_linux-3.2.66.patch

diff --git a/3.2.70/1066_linux-3.2.67.patch b/3.2.71/1066_linux-3.2.67.patch
similarity index 100%
rename from 3.2.70/1066_linux-3.2.67.patch
rename to 3.2.71/1066_linux-3.2.67.patch

diff --git a/3.2.70/1067_linux-3.2.68.patch b/3.2.71/1067_linux-3.2.68.patch
similarity index 100%
rename from 3.2.70/1067_linux-3.2.68.patch
rename to 3.2.71/1067_linux-3.2.68.patch

diff --git a/3.2.70/1068_linux-3.2.69.patch b/3.2.71/1068_linux-3.2.69.patch
similarity index 100%
rename from 3.2.70/1068_linux-3.2.69.patch
rename to 3.2.71/1068_linux-3.2.69.patch

diff --git a/3.2.70/1069_linux-3.2.70.patch b/3.2.71/1069_linux-3.2.70.patch
similarity index 100%
rename from 3.2.70/1069_linux-3.2.70.patch
rename to 3.2.71/1069_linux-3.2.70.patch

diff --git a/3.2.71/1070_linux-3.2.71.patch b/3.2.71/1070_linux-3.2.71.patch
new file mode 100644
index 0000000..8702e84
--- /dev/null
+++ b/3.2.71/1070_linux-3.2.71.patch
@@ -0,0 +1,3488 @@
+diff --git a/Makefile b/Makefile
+index 41a626b..9d5fea7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 70
++SUBLEVEL = 71
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+ 
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 53088e2..2ba1226 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -250,7 +250,7 @@ asmlinkage void execve_tail(void)
+ {
+ 	current->thread.fp_regs.fpc = 0;
+ 	if (MACHINE_HAS_IEEE)
+-		asm volatile("sfpc %0,%0" : : "d" (0));
++		asm volatile("sfpc %0" : : "d" (0));
+ }
+ 
+ /*
+diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
+index 95792d8..51ca1c3 100644
+--- a/arch/s390/kernel/sclp.S
++++ b/arch/s390/kernel/sclp.S
+@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
+ 	jno	.Lesa2
+ 	ahi	%r15,-80
+ 	stmh	%r6,%r15,96(%r15)		# store upper register halves
++	basr	%r13,0
++	lmh	%r0,%r15,.Lzeroes-.(%r13)	# clear upper register halves
+ .Lesa2:
+ #endif
+ 	lr	%r10,%r2			# save string pointer
+@@ -293,6 +295,8 @@ ENTRY(_sclp_print_early)
+ #endif
+ 	lm	%r6,%r15,120(%r15)		# restore registers
+ 	br	%r14
++.Lzeroes:
++	.fill	64,4,0
+ 
+ .LwritedataS4:
+ 	.long	0x00760005			# SCLP command for write data
+diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
+index a09b6c3..bf3e71a 100644
+--- a/arch/tile/kernel/setup.c
++++ b/arch/tile/kernel/setup.c
+@@ -973,7 +973,7 @@ static void __init load_hv_initrd(void)
+ 
+ void __init free_initrd_mem(unsigned long begin, unsigned long end)
+ {
+-	free_bootmem(__pa(begin), end - begin);
++	free_bootmem_late(__pa(begin), end - begin);
+ }
+ 
+ #else
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 9171618..93ce7e4 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -472,7 +472,7 @@ struct kvm_arch {
+ 	struct kvm_pic *vpic;
+ 	struct kvm_ioapic *vioapic;
+ 	struct kvm_pit *vpit;
+-	int vapics_in_nmi_mode;
++	atomic_t vapics_in_nmi_mode;
+ 
+ 	unsigned int tss_addr;
+ 	struct page *apic_access_page;
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index cced57f..ab98fe5 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -318,7 +318,7 @@ static void pit_do_work(struct work_struct *work)
+ 		 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
+ 		 * VCPU0, and only if its LVT0 is in EXTINT mode.
+ 		 */
+-		if (kvm->arch.vapics_in_nmi_mode > 0)
++		if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
+ 			kvm_for_each_vcpu(i, vcpu, kvm)
+ 				kvm_apic_nmi_wd_deliver(vcpu);
+ 	}
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 176205a..055cc49 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -757,10 +757,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
+ 		if (!nmi_wd_enabled) {
+ 			apic_debug("Receive NMI setting on APIC_LVT0 "
+ 				   "for cpu %d\n", apic->vcpu->vcpu_id);
+-			apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
++			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+ 		}
+ 	} else if (nmi_wd_enabled)
+-		apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
++		atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+ }
+ 
+ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
+@@ -1253,6 +1253,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
+ 
+ 	apic_update_ppr(apic);
+ 	hrtimer_cancel(&apic->lapic_timer.timer);
++	apic_manage_nmi_watchdog(apic, apic_get_reg(apic, APIC_LVT0));
+ 	update_divide_count(apic);
+ 	start_apic_timer(apic);
+ 	apic->irr_pending = true;
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 5189fe8..81afe1b 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -321,6 +321,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 	pte_t pte;
+ 	unsigned long pfn;
+ 	struct page *page;
++	unsigned char dummy;
+ 
+ 	ptep = lookup_address((unsigned long)v, &level);
+ 	BUG_ON(ptep == NULL);
+@@ -330,6 +331,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 
+ 	pte = pfn_pte(pfn, prot);
+ 
++	/*
++	 * Careful: update_va_mapping() will fail if the virtual address
++	 * we're poking isn't populated in the page tables.  We don't
++	 * need to worry about the direct map (that's always in the page
++	 * tables), but we need to be careful about vmap space.  In
++	 * particular, the top level page table can lazily propagate
++	 * entries between processes, so if we've switched mms since we
++	 * vmapped the target in the first place, we might not have the
++	 * top-level page table entry populated.
++	 *
++	 * We disable preemption because we want the same mm active when
++	 * we probe the target and when we issue the hypercall.  We'll
++	 * have the same nominal mm, but if we're a kernel thread, lazy
++	 * mm dropping could change our pgd.
++	 *
++	 * Out of an abundance of caution, this uses __get_user() to fault
++	 * in the target address just in case there's some obscure case
++	 * in which the target address isn't readable.
++	 */
++
++	preempt_disable();
++
++	pagefault_disable();	/* Avoid warnings due to being atomic. */
++	__get_user(dummy, (unsigned char __user __force *)v);
++	pagefault_enable();
++
+ 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+ 		BUG();
+ 
+@@ -341,6 +368,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 				BUG();
+ 	} else
+ 		kmap_flush_unused();
++
++	preempt_enable();
+ }
+ 
+ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+@@ -348,6 +377,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+ 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+ 	int i;
+ 
++	/*
++	 * We need to mark the all aliases of the LDT pages RO.  We
++	 * don't need to call vm_flush_aliases(), though, since that's
++	 * only responsible for flushing aliases out the TLBs, not the
++	 * page tables, and Xen will flush the TLB for us if needed.
++	 *
++	 * To avoid confusing future readers: none of this is necessary
++	 * to load the LDT.  The hypervisor only checks this when the
++	 * LDT is faulted in due to subsequent descriptor access.
++	 */
++
+ 	for(i = 0; i < entries; i += entries_per_page)
+ 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
+ }
+diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
+index 420ebfe..ecc428e 100644
+--- a/drivers/acpi/acpica/utxface.c
++++ b/drivers/acpi/acpica/utxface.c
+@@ -163,10 +163,12 @@ acpi_status acpi_enable_subsystem(u32 flags)
+ 	 * Obtain a permanent mapping for the FACS. This is required for the
+ 	 * Global Lock and the Firmware Waking Vector
+ 	 */
+-	status = acpi_tb_initialize_facs();
+-	if (ACPI_FAILURE(status)) {
+-		ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+-		return_ACPI_STATUS(status);
++	if (!(flags & ACPI_NO_FACS_INIT)) {
++		status = acpi_tb_initialize_facs();
++		if (ACPI_FAILURE(status)) {
++			ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
++			return_ACPI_STATUS(status);
++		}
+ 	}
+ 
+ 	/*
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index fcd8586..4e9beff 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4107,9 +4107,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
+ 						ATA_HORKAGE_FIRMWARE_WARN },
+ 
+-	/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
++	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
+ 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+ 	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
++	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
+ 
+ 	/* Blacklist entries taken from Silicon Image 3124/3132
+ 	   Windows driver .inf file - also several Linux problem reports */
+@@ -4154,6 +4155,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
+ 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
+ 
++	/* devices that don't properly handle TRIM commands */
++	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
++
+ 	/*
+ 	 * Some WD SATA-I drives spin up and down erratically when the link
+ 	 * is put into the slumber mode.  We don't have full list of the
+@@ -4458,7 +4462,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
+ 	else /* In the ancient relic department - skip all of this */
+ 		return 0;
+ 
+-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
++	/* On some disks, this command causes spin-up, so we need longer timeout */
++	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
+ 
+ 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
+ 	return err_mask;
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index 0ba32fe..93ea335 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ 				       ATA_LFLAG_NO_SRST |
+ 				       ATA_LFLAG_ASSUME_ATA;
+ 		}
++	} else if (vendor == 0x11ab && devid == 0x4140) {
++		/* Marvell 4140 quirks */
++		ata_for_each_link(link, ap, EDGE) {
++			/* port 4 is for SEMB device and it doesn't like SRST */
++			if (link->pmp == 4)
++				link->flags |= ATA_LFLAG_DISABLED;
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 8460e62..0ac7a5e 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2473,7 +2473,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ 		rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+ 		rbuf[15] = lowest_aligned;
+ 
+-		if (ata_id_has_trim(args->id)) {
++		if (ata_id_has_trim(args->id) &&
++		    !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
+ 			rbuf[14] |= 0x80; /* TPE */
+ 
+ 			if (ata_id_has_zero_after_trim(args->id))
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 9f32f43..e9e8f3b 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -78,6 +78,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe057) },
+ 	{ USB_DEVICE(0x0489, 0xe056) },
+ 	{ USB_DEVICE(0x0489, 0xe05f) },
++	{ USB_DEVICE(0x0489, 0xe076) },
+ 	{ USB_DEVICE(0x0489, 0xe078) },
+ 	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x04CA, 0x3004) },
+@@ -86,6 +87,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x3007) },
+ 	{ USB_DEVICE(0x04CA, 0x3008) },
+ 	{ USB_DEVICE(0x04CA, 0x300b) },
++	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
+@@ -109,6 +111,7 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408) },
+ 	{ USB_DEVICE(0x13d3, 0x3423) },
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
++	{ USB_DEVICE(0x13d3, 0x3474) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE02C) },
+@@ -133,6 +136,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+@@ -141,6 +145,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -164,6 +169,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 92973a3..c5e400b 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -163,6 +163,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+@@ -171,6 +172,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -194,6 +196,7 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index 43c4ec3..59d4697 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -1195,7 +1195,7 @@ static inline int needs_idle_maps(void)
+ 	/* Query intel_iommu to see if we need the workaround. Presumably that
+ 	 * was loaded first.
+ 	 */
+-	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
++	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+ 	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ 	     intel_iommu_gfx_mapped)
+ 		return 1;
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index dbe76b5..90c76fc 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -2384,6 +2384,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ 		break;
+ 	default:
+ 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
++		kfree(t_alg);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
+index 9b01145..a258101 100644
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -386,7 +386,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+ 	dma_cookie_t cookie = 0;
+ 	int busy = mv_chan_is_busy(mv_chan);
+ 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
+-	int seen_current = 0;
++	int current_cleaned = 0;
++	struct mv_xor_desc *hw_desc;
+ 
+ 	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+ 	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
+@@ -398,38 +399,57 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+ 
+ 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
+ 					chain_node) {
+-		prefetch(_iter);
+-		prefetch(&_iter->async_tx);
+ 
+-		/* do not advance past the current descriptor loaded into the
+-		 * hardware channel, subsequent descriptors are either in
+-		 * process or have not been submitted
+-		 */
+-		if (seen_current)
+-			break;
++		/* clean finished descriptors */
++		hw_desc = iter->hw_desc;
++		if (hw_desc->status & XOR_DESC_SUCCESS) {
++			cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
++								cookie);
+ 
+-		/* stop the search if we reach the current descriptor and the
+-		 * channel is busy
+-		 */
+-		if (iter->async_tx.phys == current_desc) {
+-			seen_current = 1;
+-			if (busy)
++			/* done processing desc, clean slot */
++			mv_xor_clean_slot(iter, mv_chan);
++
++			/* break if we did cleaned the current */
++			if (iter->async_tx.phys == current_desc) {
++				current_cleaned = 1;
++				break;
++			}
++		} else {
++			if (iter->async_tx.phys == current_desc) {
++				current_cleaned = 0;
+ 				break;
++			}
+ 		}
+-
+-		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
+-
+-		if (mv_xor_clean_slot(iter, mv_chan))
+-			break;
+ 	}
+ 
+ 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
+-		struct mv_xor_desc_slot *chain_head;
+-		chain_head = list_entry(mv_chan->chain.next,
+-					struct mv_xor_desc_slot,
+-					chain_node);
+-
+-		mv_xor_start_new_chain(mv_chan, chain_head);
++		if (current_cleaned) {
++			/*
++			 * current descriptor cleaned and removed, run
++			 * from list head
++			 */
++			iter = list_entry(mv_chan->chain.next,
++					  struct mv_xor_desc_slot,
++					  chain_node);
++			mv_xor_start_new_chain(mv_chan, iter);
++		} else {
++			if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
++				/*
++				 * descriptors are still waiting after
++				 * current, trigger them
++				 */
++				iter = list_entry(iter->chain_node.next,
++						  struct mv_xor_desc_slot,
++						  chain_node);
++				mv_xor_start_new_chain(mv_chan, iter);
++			} else {
++				/*
++				 * some descriptors are still waiting
++				 * to be cleaned
++				 */
++				tasklet_schedule(&mv_chan->irq_tasklet);
++			}
++		}
+ 	}
+ 
+ 	if (cookie > 0)
+diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
+index 977b592..ae2cfba 100644
+--- a/drivers/dma/mv_xor.h
++++ b/drivers/dma/mv_xor.h
+@@ -30,6 +30,7 @@
+ #define XOR_OPERATION_MODE_XOR		0
+ #define XOR_OPERATION_MODE_MEMCPY	2
+ #define XOR_OPERATION_MODE_MEMSET	4
++#define XOR_DESC_SUCCESS		0x40000000
+ 
+ #define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
+ #define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 09851ce..20110b4 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -1505,6 +1505,13 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return -EINVAL;
+ 
++	/*
++	 * Universal plane src offsets are only 16.16, prevent havoc for
++	 * drivers using universal plane code internally.
++	 */
++	if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
++		return -ERANGE;
++
+ 	mutex_lock(&dev->mode_config.mutex);
+ 	obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+ 				   DRM_MODE_OBJECT_CRTC);
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 8115557..21e689d 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -1259,10 +1259,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
+ 
+ 			if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
+ 			    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
++				u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
++
++				if (hss > lvds->native_mode.hdisplay)
++					hss = (10 - 1) * 8;
++
+ 				lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ 					(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+ 				lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+-					(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
++					hss;
+ 				lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ 					(RBIOS8(tmp + 23) * 8);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
+index ba7ab79..d2572108 100644
+--- a/drivers/gpu/drm/radeon/radeon_gart.c
++++ b/drivers/gpu/drm/radeon/radeon_gart.c
+@@ -171,8 +171,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+ 			}
+ 		}
+ 	}
+-	mb();
+-	radeon_gart_tlb_flush(rdev);
++	if (rdev->gart.ptr) {
++		mb();
++		radeon_gart_tlb_flush(rdev);
++	}
+ }
+ 
+ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+@@ -217,8 +219,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ 			}
+ 		}
+ 	}
+-	mb();
+-	radeon_gart_tlb_flush(rdev);
++	if (rdev->gart.ptr) {
++		mb();
++		radeon_gart_tlb_flush(rdev);
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index 4f9496e..42f5a2b 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -51,10 +51,12 @@ static void radeon_hotplug_work_func(struct work_struct *work)
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+ 	struct drm_connector *connector;
+ 
++	mutex_lock(&mode_config->mutex);
+ 	if (mode_config->num_connector) {
+ 		list_for_each_entry(connector, &mode_config->connector_list, head)
+ 			radeon_connector_hotplug(connector);
+ 	}
++	mutex_unlock(&mode_config->mutex);
+ 	/* Just fire off a uevent and let userspace tell us what to do */
+ 	drm_helper_hpd_irq_event(dev);
+ }
+diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
+index 73fd664..e26a7c3 100644
+--- a/drivers/input/touchscreen/usbtouchscreen.c
++++ b/drivers/input/touchscreen/usbtouchscreen.c
+@@ -528,6 +528,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
+ 		goto err_out;
+ 	}
+ 
++	/* TSC-25 data sheet specifies a delay after the RESET command */
++	msleep(150);
++
+ 	/* set coordinate output rate */
+ 	buf[0] = buf[1] = 0xFF;
+ 	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 80f8bd5..d9f23a4 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -13,6 +13,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/vmalloc.h>
+ 
+ #define	DM_MSG_PREFIX	"thin"
+ 
+@@ -158,9 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
+ {
+ 	unsigned i;
+ 	uint32_t nr_buckets = calc_nr_buckets(nr_cells);
+-	size_t len = sizeof(struct bio_prison) +
+-		(sizeof(struct hlist_head) * nr_buckets);
+-	struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
++	struct bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
+ 
+ 	if (!prison)
+ 		return NULL;
+@@ -173,9 +172,15 @@ static struct bio_prison *prison_create(unsigned nr_cells)
+ 		return NULL;
+ 	}
+ 
++	prison->cells = vmalloc(sizeof(*prison->cells) * nr_buckets);
++	if (!prison->cells) {
++		mempool_destroy(prison->cell_pool);
++		kfree(prison);
++		return NULL;
++	}
++
+ 	prison->nr_buckets = nr_buckets;
+ 	prison->hash_mask = nr_buckets - 1;
+-	prison->cells = (struct hlist_head *) (prison + 1);
+ 	for (i = 0; i < nr_buckets; i++)
+ 		INIT_HLIST_HEAD(prison->cells + i);
+ 
+@@ -184,6 +189,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
+ 
+ static void prison_destroy(struct bio_prison *prison)
+ {
++	vfree(prison->cells);
+ 	mempool_destroy(prison->cell_pool);
+ 	kfree(prison);
+ }
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index 1de0f5f..6e79c11 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ 
+ 		if (s < 0 && nr_center < -s) {
+ 			/* not enough in central node */
+-			shift(left, center, nr_center);
+-			s = nr_center - target;
++			shift(left, center, -nr_center);
++			s += nr_center;
+ 			shift(left, right, s);
+ 			nr_right += s;
+ 		} else
+@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ 		if (s > 0 && nr_center < s) {
+ 			/* not enough in central node */
+ 			shift(center, right, nr_center);
+-			s = target - nr_center;
++			s -= nr_center;
+ 			shift(left, right, s);
+ 			nr_left -= s;
+ 		} else
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index bbb2ec5..18f37e0 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -236,7 +236,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
+ 	int r;
+ 	struct del_stack *s;
+ 
+-	s = kmalloc(sizeof(*s), GFP_KERNEL);
++	s = kmalloc(sizeof(*s), GFP_NOIO);
+ 	if (!s)
+ 		return -ENOMEM;
+ 	s->tm = info->tm;
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index aec029a..6e7b002 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -313,7 +313,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
+ 		spin_lock_irqsave(&conf->device_lock, flags);
+ 		if (r1_bio->mddev->degraded == conf->raid_disks ||
+ 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
+-		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
++		     test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
+ 			uptodate = 1;
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 	}
+diff --git a/drivers/media/dvb/frontends/cx24116.c b/drivers/media/dvb/frontends/cx24116.c
+index ccd0525..4ff6d15 100644
+--- a/drivers/media/dvb/frontends/cx24116.c
++++ b/drivers/media/dvb/frontends/cx24116.c
+@@ -963,6 +963,10 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
+ 	struct cx24116_state *state = fe->demodulator_priv;
+ 	int i, ret;
+ 
++	/* Validate length */
++	if (d->msg_len > sizeof(d->msg))
++                return -EINVAL;
++
+ 	/* Dump DiSEqC message */
+ 	if (debug) {
+ 		printk(KERN_INFO "cx24116: %s(", __func__);
+@@ -974,10 +978,6 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
+ 		printk(") toneburst=%d\n", toneburst);
+ 	}
+ 
+-	/* Validate length */
+-	if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
+-		return -EINVAL;
+-
+ 	/* DiSEqC message */
+ 	for (i = 0; i < d->msg_len; i++)
+ 		state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
+diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
+index 3879d2e..507ccae 100644
+--- a/drivers/media/dvb/frontends/s5h1420.c
++++ b/drivers/media/dvb/frontends/s5h1420.c
+@@ -180,7 +180,7 @@ static int s5h1420_send_master_cmd (struct dvb_frontend* fe,
+ 	int result = 0;
+ 
+ 	dprintk("enter %s\n", __func__);
+-	if (cmd->msg_len > 8)
++	if (cmd->msg_len > sizeof(cmd->msg))
+ 		return -EINVAL;
+ 
+ 	/* setup for DISEQC */
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 4802f7f..f53d5c8 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1285,9 +1285,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+ 			break;
+ 		case MMC_BLK_CMD_ERR:
+ 			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+-			if (!mmc_blk_reset(md, card->host, type))
+-				break;
+-			goto cmd_abort;
++			if (mmc_blk_reset(md, card->host, type))
++				goto cmd_abort;
++			if (!ret)
++				goto start_new_req;
++			break;
+ 		case MMC_BLK_RETRY:
+ 			if (retry++ < 5)
+ 				break;
+diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
+index f43b365..9af1528 100644
+--- a/drivers/mtd/maps/dc21285.c
++++ b/drivers/mtd/maps/dc21285.c
+@@ -38,9 +38,9 @@ static void nw_en_write(void)
+ 	 * we want to write a bit pattern XXX1 to Xilinx to enable
+ 	 * the write gate, which will be open for about the next 2ms.
+ 	 */
+-	spin_lock_irqsave(&nw_gpio_lock, flags);
++	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ 	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+-	spin_unlock_irqrestore(&nw_gpio_lock, flags);
++	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ 
+ 	/*
+ 	 * let the ISA bus to catch on...
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index 424ca5f..df58db3 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -214,6 +214,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
+ 
+ 	mutex_lock(&dev->lock);
++	mutex_lock(&mtd_table_mutex);
+ 
+ 	if (dev->open)
+ 		goto unlock;
+@@ -236,6 +237,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 
+ unlock:
+ 	dev->open++;
++	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ 	return ret;
+@@ -246,6 +248,7 @@ error_release:
+ error_put:
+ 	module_put(dev->tr->owner);
+ 	kref_put(&dev->ref, blktrans_dev_release);
++	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ 	return ret;
+@@ -260,6 +263,7 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode)
+ 		return ret;
+ 
+ 	mutex_lock(&dev->lock);
++	mutex_lock(&mtd_table_mutex);
+ 
+ 	if (--dev->open)
+ 		goto unlock;
+@@ -272,6 +276,7 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode)
+ 		__put_mtd_device(dev->mtd);
+ 	}
+ unlock:
++	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ 	return ret;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
+index 9820ec8..e93a0bf 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
+@@ -153,6 +153,8 @@ struct dma_desc {
+ 			u32 buffer2_size:13;
+ 			u32 reserved4:3;
+ 		} etx;		/* -- enhanced -- */
++
++		u64 all_flags;
+ 	} des01;
+ 	unsigned int des2;
+ 	unsigned int des3;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+index d879763..73e647b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+@@ -232,6 +232,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ {
+ 	int i;
+ 	for (i = 0; i < ring_size; i++) {
++		p->des01.all_flags = 0;
+ 		p->des01.erx.own = 1;
+ 		p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ 
+@@ -248,7 +249,7 @@ static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+ 	int i;
+ 
+ 	for (i = 0; i < ring_size; i++) {
+-		p->des01.etx.own = 0;
++		p->des01.all_flags = 0;
+ 		ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1));
+ 		p++;
+ 	}
+diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+index fda5d2b..5bf9c37 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+@@ -126,6 +126,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ {
+ 	int i;
+ 	for (i = 0; i < ring_size; i++) {
++		p->des01.all_flags = 0;
+ 		p->des01.rx.own = 1;
+ 		p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ 
+@@ -141,7 +142,7 @@ static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+ {
+ 	int i;
+ 	for (i = 0; i < ring_size; i++) {
+-		p->des01.tx.own = 0;
++		p->des01.all_flags = 0;
+ 		ndesc_tx_set_on_ring_chain(p, (i == (ring_size - 1)));
+ 		p++;
+ 	}
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index d4d2bc1..05852e3 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -441,19 +441,17 @@ static void init_dma_desc_rings(struct net_device *dev)
+ 	priv->rx_skbuff =
+ 	    kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
+ 	priv->dma_rx =
+-	    (struct dma_desc *)dma_alloc_coherent(priv->device,
+-						  rxsize *
+-						  sizeof(struct dma_desc),
+-						  &priv->dma_rx_phy,
+-						  GFP_KERNEL);
++	    (struct dma_desc *)dma_zalloc_coherent(priv->device, rxsize *
++						   sizeof(struct dma_desc),
++						   &priv->dma_rx_phy,
++						   GFP_KERNEL);
+ 	priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
+ 				       GFP_KERNEL);
+ 	priv->dma_tx =
+-	    (struct dma_desc *)dma_alloc_coherent(priv->device,
+-						  txsize *
+-						  sizeof(struct dma_desc),
+-						  &priv->dma_tx_phy,
+-						  GFP_KERNEL);
++	    (struct dma_desc *)dma_zalloc_coherent(priv->device, txsize *
++						   sizeof(struct dma_desc),
++						   &priv->dma_tx_phy,
++						   GFP_KERNEL);
+ 
+ 	if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
+ 		pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index 73c7081..c646a49 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -6670,10 +6670,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
+ 		struct sk_buff *skb_new;
+ 
+ 		skb_new = skb_realloc_headroom(skb, len);
+-		if (!skb_new) {
+-			rp->tx_errors++;
++		if (!skb_new)
+ 			goto out_drop;
+-		}
+ 		kfree_skb(skb);
+ 		skb = skb_new;
+ 	} else
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index ad33126..8d64c16 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -235,7 +235,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
+ {
+ 	struct ath_hw *ah = sc->sc_ah;
+ 	struct ath_common *common = ath9k_hw_common(ah);
+-	bool ret;
++	bool ret = true;
+ 
+ 	ieee80211_stop_queues(sc->hw);
+ 
+@@ -245,10 +245,13 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
+ 	ath9k_debug_samp_bb_mac(sc);
+ 	ath9k_hw_disable_interrupts(ah);
+ 
+-	ret = ath_drain_all_txq(sc, retry_tx);
+-
+-	if (!ath_stoprecv(sc))
+-		ret = false;
++	if (AR_SREV_9300_20_OR_LATER(ah)) {
++		ret &= ath_stoprecv(sc);
++		ret &= ath_drain_all_txq(sc, retry_tx);
++	} else {
++		ret &= ath_drain_all_txq(sc, retry_tx);
++		ret &= ath_stoprecv(sc);
++	}
+ 
+ 	if (!flush) {
+ 		if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h
+index 615a45a..582688fe 100644
+--- a/drivers/pcmcia/topic.h
++++ b/drivers/pcmcia/topic.h
+@@ -104,6 +104,9 @@
+ #define TOPIC_EXCA_IF_CONTROL		0x3e	/* 8 bit */
+ #define TOPIC_EXCA_IFC_33V_ENA		0x01
+ 
++#define TOPIC_PCI_CFG_PPBCN		0x3e	/* 16-bit */
++#define TOPIC_PCI_CFG_PPBCN_WBEN	0x0400
++
+ static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff)
+ {
+ 	struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
+@@ -138,6 +141,7 @@ static int topic97_override(struct yenta_socket *socket)
+ static int topic95_override(struct yenta_socket *socket)
+ {
+ 	u8 fctrl;
++	u16 ppbcn;
+ 
+ 	/* enable 3.3V support for 16bit cards */
+ 	fctrl = exca_readb(socket, TOPIC_EXCA_IF_CONTROL);
+@@ -146,6 +150,18 @@ static int topic95_override(struct yenta_socket *socket)
+ 	/* tell yenta to use exca registers to power 16bit cards */
+ 	socket->flags |= YENTA_16BIT_POWER_EXCA | YENTA_16BIT_POWER_DF;
+ 
++	/* Disable write buffers to prevent lockups under load with numerous
++	   Cardbus cards, observed on Tecra 500CDT and reported elsewhere on the
++	   net.  This is not a power-on default according to the datasheet
++	   but some BIOSes seem to set it. */
++	if (pci_read_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, &ppbcn) == 0
++	    && socket->dev->revision <= 7
++	    && (ppbcn & TOPIC_PCI_CFG_PPBCN_WBEN)) {
++		ppbcn &= ~TOPIC_PCI_CFG_PPBCN_WBEN;
++		pci_write_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, ppbcn);
++		dev_info(&socket->dev->dev, "Disabled ToPIC95 Cardbus write buffers.\n");
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
+index 1d3bcce..e57f5de 100644
+--- a/drivers/platform/x86/dell-laptop.c
++++ b/drivers/platform/x86/dell-laptop.c
+@@ -215,7 +215,6 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
+ };
+ 
+ static struct calling_interface_buffer *buffer;
+-static struct page *bufferpage;
+ static DEFINE_MUTEX(buffer_mutex);
+ 
+ static int hwswitch_state;
+@@ -715,11 +714,10 @@ static int __init dell_init(void)
+ 	 * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
+ 	 * is passed to SMI handler.
+ 	 */
+-	bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32);
++	buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+ 
+-	if (!bufferpage)
++	if (!buffer)
+ 		goto fail_buffer;
+-	buffer = page_address(bufferpage);
+ 
+ 	ret = dell_setup_rfkill();
+ 
+@@ -788,7 +786,7 @@ fail_backlight:
+ fail_filter:
+ 	dell_cleanup_rfkill();
+ fail_rfkill:
+-	free_page((unsigned long)bufferpage);
++	free_page((unsigned long)buffer);
+ fail_buffer:
+ 	platform_device_del(platform_device);
+ fail_platform_device2:
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index a36addf..04a6928 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -407,7 +407,8 @@ const struct ideapad_rfk_data ideapad_rfk_data[] = {
+ 
+ static int ideapad_rfk_set(void *data, bool blocked)
+ {
+-	unsigned long opcode = (unsigned long)data;
++	unsigned long dev = (unsigned long)data;
++	int opcode = ideapad_rfk_data[dev].opcode;
+ 
+ 	return write_ec_cmd(ideapad_handle, opcode, !blocked);
+ }
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index adba3d6..2dd9838 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -726,7 +726,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
+ static void print_constraints(struct regulator_dev *rdev)
+ {
+ 	struct regulation_constraints *constraints = rdev->constraints;
+-	char buf[80] = "";
++	char buf[160] = "";
+ 	int count = 0;
+ 	int ret;
+ 
+diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
+index ac84736..a9932bd 100644
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -251,7 +251,7 @@
+ #define IPR_RUNTIME_RESET				0x40000000
+ 
+ #define IPR_IPL_INIT_MIN_STAGE_TIME			5
+-#define IPR_IPL_INIT_DEFAULT_STAGE_TIME                 15
++#define IPR_IPL_INIT_DEFAULT_STAGE_TIME                 30
+ #define IPR_IPL_INIT_STAGE_UNKNOWN			0x0
+ #define IPR_IPL_INIT_STAGE_TRANSOP			0xB0000000
+ #define IPR_IPL_INIT_STAGE_MASK				0xff000000
+diff --git a/drivers/staging/iio/dac/ad5624r_spi.c b/drivers/staging/iio/dac/ad5624r_spi.c
+index 284d8790..8e81fce 100644
+--- a/drivers/staging/iio/dac/ad5624r_spi.c
++++ b/drivers/staging/iio/dac/ad5624r_spi.c
+@@ -49,7 +49,7 @@ static const struct ad5624r_chip_info ad5624r_chip_info_tbl[] = {
+ };
+ 
+ static int ad5624r_spi_write(struct spi_device *spi,
+-			     u8 cmd, u8 addr, u16 val, u8 len)
++			     u8 cmd, u8 addr, u16 val, u8 shift)
+ {
+ 	u32 data;
+ 	u8 msg[3];
+@@ -62,7 +62,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
+ 	 * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
+ 	 * for the AD5664R, AD5644R, and AD5624R, respectively.
+ 	 */
+-	data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
++	data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
+ 	msg[0] = data >> 16;
+ 	msg[1] = data >> 8;
+ 	msg[2] = data;
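The comment in this hunk describes the wire format: a command field, an address field, and a 16-, 14- or 12-bit code left-justified inside a 16-bit slot so the unused low bits become don't-cares, which is why the fix passes the shift (0, 2 or 4) directly instead of recomputing it from a length. Here is a small stand-alone C sketch of that packing, with the field positions copied from the expression in the hunk; treat it as an illustration, not datasheet-accurate driver code.

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a DAC write the way the hunk does: command bits, address bits,
     * then the sample left-justified in a 16-bit field.  For a 16/14/12-bit
     * part the caller passes shift = 0/2/4 so the unused low bits end up as
     * don't-cares. */
    static void pack_dac_word(uint8_t cmd, uint8_t addr, uint16_t val,
                              uint8_t shift, uint8_t msg[3])
    {
        uint32_t data = ((uint32_t)cmd << 19) |
                        ((uint32_t)addr << 16) |
                        ((uint32_t)val << shift);

        msg[0] = data >> 16;   /* command + address + value MSBs */
        msg[1] = data >> 8;
        msg[2] = data;         /* value LSBs (or don't-cares) */
    }

    int main(void)
    {
        uint8_t msg[3];

        /* 12-bit part: a full-scale code of 0xfff is shifted left by 4. */
        pack_dac_word(0x3, 0x0, 0xfff, 4, msg);
        printf("%02x %02x %02x\n", msg[0], msg[1], msg[2]);
        return 0;
    }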
+diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
+index 6d69265..a33bece 100644
+--- a/drivers/staging/rtl8712/rtl8712_recv.c
++++ b/drivers/staging/rtl8712/rtl8712_recv.c
+@@ -1076,7 +1076,8 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
+ 		/* for the first fragment packet, the driver needs to allocate 1536 +
+ 		 * drvinfo_sz + RXDESC_SIZE to defrag the packet. */
+ 		if ((mf == 1) && (frag == 0))
+-			alloc_sz = 1658;/*1658+6=1664, 1664 is 128 alignment.*/
++			/*1658+6=1664, 1664 is 128 alignment.*/
++			alloc_sz = max_t(u16, tmp_len, 1658);
+ 		else
+ 			alloc_sz = tmp_len;
+ 		/* 2 is for IP header 4 bytes alignment in QoS packet case.
+diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
+index d540a06..077c506 100644
+--- a/drivers/staging/vt6655/device_main.c
++++ b/drivers/staging/vt6655/device_main.c
+@@ -1602,6 +1602,10 @@ static int device_rx_srv(PSDevice pDevice, unsigned int uIdx) {
+ //        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->pCurrRD = %x, works = %d\n", pRD, works);
+         if (works++>15)
+             break;
++
++        if (!pRD->pRDInfo->skb)
++            break;
++
+         if (device_receive_frame(pDevice, pRD)) {
+             if (!device_alloc_rx_buf(pDevice,pRD)) {
+                     DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index ae4e7da..59fb984 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4509,6 +4509,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ 	struct iscsi_session *sess;
+ 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ 	struct se_session *se_sess, *se_sess_tmp;
++	LIST_HEAD(free_list);
+ 	int session_count = 0;
+ 
+ 	spin_lock_bh(&se_tpg->session_lock);
+@@ -4530,14 +4531,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ 		}
+ 		atomic_set(&sess->session_reinstatement, 1);
+ 		spin_unlock(&sess->conn_lock);
+-		spin_unlock_bh(&se_tpg->session_lock);
+ 
+-		iscsit_free_session(sess);
+-		spin_lock_bh(&se_tpg->session_lock);
++		list_move_tail(&se_sess->sess_list, &free_list);
++	}
++	spin_unlock_bh(&se_tpg->session_lock);
+ 
++	list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
++		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
++
++		iscsit_free_session(sess);
+ 		session_count++;
+ 	}
+-	spin_unlock_bh(&se_tpg->session_lock);
+ 
+ 	pr_debug("Released %d iSCSI Session(s) from Target Portal"
+ 			" Group: %hu\n", session_count, tpg->tpgt);
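The iscsi_target.c change above replaces a drop-the-lock/retake-the-lock loop with the usual two-phase pattern: move every session onto a private list while the portal-group lock is held, then free them after the lock is released. A minimal stand-alone C sketch of that detach-under-lock pattern, using a plain singly linked list and a pthread mutex (all names invented for the demo):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct session {
        int id;
        struct session *next;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct session *active;          /* protected by list_lock */

    static void add_session(int id)
    {
        struct session *s = malloc(sizeof(*s));

        if (!s)
            return;
        s->id = id;
        pthread_mutex_lock(&list_lock);
        s->next = active;
        active = s;
        pthread_mutex_unlock(&list_lock);
    }

    /* Same shape as the hunk: detach everything while holding the lock,
     * then do the (possibly slow or sleeping) teardown with the lock dropped. */
    static int release_all(void)
    {
        struct session *free_list, *s, *tmp;
        int count = 0;

        pthread_mutex_lock(&list_lock);
        free_list = active;
        active = NULL;
        pthread_mutex_unlock(&list_lock);

        for (s = free_list; s; s = tmp) {
            tmp = s->next;
            printf("freeing session %d\n", s->id);
            free(s);
            count++;
        }
        return count;
    }

    int main(void)
    {
        add_session(1);
        add_session(2);
        add_session(3);
        printf("released %d sessions\n", release_all());
        return 0;
    }

Build with a C compiler and -lpthread; the point is only where the lock is held, not concurrency itself.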
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 99fcb8c..9645186 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -229,8 +229,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+ 	if (rs485conf->flags & SER_RS485_ENABLED) {
+ 		dev_dbg(port->dev, "Setting UART to RS485\n");
+ 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+-		if ((rs485conf->delay_rts_after_send) > 0)
+-			UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
++		UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	} else {
+ 		dev_dbg(port->dev, "Setting UART to RS232\n");
+@@ -305,9 +304,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
+ 
+ 	if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ 		dev_dbg(port->dev, "Setting UART to RS485\n");
+-		if ((atmel_port->rs485.delay_rts_after_send) > 0)
+-			UART_PUT_TTGR(port,
+-					atmel_port->rs485.delay_rts_after_send);
++		UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_after_send);
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	} else {
+ 		dev_dbg(port->dev, "Setting UART to RS232\n");
+@@ -1229,9 +1226,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 
+ 	if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ 		dev_dbg(port->dev, "Setting UART to RS485\n");
+-		if ((atmel_port->rs485.delay_rts_after_send) > 0)
+-			UART_PUT_TTGR(port,
+-					atmel_port->rs485.delay_rts_after_send);
++		UART_PUT_TTGR(port, atmel_port->rs485.delay_rts_after_send);
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	} else {
+ 		dev_dbg(port->dev, "Setting UART to RS232\n");
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 57d6302..ca666d0 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1295,6 +1295,11 @@ skip_countries:
+ 
+ 	acm_table[minor] = acm;
+ 
++	if (quirks & CLEAR_HALT_CONDITIONS) {
++		usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress));
++		usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress));
++	}
++
+ 	return 0;
+ alloc_fail7:
+ 	for (i = 0; i < ACM_NW; i++)
+@@ -1574,6 +1579,10 @@ static const struct usb_device_id acm_ids[] = {
+ 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ 	},
+ 
++	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
++	.driver_info = CLEAR_HALT_CONDITIONS,
++	},
++
+ 	/* Nokia S60 phones expose two ACM channels. The first is
+ 	 * a modem and is picked up by the standard AT-command
+ 	 * information below. The second is 'vendor-specific' but
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index c3f1b36..7aa5e9a 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -127,3 +127,4 @@ struct acm {
+ #define NO_CAP_LINE			4
+ #define NOT_A_MODEM			8
+ #define NO_DATA_INTERFACE		16
++#define CLEAR_HALT_CONDITIONS		BIT(7)
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 18286ce..7cfe286 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2157,9 +2157,6 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
+ #define HUB_LONG_RESET_TIME	200
+ #define HUB_RESET_TIMEOUT	800
+ 
+-static int hub_port_reset(struct usb_hub *hub, int port1,
+-			struct usb_device *udev, unsigned int delay, bool warm);
+-
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+ * Port warm reset is required to recover
+  */
+@@ -2239,44 +2236,6 @@ delay:
+ 	return -EBUSY;
+ }
+ 
+-static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+-			struct usb_device *udev, int *status)
+-{
+-	switch (*status) {
+-	case 0:
+-		/* TRSTRCY = 10 ms; plus some extra */
+-		msleep(10 + 40);
+-		if (udev) {
+-			struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+-
+-			update_devnum(udev, 0);
+-			/* The xHC may think the device is already reset,
+-			 * so ignore the status.
+-			 */
+-			if (hcd->driver->reset_device)
+-				hcd->driver->reset_device(hcd, udev);
+-		}
+-		/* FALL THROUGH */
+-	case -ENOTCONN:
+-	case -ENODEV:
+-		clear_port_feature(hub->hdev,
+-				port1, USB_PORT_FEAT_C_RESET);
+-		if (hub_is_superspeed(hub->hdev)) {
+-			clear_port_feature(hub->hdev, port1,
+-					USB_PORT_FEAT_C_BH_PORT_RESET);
+-			clear_port_feature(hub->hdev, port1,
+-					USB_PORT_FEAT_C_PORT_LINK_STATE);
+-			clear_port_feature(hub->hdev, port1,
+-					USB_PORT_FEAT_C_CONNECTION);
+-		}
+-		if (udev)
+-			usb_set_device_state(udev, *status
+-					? USB_STATE_NOTATTACHED
+-					: USB_STATE_DEFAULT);
+-		break;
+-	}
+-}
+-
+ /* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
+ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 			struct usb_device *udev, unsigned int delay, bool warm)
+@@ -2299,13 +2258,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 		 * If the caller hasn't explicitly requested a warm reset,
+ 		 * double check and see if one is needed.
+ 		 */
+-		status = hub_port_status(hub, port1,
+-					&portstatus, &portchange);
+-		if (status < 0)
+-			goto done;
+-
+-		if (hub_port_warm_reset_required(hub, portstatus))
+-			warm = true;
++		if (hub_port_status(hub, port1, &portstatus, &portchange) == 0)
++			if (hub_port_warm_reset_required(hub, portstatus))
++				warm = true;
+ 	}
+ 
+ 	/* Reset the port */
+@@ -2328,11 +2283,19 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 
+ 		/* Check for disconnect or reset */
+ 		if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+-			hub_port_finish_reset(hub, port1, udev, &status);
++			clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_RESET);
+ 
+ 			if (!hub_is_superspeed(hub->hdev))
+ 				goto done;
+ 
++			clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_BH_PORT_RESET);
++			clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_PORT_LINK_STATE);
++			clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_CONNECTION);
++
+ 			/*
+ 			 * If a USB 3.0 device migrates from reset to an error
+ 			 * state, re-issue the warm reset.
+@@ -2366,6 +2329,26 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 		port1);
+ 
+ done:
++	if (status == 0) {
++		/* TRSTRCY = 10 ms; plus some extra */
++		msleep(10 + 40);
++		if (udev) {
++			struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++			update_devnum(udev, 0);
++			/* The xHC may think the device is already reset,
++			 * so ignore the status.
++			 */
++			if (hcd->driver->reset_device)
++				hcd->driver->reset_device(hcd, udev);
++
++			usb_set_device_state(udev, USB_STATE_DEFAULT);
++		}
++	} else {
++		if (udev)
++			usb_set_device_state(udev, USB_STATE_NOTATTACHED);
++	}
++
+ 	if (!hub_is_superspeed(hub->hdev))
+ 		up_read(&ehci_cf_port_reset_rwsem);
+ 
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index c4134e8..24864d4 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -498,6 +498,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ 		dev_vdbg(dwc->dev, "USB_REQ_SET_CONFIGURATION\n");
+ 		ret = dwc3_ep0_set_config(dwc, ctrl);
+ 		break;
++	case USB_REQ_SET_INTERFACE:
++		dev_vdbg(dwc->dev, "USB_REQ_SET_INTERFACE");
++		dwc->start_config_issued = false;
++		/* Fall through */
+ 	default:
+ 		dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
+ 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 5f2e3d0..b4623f1 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -171,6 +171,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ 		if (!(reg & DWC3_DEPCMD_CMDACT)) {
+ 			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
+ 					DWC3_DEPCMD_STATUS(reg));
++			if (DWC3_DEPCMD_STATUS(reg))
++				return -EINVAL;
+ 			return 0;
+ 		}
+ 
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index c1fa92e..8605813 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -449,10 +449,13 @@ static void xhci_hub_report_link_state(struct xhci_hcd *xhci,
+ 	u32 pls = status_reg & PORT_PLS_MASK;
+ 
+ 	/* resume state is an xHCI internal state.
+-	 * Do not report it to usb core.
++	 * Do not report it to usb core; instead, pretend to be U3,
++	 * so that usb core knows it's not ready for transfer
+ 	 */
+-	if (pls == XDEV_RESUME)
++	if (pls == XDEV_RESUME) {
++		*status |= USB_SS_PORT_LS_U3;
+ 		return;
++	}
+ 
+ 	/* When the CAS bit is set then warm reset
+ 	 * should be performed on port
+@@ -592,7 +595,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 			status |= USB_PORT_STAT_C_RESET << 16;
+ 		/* USB3.0 only */
+ 		if (hcd->speed == HCD_USB3) {
+-			if ((temp & PORT_PLC))
++			/* Port link change with port in resume state should not be
++			 * reported to usbcore, as this is an internal state to be
++			 * handled by the xhci driver. Reporting PLC to usbcore may
++			 * cause usbcore to clear PLC first, so the port change event
++			 * irq won't be generated.
++			 */
++			if ((temp & PORT_PLC) &&
++				(temp & PORT_PLS_MASK) != XDEV_RESUME)
+ 				status |= USB_PORT_STAT_C_LINK_STATE << 16;
+ 			if ((temp & PORT_WRC))
+ 				status |= USB_PORT_STAT_C_BH_RESET << 16;
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 0f4a41d..d5d2af5 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1330,10 +1330,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 		/* Attempt to use the ring cache */
+ 		if (virt_dev->num_rings_cached == 0)
+ 			return -ENOMEM;
++		virt_dev->num_rings_cached--;
+ 		virt_dev->eps[ep_index].new_ring =
+ 			virt_dev->ring_cache[virt_dev->num_rings_cached];
+ 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+-		virt_dev->num_rings_cached--;
+ 		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+ 			usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
+ 	}
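The xhci-mem.c fix above is purely an ordering one: when the counter holds the number of valid cache slots, the pop has to decrement first and then read the slot the new count names, otherwise it reads one past the last cached ring. A tiny stand-alone C sketch of that pop ordering (not xHCI code, just the pattern):

    #include <assert.h>
    #include <stdio.h>

    #define CACHE_SLOTS 8

    struct ring { int id; };

    static struct ring *cache[CACHE_SLOTS];
    static int cached;          /* number of valid entries: cache[0..cached-1] */

    static void cache_push(struct ring *r)
    {
        assert(cached < CACHE_SLOTS);
        cache[cached++] = r;
    }

    /* The fixed ordering from the hunk: drop the count first, then read the
     * slot it now names, so cache[cached] (one past the end) is never touched. */
    static struct ring *cache_pop(void)
    {
        struct ring *r;

        if (cached == 0)
            return NULL;
        cached--;
        r = cache[cached];
        cache[cached] = NULL;
        return r;
    }

    int main(void)
    {
        static struct ring a = { 1 }, b = { 2 };

        cache_push(&a);
        cache_push(&b);
        printf("popped ring %d\n", cache_pop()->id);   /* 2 */
        printf("popped ring %d\n", cache_pop()->id);   /* 1 */
        return 0;
    }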
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 5c535a8..950a8cc 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3361,6 +3361,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
+ 			return -EINVAL;
+ 	}
+ 
++	if (virt_dev->tt_info)
++		old_active_eps = virt_dev->tt_info->active_eps;
++
+ 	if (virt_dev->udev != udev) {
+ 		/* If the virt_dev and the udev does not match, this virt_dev
+ 		 * may belong to another udev.
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 073a0f98..1a19724 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -193,6 +193,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
+ 	{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
+ 	{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
++	{ USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
+ 	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ 	{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+ 	{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 68e8552..a0c4cc4 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2019,6 +2019,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_READ_DISC_INFO ),
+ 
++/* Reported by Oliver Neukum <oneukum@suse.com>
++ * This device morphs spontaneously into another device if the access
++ * pattern of Windows isn't followed. Thus writable media would be dirty
++ * if the initial instance is used. So the device is limited to its
++ * virtual CD.
++ * And yes, the concept that BCD goes up to 9 is not heeded */
++UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
++		"ZTE,Incorporated",
++		"ZTE WCDMA Technologies MSM",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_SINGLE_LUN ),
++
+ /* Reported by Sven Geggus <sven-usbst@geggus.net>
+  * This encrypted pen drive returns bogus data for the initial READ(10).
+  */
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index be32b1b..738707a 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -883,6 +883,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
+ 		}
+ 		if (eventfp != d->log_file) {
+ 			filep = d->log_file;
++			d->log_file = eventfp;
+ 			ctx = d->log_ctx;
+ 			d->log_ctx = eventfp ?
+ 				eventfd_ctx_fileget(eventfp) : NULL;
+diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
+index 2b4acb8..8a93753 100644
+--- a/drivers/watchdog/omap_wdt.c
++++ b/drivers/watchdog/omap_wdt.c
+@@ -150,6 +150,13 @@ static int omap_wdt_open(struct inode *inode, struct file *file)
+ 
+ 	pm_runtime_get_sync(wdev->dev);
+ 
++	/*
++	 * Make sure the watchdog is disabled. This is unfortunately required
++	 * because writing to various registers with the watchdog running has no
++	 * effect.
++	 */
++	omap_wdt_disable(wdev);
++
+ 	/* initialize prescaler */
+ 	while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x01)
+ 		cpu_relax();
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 879ed88..bf1df72 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -527,8 +527,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
+ 	unlock_new_inode(inode);
+ 	return inode;
+ error:
+-	unlock_new_inode(inode);
+-	iput(inode);
++	iget_failed(inode);
+ 	return ERR_PTR(retval);
+ 
+ }
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index 30d4fa8..dbbc83f 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -169,8 +169,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
+ 	unlock_new_inode(inode);
+ 	return inode;
+ error:
+-	unlock_new_inode(inode);
+-	iput(inode);
++	iget_failed(inode);
+ 	return ERR_PTR(retval);
+ 
+ }
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index a1fee6f..b3d1efe 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -244,6 +244,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
+ {
+ 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ 	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
++	spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
+ 	struct btrfs_free_space *info;
+ 	struct rb_node *n;
+ 	u64 count;
+@@ -252,24 +253,30 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
+ 		return;
+ 
+ 	while (1) {
++		bool add_to_ctl = true;
++
++		spin_lock(rbroot_lock);
+ 		n = rb_first(rbroot);
+-		if (!n)
++		if (!n) {
++			spin_unlock(rbroot_lock);
+ 			break;
++		}
+ 
+ 		info = rb_entry(n, struct btrfs_free_space, offset_index);
+ 		BUG_ON(info->bitmap);
+ 
+ 		if (info->offset > root->cache_progress)
+-			goto free;
++			add_to_ctl = false;
+ 		else if (info->offset + info->bytes > root->cache_progress)
+ 			count = root->cache_progress - info->offset + 1;
+ 		else
+ 			count = info->bytes;
+ 
+-		__btrfs_add_free_space(ctl, info->offset, count);
+-free:
+ 		rb_erase(&info->offset_index, rbroot);
+-		kfree(info);
++		spin_unlock(rbroot_lock);
++		if (add_to_ctl)
++			__btrfs_add_free_space(ctl, info->offset, count);
++		kmem_cache_free(btrfs_free_space_cachep, info);
+ 	}
+ }
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 52bacff..ba26540 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2448,6 +2448,20 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
+ 					new_key.offset += skip;
+ 				}
+ 
++				/*
++				 * Don't copy an inline extent into an offset
++				 * greater than zero. Having an inline extent
++				 * at such an offset results in chaos as btrfs
++				 * isn't prepared for such cases. Just skip
++				 * this case for the same reasons as commented
++				 * at btrfs_ioctl_clone().
++				 */
++				if (new_key.offset > 0) {
++					ret = -EOPNOTSUPP;
++					btrfs_end_transaction(trans, root);
++					goto out;
++				}
++
+ 				if (key.offset + datal > off+len)
+ 					trim = key.offset + datal - (off+len);
+ 
+diff --git a/fs/buffer.c b/fs/buffer.c
+index c457f84..7eb4da4 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -1002,7 +1002,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
+  */
+ static int
+ grow_dev_page(struct block_device *bdev, sector_t block,
+-		pgoff_t index, int size, int sizebits)
++	      pgoff_t index, int size, int sizebits, gfp_t gfp)
+ {
+ 	struct inode *inode = bdev->bd_inode;
+ 	struct page *page;
+@@ -1011,7 +1011,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ 	int ret = 0;		/* Will call free_more_memory() */
+ 
+ 	page = find_or_create_page(inode->i_mapping, index,
+-		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
++		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS) | gfp);
+ 	if (!page)
+ 		return ret;
+ 
+@@ -1059,7 +1059,7 @@ failed:
+  * that page was dirty, the buffers are set dirty also.
+  */
+ static int
+-grow_buffers(struct block_device *bdev, sector_t block, int size)
++grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
+ {
+ 	pgoff_t index;
+ 	int sizebits;
+@@ -1086,11 +1086,12 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
+ 	}
+ 
+ 	/* Create a page with the proper size buffers.. */
+-	return grow_dev_page(bdev, block, index, size, sizebits);
++	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
+ }
+ 
+-static struct buffer_head *
+-__getblk_slow(struct block_device *bdev, sector_t block, int size)
++struct buffer_head *
++__getblk_slow(struct block_device *bdev, sector_t block,
++	     unsigned size, gfp_t gfp)
+ {
+ 	/* Size must be multiple of hard sectorsize */
+ 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
+@@ -1112,13 +1113,14 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ 		if (bh)
+ 			return bh;
+ 
+-		ret = grow_buffers(bdev, block, size);
++		ret = grow_buffers(bdev, block, size, gfp);
+ 		if (ret < 0)
+ 			return NULL;
+ 		if (ret == 0)
+ 			free_more_memory();
+ 	}
+ }
++EXPORT_SYMBOL(__getblk_slow);
+ 
+ /*
+  * The relationship between dirty buffers and dirty pages:
+@@ -1369,24 +1371,25 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
+ EXPORT_SYMBOL(__find_get_block);
+ 
+ /*
+- * __getblk will locate (and, if necessary, create) the buffer_head
++ * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
+  * which corresponds to the passed block_device, block and size. The
+  * returned buffer has its reference count incremented.
+  *
+- * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
+- * attempt is failing.  FIXME, perhaps?
++ * __getblk_gfp() will lock up the machine if grow_dev_page's
++ * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
+  */
+ struct buffer_head *
+-__getblk(struct block_device *bdev, sector_t block, unsigned size)
++__getblk_gfp(struct block_device *bdev, sector_t block,
++	     unsigned size, gfp_t gfp)
+ {
+ 	struct buffer_head *bh = __find_get_block(bdev, block, size);
+ 
+ 	might_sleep();
+ 	if (bh == NULL)
+-		bh = __getblk_slow(bdev, block, size);
++		bh = __getblk_slow(bdev, block, size, gfp);
+ 	return bh;
+ }
+-EXPORT_SYMBOL(__getblk);
++EXPORT_SYMBOL(__getblk_gfp);
+ 
+ /*
+  * Do async read-ahead on a buffer..
+@@ -1402,24 +1405,28 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
+ EXPORT_SYMBOL(__breadahead);
+ 
+ /**
+- *  __bread() - reads a specified block and returns the bh
++ *  __bread_gfp() - reads a specified block and returns the bh
+  *  @bdev: the block_device to read from
+  *  @block: number of block
+  *  @size: size (in bytes) to read
+- * 
++ *  @gfp: page allocation flag
++ *
+  *  Reads a specified block, and returns buffer head that contains it.
++ *  If you set @gfp to zero, the page cache is allocated from the
++ *  non-movable area so that it does not get in the way of page migration.
+  *  It returns NULL if the block was unreadable.
+  */
+ struct buffer_head *
+-__bread(struct block_device *bdev, sector_t block, unsigned size)
++__bread_gfp(struct block_device *bdev, sector_t block,
++		   unsigned size, gfp_t gfp)
+ {
+-	struct buffer_head *bh = __getblk(bdev, block, size);
++	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
+ 
+ 	if (likely(bh) && !buffer_uptodate(bh))
+ 		bh = __bread_slow(bh);
+ 	return bh;
+ }
+-EXPORT_SYMBOL(__bread);
++EXPORT_SYMBOL(__bread_gfp);
+ 
+ /*
+  * invalidate_bh_lrus() is called rarely - but not only at unmount.
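The fs/buffer.c hunks thread a gfp_t argument through grow_dev_page()/grow_buffers()/__getblk_slow() and turn the exported entry points into *_gfp variants; the old names survive (in the buffer_head.h hunk further down) as thin wrappers that pass a movable default. Below is a stand-alone, non-kernel sketch of that "add a _gfp variant, keep the old name as a wrapper" pattern; the names and flag values are invented for the demo.

    #include <stdio.h>

    typedef unsigned int gfp_t;         /* stand-in for the kernel type */

    #define GFP_DEFAULT   0x1u          /* hypothetical "movable" default */
    #define GFP_UNMOVABLE 0x0u

    /* New entry point: callers that care pass the allocation flags. */
    static int read_block_gfp(unsigned long block, gfp_t gfp)
    {
        printf("reading block %lu with gfp %#x\n", block, gfp);
        return 0;
    }

    /* Old entry point kept as a thin wrapper so existing callers keep the
     * old behaviour without being touched. */
    static int read_block(unsigned long block)
    {
        return read_block_gfp(block, GFP_DEFAULT);
    }

    int main(void)
    {
        read_block(42);                        /* legacy caller */
        read_block_gfp(42, GFP_UNMOVABLE);     /* caller that must not be moved */
        return 0;
    }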
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 2da63ab..e3d65ab 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -700,7 +700,8 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
+ 		path[ppos].p_depth = i;
+ 		path[ppos].p_ext = NULL;
+ 
+-		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
++		bh = sb_getblk_gfp(inode->i_sb, path[ppos].p_block,
++				   __GFP_MOVABLE | GFP_NOFS);
+ 		if (unlikely(!bh)) {
+ 			ret = -ENOMEM;
+ 			goto err;
+@@ -905,7 +906,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
+ 		err = -EIO;
+ 		goto cleanup;
+ 	}
+-	bh = sb_getblk(inode->i_sb, newblock);
++	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
+ 	if (!bh) {
+ 		err = -ENOMEM;
+ 		goto cleanup;
+@@ -1089,7 +1090,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
+ 	if (newblock == 0)
+ 		return err;
+ 
+-	bh = sb_getblk(inode->i_sb, newblock);
++	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
+ 	if (!bh)
+ 		return -ENOMEM;
+ 	lock_buffer(bh);
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 26d6dbf..ae1425a 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -706,7 +706,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ 				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
+ 		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
+ 				 "non-extent mapped inodes with bigalloc");
+-		return -ENOSPC;
++		return -EUCLEAN;
+ 	}
+ 
+ 	goal = ext4_find_goal(inode, map->m_lblk, partial);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index f06857b..0610766 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1849,18 +1849,31 @@ static int __ext4_journalled_writepage(struct page *page,
+ 	page_bufs = page_buffers(page);
+ 	BUG_ON(!page_bufs);
+ 	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
+-	/* As soon as we unlock the page, it can go away, but we have
+-	 * references to buffers so we are safe */
++	/*
++	 * We need to release the page lock before we start the
++	 * journal, so grab a reference so the page won't disappear
++	 * out from under us.
++	 */
++	get_page(page);
+ 	unlock_page(page);
+ 
+ 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+ 	if (IS_ERR(handle)) {
+ 		ret = PTR_ERR(handle);
+-		goto out;
++		put_page(page);
++		goto out_no_pagelock;
+ 	}
+-
+ 	BUG_ON(!ext4_handle_valid(handle));
+ 
++	lock_page(page);
++	put_page(page);
++	if (page->mapping != mapping) {
++		/* The page got truncated from under us */
++		ext4_journal_stop(handle);
++		ret = 0;
++		goto out;
++	}
++
+ 	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
+ 				do_journal_get_write_access);
+ 
+@@ -1876,6 +1889,8 @@ static int __ext4_journalled_writepage(struct page *page,
+ 	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
+ 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+ out:
++	unlock_page(page);
++out_no_pagelock:
+ 	return ret;
+ }
+ 
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 5baa7ba..7c03826 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4720,18 +4720,12 @@ do_more:
+ 		/*
+ 		 * blocks being freed are metadata. these blocks shouldn't
+ 		 * be used until this transaction is committed
++		 *
++		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
++		 * to fail.
+ 		 */
+-	retry:
+-		new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+-		if (!new_entry) {
+-			/*
+-			 * We use a retry loop because
+-			 * ext4_free_blocks() is not allowed to fail.
+-			 */
+-			cond_resched();
+-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+-			goto retry;
+-		}
++		new_entry = kmem_cache_alloc(ext4_free_ext_cachep,
++				GFP_NOFS|__GFP_NOFAIL);
+ 		new_entry->start_cluster = bit;
+ 		new_entry->group  = block_group;
+ 		new_entry->count = count_clusters;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 422be11..be4db0e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -857,6 +857,7 @@ static void ext4_put_super(struct super_block *sb)
+ 		dump_orphan_list(sb, sbi);
+ 	J_ASSERT(list_empty(&sbi->s_orphan));
+ 
++	sync_blockdev(sb->s_bdev);
+ 	invalidate_bdev(sb->s_bdev);
+ 	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
+ 		/*
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index afc0f706..e613870 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -993,6 +993,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
+ 		goto err_fput;
+ 
+ 	fuse_conn_init(fc);
++	fc->release = fuse_free_conn;
+ 
+ 	fc->dev = sb->s_dev;
+ 	fc->sb = sb;
+@@ -1007,7 +1008,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
+ 		fc->dont_mask = 1;
+ 	sb->s_flags |= MS_POSIXACL;
+ 
+-	fc->release = fuse_free_conn;
+ 	fc->flags = d.flags;
+ 	fc->user_id = d.user_id;
+ 	fc->group_id = d.group_id;
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 16a698b..39c7059 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -478,80 +478,28 @@ out:
+ 
+ int jbd2_cleanup_journal_tail(journal_t *journal)
+ {
+-	transaction_t * transaction;
+ 	tid_t		first_tid;
+-	unsigned long	blocknr, freed;
++	unsigned long	blocknr;
+ 
+ 	if (is_journal_aborted(journal))
+-		return 1;
+-
+-	/* OK, work out the oldest transaction remaining in the log, and
+-	 * the log block it starts at.
+-	 *
+-	 * If the log is now empty, we need to work out which is the
+-	 * next transaction ID we will write, and where it will
+-	 * start. */
++		return -EIO;
+ 
+-	write_lock(&journal->j_state_lock);
+-	spin_lock(&journal->j_list_lock);
+-	transaction = journal->j_checkpoint_transactions;
+-	if (transaction) {
+-		first_tid = transaction->t_tid;
+-		blocknr = transaction->t_log_start;
+-	} else if ((transaction = journal->j_committing_transaction) != NULL) {
+-		first_tid = transaction->t_tid;
+-		blocknr = transaction->t_log_start;
+-	} else if ((transaction = journal->j_running_transaction) != NULL) {
+-		first_tid = transaction->t_tid;
+-		blocknr = journal->j_head;
+-	} else {
+-		first_tid = journal->j_transaction_sequence;
+-		blocknr = journal->j_head;
+-	}
+-	spin_unlock(&journal->j_list_lock);
+-	J_ASSERT(blocknr != 0);
+-
+-	/* If the oldest pinned transaction is at the tail of the log
+-           already then there's not much we can do right now. */
+-	if (journal->j_tail_sequence == first_tid) {
+-		write_unlock(&journal->j_state_lock);
++	if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
+ 		return 1;
+-	}
+-
+-	/* OK, update the superblock to recover the freed space.
+-	 * Physical blocks come first: have we wrapped beyond the end of
+-	 * the log?  */
+-	freed = blocknr - journal->j_tail;
+-	if (blocknr < journal->j_tail)
+-		freed = freed + journal->j_last - journal->j_first;
+-
+-	trace_jbd2_cleanup_journal_tail(journal, first_tid, blocknr, freed);
+-	jbd_debug(1,
+-		  "Cleaning journal tail from %d to %d (offset %lu), "
+-		  "freeing %lu\n",
+-		  journal->j_tail_sequence, first_tid, blocknr, freed);
+-
+-	journal->j_free += freed;
+-	journal->j_tail_sequence = first_tid;
+-	journal->j_tail = blocknr;
+-	write_unlock(&journal->j_state_lock);
++	J_ASSERT(blocknr != 0);
+ 
+ 	/*
+-	 * If there is an external journal, we need to make sure that
+-	 * any data blocks that were recently written out --- perhaps
+-	 * by jbd2_log_do_checkpoint() --- are flushed out before we
+-	 * drop the transactions from the external journal.  It's
+-	 * unlikely this will be necessary, especially with a
+-	 * appropriately sized journal, but we need this to guarantee
+-	 * correctness.  Fortunately jbd2_cleanup_journal_tail()
+-	 * doesn't get called all that often.
++	 * We need to make sure that any blocks that were recently written out
++	 * --- perhaps by jbd2_log_do_checkpoint() --- are flushed out before
++	 * we drop the transactions from the journal. It's unlikely this will
++	 * be necessary, especially with an appropriately sized journal, but we
++	 * need this to guarantee correctness.  Fortunately
++	 * jbd2_cleanup_journal_tail() doesn't get called all that often.
+ 	 */
+-	if ((journal->j_fs_dev != journal->j_dev) &&
+-	    (journal->j_flags & JBD2_BARRIER))
+-		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+-	if (!(journal->j_flags & JBD2_ABORT))
+-		jbd2_journal_update_superblock(journal, 1);
+-	return 0;
++	if (journal->j_flags & JBD2_BARRIER)
++		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
++
++	return __jbd2_update_log_tail(journal, first_tid, blocknr);
+ }
+ 
+ 
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index ab9463a..bccb605 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -340,7 +340,16 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
+ 	if (journal->j_flags & JBD2_FLUSHED) {
+ 		jbd_debug(3, "super block updated\n");
+-		jbd2_journal_update_superblock(journal, 1);
++		/*
++		 * We hold j_checkpoint_mutex so tail cannot change under us.
++		 * We don't need any special data guarantees for writing sb
++		 * since journal is empty and it is ok for write to be
++		 * flushed only with transaction commit.
++		 */
++		jbd2_journal_update_sb_log_tail(journal,
++						journal->j_tail_sequence,
++						journal->j_tail,
++						WRITE_SYNC);
+ 	} else {
+ 		jbd_debug(3, "superblock not updated\n");
+ 	}
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 17b04fc..9532dac 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -775,6 +775,92 @@ struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
+ 	return jbd2_journal_add_journal_head(bh);
+ }
+ 
++/*
++ * Return tid of the oldest transaction in the journal and block in the journal
++ * where the transaction starts.
++ *
++ * If the journal is now empty, return the next transaction ID we will write
++ * and the block where that transaction will start.
++ *
++ * The return value is 0 if journal tail cannot be pushed any further, 1 if
++ * it can.
++ */
++int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
++			      unsigned long *block)
++{
++	transaction_t *transaction;
++	int ret;
++
++	read_lock(&journal->j_state_lock);
++	spin_lock(&journal->j_list_lock);
++	transaction = journal->j_checkpoint_transactions;
++	if (transaction) {
++		*tid = transaction->t_tid;
++		*block = transaction->t_log_start;
++	} else if ((transaction = journal->j_committing_transaction) != NULL) {
++		*tid = transaction->t_tid;
++		*block = transaction->t_log_start;
++	} else if ((transaction = journal->j_running_transaction) != NULL) {
++		*tid = transaction->t_tid;
++		*block = journal->j_head;
++	} else {
++		*tid = journal->j_transaction_sequence;
++		*block = journal->j_head;
++	}
++	ret = tid_gt(*tid, journal->j_tail_sequence);
++	spin_unlock(&journal->j_list_lock);
++	read_unlock(&journal->j_state_lock);
++
++	return ret;
++}
++
++/*
++ * Update information in journal structure and in on disk journal superblock
++ * about log tail. This function does not check whether information passed in
++ * really pushes the log tail further. It is the caller's responsibility to
++ * make sure the provided log tail information is valid (e.g. by holding
++ * j_checkpoint_mutex all the time between computing log tail and calling this
++ * function as is the case with jbd2_cleanup_journal_tail()).
++ *
++ * Requires j_checkpoint_mutex
++ */
++int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
++{
++	unsigned long freed;
++	int ret;
++
++	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
++
++	/*
++	 * We cannot afford for the write to remain in the drive's caches: as
++	 * soon as we update j_tail, the next transaction can start reusing
++	 * journal space, and if we lose the sb update during a power failure
++	 * we'd replay an old transaction over possibly newly overwritten data.
++	 */
++	ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
++	if (ret)
++		goto out;
++
++	write_lock(&journal->j_state_lock);
++	freed = block - journal->j_tail;
++	if (block < journal->j_tail)
++		freed += journal->j_last - journal->j_first;
++
++	trace_jbd2_update_log_tail(journal, tid, block, freed);
++	jbd_debug(1,
++		  "Cleaning journal tail from %d to %d (offset %lu), "
++		  "freeing %lu\n",
++		  journal->j_tail_sequence, tid, block, freed);
++
++	journal->j_free += freed;
++	journal->j_tail_sequence = tid;
++	journal->j_tail = block;
++	write_unlock(&journal->j_state_lock);
++
++out:
++	return ret;
++}
++
+ struct jbd2_stats_proc_session {
+ 	journal_t *journal;
+ 	struct transaction_stats_s *stats;
+@@ -1143,40 +1229,41 @@ static int journal_reset(journal_t *journal)
+ 
+ 	journal->j_max_transaction_buffers = journal->j_maxlen / 4;
+ 
+-	/* Add the dynamic fields and write it to disk. */
+-	jbd2_journal_update_superblock(journal, 1);
+-	return jbd2_journal_start_thread(journal);
+-}
+-
+-/**
+- * void jbd2_journal_update_superblock() - Update journal sb on disk.
+- * @journal: The journal to update.
+- * @wait: Set to '0' if you don't want to wait for IO completion.
+- *
+- * Update a journal's dynamic superblock fields and write it to disk,
+- * optionally waiting for the IO to complete.
+- */
+-void jbd2_journal_update_superblock(journal_t *journal, int wait)
+-{
+-	journal_superblock_t *sb = journal->j_superblock;
+-	struct buffer_head *bh = journal->j_sb_buffer;
+-
+ 	/*
+ 	 * As a special case, if the on-disk copy is already marked as needing
+-	 * no recovery (s_start == 0) and there are no outstanding transactions
+-	 * in the filesystem, then we can safely defer the superblock update
+-	 * until the next commit by setting JBD2_FLUSHED.  This avoids
++	 * no recovery (s_start == 0), then we can safely defer the superblock
++	 * update until the next commit by setting JBD2_FLUSHED.  This avoids
+ 	 * attempting a write to a potential-readonly device.
+ 	 */
+-	if (sb->s_start == 0 && journal->j_tail_sequence ==
+-				journal->j_transaction_sequence) {
++	if (sb->s_start == 0) {
+ 		jbd_debug(1, "JBD2: Skipping superblock update on recovered sb "
+ 			"(start %ld, seq %d, errno %d)\n",
+ 			journal->j_tail, journal->j_tail_sequence,
+ 			journal->j_errno);
+-		goto out;
++		journal->j_flags |= JBD2_FLUSHED;
++	} else {
++		/*
++		 * Update log tail information. We use WRITE_FUA since new
++		 * transaction will start reusing journal space and so we
++		 * must make sure information about current log tail is on
++		 * disk before that.
++		 */
++		jbd2_journal_update_sb_log_tail(journal,
++						journal->j_tail_sequence,
++						journal->j_tail,
++						WRITE_FUA);
+ 	}
++	return jbd2_journal_start_thread(journal);
++}
+ 
++static int jbd2_write_superblock(journal_t *journal, int write_op)
++{
++	struct buffer_head *bh = journal->j_sb_buffer;
++	int ret;
++
++	if (!(journal->j_flags & JBD2_BARRIER))
++		write_op &= ~(REQ_FUA | REQ_FLUSH);
++	lock_buffer(bh);
+ 	if (buffer_write_io_error(bh)) {
+ 		/*
+ 		 * Oh, dear.  A previous attempt to write the journal
+@@ -1192,48 +1279,112 @@ void jbd2_journal_update_superblock(journal_t *journal, int wait)
+ 		clear_buffer_write_io_error(bh);
+ 		set_buffer_uptodate(bh);
+ 	}
++	get_bh(bh);
++	bh->b_end_io = end_buffer_write_sync;
++	ret = submit_bh(write_op, bh);
++	wait_on_buffer(bh);
++	if (buffer_write_io_error(bh)) {
++		clear_buffer_write_io_error(bh);
++		set_buffer_uptodate(bh);
++		ret = -EIO;
++	}
++	if (ret) {
++		printk(KERN_ERR "JBD2: Error %d detected when updating "
++		       "journal superblock for %s.\n", ret,
++		       journal->j_devname);
++		jbd2_journal_abort(journal, ret);
++	}
++
++	return ret;
++}
++
++/**
++ * jbd2_journal_update_sb_log_tail() - Update log tail in journal sb on disk.
++ * @journal: The journal to update.
++ * @tail_tid: TID of the new transaction at the tail of the log
++ * @tail_block: The first block of the transaction at the tail of the log
++ * @write_op: With which operation should we write the journal sb
++ *
++ * Update a journal's superblock information about log tail and write it to
++ * disk, waiting for the IO to complete.
++ */
++int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
++				     unsigned long tail_block, int write_op)
++{
++	journal_superblock_t *sb = journal->j_superblock;
++	int ret;
++
++	jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
++		  tail_block, tail_tid);
++
++	sb->s_sequence = cpu_to_be32(tail_tid);
++	sb->s_start    = cpu_to_be32(tail_block);
++
++	ret = jbd2_write_superblock(journal, write_op);
++	if (ret)
++		goto out;
++	/* Log is no longer empty */
++	write_lock(&journal->j_state_lock);
++	WARN_ON(!sb->s_sequence);
++	journal->j_flags &= ~JBD2_FLUSHED;
++	write_unlock(&journal->j_state_lock);
++
++out:
++	return ret;
++}
++
++/**
++ * jbd2_mark_journal_empty() - Mark on disk journal as empty.
++ * @journal: The journal to update.
++ *
++ * Update a journal's dynamic superblock fields to show that journal is empty.
++ * Write updated superblock to disk waiting for IO to complete.
++ */
++static void jbd2_mark_journal_empty(journal_t *journal)
++{
++	journal_superblock_t *sb = journal->j_superblock;
+ 
+ 	read_lock(&journal->j_state_lock);
+-	jbd_debug(1, "JBD2: updating superblock (start %ld, seq %d, errno %d)\n",
+-		  journal->j_tail, journal->j_tail_sequence, journal->j_errno);
++	jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n",
++		  journal->j_tail_sequence);
+ 
+ 	sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
+-	sb->s_start    = cpu_to_be32(journal->j_tail);
+-	sb->s_errno    = cpu_to_be32(journal->j_errno);
++	sb->s_start    = cpu_to_be32(0);
+ 	read_unlock(&journal->j_state_lock);
+ 
+-	BUFFER_TRACE(bh, "marking dirty");
+-	mark_buffer_dirty(bh);
+-	if (wait) {
+-		sync_dirty_buffer(bh);
+-		if (buffer_write_io_error(bh)) {
+-			printk(KERN_ERR "JBD2: I/O error detected "
+-			       "when updating journal superblock for %s.\n",
+-			       journal->j_devname);
+-			clear_buffer_write_io_error(bh);
+-			set_buffer_uptodate(bh);
+-		}
+-	} else
+-		write_dirty_buffer(bh, WRITE);
+-
+-out:
+-	/* If we have just flushed the log (by marking s_start==0), then
+-	 * any future commit will have to be careful to update the
+-	 * superblock again to re-record the true start of the log. */
++	jbd2_write_superblock(journal, WRITE_FUA);
+ 
++	/* Log is now empty */
+ 	write_lock(&journal->j_state_lock);
+-	if (sb->s_start)
+-		journal->j_flags &= ~JBD2_FLUSHED;
+-	else
+-		journal->j_flags |= JBD2_FLUSHED;
++	journal->j_flags |= JBD2_FLUSHED;
+ 	write_unlock(&journal->j_state_lock);
+ }
+ 
++
++/**
++ * jbd2_journal_update_sb_errno() - Update error in the journal.
++ * @journal: The journal to update.
++ *
++ * Update a journal's errno.  Write updated superblock to disk waiting for IO
++ * to complete.
++ */
++static void jbd2_journal_update_sb_errno(journal_t *journal)
++{
++	journal_superblock_t *sb = journal->j_superblock;
++
++	read_lock(&journal->j_state_lock);
++	jbd_debug(1, "JBD2: updating superblock error (errno %d)\n",
++		  journal->j_errno);
++	sb->s_errno    = cpu_to_be32(journal->j_errno);
++	read_unlock(&journal->j_state_lock);
++
++	jbd2_write_superblock(journal, WRITE_SYNC);
++}
++
+ /*
+  * Read the superblock for a given journal, performing initial
+  * validation of the format.
+  */
+-
+ static int journal_get_superblock(journal_t *journal)
+ {
+ 	struct buffer_head *bh;
+@@ -1426,15 +1577,10 @@ int jbd2_journal_destroy(journal_t *journal)
+ 	spin_unlock(&journal->j_list_lock);
+ 
+ 	if (journal->j_sb_buffer) {
+-		if (!is_journal_aborted(journal)) {
+-			/* We can now mark the journal as empty. */
+-			journal->j_tail = 0;
+-			journal->j_tail_sequence =
+-				++journal->j_transaction_sequence;
+-			jbd2_journal_update_superblock(journal, 1);
+-		} else {
++		if (!is_journal_aborted(journal))
++			jbd2_mark_journal_empty(journal);
++		else
+ 			err = -EIO;
+-		}
+ 		brelse(journal->j_sb_buffer);
+ 	}
+ 
+@@ -1648,7 +1794,6 @@ int jbd2_journal_flush(journal_t *journal)
+ {
+ 	int err = 0;
+ 	transaction_t *transaction = NULL;
+-	unsigned long old_tail;
+ 
+ 	write_lock(&journal->j_state_lock);
+ 
+@@ -1683,28 +1828,28 @@ int jbd2_journal_flush(journal_t *journal)
+ 	if (is_journal_aborted(journal))
+ 		return -EIO;
+ 
+-	jbd2_cleanup_journal_tail(journal);
++	if (!err) {
++		err = jbd2_cleanup_journal_tail(journal);
++		if (err < 0)
++			goto out;
++		err = 0;
++	}
+ 
+ 	/* Finally, mark the journal as really needing no recovery.
+ 	 * This sets s_start==0 in the underlying superblock, which is
+ 	 * the magic code for a fully-recovered superblock.  Any future
+ 	 * commits of data to the journal will restore the current
+ 	 * s_start value. */
++	jbd2_mark_journal_empty(journal);
+ 	write_lock(&journal->j_state_lock);
+-	old_tail = journal->j_tail;
+-	journal->j_tail = 0;
+-	write_unlock(&journal->j_state_lock);
+-	jbd2_journal_update_superblock(journal, 1);
+-	write_lock(&journal->j_state_lock);
+-	journal->j_tail = old_tail;
+-
+ 	J_ASSERT(!journal->j_running_transaction);
+ 	J_ASSERT(!journal->j_committing_transaction);
+ 	J_ASSERT(!journal->j_checkpoint_transactions);
+ 	J_ASSERT(journal->j_head == journal->j_tail);
+ 	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
+ 	write_unlock(&journal->j_state_lock);
+-	return 0;
++out:
++	return err;
+ }
+ 
+ /**
+@@ -1738,7 +1883,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
+ 
+ 	err = jbd2_journal_skip_recovery(journal);
+ 	if (write)
+-		jbd2_journal_update_superblock(journal, 1);
++		jbd2_mark_journal_empty(journal);
+ 
+  no_recovery:
+ 	return err;
+@@ -1788,7 +1933,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
+ 	__jbd2_journal_abort_hard(journal);
+ 
+ 	if (errno)
+-		jbd2_journal_update_superblock(journal, 1);
++		jbd2_journal_update_sb_errno(journal);
+ }
+ 
+ /**
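__jbd2_update_log_tail() above recomputes the space freed by a tail move as block - j_tail, adding j_last - j_first when the new tail has wrapped past the end of the on-disk log area. A stand-alone C sketch of that circular-log arithmetic follows; the journal geometry is made up, and whether the upper bound is inclusive differs from the real jbd2 fields, so read it only as an illustration of the wrap-around handling.

    #include <stdio.h>

    /* Illustrative journal geometry: usable blocks are [first, last). */
    struct log {
        unsigned long first, last;   /* fixed bounds of the circular area */
        unsigned long tail;          /* oldest block still needed */
    };

    /* Same arithmetic as the hunk: how many blocks become free when the
     * tail moves to new_tail, accounting for wrap-around.  The unsigned
     * subtraction underflows on wrap and the size of the area is added
     * back, which lands on the right answer modulo the word size. */
    static unsigned long blocks_freed(const struct log *lg, unsigned long new_tail)
    {
        unsigned long freed = new_tail - lg->tail;

        if (new_tail < lg->tail)
            freed += lg->last - lg->first;
        return freed;
    }

    int main(void)
    {
        struct log lg = { .first = 1, .last = 101, .tail = 90 };

        /* No wrap: tail advances 90 -> 95, 5 blocks freed. */
        printf("%lu\n", blocks_freed(&lg, 95));
        /* Wrap: tail advances 90 -> 10 through the end, 20 blocks freed. */
        printf("%lu\n", blocks_freed(&lg, 10));
        return 0;
    }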
+diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
+index 421834b..875df5f 100644
+--- a/fs/jbd2/recovery.c
++++ b/fs/jbd2/recovery.c
+@@ -21,6 +21,7 @@
+ #include <linux/jbd2.h>
+ #include <linux/errno.h>
+ #include <linux/crc32.h>
++#include <linux/blkdev.h>
+ #endif
+ 
+ /*
+@@ -265,7 +266,9 @@ int jbd2_journal_recover(journal_t *journal)
+ 	err2 = sync_blockdev(journal->j_fs_dev);
+ 	if (!err)
+ 		err = err2;
+-
++	/* Make sure all replayed data is on permanent storage */
++	if (journal->j_flags & JBD2_BARRIER)
++		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+ 	return err;
+ }
+ 
+diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
+index 183c6b1..bee14a3 100644
+--- a/fs/nfs/nfs3xdr.c
++++ b/fs/nfs/nfs3xdr.c
+@@ -1333,7 +1333,7 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
+ 	if (args->npages != 0)
+ 		xdr_write_pages(xdr, args->pages, 0, args->len);
+ 	else
+-		xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
++		xdr_reserve_space(xdr, args->len);
+ 
+ 	error = nfsacl_encode(xdr->buf, base, args->inode,
+ 			    (args->mask & NFS_ACL) ?
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index ce4168a..cd55214 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1192,6 +1192,8 @@ restart:
+ 				}
+ 				spin_unlock(&state->state_lock);
+ 				nfs4_put_open_state(state);
++				clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
++					&state->flags);
+ 				goto restart;
+ 			}
+ 		}
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index b0d7ef8..31a0f4b 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -495,6 +495,7 @@ typedef u64 acpi_integer;
+ #define ACPI_NO_ACPI_ENABLE             0x10
+ #define ACPI_NO_DEVICE_INIT             0x20
+ #define ACPI_NO_OBJECT_INIT             0x40
++#define ACPI_NO_FACS_INIT               0x80
+ 
+ /*
+  * Initialization state
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index 458f497..fed3f3a 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -166,12 +166,13 @@ void __wait_on_buffer(struct buffer_head *);
+ wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
+ struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
+ 			unsigned size);
+-struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
+-			unsigned size);
++struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
++				  unsigned size, gfp_t gfp);
+ void __brelse(struct buffer_head *);
+ void __bforget(struct buffer_head *);
+ void __breadahead(struct block_device *, sector_t block, unsigned int size);
+-struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
++struct buffer_head *__bread_gfp(struct block_device *,
++				sector_t block, unsigned size, gfp_t gfp);
+ void invalidate_bh_lrus(void);
+ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
+ void free_buffer_head(struct buffer_head * bh);
+@@ -286,7 +287,13 @@ static inline void bforget(struct buffer_head *bh)
+ static inline struct buffer_head *
+ sb_bread(struct super_block *sb, sector_t block)
+ {
+-	return __bread(sb->s_bdev, block, sb->s_blocksize);
++	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
++}
++
++static inline struct buffer_head *
++sb_bread_unmovable(struct super_block *sb, sector_t block)
++{
++	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
+ }
+ 
+ static inline void
+@@ -298,7 +305,14 @@ sb_breadahead(struct super_block *sb, sector_t block)
+ static inline struct buffer_head *
+ sb_getblk(struct super_block *sb, sector_t block)
+ {
+-	return __getblk(sb->s_bdev, block, sb->s_blocksize);
++	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
++}
++
++
++static inline struct buffer_head *
++sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
++{
++	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+ }
+ 
+ static inline struct buffer_head *
+@@ -335,6 +349,36 @@ static inline void lock_buffer(struct buffer_head *bh)
+ 		__lock_buffer(bh);
+ }
+ 
++static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
++						   sector_t block,
++						   unsigned size)
++{
++	return __getblk_gfp(bdev, block, size, 0);
++}
++
++static inline struct buffer_head *__getblk(struct block_device *bdev,
++					   sector_t block,
++					   unsigned size)
++{
++	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
++}
++
++/**
++ *  __bread() - reads a specified block and returns the bh
++ *  @bdev: the block_device to read from
++ *  @block: number of block
++ *  @size: size (in bytes) to read
++ *
++ *  Reads a specified block, and returns buffer head that contains it.
++ *  The page cache is allocated from movable area so that it can be migrated.
++ *  It returns NULL if the block was unreadable.
++ */
++static inline struct buffer_head *
++__bread(struct block_device *bdev, sector_t block, unsigned size)
++{
++	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
++}
++
+ extern int __set_page_dirty_buffers(struct page *page);
+ 
+ #else /* CONFIG_BLOCK */
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index a153ed5..4920c55 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -972,6 +972,9 @@ extern void __journal_clean_data_list(transaction_t *transaction);
+ /* Log buffer allocation */
+ extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *);
+ int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
++int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
++			      unsigned long *block);
++int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+ 
+ /* Commit management */
+ extern void jbd2_journal_commit_transaction(journal_t *);
+@@ -1083,7 +1086,8 @@ extern int	   jbd2_journal_destroy    (journal_t *);
+ extern int	   jbd2_journal_recover    (journal_t *journal);
+ extern int	   jbd2_journal_wipe       (journal_t *, int);
+ extern int	   jbd2_journal_skip_recovery	(journal_t *);
+-extern void	   jbd2_journal_update_superblock	(journal_t *, int);
++extern int	   jbd2_journal_update_sb_log_tail	(journal_t *, tid_t,
++				unsigned long, int);
+ extern void	   __jbd2_journal_abort_hard	(journal_t *);
+ extern void	   jbd2_journal_abort      (journal_t *, int);
+ extern int	   jbd2_journal_errno      (journal_t *);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 3d4b5b6..000434e 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -403,6 +403,8 @@ enum {
+ 	ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17),	/* Set max sects to 65535 */
+ 	ATA_HORKAGE_NOLPM	= (1 << 20),	/* don't use LPM */
+ 	ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21),	/* some WDs have broken LPM */
++	ATA_HORKAGE_NOTRIM	= (1 << 24),	/* don't use TRIM */
++
+ 
+ 	 /* DMA mask for user DMA control: User visible values; DO NOT
+ 	    renumber */
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 41116ab..d2abc34 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1056,7 +1056,7 @@ struct nfs_impl_id4 {
+ 	struct nfstime4	date;
+ };
+ 
+-#define NFS4_EXCHANGE_ID_LEN	(48)
++#define NFS4_EXCHANGE_ID_LEN	(127)
+ struct nfs41_exchange_id_args {
+ 	struct nfs_client		*client;
+ 	nfs4_verifier			*verifier;
+diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
+index 7596441..5c74007 100644
+--- a/include/trace/events/jbd2.h
++++ b/include/trace/events/jbd2.h
+@@ -200,7 +200,7 @@ TRACE_EVENT(jbd2_checkpoint_stats,
+ 		  __entry->forced_to_close, __entry->written, __entry->dropped)
+ );
+ 
+-TRACE_EVENT(jbd2_cleanup_journal_tail,
++TRACE_EVENT(jbd2_update_log_tail,
+ 
+ 	TP_PROTO(journal_t *journal, tid_t first_tid,
+ 		 unsigned long block_nr, unsigned long freed),
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 20e88af..d9ce3d4 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -848,6 +848,9 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+ 	if (delta.tv64 < 0)
+ 		return 0;
+ 
++	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
++		return 0;
++
+ 	if (interval.tv64 < timer->base->resolution.tv64)
+ 		interval.tv64 = timer->base->resolution.tv64;
+ 
+@@ -1260,11 +1263,14 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
+ 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
+ 	 * we do not reprogram the event hardware. Happens either in
+ 	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
++	 *
++	 * Note: Because we dropped the cpu_base->lock above,
++	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
++	 * for us already.
+ 	 */
+-	if (restart != HRTIMER_NORESTART) {
+-		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
++	if (restart != HRTIMER_NORESTART &&
++	    !(timer->state & HRTIMER_STATE_ENQUEUED))
+ 		enqueue_hrtimer(timer, base);
+-	}
+ 
+ 	WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
+ 
+diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
+index 636af6d..bc84596 100644
+--- a/kernel/rcutiny.c
++++ b/kernel/rcutiny.c
+@@ -160,6 +160,11 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+ 
+ 	/* Move the ready-to-invoke callbacks to a local list. */
+ 	local_irq_save(flags);
++	if (rcp->donetail == &rcp->rcucblist) {
++		/* No callbacks ready, so just leave. */
++		local_irq_restore(flags);
++		return;
++	}
+ 	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
+ 	list = rcp->rcucblist;
+ 	rcp->rcucblist = *rcp->donetail;
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 47343cc..bfeb725 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1027,6 +1027,9 @@ static void parse_init(struct filter_parse_state *ps,
+ 
+ static char infix_next(struct filter_parse_state *ps)
+ {
++	if (!ps->infix.cnt)
++		return 0;
++
+ 	ps->infix.cnt--;
+ 
+ 	return ps->infix.string[ps->infix.tail++];
+@@ -1042,6 +1045,9 @@ static char infix_peek(struct filter_parse_state *ps)
+ 
+ static void infix_advance(struct filter_parse_state *ps)
+ {
++	if (!ps->infix.cnt)
++		return;
++
+ 	ps->infix.cnt--;
+ 	ps->infix.tail++;
+ }
+@@ -1358,7 +1364,9 @@ static int check_preds(struct filter_parse_state *ps)
+ 		}
+ 		cnt--;
+ 		n_normal_preds++;
+-		WARN_ON_ONCE(cnt < 0);
++		/* all ops should have operands */
++		if (cnt < 0)
++			break;
+ 	}
+ 
+ 	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
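check_preds() walks the postfix form of the filter and keeps a running operand count: operands increment it, each logical operator consumes one operand net, and the expression is only sane if the count never goes negative and ends at exactly 1. The hunk turns the "went negative" case from a WARN into a clean early rejection. A stand-alone C sketch of that invariant over a token list (the token handling is simplified and is not the tracing code):

    #include <stdio.h>
    #include <string.h>

    /* Validate a postfix boolean expression: every operand pushes one value,
     * every binary op ("&&"/"||") pops two and pushes one, i.e. a net -1.
     * The expression is sane only if the running count never goes negative
     * and ends at exactly 1. */
    static int postfix_is_sane(const char **tokens, int ntok)
    {
        int cnt = 0;
        int i;

        for (i = 0; i < ntok; i++) {
            if (!strcmp(tokens[i], "&&") || !strcmp(tokens[i], "||")) {
                cnt--;
                if (cnt < 0)    /* missing operand: bail out early */
                    return 0;
            } else {
                cnt++;
            }
        }
        return cnt == 1;
    }

    int main(void)
    {
        const char *good[] = { "a", "b", "&&" };
        const char *bad[]  = { "a", "&&" };     /* operator short one operand */

        printf("good: %d\n", postfix_is_sane(good, 3));
        printf("bad:  %d\n", postfix_is_sane(bad, 2));
        return 0;
    }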
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index dbc526f..389e75e 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -601,12 +601,12 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ 	unsigned a, b;
+ 	int c, old_c, totaldigits;
+ 	const char __user __force *ubuf = (const char __user __force *)buf;
+-	int exp_digit, in_range;
++	int at_start, in_range;
+ 
+ 	totaldigits = c = 0;
+ 	bitmap_zero(maskp, nmaskbits);
+ 	do {
+-		exp_digit = 1;
++		at_start = 1;
+ 		in_range = 0;
+ 		a = b = 0;
+ 
+@@ -635,11 +635,10 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ 				break;
+ 
+ 			if (c == '-') {
+-				if (exp_digit || in_range)
++				if (at_start || in_range)
+ 					return -EINVAL;
+ 				b = 0;
+ 				in_range = 1;
+-				exp_digit = 1;
+ 				continue;
+ 			}
+ 
+@@ -649,16 +648,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ 			b = b * 10 + (c - '0');
+ 			if (!in_range)
+ 				a = b;
+-			exp_digit = 0;
++			at_start = 0;
+ 			totaldigits++;
+ 		}
+ 		if (!(a <= b))
+ 			return -EINVAL;
+ 		if (b >= nmaskbits)
+ 			return -ERANGE;
+-		while (a <= b) {
+-			set_bit(a, maskp);
+-			a++;
++		if (!at_start) {
++			while (a <= b) {
++				set_bit(a, maskp);
++				a++;
++			}
+ 		}
+ 	} while (buflen && c == ',');
+ 	return 0;
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 556858c..6c009c2 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2007,8 +2007,8 @@ int file_remove_suid(struct file *file)
+ 		error = security_inode_killpriv(dentry);
+ 	if (!error && killsuid)
+ 		error = __remove_suid(dentry, killsuid);
+-	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
+-		inode->i_flags |= S_NOSEC;
++	if (!error)
++		inode_has_no_xattr(inode);
+ 
+ 	return error;
+ }
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index cc8cf1d..cbae846 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -192,6 +192,8 @@ static struct kmem_cache *scan_area_cache;
+ 
+ /* set if tracing memory operations is enabled */
+ static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
++/* same as above but only for the kmemleak_free() callback */
++static int kmemleak_free_enabled;
+ /* set in the late_initcall if there were no errors */
+ static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
+ /* enables or disables early logging of the memory operations */
+@@ -885,7 +887,7 @@ void __ref kmemleak_free(const void *ptr)
+ {
+ 	pr_debug("%s(0x%p)\n", __func__, ptr);
+ 
+-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
++	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
+ 		delete_object_full((unsigned long)ptr);
+ 	else if (atomic_read(&kmemleak_early_log))
+ 		log_early(KMEMLEAK_FREE, ptr, 0, 0);
+@@ -1614,6 +1616,13 @@ static void kmemleak_do_cleanup(struct work_struct *work)
+ 	mutex_lock(&scan_mutex);
+ 	stop_scan_thread();
+ 
++	/*
++	 * Once the scan thread has stopped, it is safe to no longer track
++	 * object freeing. Ordering of the scan thread stopping and the memory
++	 * accesses below is guaranteed by the kthread_stop() function.
++	 */
++	kmemleak_free_enabled = 0;
++
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(object, &object_list, object_list)
+ 		delete_object_full(object->pointer);
+@@ -1640,6 +1649,8 @@ static void kmemleak_disable(void)
+ 	/* check whether it is too early for a kernel thread */
+ 	if (atomic_read(&kmemleak_initialized))
+ 		schedule_work(&cleanup_work);
++	else
++		kmemleak_free_enabled = 0;
+ 
+ 	pr_info("Kernel memory leak detector disabled\n");
+ }
+@@ -1688,6 +1699,7 @@ void __init kmemleak_init(void)
+ 	if (!atomic_read(&kmemleak_error)) {
+ 		atomic_set(&kmemleak_enabled, 1);
+ 		atomic_set(&kmemleak_early_log, 0);
++		kmemleak_free_enabled = 1;
+ 	}
+ 	local_irq_restore(flags);
+ 
+diff --git a/mm/memory.c b/mm/memory.c
+index 452b8ba..7762b1d 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3153,6 +3153,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 
+ 	pte_unmap(page_table);
+ 
++	/* File mapping without ->vm_ops ? */
++	if (vma->vm_flags & VM_SHARED)
++		return VM_FAULT_SIGBUS;
++
+ 	/* Check if we need to add a guard page to the stack */
+ 	if (check_stack_guard_page(vma, address) < 0)
+ 		return VM_FAULT_SIGSEGV;
+@@ -3412,6 +3416,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ 
+ 	pte_unmap(page_table);
++	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
++	if (!vma->vm_ops->fault)
++		return VM_FAULT_SIGBUS;
+ 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
+ }
+ 
+@@ -3470,11 +3477,9 @@ int handle_pte_fault(struct mm_struct *mm,
+ 	entry = *pte;
+ 	if (!pte_present(entry)) {
+ 		if (pte_none(entry)) {
+-			if (vma->vm_ops) {
+-				if (likely(vma->vm_ops->fault))
+-					return do_linear_fault(mm, vma, address,
++			if (vma->vm_ops)
++				return do_linear_fault(mm, vma, address,
+ 						pte, pmd, flags, entry);
+-			}
+ 			return do_anonymous_page(mm, vma, address,
+ 						 pte, pmd, flags);
+ 		}
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 854ca7a..e958178 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -824,7 +824,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+ 	if (err < 0) {
+ 		if (err == -EIO)
+ 			c->status = Disconnected;
+-		goto reterr;
++		if (err != -ERESTARTSYS)
++			goto reterr;
+ 	}
+ 	if (req->status == REQ_STATUS_ERROR) {
+ 		P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 1bd197f..5f21e53 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -36,6 +36,9 @@
+ #define mlock_dereference(X, br) \
+ 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
+ 
++static void br_multicast_add_router(struct net_bridge *br,
++				    struct net_bridge_port *port);
++
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
+ {
+@@ -842,6 +845,8 @@ void br_multicast_enable_port(struct net_bridge_port *port)
+ 		goto out;
+ 
+ 	__br_multicast_enable_port(port);
++	if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
++		br_multicast_add_router(br, port);
+ 
+ out:
+ 	spin_unlock(&br->multicast_lock);
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index 7c1745d..6cdd3af 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -822,12 +822,15 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
+ 	    !skb_is_gso(skb)) {
+ 		if (br_parse_ip_options(skb))
+ 			/* Drop invalid packet */
+-			return NF_DROP;
++			goto drop;
+ 		ret = ip_fragment(skb, br_dev_queue_push_xmit);
+ 	} else
+ 		ret = br_dev_queue_push_xmit(skb);
+ 
+ 	return ret;
++ drop:
++	kfree_skb(skb);
++	return 0;
+ }
+ #else
+ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index fd863fe7..bb38a3c 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -102,7 +102,7 @@ static int crush_decode_tree_bucket(void **p, void *end,
+ {
+ 	int j;
+ 	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
+-	ceph_decode_32_safe(p, end, b->num_nodes, bad);
++	ceph_decode_8_safe(p, end, b->num_nodes, bad);
+ 	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
+ 	if (b->node_weights == NULL)
+ 		return -ENOMEM;
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index 68bbf9f..6f54d0a 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -180,18 +180,19 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+ 		 * However, this function was correct in any case. 8)
+ 		 */
+ 		unsigned long cpu_flags;
++		struct sk_buff_head *queue = &sk->sk_receive_queue;
+ 
+-		spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
+-		skb = skb_peek(&sk->sk_receive_queue);
++		spin_lock_irqsave(&queue->lock, cpu_flags);
++		skb = skb_peek(queue);
+ 		if (skb) {
+ 			*peeked = skb->peeked;
+ 			if (flags & MSG_PEEK) {
+ 				skb->peeked = 1;
+ 				atomic_inc(&skb->users);
+ 			} else
+-				__skb_unlink(skb, &sk->sk_receive_queue);
++				__skb_unlink(skb, queue);
+ 		}
+-		spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
++		spin_unlock_irqrestore(&queue->lock, cpu_flags);
+ 
+ 		if (skb)
+ 			return skb;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1c0d862..7f43202 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2947,6 +2947,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ 	local_irq_save(flags);
+ 
+ 	rps_lock(sd);
++	if (!netif_running(skb->dev))
++		goto drop;
+ 	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
+ 		if (skb_queue_len(&sd->input_pkt_queue)) {
+ enqueue:
+@@ -2967,6 +2969,7 @@ enqueue:
+ 		goto enqueue;
+ 	}
+ 
++drop:
+ 	sd->dropped++;
+ 	rps_unlock(sd);
+ 
+@@ -3258,8 +3261,6 @@ static int __netif_receive_skb(struct sk_buff *skb)
+ 
+ 	pt_prev = NULL;
+ 
+-	rcu_read_lock();
+-
+ another_round:
+ 
+ 	__this_cpu_inc(softnet_data.processed);
+@@ -3354,7 +3355,6 @@ ncls:
+ 	}
+ 
+ out:
+-	rcu_read_unlock();
+ 	return ret;
+ }
+ 
+@@ -3375,34 +3375,31 @@ out:
+  */
+ int netif_receive_skb(struct sk_buff *skb)
+ {
++	int ret;
++
+ 	if (netdev_tstamp_prequeue)
+ 		net_timestamp_check(skb);
+ 
+ 	if (skb_defer_rx_timestamp(skb))
+ 		return NET_RX_SUCCESS;
+ 
++	rcu_read_lock();
++
+ #ifdef CONFIG_RPS
+ 	{
+ 		struct rps_dev_flow voidflow, *rflow = &voidflow;
+-		int cpu, ret;
+-
+-		rcu_read_lock();
+-
+-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
++		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
+ 
+ 		if (cpu >= 0) {
+ 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+ 			rcu_read_unlock();
+-		} else {
+-			rcu_read_unlock();
+-			ret = __netif_receive_skb(skb);
++			return ret;
+ 		}
+-
+-		return ret;
+ 	}
+-#else
+-	return __netif_receive_skb(skb);
+ #endif
++	ret = __netif_receive_skb(skb);
++	rcu_read_unlock();
++	return ret;
+ }
+ EXPORT_SYMBOL(netif_receive_skb);
+ 
+@@ -3793,8 +3790,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
+ 		unsigned int qlen;
+ 
+ 		while ((skb = __skb_dequeue(&sd->process_queue))) {
++			rcu_read_lock();
+ 			local_irq_enable();
+ 			__netif_receive_skb(skb);
++			rcu_read_unlock();
+ 			local_irq_disable();
+ 			input_queue_head_incr(sd);
+ 			if (++work >= quota) {
+@@ -5305,6 +5304,7 @@ static void rollback_registered_many(struct list_head *head)
+ 		unlist_netdevice(dev);
+ 
+ 		dev->reg_state = NETREG_UNREGISTERING;
++		on_each_cpu(flush_backlog, dev, 1);
+ 	}
+ 
+ 	synchronize_net();
+@@ -5877,8 +5877,6 @@ void netdev_run_todo(void)
+ 
+ 		dev->reg_state = NETREG_UNREGISTERED;
+ 
+-		on_each_cpu(flush_backlog, dev, 1);
+-
+ 		netdev_wait_allrefs(dev);
+ 
+ 		/* paranoia */
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 80aeac9..9dd65a9 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -568,7 +568,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
+ 			   "     dst_min: %s  dst_max: %s\n",
+ 			   pkt_dev->dst_min, pkt_dev->dst_max);
+ 		seq_printf(seq,
+-			   "        src_min: %s  src_max: %s\n",
++			   "     src_min: %s  src_max: %s\n",
+ 			   pkt_dev->src_min, pkt_dev->src_max);
+ 	}
+ 
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 5b412f0..e77373a 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1147,10 +1147,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+ 	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
+ };
+ 
+-static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+-	[IFLA_VF_INFO]		= { .type = NLA_NESTED },
+-};
+-
+ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+ 	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
+ 	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
+@@ -1224,58 +1220,53 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
+ 	return 0;
+ }
+ 
+-static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
++static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
+ {
+-	int rem, err = -EINVAL;
+-	struct nlattr *vf;
+ 	const struct net_device_ops *ops = dev->netdev_ops;
++	int err = -EINVAL;
+ 
+-	nla_for_each_nested(vf, attr, rem) {
+-		switch (nla_type(vf)) {
+-		case IFLA_VF_MAC: {
+-			struct ifla_vf_mac *ivm;
+-			ivm = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_mac)
+-				err = ops->ndo_set_vf_mac(dev, ivm->vf,
+-							  ivm->mac);
+-			break;
+-		}
+-		case IFLA_VF_VLAN: {
+-			struct ifla_vf_vlan *ivv;
+-			ivv = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_vlan)
+-				err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+-							   ivv->vlan,
+-							   ivv->qos);
+-			break;
+-		}
+-		case IFLA_VF_TX_RATE: {
+-			struct ifla_vf_tx_rate *ivt;
+-			ivt = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_tx_rate)
+-				err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
+-							      ivt->rate);
+-			break;
+-		}
+-		case IFLA_VF_SPOOFCHK: {
+-			struct ifla_vf_spoofchk *ivs;
+-			ivs = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_spoofchk)
+-				err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
+-							       ivs->setting);
+-			break;
+-		}
+-		default:
+-			err = -EINVAL;
+-			break;
+-		}
+-		if (err)
+-			break;
++	if (tb[IFLA_VF_MAC]) {
++		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_mac)
++			err = ops->ndo_set_vf_mac(dev, ivm->vf,
++						  ivm->mac);
++		if (err < 0)
++			return err;
+ 	}
++
++	if (tb[IFLA_VF_VLAN]) {
++		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_vlan)
++			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
++						   ivv->qos);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_TX_RATE]) {
++		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
++
++		if (ops->ndo_set_vf_tx_rate)
++			err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
++						      ivt->rate);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_SPOOFCHK]) {
++		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_spoofchk)
++			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
++						       ivs->setting);
++		if (err < 0)
++			return err;
++	}
++
+ 	return err;
+ }
+ 
+@@ -1458,14 +1449,21 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+ 	}
+ 
+ 	if (tb[IFLA_VFINFO_LIST]) {
++		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
+ 		struct nlattr *attr;
+ 		int rem;
++
+ 		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
+-			if (nla_type(attr) != IFLA_VF_INFO) {
++			if (nla_type(attr) != IFLA_VF_INFO ||
++			    nla_len(attr) < NLA_HDRLEN) {
+ 				err = -EINVAL;
+ 				goto errout;
+ 			}
+-			err = do_setvfinfo(dev, attr);
++			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
++					       ifla_vf_policy);
++			if (err < 0)
++				goto errout;
++			err = do_setvfinfo(dev, vfinfo);
+ 			if (err < 0)
+ 				goto errout;
+ 			modified = 1;
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 16e25a4..c20c356 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -385,7 +385,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ 	ihl = ip_hdrlen(skb);
+ 
+ 	/* Determine the position of this fragment. */
+-	end = offset + skb->len - ihl;
++	end = offset + skb->len - skb_network_offset(skb) - ihl;
+ 	err = -EINVAL;
+ 
+ 	/* Is this the final fragment? */
+@@ -415,7 +415,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ 		goto err;
+ 
+ 	err = -ENOMEM;
+-	if (pskb_pull(skb, ihl) == NULL)
++	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
+ 		goto err;
+ 
+ 	err = pskb_trim_rcsum(skb, end - offset);
+@@ -638,6 +638,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ 	iph->frag_off = 0;
+ 	iph->tot_len = htons(len);
+ 	iph->tos |= ecn;
++	ip_send_check(iph);
++
+ 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
+ 	qp->q.fragments = NULL;
+ 	qp->q.fragments_tail = NULL;
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index 880a55d..0228ecb 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -598,6 +598,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
+ 
+ 	debugfs_remove_recursive(sdata->debugfs.dir);
+ 	sdata->debugfs.dir = NULL;
++	sdata->debugfs.subdir_stations = NULL;
+ }
+ 
+ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index 7918eb7..cec0ed5 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -205,7 +205,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
+ 			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
+ 	}
+ 
+-	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
++	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
++	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
+ }
+ 
+ static inline int expect_matches(const struct nf_conntrack_expect *a,
+diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
+index e8fdb17..a985158 100644
+--- a/net/rds/ib_rdma.c
++++ b/net/rds/ib_rdma.c
+@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
+ 	}
+ 
+ 	ibmr = rds_ib_alloc_fmr(rds_ibdev);
+-	if (IS_ERR(ibmr))
++	if (IS_ERR(ibmr)) {
++		rds_ib_dev_put(rds_ibdev);
+ 		return ibmr;
++	}
+ 
+ 	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
+ 	if (ret == 0)
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 686fb1a..233dbe6 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -195,7 +195,8 @@ static void rose_kill_by_device(struct net_device *dev)
+ 
+ 		if (rose->device == dev) {
+ 			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
+-			rose->neighbour->use--;
++			if (rose->neighbour)
++				rose->neighbour->use--;
+ 			rose->device = NULL;
+ 		}
+ 	}
+diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
+index 3ad435a..b56f23e 100644
+--- a/net/sunrpc/backchannel_rqst.c
++++ b/net/sunrpc/backchannel_rqst.c
+@@ -59,7 +59,7 @@ static void xprt_free_allocation(struct rpc_rqst *req)
+ 
+ 	dprintk("RPC:        free allocations for req= %p\n", req);
+ 	BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+-	xbufp = &req->rq_private_buf;
++	xbufp = &req->rq_rcv_buf;
+ 	free_page((unsigned long)xbufp->head[0].iov_base);
+ 	xbufp = &req->rq_snd_buf;
+ 	free_page((unsigned long)xbufp->head[0].iov_base);
+diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
+index f6aef58..2a012d3 100644
+--- a/sound/soc/codecs/wm8737.c
++++ b/sound/soc/codecs/wm8737.c
+@@ -485,7 +485,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
+ 
+ 			/* Fast VMID ramp at 2*2.5k */
+ 			snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+-					    WM8737_VMIDSEL_MASK, 0x4);
++					    WM8737_VMIDSEL_MASK,
++					    2 << WM8737_VMIDSEL_SHIFT);
+ 
+ 			/* Bring VMID up */
+ 			snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
+@@ -499,7 +500,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
+ 
+ 		/* VMID at 2*300k */
+ 		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+-				    WM8737_VMIDSEL_MASK, 2);
++				    WM8737_VMIDSEL_MASK,
++				    1 << WM8737_VMIDSEL_SHIFT);
+ 
+ 		break;
+ 
+diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
+index db94931..0bb4a64 100644
+--- a/sound/soc/codecs/wm8903.h
++++ b/sound/soc/codecs/wm8903.h
+@@ -172,7 +172,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
+ #define WM8903_VMID_BUF_ENA_WIDTH                    1  /* VMID_BUF_ENA */
+ 
+ #define WM8903_VMID_RES_50K                          2
+-#define WM8903_VMID_RES_250K                         3
++#define WM8903_VMID_RES_250K                         4
+ #define WM8903_VMID_RES_5K                           6
+ 
+ /*
+diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
+index 77ff1d7..f8b9930 100644
+--- a/sound/soc/codecs/wm8955.c
++++ b/sound/soc/codecs/wm8955.c
+@@ -282,7 +282,7 @@ static int wm8955_configure_clocking(struct snd_soc_codec *codec)
+ 		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
+ 				    WM8955_K_17_9_MASK,
+ 				    (pll.k >> 9) & WM8955_K_17_9_MASK);
+-		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
++		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_3,
+ 				    WM8955_K_8_0_MASK,
+ 				    pll.k & WM8955_K_8_0_MASK);
+ 		if (pll.k)
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index 8d26104..c7911fd 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -186,7 +186,7 @@ SOC_SINGLE("PCM Playback -6dB Switch", WM8960_DACCTL1, 7, 1, 0),
+ SOC_ENUM("ADC Polarity", wm8960_enum[0]),
+ SOC_SINGLE("ADC High Pass Filter Switch", WM8960_DACCTL1, 0, 1, 0),
+ 
+-SOC_ENUM("DAC Polarity", wm8960_enum[2]),
++SOC_ENUM("DAC Polarity", wm8960_enum[1]),
+ SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
+ 		    wm8960_get_deemph, wm8960_put_deemph),
+ 
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 851786f..893b750 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -312,6 +312,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
+ 	{ 0 }
+ };
+ 
++/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
++static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
++static struct usbmix_name_map bose_companion5_map[] = {
++	{ 3, NULL, .dB = &bose_companion5_dB },
++	{ 0 }	/* terminator */
++};
++
++/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
++static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
++static struct usbmix_name_map dragonfly_1_2_map[] = {
++	{ 7, NULL, .dB = &dragonfly_1_2_dB },
++	{ 0 }	/* terminator */
++};
++
+ /*
+  * Control map entries
+  */
+@@ -394,6 +408,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x25c4, 0x0003),
+ 		.map = scms_usb3318_map,
+ 	},
++	{
++		/* Bose Companion 5 */
++		.id = USB_ID(0x05a7, 0x1020),
++		.map = bose_companion5_map,
++	},
++	{
++		/* Dragonfly DAC 1.2 */
++		.id = USB_ID(0x21b4, 0x0081),
++		.map = dragonfly_1_2_map,
++	},
+ 	{ 0 } /* terminator */
+ };
+ 
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index b38dde0..c014f00 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2383,6 +2383,74 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ 
++/* Steinberg devices */
++{
++	/* Steinberg MI2 */
++	USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = & (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 3,
++				.type = QUIRK_MIDI_FIXED_ENDPOINT,
++				.data = &(const struct snd_usb_midi_endpoint_info) {
++					.out_cables = 0x0001,
++					.in_cables  = 0x0001
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++{
++	/* Steinberg MI4 */
++	USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = & (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 3,
++				.type = QUIRK_MIDI_FIXED_ENDPOINT,
++				.data = &(const struct snd_usb_midi_endpoint_info) {
++					.out_cables = 0x0001,
++					.in_cables  = 0x0001
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++
+ /* TerraTec devices */
+ {
+ 	USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),

diff --git a/3.2.70/4420_grsecurity-3.1-3.2.70-201508102127.patch b/3.2.71/4420_grsecurity-3.1-3.2.71-201508142231.patch
similarity index 99%
rename from 3.2.70/4420_grsecurity-3.1-3.2.70-201508102127.patch
rename to 3.2.71/4420_grsecurity-3.1-3.2.71-201508142231.patch
index 9aaf5cc..50b752f 100644
--- a/3.2.70/4420_grsecurity-3.1-3.2.70-201508102127.patch
+++ b/3.2.71/4420_grsecurity-3.1-3.2.71-201508142231.patch
@@ -315,7 +315,7 @@ index 2a68089..b3300e1 100644
  
  A toggle value indicating if modules are allowed to be loaded
 diff --git a/Makefile b/Makefile
-index 41a626b..31e889e 100644
+index 9d5fea7..1e122ae 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -6810,7 +6810,7 @@ index dfcb343..eda788a 100644
  		if (r_type == R_390_GOTPC)
  			*(unsigned int *) loc = val;
 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
-index 53088e2..9f44a36 100644
+index 2ba1226..df182c3 100644
 --- a/arch/s390/kernel/process.c
 +++ b/arch/s390/kernel/process.c
 @@ -320,39 +320,3 @@ unsigned long get_wchan(struct task_struct *p)
@@ -14309,7 +14309,7 @@ index 5478825..839e88c 100644
  #define flush_insn_slot(p)	do { } while (0)
  
 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 9171618..fe2b1da 100644
+index 93ce7e4..2343831 100644
 --- a/arch/x86/include/asm/kvm_host.h
 +++ b/arch/x86/include/asm/kvm_host.h
 @@ -45,6 +45,7 @@
@@ -25409,7 +25409,7 @@ index 9af0b82..086874c 100644
  			rsvd = CR3_PAE_RESERVED_BITS;
  		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 176205a..920cd58 100644
+index 055cc49..6d3fe8c 100644
 --- a/arch/x86/kvm/lapic.c
 +++ b/arch/x86/kvm/lapic.c
 @@ -53,7 +53,7 @@
@@ -32647,7 +32647,7 @@ index 26c731a..fb510c7 100644
  	  This is the Linux Xen port.  Enabling this will allow the
  	  kernel to boot in a paravirtualized environment under the
 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 5189fe8..e27635d 100644
+index 81afe1b..e27635d 100644
 --- a/arch/x86/xen/enlighten.c
 +++ b/arch/x86/xen/enlighten.c
 @@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
@@ -32659,75 +32659,7 @@ index 5189fe8..e27635d 100644
  RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
  __read_mostly int xen_have_vector_callback;
  EXPORT_SYMBOL_GPL(xen_have_vector_callback);
-@@ -321,6 +319,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
- 	pte_t pte;
- 	unsigned long pfn;
- 	struct page *page;
-+	unsigned char dummy;
- 
- 	ptep = lookup_address((unsigned long)v, &level);
- 	BUG_ON(ptep == NULL);
-@@ -330,6 +329,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
- 
- 	pte = pfn_pte(pfn, prot);
- 
-+	/*
-+	 * Careful: update_va_mapping() will fail if the virtual address
-+	 * we're poking isn't populated in the page tables.  We don't
-+	 * need to worry about the direct map (that's always in the page
-+	 * tables), but we need to be careful about vmap space.  In
-+	 * particular, the top level page table can lazily propagate
-+	 * entries between processes, so if we've switched mms since we
-+	 * vmapped the target in the first place, we might not have the
-+	 * top-level page table entry populated.
-+	 *
-+	 * We disable preemption because we want the same mm active when
-+	 * we probe the target and when we issue the hypercall.  We'll
-+	 * have the same nominal mm, but if we're a kernel thread, lazy
-+	 * mm dropping could change our pgd.
-+	 *
-+	 * Out of an abundance of caution, this uses __get_user() to fault
-+	 * in the target address just in case there's some obscure case
-+	 * in which the target address isn't readable.
-+	 */
-+
-+	preempt_disable();
-+
-+	pagefault_disable();	/* Avoid warnings due to being atomic. */
-+	__get_user(dummy, (unsigned char __user __force *)v);
-+	pagefault_enable();
-+
- 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
- 		BUG();
- 
-@@ -341,6 +366,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
- 				BUG();
- 	} else
- 		kmap_flush_unused();
-+
-+	preempt_enable();
- }
- 
- static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
-@@ -348,6 +375,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
- 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
- 	int i;
- 
-+	/*
-+	 * We need to mark the all aliases of the LDT pages RO.  We
-+	 * don't need to call vm_flush_aliases(), though, since that's
-+	 * only responsible for flushing aliases out the TLBs, not the
-+	 * page tables, and Xen will flush the TLB for us if needed.
-+	 *
-+	 * To avoid confusing future readers: none of this is necessary
-+	 * to load the LDT.  The hypervisor only checks this when the
-+	 * LDT is faulted in due to subsequent descriptor access.
-+	 */
-+
- 	for(i = 0; i < entries; i += entries_per_page)
- 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
- }
-@@ -382,8 +420,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
+@@ -422,8 +420,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
  {
  	unsigned long va = dtr->address;
  	unsigned int size = dtr->size + 1;
@@ -32737,7 +32669,7 @@ index 5189fe8..e27635d 100644
  	int f;
  
  	/*
-@@ -431,8 +468,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+@@ -471,8 +468,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
  {
  	unsigned long va = dtr->address;
  	unsigned int size = dtr->size + 1;
@@ -32747,7 +32679,7 @@ index 5189fe8..e27635d 100644
  	int f;
  
  	/*
-@@ -440,7 +476,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+@@ -480,7 +476,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
  	 * 8-byte entries, or 16 4k pages..
  	 */
  
@@ -32756,7 +32688,7 @@ index 5189fe8..e27635d 100644
  	BUG_ON(va & ~PAGE_MASK);
  
  	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-@@ -1072,30 +1108,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
+@@ -1112,30 +1108,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
  #endif
  };
  
@@ -32794,7 +32726,7 @@ index 5189fe8..e27635d 100644
  {
  	if (pm_power_off)
  		pm_power_off();
-@@ -1144,6 +1180,9 @@ static void __init xen_setup_stackprotector(void)
+@@ -1184,6 +1180,9 @@ static void __init xen_setup_stackprotector(void)
  	pv_cpu_ops.load_gdt = xen_load_gdt_boot;
  
  	setup_stack_canary_segment(0);
@@ -32804,7 +32736,7 @@ index 5189fe8..e27635d 100644
  	switch_to_new_gdt(0);
  
  	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
-@@ -1196,7 +1235,17 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1236,7 +1235,17 @@ asmlinkage void __init xen_start_kernel(void)
  	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
  
  	/* Work out if we support NX */
@@ -32823,7 +32755,7 @@ index 5189fe8..e27635d 100644
  
  	xen_setup_features();
  
-@@ -1227,13 +1276,6 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1267,13 +1276,6 @@ asmlinkage void __init xen_start_kernel(void)
  
  	machine_ops = xen_machine_ops;
  
@@ -32837,7 +32769,7 @@ index 5189fe8..e27635d 100644
  	xen_smp_init();
  
  #ifdef CONFIG_ACPI_NUMA
-@@ -1418,7 +1460,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
+@@ -1458,7 +1460,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
  	return NOTIFY_OK;
  }
  
@@ -33740,10 +33672,10 @@ index 41ffb8c..2afaff8 100644
  				unsigned long timeout_msec)
  {
 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index fcd8586..19ba966 100644
+index 4e9beff..c7d8e97 100644
 --- a/drivers/ata/libata-core.c
 +++ b/drivers/ata/libata-core.c
-@@ -4790,7 +4790,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
+@@ -4795,7 +4795,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
  	struct ata_port *ap;
  	unsigned int tag;
  
@@ -33752,7 +33684,7 @@ index fcd8586..19ba966 100644
  	ap = qc->ap;
  
  	qc->flags = 0;
-@@ -4806,7 +4806,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
+@@ -4811,7 +4811,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
  	struct ata_port *ap;
  	struct ata_link *link;
  
@@ -33761,7 +33693,7 @@ index fcd8586..19ba966 100644
  	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
  	ap = qc->ap;
  	link = qc->dev->link;
-@@ -5811,6 +5811,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+@@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
  		return;
  
  	spin_lock(&lock);
@@ -33769,7 +33701,7 @@ index fcd8586..19ba966 100644
  
  	for (cur = ops->inherits; cur; cur = cur->inherits) {
  		void **inherit = (void **)cur;
-@@ -5824,8 +5825,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
+@@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
  		if (IS_ERR(*pp))
  			*pp = NULL;
  
@@ -38577,7 +38509,7 @@ index 98723cb..10ca85b 100644
  	return -EINVAL;
  }
 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
-index 09851ce..4ba7573 100644
+index 20110b4..aeb2d2ad 100644
 --- a/drivers/gpu/drm/drm_crtc.c
 +++ b/drivers/gpu/drm/drm_crtc.c
 @@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
@@ -38609,7 +38541,7 @@ index 09851ce..4ba7573 100644
  		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
  			if (connector->encoder_ids[i] != 0) {
  				if (put_user(connector->encoder_ids[i],
-@@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+@@ -1583,7 +1583,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
  		}
  
  		for (i = 0; i < crtc_req->count_connectors; i++) {
@@ -38618,7 +38550,7 @@ index 09851ce..4ba7573 100644
  			if (get_user(out_id, &set_connectors_ptr[i])) {
  				ret = -EFAULT;
  				goto out;
-@@ -1856,7 +1856,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+@@ -1863,7 +1863,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
  	fb = obj_to_fb(obj);
  
  	num_clips = r->num_clips;
@@ -38627,7 +38559,7 @@ index 09851ce..4ba7573 100644
  
  	if (!num_clips != !clips_ptr) {
  		ret = -EINVAL;
-@@ -2282,7 +2282,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+@@ -2289,7 +2289,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
  	out_resp->flags = property->flags;
  
  	if ((out_resp->count_values >= value_count) && value_count) {
@@ -38636,7 +38568,7 @@ index 09851ce..4ba7573 100644
  		for (i = 0; i < value_count; i++) {
  			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
  				ret = -EFAULT;
-@@ -2295,7 +2295,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+@@ -2302,7 +2302,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
  	if (property->flags & DRM_MODE_PROP_ENUM) {
  		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
  			copied = 0;
@@ -38645,7 +38577,7 @@ index 09851ce..4ba7573 100644
  			list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
  
  				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
-@@ -2303,7 +2303,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+@@ -2310,7 +2310,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
  					goto done;
  				}
  
@@ -38654,7 +38586,7 @@ index 09851ce..4ba7573 100644
  						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
  					ret = -EFAULT;
  					goto done;
-@@ -2318,7 +2318,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+@@ -2325,7 +2325,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
  		if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
  			copied = 0;
  			blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
@@ -38663,7 +38595,7 @@ index 09851ce..4ba7573 100644
  
  			list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
  				if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
-@@ -2379,7 +2379,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
+@@ -2386,7 +2386,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
  	struct drm_mode_get_blob *out_resp = data;
  	struct drm_property_blob *blob;
  	int ret = 0;
@@ -38672,7 +38604,7 @@ index 09851ce..4ba7573 100644
  
  	if (!drm_core_check_feature(dev, DRIVER_MODESET))
  		return -EINVAL;
-@@ -2393,7 +2393,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
+@@ -2400,7 +2400,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
  	blob = obj_to_blob(obj);
  
  	if (out_resp->length == blob->length) {
@@ -43654,7 +43586,7 @@ index 1cbfc6b..56e1dbb 100644
  /*----------------------------------------------------------------*/
  
 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index aec029a..d2c133e 100644
+index 6e7b002..c33b531 100644
 --- a/drivers/md/raid1.c
 +++ b/drivers/md/raid1.c
 @@ -1591,7 +1591,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
@@ -45540,7 +45472,7 @@ index ba168a7..399925d6 100644
  					st_gdata->list[type]->reserve);
  			/* next 2 required for BT only */
 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
-index 4802f7f..5ae431e 100644
+index f53d5c8..73f9732 100644
 --- a/drivers/mmc/card/block.c
 +++ b/drivers/mmc/card/block.c
 @@ -399,7 +399,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
@@ -46390,10 +46322,10 @@ index c07cfe9..81cbf7e 100644
  
  /* To mask all all interrupts.*/
 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-index d4d2bc1..14b8672 100644
+index 05852e3..be08bb2 100644
 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
-@@ -1602,7 +1602,7 @@ static const struct file_operations stmmac_rings_status_fops = {
+@@ -1600,7 +1600,7 @@ static const struct file_operations stmmac_rings_status_fops = {
  	.open = stmmac_sysfs_ring_open,
  	.read = seq_read,
  	.llseek = seq_lseek,
@@ -46402,7 +46334,7 @@ index d4d2bc1..14b8672 100644
  };
  
  static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
-@@ -1674,7 +1674,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
+@@ -1672,7 +1672,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
  	.open = stmmac_sysfs_dma_cap_open,
  	.read = seq_read,
  	.llseek = seq_lseek,
@@ -48808,7 +48740,7 @@ index e15d4c9..83cd617 100644
  		__power_supply_attrs[i] = &power_supply_attrs[i].attr;
  }
 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
-index adba3d6..7d7a5a6 100644
+index 2dd9838..bd0c037 100644
 --- a/drivers/regulator/core.c
 +++ b/drivers/regulator/core.c
 @@ -2641,7 +2641,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
@@ -51790,7 +51722,7 @@ index ed147c4..94fc3c6 100644
  
  /* core tmem accessor functions */
 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
-index ae4e7da..46264ce 100644
+index 59fb984..c159222 100644
 --- a/drivers/target/iscsi/iscsi_target.c
 +++ b/drivers/target/iscsi/iscsi_target.c
 @@ -1357,7 +1357,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
@@ -52948,7 +52880,7 @@ index eb2c3bd..5236c12 100644
  			wake_up(&usb_kill_urb_queue);
  		usb_put_urb(urb);
 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 18286ce..c6d2114 100644
+index 7cfe286..1cf137f1 100644
 --- a/drivers/usb/core/hub.c
 +++ b/drivers/usb/core/hub.c
 @@ -25,6 +25,7 @@
@@ -52959,7 +52891,7 @@ index 18286ce..c6d2114 100644
  
  #include <asm/uaccess.h>
  #include <asm/byteorder.h>
-@@ -3485,6 +3486,9 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
+@@ -3468,6 +3469,9 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
  		return;
  	}
  
@@ -53249,7 +53181,7 @@ index 5f6df6e..0a16602 100644
  
  /*
 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
-index be32b1b..b5f6c08 100644
+index 738707a..c1b766f 100644
 --- a/drivers/vhost/vhost.c
 +++ b/drivers/vhost/vhost.c
 @@ -631,7 +631,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
@@ -56980,20 +56912,10 @@ index 2524e4c..2962cc6a 100644
  	if (retval > 0)
  		retval = 0;
 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
-index 879ed88..dbaf762 100644
+index bf1df72..dbaf762 100644
 --- a/fs/9p/vfs_inode.c
 +++ b/fs/9p/vfs_inode.c
-@@ -527,8 +527,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
- 	unlock_new_inode(inode);
- 	return inode;
- error:
--	unlock_new_inode(inode);
--	iput(inode);
-+	iget_failed(inode);
- 	return ERR_PTR(retval);
- 
- }
-@@ -1286,7 +1285,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+@@ -1285,7 +1285,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
  void
  v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
  {
@@ -57002,20 +56924,6 @@ index 879ed88..dbaf762 100644
  
  	P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
  		IS_ERR(s) ? "<error>" : s);
-diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
-index 30d4fa8..dbbc83f 100644
---- a/fs/9p/vfs_inode_dotl.c
-+++ b/fs/9p/vfs_inode_dotl.c
-@@ -169,8 +169,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
- 	unlock_new_inode(inode);
- 	return inode;
- error:
--	unlock_new_inode(inode);
--	iput(inode);
-+	iget_failed(inode);
- 	return ERR_PTR(retval);
- 
- }
 diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
 index c70251d..fe305fd 100644
 --- a/fs/9p/vfs_super.c
@@ -58493,10 +58401,10 @@ index da528f8..97002a3 100644
  		do_chunk_alloc(trans, root->fs_info->extent_root,
  			       num_bytes, data, CHUNK_ALLOC_FORCE);
 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
-index 52bacff..a4b7f29 100644
+index ba26540..722eebc 100644
 --- a/fs/btrfs/ioctl.c
 +++ b/fs/btrfs/ioctl.c
-@@ -2775,7 +2775,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+@@ -2789,7 +2789,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
  		up_read(&info->groups_sem);
  	}
  
@@ -58545,10 +58453,10 @@ index 200f63b..490b833 100644
  /*
   * used by btrfsctl to scan devices when no FS is mounted
 diff --git a/fs/buffer.c b/fs/buffer.c
-index c457f84..3e206d5 100644
+index 7eb4da4..9f8c5b3 100644
 --- a/fs/buffer.c
 +++ b/fs/buffer.c
-@@ -3326,7 +3326,7 @@ void __init buffer_init(void)
+@@ -3333,7 +3333,7 @@ void __init buffer_init(void)
  	bh_cachep = kmem_cache_create("buffer_head",
  			sizeof(struct buffer_head), 0,
  				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
@@ -60973,7 +60881,7 @@ index 6858d9d..590047a 100644
  
  	/* locality groups */
 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index 5baa7ba..917bb08 100644
+index 7c03826..d0a4b11 100644
 --- a/fs/ext4/mballoc.c
 +++ b/fs/ext4/mballoc.c
 @@ -1796,7 +1796,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
@@ -61103,7 +61011,7 @@ index f3358ab..fbb1d90 100644
  		       "MMP failure info: last update time: %llu, last update "
  		       "node: %s, last update device: %s\n",
 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
-index 422be11..ef4b528 100644
+index be4db0e..f60dea7 100644
 --- a/fs/ext4/super.c
 +++ b/fs/ext4/super.c
 @@ -92,6 +92,8 @@ static struct file_system_type ext2_fs_type = {
@@ -61124,7 +61032,7 @@ index 422be11..ef4b528 100644
  #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
  #else
  #define IS_EXT3_SB(sb) (0)
-@@ -1438,7 +1442,7 @@ static ext4_fsblk_t get_sb_block(void **data)
+@@ -1439,7 +1443,7 @@ static ext4_fsblk_t get_sb_block(void **data)
  }
  
  #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
@@ -61133,7 +61041,7 @@ index 422be11..ef4b528 100644
  	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
  
  #ifdef CONFIG_QUOTA
-@@ -2460,7 +2464,7 @@ struct ext4_attr {
+@@ -2461,7 +2465,7 @@ struct ext4_attr {
  	ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
  			 const char *, size_t);
  	int offset;
@@ -61142,7 +61050,7 @@ index 422be11..ef4b528 100644
  
  static int parse_strtoul(const char *buf,
  		unsigned long max, unsigned long *value)
-@@ -3167,7 +3171,6 @@ int ext4_calculate_overhead(struct super_block *sb)
+@@ -3168,7 +3172,6 @@ int ext4_calculate_overhead(struct super_block *sb)
  	ext4_fsblk_t overhead = 0;
  	char *buf = (char *) get_zeroed_page(GFP_KERNEL);
  
@@ -61150,7 +61058,7 @@ index 422be11..ef4b528 100644
  	if (!buf)
  		return -ENOMEM;
  
-@@ -5044,7 +5047,6 @@ static inline int ext2_feature_set_ok(struct super_block *sb)
+@@ -5045,7 +5048,6 @@ static inline int ext2_feature_set_ok(struct super_block *sb)
  		return 0;
  	return 1;
  }
@@ -61158,7 +61066,7 @@ index 422be11..ef4b528 100644
  #else
  static inline void register_as_ext2(void) { }
  static inline void unregister_as_ext2(void) { }
-@@ -5077,7 +5079,6 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
+@@ -5078,7 +5080,6 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
  		return 0;
  	return 1;
  }
@@ -61166,7 +61074,7 @@ index 422be11..ef4b528 100644
  #else
  static inline void register_as_ext3(void) { }
  static inline void unregister_as_ext3(void) { }
-@@ -5091,6 +5092,7 @@ static struct file_system_type ext4_fs_type = {
+@@ -5092,6 +5093,7 @@ static struct file_system_type ext4_fs_type = {
  	.kill_sb	= kill_block_super,
  	.fs_flags	= FS_REQUIRES_DEV,
  };
@@ -62945,7 +62853,7 @@ index e13558c..56ca611 100644
  	if (!IS_ERR(link))
  		free_page((unsigned long) link);
 diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
-index afc0f706..a5489ea 100644
+index e613870..5384cc6 100644
 --- a/fs/fuse/inode.c
 +++ b/fs/fuse/inode.c
 @@ -1106,6 +1106,7 @@ static struct file_system_type fuse_fs_type = {
@@ -83834,10 +83742,10 @@ index f93d8c1..71244f6 100644
  
  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
 diff --git a/include/linux/libata.h b/include/linux/libata.h
-index 3d4b5b6..3648fe8 100644
+index 000434e..6d73f50 100644
 --- a/include/linux/libata.h
 +++ b/include/linux/libata.h
-@@ -924,7 +924,7 @@ struct ata_port_operations {
+@@ -926,7 +926,7 @@ struct ata_port_operations {
  	 * fields must be pointers.
  	 */
  	const struct ata_port_operations	*inherits;
@@ -91217,10 +91125,10 @@ index 9b22d03..6295b62 100644
  				prev->next = info->next;
  			else
 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
-index 20e88af..ec1b0d2 100644
+index d9ce3d4..502c9ce 100644
 --- a/kernel/hrtimer.c
 +++ b/kernel/hrtimer.c
-@@ -1436,7 +1436,7 @@ void hrtimer_peek_ahead_timers(void)
+@@ -1442,7 +1442,7 @@ void hrtimer_peek_ahead_timers(void)
  	local_irq_restore(flags);
  }
  
@@ -91229,7 +91137,7 @@ index 20e88af..ec1b0d2 100644
  {
  	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
  
-@@ -1778,7 +1778,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
+@@ -1784,7 +1784,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
  	return NOTIFY_OK;
  }
  
@@ -96322,7 +96230,7 @@ index f07c144..d2ad3b0 100644
  	}
  
 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
-index 636af6d..90b936f 100644
+index bc84596..5aef930 100644
 --- a/kernel/rcutiny.c
 +++ b/kernel/rcutiny.c
 @@ -46,7 +46,7 @@
@@ -96334,7 +96242,7 @@ index 636af6d..90b936f 100644
  static void __call_rcu(struct rcu_head *head,
  		       void (*func)(struct rcu_head *rcu),
  		       struct rcu_ctrlblk *rcp);
-@@ -186,7 +186,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+@@ -191,7 +191,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
  	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
  }
  
@@ -99430,30 +99338,10 @@ index 875fed4..7a76cbb 100644
  }
  
 diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
-index 47343cc..9c39703 100644
+index bfeb725..fe05d4a 100644
 --- a/kernel/trace/trace_events_filter.c
 +++ b/kernel/trace/trace_events_filter.c
-@@ -1027,6 +1027,9 @@ static void parse_init(struct filter_parse_state *ps,
- 
- static char infix_next(struct filter_parse_state *ps)
- {
-+	if (!ps->infix.cnt)
-+		return 0;
-+
- 	ps->infix.cnt--;
- 
- 	return ps->infix.string[ps->infix.tail++];
-@@ -1042,6 +1045,9 @@ static char infix_peek(struct filter_parse_state *ps)
- 
- static void infix_advance(struct filter_parse_state *ps)
- {
-+	if (!ps->infix.cnt)
-+		return;
-+
- 	ps->infix.cnt--;
- 	ps->infix.tail++;
- }
-@@ -1356,6 +1362,8 @@ static int check_preds(struct filter_parse_state *ps)
+@@ -1362,6 +1362,8 @@ static int check_preds(struct filter_parse_state *ps)
  			cnt--;
  			continue;
  		}
@@ -99461,7 +99349,7 @@ index 47343cc..9c39703 100644
 +		// a reject here when it's backported
  		cnt--;
  		n_normal_preds++;
- 		WARN_ON_ONCE(cnt < 0);
+ 		/* all ops should have operands */
 diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
 index a7d2a4c..b034c76 100644
 --- a/kernel/trace/trace_functions_graph.c
@@ -99792,7 +99680,7 @@ index c06efca..bcafc28 100644
  
  ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
 diff --git a/lib/bitmap.c b/lib/bitmap.c
-index dbc526f..528d2c2 100644
+index 389e75e..f685684 100644
 --- a/lib/bitmap.c
 +++ b/lib/bitmap.c
 @@ -423,7 +423,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
@@ -99819,10 +99707,10 @@ index dbc526f..528d2c2 100644
  	int c, old_c, totaldigits;
 -	const char __user __force *ubuf = (const char __user __force *)buf;
 +	const char __user *ubuf = (const char __force_user *)buf;
- 	int exp_digit, in_range;
+ 	int at_start, in_range;
  
  	totaldigits = c = 0;
-@@ -700,7 +700,7 @@ int bitmap_parselist_user(const char __user *ubuf,
+@@ -701,7 +701,7 @@ int bitmap_parselist_user(const char __user *ubuf,
  {
  	if (!access_ok(VERIFY_READ, ubuf, ulen))
  		return -EFAULT;
@@ -101056,7 +100944,7 @@ index 2b49dd2..0527d62 100644
  		bdi_destroy(bdi);
  		return err;
 diff --git a/mm/filemap.c b/mm/filemap.c
-index 556858c..71a567d 100644
+index 6c009c2..e632831 100644
 --- a/mm/filemap.c
 +++ b/mm/filemap.c
 @@ -1773,7 +1773,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -101330,10 +101218,10 @@ index 0c26b5e..1cc340f 100644
  #ifdef CONFIG_MEMORY_FAILURE
  extern bool is_free_buddy_page(struct page *page);
 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
-index cc8cf1d..677c52d 100644
+index cbae846..19f64d5 100644
 --- a/mm/kmemleak.c
 +++ b/mm/kmemleak.c
-@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
+@@ -359,7 +359,7 @@ static void print_unreferenced(struct seq_file *seq,
  
  	for (i = 0; i < object->trace_len; i++) {
  		void *ptr = (void *)object->trace[i];
@@ -101342,7 +101230,7 @@ index cc8cf1d..677c52d 100644
  	}
  }
  
-@@ -1747,7 +1747,7 @@ static int __init kmemleak_late_init(void)
+@@ -1759,7 +1759,7 @@ static int __init kmemleak_late_init(void)
  		return -ENOMEM;
  	}
  
@@ -101563,7 +101451,7 @@ index 51901b1..79af2f4 100644
  	/* keep elevated page count for bad page */
  	return ret;
 diff --git a/mm/memory.c b/mm/memory.c
-index 452b8ba..d322be8 100644
+index 7762b1d..409b29a 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
 @@ -462,8 +462,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -101988,7 +101876,7 @@ index 452b8ba..d322be8 100644
   * We enter with non-exclusive mmap_sem (to exclude vma changes,
   * but allow concurrent faults), and pte mapped but not yet locked.
   * We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3147,27 +3330,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3147,31 +3330,29 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  		unsigned long address, pte_t *page_table, pmd_t *pmd,
  		unsigned int flags)
  {
@@ -101999,6 +101887,13 @@ index 452b8ba..d322be8 100644
  
 -	pte_unmap(page_table);
 -
+ 	/* File mapping without ->vm_ops ? */
+-	if (vma->vm_flags & VM_SHARED)
++	if (vma->vm_flags & VM_SHARED) {
++		pte_unmap(page_table);
+ 		return VM_FAULT_SIGBUS;
++	}
+ 
 -	/* Check if we need to add a guard page to the stack */
 -	if (check_stack_guard_page(vma, address) < 0)
 -		return VM_FAULT_SIGSEGV;
@@ -102021,7 +101916,7 @@ index 452b8ba..d322be8 100644
  	if (unlikely(anon_vma_prepare(vma)))
  		goto oom;
  	page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3186,6 +3365,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3190,6 +3371,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	if (!pte_none(*page_table))
  		goto release;
  
@@ -102033,7 +101928,7 @@ index 452b8ba..d322be8 100644
  	inc_mm_counter_fast(mm, MM_ANONPAGES);
  	page_add_new_anon_rmap(page, vma, address);
  setpte:
-@@ -3193,6 +3377,12 @@ setpte:
+@@ -3197,6 +3383,12 @@ setpte:
  
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(vma, address, page_table);
@@ -102046,7 +101941,7 @@ index 452b8ba..d322be8 100644
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  	return 0;
-@@ -3336,6 +3526,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3340,6 +3532,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	 */
  	/* Only go through if we didn't race with anybody else... */
  	if (likely(pte_same(*page_table, orig_pte))) {
@@ -102059,7 +101954,7 @@ index 452b8ba..d322be8 100644
  		flush_icache_page(vma, page);
  		entry = mk_pte(page, vma->vm_page_prot);
  		if (flags & FAULT_FLAG_WRITE)
-@@ -3355,6 +3551,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3359,6 +3557,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  
  		/* no need to invalidate: a not-present page won't be cached */
  		update_mmu_cache(vma, address, page_table);
@@ -102074,7 +101969,7 @@ index 452b8ba..d322be8 100644
  	} else {
  		if (cow_page)
  			mem_cgroup_uncharge_page(cow_page);
-@@ -3508,6 +3712,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3513,6 +3719,12 @@ int handle_pte_fault(struct mm_struct *mm,
  		if (flags & FAULT_FLAG_WRITE)
  			flush_tlb_fix_spurious_fault(vma, address);
  	}
@@ -102087,7 +101982,7 @@ index 452b8ba..d322be8 100644
  unlock:
  	pte_unmap_unlock(pte, ptl);
  	return 0;
-@@ -3524,6 +3734,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3529,6 +3741,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	pmd_t *pmd;
  	pte_t *pte;
  
@@ -102098,7 +101993,7 @@ index 452b8ba..d322be8 100644
  	__set_current_state(TASK_RUNNING);
  
  	count_vm_event(PGFAULT);
-@@ -3535,6 +3749,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3540,6 +3756,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	if (unlikely(is_vm_hugetlb_page(vma)))
  		return hugetlb_fault(mm, vma, address, flags);
  
@@ -102133,7 +102028,7 @@ index 452b8ba..d322be8 100644
  retry:
  	pgd = pgd_offset(mm, address);
  	pud = pud_alloc(mm, pgd, address);
-@@ -3576,7 +3818,7 @@ retry:
+@@ -3581,7 +3825,7 @@ retry:
  	 * run pte_offset_map on the pmd, if an huge pmd could
  	 * materialize from under us from a different thread.
  	 */
@@ -102142,7 +102037,7 @@ index 452b8ba..d322be8 100644
  		return VM_FAULT_OOM;
  	/* if an huge pmd materialized from under us just retry later */
  	if (unlikely(pmd_trans_huge(*pmd)))
-@@ -3613,6 +3855,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3618,6 +3862,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  	spin_unlock(&mm->page_table_lock);
  	return 0;
  }
@@ -102166,7 +102061,7 @@ index 452b8ba..d322be8 100644
  #endif /* __PAGETABLE_PUD_FOLDED */
  
  #ifndef __PAGETABLE_PMD_FOLDED
-@@ -3643,11 +3902,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3648,11 +3909,35 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  	spin_unlock(&mm->page_table_lock);
  	return 0;
  }
@@ -102204,7 +102099,7 @@ index 452b8ba..d322be8 100644
  	struct vm_area_struct * vma;
  
  	vma = find_vma(current->mm, addr);
-@@ -3680,7 +3963,7 @@ static int __init gate_vma_init(void)
+@@ -3685,7 +3970,7 @@ static int __init gate_vma_init(void)
  	gate_vma.vm_start = FIXADDR_USER_START;
  	gate_vma.vm_end = FIXADDR_USER_END;
  	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -102213,7 +102108,7 @@ index 452b8ba..d322be8 100644
  	/*
  	 * Make sure the vDSO gets into every core dump.
  	 * Dumping its contents makes post-mortem fully interpretable later
-@@ -3820,8 +4103,8 @@ out:
+@@ -3825,8 +4110,8 @@ out:
  	return ret;
  }
  
@@ -102224,7 +102119,7 @@ index 452b8ba..d322be8 100644
  {
  	resource_size_t phys_addr;
  	unsigned long prot = 0;
-@@ -3846,8 +4129,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+@@ -3851,8 +4136,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
   * Access another process' address space as given in mm.  If non-NULL, use the
   * given task for page fault accounting.
   */
@@ -102235,7 +102130,7 @@ index 452b8ba..d322be8 100644
  {
  	struct vm_area_struct *vma;
  	void *old_buf = buf;
-@@ -3855,7 +4138,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3860,7 +4145,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  	down_read(&mm->mmap_sem);
  	/* ignore errors, just check how much was successfully transferred */
  	while (len) {
@@ -102244,7 +102139,7 @@ index 452b8ba..d322be8 100644
  		void *maddr;
  		struct page *page = NULL;
  
-@@ -3914,8 +4197,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -3919,8 +4204,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
   *
   * The caller must hold a reference on @mm.
   */
@@ -102255,7 +102150,7 @@ index 452b8ba..d322be8 100644
  {
  	return __access_remote_vm(NULL, mm, addr, buf, len, write);
  }
-@@ -3925,11 +4208,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -3930,11 +4215,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
   * Source/target buffer must be kernel space,
   * Do not walk the page table directly, use get_user_pages
   */
@@ -106488,7 +106383,7 @@ index c705612..8f2e391 100644
  	.maxtype	= IFLA_VLAN_MAX,
  	.policy		= vlan_policy,
 diff --git a/net/9p/client.c b/net/9p/client.c
-index 854ca7a..fc1bfc8 100644
+index e958178..94c4e6d 100644
 --- a/net/9p/client.c
 +++ b/net/9p/client.c
 @@ -582,7 +582,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
@@ -106500,7 +106395,7 @@ index 854ca7a..fc1bfc8 100644
  				if (err) {
  					err = -EFAULT;
  					goto out_free;
-@@ -1528,7 +1528,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
+@@ -1529,7 +1529,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
  			kernel_buf = 1;
  			indata = data;
  		} else
@@ -106509,7 +106404,7 @@ index 854ca7a..fc1bfc8 100644
  		/*
  		 * response header len is 11
  		 * PDU Header(7) + IO Size (4)
-@@ -1603,7 +1603,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
+@@ -1604,7 +1604,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
  			kernel_buf = 1;
  			odata = data;
  		} else
@@ -107046,10 +106941,10 @@ index f20c4fd..73aee41 100644
  	if (err < 0) {
  		pr_err("bridge: can't register sap for STP\n");
 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
-index 1bd197f..1119378 100644
+index 5f21e53..c8b995a 100644
 --- a/net/bridge/br_multicast.c
 +++ b/net/bridge/br_multicast.c
-@@ -1415,7 +1415,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
+@@ -1420,7 +1420,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
  	nexthdr = ip6h->nexthdr;
  	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
  
@@ -107627,10 +107522,10 @@ index f06994d..b7fd27f 100644
  	a0 = a[0];
  	a1 = a[1];
 diff --git a/net/core/datagram.c b/net/core/datagram.c
-index 68bbf9f..5ef0d12 100644
+index 6f54d0a..4c14198 100644
 --- a/net/core/datagram.c
 +++ b/net/core/datagram.c
-@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
+@@ -286,7 +286,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
  	}
  
  	kfree_skb(skb);
@@ -107640,7 +107535,7 @@ index 68bbf9f..5ef0d12 100644
  
  	return err;
 diff --git a/net/core/dev.c b/net/core/dev.c
-index 1c0d862..d4946e6 100644
+index 7f43202..f37e3d5 100644
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
 @@ -1142,10 +1142,14 @@ void dev_load(struct net *net, const char *name)
@@ -107703,7 +107598,7 @@ index 1c0d862..d4946e6 100644
  
  #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
  
-@@ -2972,7 +2976,7 @@ enqueue:
+@@ -2975,7 +2979,7 @@ drop:
  
  	local_irq_restore(flags);
  
@@ -107712,7 +107607,7 @@ index 1c0d862..d4946e6 100644
  	kfree_skb(skb);
  	return NET_RX_DROP;
  }
-@@ -3046,7 +3050,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3049,7 +3053,7 @@ int netif_rx_ni(struct sk_buff *skb)
  }
  EXPORT_SYMBOL(netif_rx_ni);
  
@@ -107721,7 +107616,7 @@ index 1c0d862..d4946e6 100644
  {
  	struct softnet_data *sd = &__get_cpu_var(softnet_data);
  
-@@ -3345,7 +3349,7 @@ ncls:
+@@ -3346,7 +3350,7 @@ ncls:
  	if (pt_prev) {
  		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
  	} else {
@@ -107730,7 +107625,7 @@ index 1c0d862..d4946e6 100644
  		kfree_skb(skb);
  		/* Jamal, now you will not able to escape explaining
  		 * me how you were going to use this. :-)
-@@ -3911,7 +3915,7 @@ void netif_napi_del(struct napi_struct *napi)
+@@ -3910,7 +3914,7 @@ void netif_napi_del(struct napi_struct *napi)
  }
  EXPORT_SYMBOL(netif_napi_del);
  
@@ -107739,7 +107634,7 @@ index 1c0d862..d4946e6 100644
  {
  	struct softnet_data *sd = &__get_cpu_var(softnet_data);
  	unsigned long time_limit = jiffies + 2;
-@@ -4189,7 +4193,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
+@@ -4188,7 +4192,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
  	struct rtnl_link_stats64 temp;
  	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
  
@@ -107754,7 +107649,7 @@ index 1c0d862..d4946e6 100644
  		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
  		   dev->name, stats->rx_bytes, stats->rx_packets,
  		   stats->rx_errors,
-@@ -4264,7 +4274,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
+@@ -4263,7 +4273,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
  	return 0;
  }
  
@@ -107763,7 +107658,7 @@ index 1c0d862..d4946e6 100644
  	.start = dev_seq_start,
  	.next  = dev_seq_next,
  	.stop  = dev_seq_stop,
-@@ -4294,7 +4304,7 @@ static const struct seq_operations softnet_seq_ops = {
+@@ -4293,7 +4303,7 @@ static const struct seq_operations softnet_seq_ops = {
  
  static int softnet_seq_open(struct inode *inode, struct file *file)
  {
@@ -107772,7 +107667,7 @@ index 1c0d862..d4946e6 100644
  }
  
  static const struct file_operations softnet_seq_fops = {
-@@ -4381,8 +4391,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
+@@ -4380,8 +4390,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
  		else
  			seq_printf(seq, "%04x", ntohs(pt->type));
  
@@ -107786,7 +107681,7 @@ index 1c0d862..d4946e6 100644
  	}
  
  	return 0;
-@@ -4444,7 +4459,7 @@ static void __net_exit dev_proc_net_exit(struct net *net)
+@@ -4443,7 +4458,7 @@ static void __net_exit dev_proc_net_exit(struct net *net)
  	proc_net_remove(net, "dev");
  }
  
@@ -107795,7 +107690,7 @@ index 1c0d862..d4946e6 100644
  	.init = dev_proc_net_init,
  	.exit = dev_proc_net_exit,
  };
-@@ -5939,7 +5954,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+@@ -5937,7 +5952,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
  	} else {
  		netdev_stats_to_stats64(storage, &dev->stats);
  	}
@@ -107804,7 +107699,7 @@ index 1c0d862..d4946e6 100644
  	return storage;
  }
  EXPORT_SYMBOL(dev_get_stats);
-@@ -6528,7 +6543,7 @@ static void __net_exit netdev_exit(struct net *net)
+@@ -6526,7 +6541,7 @@ static void __net_exit netdev_exit(struct net *net)
  	kfree(net->dev_index_head);
  }
  
@@ -107813,7 +107708,7 @@ index 1c0d862..d4946e6 100644
  	.init = netdev_init,
  	.exit = netdev_exit,
  };
-@@ -6590,7 +6605,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
+@@ -6588,7 +6603,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
  	rtnl_unlock();
  }
  
@@ -108118,7 +108013,7 @@ index dd00b71..74d1779 100644
  	return error;
  }
 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
-index 80aeac9..b08d0a8 100644
+index 9dd65a9..faca75e 100644
 --- a/net/core/pktgen.c
 +++ b/net/core/pktgen.c
 @@ -3726,7 +3726,7 @@ static int __init pg_init(void)
@@ -108131,7 +108026,7 @@ index 80aeac9..b08d0a8 100644
  		return -ENODEV;
  
 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index 5b412f0..595dfcd 100644
+index e77373a..8b24693 100644
 --- a/net/core/rtnetlink.c
 +++ b/net/core/rtnetlink.c
 @@ -57,7 +57,7 @@ struct rtnl_link {
@@ -108169,7 +108064,7 @@ index 5b412f0..595dfcd 100644
  }
  EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
  
-@@ -1484,10 +1487,13 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+@@ -1482,10 +1485,13 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
  			goto errout;
  
  		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
@@ -108954,7 +108849,7 @@ index 9d74cc7..5a73694 100644
  		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
  		p->rate_tokens = 0;
 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
-index 16e25a4..cbb0cd5 100644
+index c20c356..0ee34b2 100644
 --- a/net/ipv4/ip_fragment.c
 +++ b/net/ipv4/ip_fragment.c
 @@ -315,7 +315,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
@@ -108966,7 +108861,7 @@ index 16e25a4..cbb0cd5 100644
  	qp->rid = end;
  
  	rc = qp->q.fragments && (end - start) > max;
-@@ -776,21 +776,21 @@ static struct ctl_table ip4_frags_ctl_table[] = {
+@@ -778,21 +778,21 @@ static struct ctl_table ip4_frags_ctl_table[] = {
  
  static int __net_init ip4_frags_ns_ctl_register(struct net *net)
  {
@@ -108993,7 +108888,7 @@ index 16e25a4..cbb0cd5 100644
  	if (hdr == NULL)
  		goto err_reg;
  
-@@ -798,8 +798,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
+@@ -800,8 +800,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
  	return 0;
  
  err_reg:

diff --git a/3.2.70/4425_grsec_remove_EI_PAX.patch b/3.2.71/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 3.2.70/4425_grsec_remove_EI_PAX.patch
rename to 3.2.71/4425_grsec_remove_EI_PAX.patch

diff --git a/3.2.70/4427_force_XATTR_PAX_tmpfs.patch b/3.2.71/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 3.2.70/4427_force_XATTR_PAX_tmpfs.patch
rename to 3.2.71/4427_force_XATTR_PAX_tmpfs.patch

diff --git a/3.2.70/4430_grsec-remove-localversion-grsec.patch b/3.2.71/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 3.2.70/4430_grsec-remove-localversion-grsec.patch
rename to 3.2.71/4430_grsec-remove-localversion-grsec.patch

diff --git a/3.2.70/4435_grsec-mute-warnings.patch b/3.2.71/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 3.2.70/4435_grsec-mute-warnings.patch
rename to 3.2.71/4435_grsec-mute-warnings.patch

diff --git a/3.2.70/4440_grsec-remove-protected-paths.patch b/3.2.71/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 3.2.70/4440_grsec-remove-protected-paths.patch
rename to 3.2.71/4440_grsec-remove-protected-paths.patch

diff --git a/3.2.70/4450_grsec-kconfig-default-gids.patch b/3.2.71/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 3.2.70/4450_grsec-kconfig-default-gids.patch
rename to 3.2.71/4450_grsec-kconfig-default-gids.patch

diff --git a/3.2.70/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.71/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 3.2.70/4465_selinux-avc_audit-log-curr_ip.patch
rename to 3.2.71/4465_selinux-avc_audit-log-curr_ip.patch

diff --git a/3.2.70/4470_disable-compat_vdso.patch b/3.2.71/4470_disable-compat_vdso.patch
similarity index 100%
rename from 3.2.70/4470_disable-compat_vdso.patch
rename to 3.2.71/4470_disable-compat_vdso.patch

diff --git a/3.2.70/4475_emutramp_default_on.patch b/3.2.71/4475_emutramp_default_on.patch
similarity index 100%
rename from 3.2.70/4475_emutramp_default_on.patch
rename to 3.2.71/4475_emutramp_default_on.patch

diff --git a/4.1.5/0000_README b/4.1.5/0000_README
index ec1dd3d..68f1c28 100644
--- a/4.1.5/0000_README
+++ b/4.1.5/0000_README
@@ -6,7 +6,7 @@ Patch:	1004_linux-4.1.5.patch
 From:	http://www.kernel.org
 Desc:	Linux 4.1.5
 
-Patch:	4420_grsecurity-3.1-4.1.5-201508102129.patch
+Patch:	4420_grsecurity-3.1-4.1.5-201508142233.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/4.1.5/4420_grsecurity-3.1-4.1.5-201508102129.patch b/4.1.5/4420_grsecurity-3.1-4.1.5-201508142233.patch
similarity index 99%
rename from 4.1.5/4420_grsecurity-3.1-4.1.5-201508102129.patch
rename to 4.1.5/4420_grsecurity-3.1-4.1.5-201508142233.patch
index c6671a0..5e56e38 100644
--- a/4.1.5/4420_grsecurity-3.1-4.1.5-201508102129.patch
+++ b/4.1.5/4420_grsecurity-3.1-4.1.5-201508142233.patch
@@ -4748,7 +4748,7 @@ index 7186382..0c145cf 100644
  }
  
 diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
-index e0e2358..a4ee460 100644
+index e0e2358..96c6791 100644
 --- a/arch/arm/net/bpf_jit_32.c
 +++ b/arch/arm/net/bpf_jit_32.c
 @@ -20,6 +20,7 @@
@@ -4759,7 +4759,7 @@ index e0e2358..a4ee460 100644
  
  #include "bpf_jit_32.h"
  
-@@ -72,34 +73,58 @@ struct jit_ctx {
+@@ -72,7 +73,11 @@ struct jit_ctx {
  #endif
  };
  
@@ -4769,62 +4769,9 @@ index e0e2358..a4ee460 100644
  int bpf_jit_enable __read_mostly;
 +#endif
  
--static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
-+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
-+		      unsigned int size)
-+{
-+	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
-+
-+	if (!ptr)
-+		return -EFAULT;
-+	memcpy(ret, ptr, size);
-+	return 0;
-+}
-+
-+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
- {
- 	u8 ret;
- 	int err;
- 
--	err = skb_copy_bits(skb, offset, &ret, 1);
-+	if (offset < 0)
-+		err = call_neg_helper(skb, offset, &ret, 1);
-+	else
-+		err = skb_copy_bits(skb, offset, &ret, 1);
- 
- 	return (u64)err << 32 | ret;
- }
- 
--static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
-+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
- {
- 	u16 ret;
- 	int err;
- 
--	err = skb_copy_bits(skb, offset, &ret, 2);
-+	if (offset < 0)
-+		err = call_neg_helper(skb, offset, &ret, 2);
-+	else
-+		err = skb_copy_bits(skb, offset, &ret, 2);
- 
- 	return (u64)err << 32 | ntohs(ret);
- }
- 
--static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
-+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
+ static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
  {
- 	u32 ret;
- 	int err;
- 
--	err = skb_copy_bits(skb, offset, &ret, 4);
-+	if (offset < 0)
-+		err = call_neg_helper(skb, offset, &ret, 4);
-+	else
-+		err = skb_copy_bits(skb, offset, &ret, 4);
- 
- 	return (u64)err << 32 | ntohl(ret);
- }
-@@ -179,8 +204,10 @@ static void jit_fill_hole(void *area, unsigned int size)
+@@ -179,8 +184,10 @@ static void jit_fill_hole(void *area, unsigned int size)
  {
  	u32 *ptr;
  	/* We are guaranteed to have aligned memory. */
@@ -4835,17 +4782,7 @@ index e0e2358..a4ee460 100644
  }
  
  static void build_prologue(struct jit_ctx *ctx)
-@@ -536,9 +563,6 @@ static int build_body(struct jit_ctx *ctx)
- 		case BPF_LD | BPF_B | BPF_ABS:
- 			load_order = 0;
- load:
--			/* the interpreter will deal with the negative K */
--			if ((int)k < 0)
--				return -ENOTSUPP;
- 			emit_mov_i(r_off, k, ctx);
- load_common:
- 			ctx->seen |= SEEN_DATA | SEEN_CALL;
-@@ -547,12 +571,24 @@ load_common:
+@@ -547,7 +554,7 @@ load_common:
  				emit(ARM_SUB_I(r_scratch, r_skb_hl,
  					       1 << load_order), ctx);
  				emit(ARM_CMP_R(r_scratch, r_off), ctx);
@@ -4854,24 +4791,7 @@ index e0e2358..a4ee460 100644
  			} else {
  				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
  				condt = ARM_COND_HI;
- 			}
- 
-+			/*
-+			 * test for negative offset, only if we are
-+			 * currently scheduled to take the fast
-+			 * path. this will update the flags so that
-+			 * the slowpath instruction are ignored if the
-+			 * offset is negative.
-+			 *
-+			 * for loard_order == 0 the HI condition will
-+			 * make loads at offset 0 take the slow path too.
-+			 */
-+			_emit(condt, ARM_CMP_I(r_off, 0), ctx);
-+
- 			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
- 			      ctx);
- 
-@@ -860,9 +896,11 @@ b_epilogue:
+@@ -860,9 +867,11 @@ b_epilogue:
  			off = offsetof(struct sk_buff, vlan_tci);
  			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
  			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
@@ -19213,6 +19133,23 @@ index 7d5a192..23ef1aa 100644
  #define __USER32_CS			(GDT_ENTRY_DEFAULT_USER32_CS*8 + 3)
  #define __USER_DS			(GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
  #define __USER32_DS			__USER_DS
+diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
+index 6fe6b18..9dfce4e 100644
+--- a/arch/x86/include/asm/sigcontext.h
++++ b/arch/x86/include/asm/sigcontext.h
+@@ -57,9 +57,9 @@ struct sigcontext {
+ 	unsigned long ip;
+ 	unsigned long flags;
+ 	unsigned short cs;
+-	unsigned short __pad2;	/* Was called gs, but was always zero. */
+-	unsigned short __pad1;	/* Was called fs, but was always zero. */
+-	unsigned short ss;
++	unsigned short gs;
++	unsigned short fs;
++	unsigned short __pad0;
+ 	unsigned long err;
+ 	unsigned long trapno;
+ 	unsigned long oldmask;
 diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
 index ba665eb..0f72938 100644
 --- a/arch/x86/include/asm/smap.h
@@ -20639,6 +20576,38 @@ index 960a8a9..404daf7 100644
  #define BIOS_END		0x00100000
  
  #define BIOS_ROM_BASE		0xffe00000
+diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
+index 16dc4e8..d8b9f908 100644
+--- a/arch/x86/include/uapi/asm/sigcontext.h
++++ b/arch/x86/include/uapi/asm/sigcontext.h
+@@ -177,24 +177,9 @@ struct sigcontext {
+ 	__u64 rip;
+ 	__u64 eflags;		/* RFLAGS */
+ 	__u16 cs;
+-
+-	/*
+-	 * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
+-	 * Linux saved and restored fs and gs in these slots.  This
+-	 * was counterproductive, as fsbase and gsbase were never
+-	 * saved, so arch_prctl was presumably unreliable.
+-	 *
+-	 * If these slots are ever needed for any other purpose, there
+-	 * is some risk that very old 64-bit binaries could get
+-	 * confused.  I doubt that many such binaries still work,
+-	 * though, since the same patch in 2.5.64 also removed the
+-	 * 64-bit set_thread_area syscall, so it appears that there is
+-	 * no TLS API that works in both pre- and post-2.5.64 kernels.
+-	 */
+-	__u16 __pad2;		/* Was gs. */
+-	__u16 __pad1;		/* Was fs. */
+-
+-	__u16 ss;
++	__u16 gs;
++	__u16 fs;
++	__u16 __pad0;
+ 	__u64 err;
+ 	__u64 trapno;
+ 	__u64 oldmask;
 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
 index 9bcd0b5..750f1b7 100644
 --- a/arch/x86/kernel/Makefile
@@ -27886,10 +27855,38 @@ index e4fcb87..9c06c55 100644
  		 * Up to this point, the boot CPU has been using .init.data
  		 * area.  Reload any changed state for the boot CPU.
 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
-index 1ea14fd..b16147f 100644
+index 1ea14fd..b551e66 100644
 --- a/arch/x86/kernel/signal.c
 +++ b/arch/x86/kernel/signal.c
-@@ -183,7 +183,7 @@ static unsigned long align_sigframe(unsigned long sp)
+@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+ 		COPY(r15);
+ #endif /* CONFIG_X86_64 */
+ 
++#ifdef CONFIG_X86_32
+ 		COPY_SEG_CPL3(cs);
+ 		COPY_SEG_CPL3(ss);
++#else /* !CONFIG_X86_32 */
++		/* Kernel saves and restores only the CS segment register on signals,
++		 * which is the bare minimum needed to allow mixed 32/64-bit code.
++		 * App's signal handler can save/restore other segments if needed. */
++		COPY_SEG_CPL3(cs);
++#endif /* CONFIG_X86_32 */
+ 
+ 		get_user_ex(tmpflags, &sc->flags);
+ 		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
+ #else /* !CONFIG_X86_32 */
+ 		put_user_ex(regs->flags, &sc->flags);
+ 		put_user_ex(regs->cs, &sc->cs);
+-		put_user_ex(0, &sc->__pad2);
+-		put_user_ex(0, &sc->__pad1);
+-		put_user_ex(regs->ss, &sc->ss);
++		put_user_ex(0, &sc->gs);
++		put_user_ex(0, &sc->fs);
+ #endif /* CONFIG_X86_32 */
+ 
+ 		put_user_ex(fpstate, &sc->fpstate);
+@@ -183,7 +189,7 @@ static unsigned long align_sigframe(unsigned long sp)
  	 * Align the stack pointer according to the i386 ABI,
  	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
  	 */
@@ -27898,7 +27895,7 @@ index 1ea14fd..b16147f 100644
  #else /* !CONFIG_X86_32 */
  	sp = round_down(sp, 16) - 8;
  #endif
-@@ -291,10 +291,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
+@@ -291,10 +297,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
  	}
  
  	if (current->mm->context.vdso)
@@ -27911,7 +27908,7 @@ index 1ea14fd..b16147f 100644
  	if (ksig->ka.sa.sa_flags & SA_RESTORER)
  		restorer = ksig->ka.sa.sa_restorer;
  
-@@ -308,7 +307,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
+@@ -308,7 +313,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
  	 * reasons and because gdb uses it as a signature to notice
  	 * signal handler stack frames.
  	 */
@@ -27920,7 +27917,7 @@ index 1ea14fd..b16147f 100644
  
  	if (err)
  		return -EFAULT;
-@@ -355,8 +354,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+@@ -355,8 +360,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
  		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
  
  		/* Set up to return from userspace.  */
@@ -27933,7 +27930,7 @@ index 1ea14fd..b16147f 100644
  		if (ksig->ka.sa.sa_flags & SA_RESTORER)
  			restorer = ksig->ka.sa.sa_restorer;
  		put_user_ex(restorer, &frame->pretcode);
-@@ -368,7 +369,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+@@ -368,7 +375,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
  		 * reasons and because gdb uses it as a signature to notice
  		 * signal handler stack frames.
  		 */
@@ -27942,7 +27939,29 @@ index 1ea14fd..b16147f 100644
  	} put_user_catch(err);
  	
  	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
-@@ -598,7 +599,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+@@ -450,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+ 
+ 	regs->sp = (unsigned long)frame;
+ 
+-	/*
+-	 * Set up the CS and SS registers to run signal handlers in
+-	 * 64-bit mode, even if the handler happens to be interrupting
+-	 * 32-bit or 16-bit code.
+-	 *
+-	 * SS is subtle.  In 64-bit mode, we don't need any particular
+-	 * SS descriptor, but we do need SS to be valid.  It's possible
+-	 * that the old SS is entirely bogus -- this can happen if the
+-	 * signal we're trying to deliver is #GP or #SS caused by a bad
+-	 * SS value.
+-	 */
++	/* Set up the CS register to run signal handlers in 64-bit mode,
++	   even if the handler happens to be interrupting 32-bit code. */
+ 	regs->cs = __USER_CS;
+-	regs->ss = __USER_DS;
+ 
+ 	return 0;
+ }
+@@ -598,7 +595,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
  {
  	int usig = ksig->sig;
  	sigset_t *set = sigmask_to_save();
@@ -27956,7 +27975,7 @@ index 1ea14fd..b16147f 100644
  
  	/* Set up the stack frame */
  	if (is_ia32_frame()) {
-@@ -609,7 +615,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+@@ -609,7 +611,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
  	} else if (is_x32_frame()) {
  		return x32_setup_rt_frame(ksig, cset, regs);
  	} else {
@@ -103342,10 +103361,52 @@ index 3aaea7f..e8a13d6 100644
  		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
  		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
 diff --git a/ipc/sem.c b/ipc/sem.c
-index d1a6edd..ef08b40 100644
+index d1a6edd..1a59db4 100644
 --- a/ipc/sem.c
 +++ b/ipc/sem.c
-@@ -1780,7 +1780,7 @@ static int get_queue_result(struct sem_queue *q)
+@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
+ }
+ 
+ /*
++ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
++ * are only control barriers.
++ * The code must pair with spin_unlock(&sem->lock) or
++ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
++ *
++ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
++ */
++#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
++
++/*
+  * Wait until all currently ongoing simple ops have completed.
+  * Caller must own sem_perm.lock.
+  * New simple ops cannot start, because simple ops first check
+@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
+ 		sem = sma->sem_base + i;
+ 		spin_unlock_wait(&sem->lock);
+ 	}
++	ipc_smp_acquire__after_spin_is_unlocked();
+ }
+ 
+ /*
+@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ 		/* Then check that the global lock is free */
+ 		if (!spin_is_locked(&sma->sem_perm.lock)) {
+ 			/*
+-			 * The ipc object lock check must be visible on all
+-			 * cores before rechecking the complex count.  Otherwise
+-			 * we can race with  another thread that does:
++			 * We need a memory barrier with acquire semantics,
++			 * otherwise we can race with another thread that does:
+ 			 *	complex_count++;
+ 			 *	spin_unlock(sem_perm.lock);
+ 			 */
+-			smp_rmb();
++			ipc_smp_acquire__after_spin_is_unlocked();
+ 
+ 			/*
+ 			 * Now repeat the test of complex_count:
+@@ -1780,7 +1790,7 @@ static int get_queue_result(struct sem_queue *q)
  }
  
  SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
@@ -103354,7 +103415,7 @@ index d1a6edd..ef08b40 100644
  {
  	int error = -EINVAL;
  	struct sem_array *sma;
-@@ -2015,7 +2015,7 @@ out_free:
+@@ -2015,7 +2025,7 @@ out_free:
  }
  
  SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
@@ -118435,6 +118496,19 @@ index 8d695b6..752d427a 100644
  
  	return nh->nh_saddr;
  }
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 09b62e1..2871350 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -2457,7 +2457,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
+ 		key = l->key + 1;
+ 		iter->pos++;
+ 
+-		if (pos-- <= 0)
++		if (--pos <= 0)
+ 			break;
+ 
+ 		l = NULL;
 diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
 index 5e346a0..d0a7c03 100644
 --- a/net/ipv4/inet_fragment.c
@@ -119331,7 +119405,7 @@ index c9ab964..607d9f7 100644
  			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
  				return 1;
 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index fc1c658..42a8d34 100644
+index fc1c658..4de4e33 100644
 --- a/net/ipv4/tcp_ipv4.c
 +++ b/net/ipv4/tcp_ipv4.c
 @@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
@@ -119345,6 +119419,15 @@ index fc1c658..42a8d34 100644
  #ifdef CONFIG_TCP_MD5SIG
  static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
  			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
+@@ -1348,7 +1352,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
+ 	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
+ 	if (req) {
+ 		nsk = tcp_check_req(sk, skb, req, false);
+-		if (!nsk)
++		if (!nsk || nsk == sk)
+ 			reqsk_put(req);
+ 		return nsk;
+ 	}
 @@ -1427,6 +1431,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
  	return 0;
  
@@ -120317,7 +120400,7 @@ index abcc79f..3b2d2d5 100644
  	struct ctl_table *ipv6_icmp_table;
  	int err;
 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
-index 3adffb3..a67e4d1 100644
+index 3adffb3..fe3cc78 100644
 --- a/net/ipv6/tcp_ipv6.c
 +++ b/net/ipv6/tcp_ipv6.c
 @@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
@@ -120331,6 +120414,15 @@ index 3adffb3..a67e4d1 100644
  static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
  {
  	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
+@@ -946,7 +950,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
+ 				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
+ 	if (req) {
+ 		nsk = tcp_check_req(sk, skb, req, false);
+-		if (!nsk)
++		if (!nsk || nsk == sk)
+ 			reqsk_put(req);
+ 		return nsk;
+ 	}
 @@ -1283,6 +1287,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
  	return 0;
  

