[gentoo-commits] linux-patches r2333 - in genpatches-2.6/trunk: 3.0 3.4
From: Tom Wijsman (tomwij) @ 2013-04-08  7:17 UTC
  To: gentoo-commits

Author: tomwij
Date: 2013-04-08 07:17:58 +0000 (Mon, 08 Apr 2013)
New Revision: 2333

Added:
   genpatches-2.6/trunk/3.0/1071_linux-3.0.72.patch
   genpatches-2.6/trunk/3.0/2700_ThinkPad-30-brightness-control-fix.patch
   genpatches-2.6/trunk/3.4/1038_linux-3.4.39.patch
   genpatches-2.6/trunk/3.4/2700_ThinkPad-30-brightness-control-fix.patch
Modified:
   genpatches-2.6/trunk/3.0/0000_README
   genpatches-2.6/trunk/3.4/0000_README
Log:
Linux patches 3.0.72 and 3.4.39. Added a patch to fix brightness control on some ThinkPad laptops; see bug #463304.
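
For context on the ThinkPad fix: the patch title ("ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads") indicates it stops the kernel from answering the firmware's "Windows 2012" _OSI query on affected models, so the firmware falls back to the older backlight interface that actually works. Below is a minimal sketch of that kind of DMI-based quirk; the function names, model strings, and init hook are illustrative assumptions, not the literal contents of 2700_ThinkPad-30-brightness-control-fix.patch (which is included verbatim further down in this commit).

/*
 * Illustrative sketch only -- not the exact patch. Shows the usual
 * mechanism: a DMI quirk that withdraws the "Windows 2012" _OSI
 * string on matching machines via acpi_osi_setup().
 */
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/printk.h>

static int __init disable_acpi_osi_win8(const struct dmi_system_id *d)
{
	pr_notice("%s detected - disabling Windows 8 compatibility\n",
		  d->ident);
	acpi_osi_setup("!Windows 2012");
	return 0;
}

static struct dmi_system_id thinkpad_osi_dmi_table[] __initdata = {
	{
		.callback = disable_acpi_osi_win8,
		.ident = "Lenovo ThinkPad (example model)",  /* hypothetical match */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
		},
	},
	{ }
};

static int __init thinkpad_osi_quirk_init(void)
{
	/* In the real kernel this check runs early in ACPI setup; an
	 * initcall is used here only to keep the sketch self-contained. */
	dmi_check_system(thinkpad_osi_dmi_table);
	return 0;
}

The same effect can be tried on an unpatched system by booting with acpi_osi="!Windows 2012" on the kernel command line.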

Modified: genpatches-2.6/trunk/3.0/0000_README
===================================================================
--- genpatches-2.6/trunk/3.0/0000_README	2013-04-05 21:11:21 UTC (rev 2332)
+++ genpatches-2.6/trunk/3.0/0000_README	2013-04-08 07:17:58 UTC (rev 2333)
@@ -319,6 +319,10 @@
 From:   http://www.kernel.org
 Desc:   Linux 3.0.71
 
+Patch:  1071_linux-3.0.72.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.0.72
+
 Patch:  1800_fix-zcache-build.patch
 From:   http://bugs.gentoo.org/show_bug.cgi?id=376325
 Desc:   Fix zcache build error
@@ -355,6 +359,10 @@
 From:   http://bugs.gentoo.org/show_bug.cgi?id=318567
 Desc:   ALPS Touchpad - dump raw packet data
 
+Patch:  2700_ThinkPad-30-brightness-control-fix.patch
+From:   Seth Forshee <seth.forshee@canonical.com>
+Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads
+
 Patch:  4200_fbcondecor-0.9.6.patch
 From:   http://dev.gentoo.org/~spock
 Desc:   Bootsplash successor by Michal Januszewski ported by Alexxy

Added: genpatches-2.6/trunk/3.0/1071_linux-3.0.72.patch
===================================================================
--- genpatches-2.6/trunk/3.0/1071_linux-3.0.72.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.0/1071_linux-3.0.72.patch	2013-04-08 07:17:58 UTC (rev 2333)
@@ -0,0 +1,2674 @@
+diff --git a/Makefile b/Makefile
+index fbba8bc..533c56b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 0
+-SUBLEVEL = 71
++SUBLEVEL = 72
+ EXTRAVERSION =
+ NAME = Sneaky Weasel
+ 
+diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
+index 43ba0fb..559ee24 100644
+--- a/arch/arm/include/asm/signal.h
++++ b/arch/arm/include/asm/signal.h
+@@ -127,6 +127,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/avr32/include/asm/signal.h b/arch/avr32/include/asm/signal.h
+index 8790dfc..e6952a0 100644
+--- a/arch/avr32/include/asm/signal.h
++++ b/arch/avr32/include/asm/signal.h
+@@ -128,6 +128,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/cris/include/asm/signal.h b/arch/cris/include/asm/signal.h
+index ea6af9a..057fea2 100644
+--- a/arch/cris/include/asm/signal.h
++++ b/arch/cris/include/asm/signal.h
+@@ -122,6 +122,7 @@ struct sigaction {
+ 	void (*sa_restorer)(void);
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h
+index fd8b66e..8695707 100644
+--- a/arch/h8300/include/asm/signal.h
++++ b/arch/h8300/include/asm/signal.h
+@@ -121,6 +121,7 @@ struct sigaction {
+ 	void (*sa_restorer)(void);
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
+index 8213efe..a874213 100644
+--- a/arch/ia64/kvm/kvm-ia64.c
++++ b/arch/ia64/kvm/kvm-ia64.c
+@@ -1168,6 +1168,11 @@ out:
+ 
+ #define PALE_RESET_ENTRY    0x80000000ffffffb0UL
+ 
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
++{
++	return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL);
++}
++
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+ 	struct kvm_vcpu *v;
+diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
+index b2eeb0d..802d561 100644
+--- a/arch/m32r/include/asm/signal.h
++++ b/arch/m32r/include/asm/signal.h
+@@ -123,6 +123,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
+index 0b6b0e5..ee80858 100644
+--- a/arch/m68k/include/asm/signal.h
++++ b/arch/m68k/include/asm/signal.h
+@@ -119,6 +119,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
+index 1865d72..eecaa76 100644
+--- a/arch/mn10300/include/asm/signal.h
++++ b/arch/mn10300/include/asm/signal.h
+@@ -131,6 +131,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
+index 3eb13be..ec63a0a 100644
+--- a/arch/powerpc/include/asm/signal.h
++++ b/arch/powerpc/include/asm/signal.h
+@@ -109,6 +109,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h
+index cdf5cb2..c872626 100644
+--- a/arch/s390/include/asm/signal.h
++++ b/arch/s390/include/asm/signal.h
+@@ -131,6 +131,7 @@ struct sigaction {
+         void (*sa_restorer)(void);
+         sigset_t sa_mask;               /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+         struct sigaction sa;
+diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
+index e49b828..4929431 100644
+--- a/arch/sparc/include/asm/signal.h
++++ b/arch/sparc/include/asm/signal.h
+@@ -191,6 +191,7 @@ struct __old_sigaction {
+ 	unsigned long		sa_flags;
+ 	void			(*sa_restorer)(void);  /* not used by Linux/SPARC yet */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ typedef struct sigaltstack {
+ 	void			__user *ss_sp;
+diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
+index 598457c..6cbc795 100644
+--- a/arch/x86/include/asm/signal.h
++++ b/arch/x86/include/asm/signal.h
+@@ -125,6 +125,8 @@ typedef unsigned long sigset_t;
+ extern void do_notify_resume(struct pt_regs *, void *, __u32);
+ # endif /* __KERNEL__ */
+ 
++#define __ARCH_HAS_SA_RESTORER
++
+ #ifdef __i386__
+ # ifdef __KERNEL__
+ struct old_sigaction {
+diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
+index bfd75ff..d9302b7 100644
+--- a/arch/x86/kernel/amd_iommu.c
++++ b/arch/x86/kernel/amd_iommu.c
+@@ -53,6 +53,8 @@ static struct protection_domain *pt_domain;
+ 
+ static struct iommu_ops amd_iommu_ops;
+ 
++static struct dma_map_ops amd_iommu_dma_ops;
++
+ /*
+  * general struct to manage commands send to an IOMMU
+  */
+@@ -1778,18 +1780,20 @@ static int device_change_notifier(struct notifier_block *nb,
+ 
+ 		domain = domain_for_device(dev);
+ 
+-		/* allocate a protection domain if a device is added */
+ 		dma_domain = find_protection_domain(devid);
+-		if (dma_domain)
+-			goto out;
+-		dma_domain = dma_ops_domain_alloc();
+-		if (!dma_domain)
+-			goto out;
+-		dma_domain->target_dev = devid;
++		if (!dma_domain) {
++			/* allocate a protection domain if a device is added */
++			dma_domain = dma_ops_domain_alloc();
++			if (!dma_domain)
++				goto out;
++			dma_domain->target_dev = devid;
++
++			spin_lock_irqsave(&iommu_pd_list_lock, flags);
++			list_add_tail(&dma_domain->list, &iommu_pd_list);
++			spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
++		}
+ 
+-		spin_lock_irqsave(&iommu_pd_list_lock, flags);
+-		list_add_tail(&dma_domain->list, &iommu_pd_list);
+-		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
++		dev->archdata.dma_ops = &amd_iommu_dma_ops;
+ 
+ 		break;
+ 	case BUS_NOTIFY_DEL_DEVICE:
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index efad723..43e04d1 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
+ 		return HRTIMER_NORESTART;
+ }
+ 
+-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
++static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
+ {
++	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+ 	struct kvm_timer *pt = &ps->pit_timer;
+ 	s64 interval;
+ 
++	if (!irqchip_in_kernel(kvm))
++		return;
++
+ 	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
+ 
+ 	pr_debug("create pit timer, interval is %llu nsec\n", interval);
+@@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
+         /* FIXME: enhance mode 4 precision */
+ 	case 4:
+ 		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
+-			create_pit_timer(ps, val, 0);
++			create_pit_timer(kvm, val, 0);
+ 		}
+ 		break;
+ 	case 2:
+ 	case 3:
+ 		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
+-			create_pit_timer(ps, val, 1);
++			create_pit_timer(kvm, val, 1);
+ 		}
+ 		break;
+ 	default:
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index fbb0936..e329dc5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -575,6 +575,9 @@ static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+ {
+ 	struct kvm_cpuid_entry2 *best;
+ 
++	if (!cpu_has_xsave)
++		return 0;
++
+ 	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ 	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+ }
+@@ -3410,6 +3413,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
+ 		r = -EEXIST;
+ 		if (kvm->arch.vpic)
+ 			goto create_irqchip_unlock;
++		r = -EINVAL;
++		if (atomic_read(&kvm->online_vcpus))
++			goto create_irqchip_unlock;
+ 		r = -ENOMEM;
+ 		vpic = kvm_create_pic(kvm);
+ 		if (vpic) {
+@@ -5851,6 +5857,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ 	int pending_vec, max_bits, idx;
+ 	struct desc_ptr dt;
+ 
++	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
++		return -EINVAL;
++
+ 	dt.size = sregs->idt.limit;
+ 	dt.address = sregs->idt.base;
+ 	kvm_x86_ops->set_idt(vcpu, &dt);
+@@ -6116,12 +6125,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+ 	if (r == 0)
+ 		r = kvm_mmu_setup(vcpu);
+ 	vcpu_put(vcpu);
+-	if (r < 0)
+-		goto free_vcpu;
+ 
+-	return 0;
+-free_vcpu:
+-	kvm_x86_ops->vcpu_free(vcpu);
+ 	return r;
+ }
+ 
+@@ -6194,6 +6198,11 @@ void kvm_arch_check_processor_compat(void *rtn)
+ 	kvm_x86_ops->check_processor_compatibility(rtn);
+ }
+ 
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
++{
++	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
++}
++
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+ 	struct page *page;
+diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
+index 633ba73..75edf8a 100644
+--- a/arch/xtensa/include/asm/signal.h
++++ b/arch/xtensa/include/asm/signal.h
+@@ -133,6 +133,7 @@ struct sigaction {
+ 	void (*sa_restorer)(void);
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
+index 887f68f..db30542 100644
+--- a/drivers/block/aoe/aoecmd.c
++++ b/drivers/block/aoe/aoecmd.c
+@@ -30,8 +30,9 @@ new_skb(ulong len)
+ {
+ 	struct sk_buff *skb;
+ 
+-	skb = alloc_skb(len, GFP_ATOMIC);
++	skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
+ 	if (skb) {
++		skb_reserve(skb, MAX_HEADER);
+ 		skb_reset_mac_header(skb);
+ 		skb_reset_network_header(skb);
+ 		skb->protocol = __constant_htons(ETH_P_AOE);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 258cd0a..38f8da9 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -928,6 +928,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ 	wake_up_process(lo->lo_thread);
+ 	if (max_part > 0)
+ 		ioctl_by_bdev(bdev, BLKRRPART, 0);
++
++	/* Grab the block_device to prevent its destruction after we
++	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
++	 */
++	bdgrab(bdev);
+ 	return 0;
+ 
+ out_clr:
+@@ -1024,8 +1029,10 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
+ 	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
+ 	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
+ 	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+-	if (bdev)
++	if (bdev) {
++		bdput(bdev);
+ 		invalidate_bdev(bdev);
++	}
+ 	set_capacity(lo->lo_disk, 0);
+ 	loop_sysfs_exit(lo);
+ 	if (bdev) {
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 54139d0..92bdc40 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -650,13 +650,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+ 		bio->bi_end_io  = end_block_io_op;
+ 	}
+ 
+-	/*
+-	 * We set it one so that the last submit_bio does not have to call
+-	 * atomic_inc.
+-	 */
+ 	atomic_set(&pending_req->pendcnt, nbio);
+-
+-	/* Get a reference count for the disk queue and start sending I/O */
+ 	blk_start_plug(&plug);
+ 
+ 	for (i = 0; i < nbio; i++)
+@@ -684,6 +678,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+  fail_put_bio:
+ 	for (i = 0; i < nbio; i++)
+ 		bio_put(biolist[i]);
++	atomic_set(&pending_req->pendcnt, 1);
+ 	__end_block_io_op(pending_req, -EINVAL);
+ 	msleep(1); /* back off a bit */
+ 	return -EIO;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 85e1ad6..e866ed9 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -71,8 +71,10 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x03F0, 0x311D) },
+ 
+ 	/* Atheros AR3012 with sflash firmware*/
++	{ USB_DEVICE(0x0CF3, 0x0036) },
+ 	{ USB_DEVICE(0x0CF3, 0x3004) },
+ 	{ USB_DEVICE(0x0CF3, 0x311D) },
++	{ USB_DEVICE(0x0CF3, 0x817a) },
+ 	{ USB_DEVICE(0x13d3, 0x3375) },
+ 	{ USB_DEVICE(0x04CA, 0x3005) },
+ 
+@@ -93,8 +95,10 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
+ static struct usb_device_id ath3k_blist_tbl[] = {
+ 
+ 	/* Atheros AR3012 with sflash firmware*/
++	{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 4b764f8..a77e0d1 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -136,8 +136,10 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+ 
+ 	/* Atheros 3012 with sflash firmware */
++	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ 
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 6871ed3..c5cce9c 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -119,6 +119,8 @@ struct efivar_attribute {
+ 	ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
+ };
+ 
++static struct efivars __efivars;
++static struct efivar_operations ops;
+ 
+ #define EFIVAR_ATTR(_name, _mode, _show, _store) \
+ struct efivar_attribute efivar_attr_##_name = { \
+@@ -730,6 +732,53 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ 	return count;
+ }
+ 
++static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
++{
++	struct efivar_entry *entry, *n;
++	struct efivars *efivars = &__efivars;
++	unsigned long strsize1, strsize2;
++	bool found = false;
++
++	strsize1 = utf16_strsize(variable_name, 1024);
++	list_for_each_entry_safe(entry, n, &efivars->list, list) {
++		strsize2 = utf16_strsize(entry->var.VariableName, 1024);
++		if (strsize1 == strsize2 &&
++			!memcmp(variable_name, &(entry->var.VariableName),
++				strsize2) &&
++			!efi_guidcmp(entry->var.VendorGuid,
++				*vendor)) {
++			found = true;
++			break;
++		}
++	}
++	return found;
++}
++
++/*
++ * Returns the size of variable_name, in bytes, including the
++ * terminating NULL character, or variable_name_size if no NULL
++ * character is found among the first variable_name_size bytes.
++ */
++static unsigned long var_name_strnsize(efi_char16_t *variable_name,
++				       unsigned long variable_name_size)
++{
++	unsigned long len;
++	efi_char16_t c;
++
++	/*
++	 * The variable name is, by definition, a NULL-terminated
++	 * string, so make absolutely sure that variable_name_size is
++	 * the value we expect it to be. If not, return the real size.
++	 */
++	for (len = 2; len <= variable_name_size; len += sizeof(c)) {
++		c = variable_name[(len / sizeof(c)) - 1];
++		if (!c)
++			break;
++	}
++
++	return min(len, variable_name_size);
++}
++
+ /*
+  * Let's not leave out systab information that snuck into
+  * the efivars driver
+@@ -917,6 +966,28 @@ void unregister_efivars(struct efivars *efivars)
+ }
+ EXPORT_SYMBOL_GPL(unregister_efivars);
+ 
++/*
++ * Print a warning when duplicate EFI variables are encountered and
++ * disable the sysfs workqueue since the firmware is buggy.
++ */
++static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
++			     unsigned long len16)
++{
++	size_t i, len8 = len16 / sizeof(efi_char16_t);
++	char *s8;
++
++	s8 = kzalloc(len8, GFP_KERNEL);
++	if (!s8)
++		return;
++
++	for (i = 0; i < len8; i++)
++		s8[i] = s16[i];
++
++	printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
++	       s8, vendor_guid);
++	kfree(s8);
++}
++
+ int register_efivars(struct efivars *efivars,
+ 		     const struct efivar_operations *ops,
+ 		     struct kobject *parent_kobj)
+@@ -957,6 +1028,24 @@ int register_efivars(struct efivars *efivars,
+ 						&vendor_guid);
+ 		switch (status) {
+ 		case EFI_SUCCESS:
++			variable_name_size = var_name_strnsize(variable_name,
++							       variable_name_size);
++
++			/*
++			 * Some firmware implementations return the
++			 * same variable name on multiple calls to
++			 * get_next_variable(). Terminate the loop
++			 * immediately as there is no guarantee that
++			 * we'll ever see a different variable name,
++			 * and may end up looping here forever.
++			 */
++			if (variable_is_present(variable_name, &vendor_guid)) {
++				dup_variable_bug(variable_name, &vendor_guid,
++						 variable_name_size);
++				status = EFI_NOT_FOUND;
++				break;
++			}
++
+ 			efivar_create_sysfs_entry(efivars,
+ 						  variable_name_size,
+ 						  variable_name,
+@@ -983,9 +1072,6 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(register_efivars);
+ 
+-static struct efivars __efivars;
+-static struct efivar_operations ops;
+-
+ /*
+  * For now we register the efi subsystem with the firmware subsystem
+  * and the vars subsystem with the efi subsystem.  In the future, it
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 11ecb0c..2e0c24d 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -6507,8 +6507,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct intel_framebuffer *intel_fb;
+-	struct drm_i915_gem_object *obj;
++	struct drm_framebuffer *old_fb = crtc->fb;
++	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	struct intel_unpin_work *work;
+ 	unsigned long flags;
+@@ -6520,15 +6520,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ 
+ 	work->event = event;
+ 	work->dev = crtc->dev;
+-	intel_fb = to_intel_framebuffer(crtc->fb);
+-	work->old_fb_obj = intel_fb->obj;
++	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+ 	INIT_WORK(&work->work, intel_unpin_work_fn);
+ 
++	ret = drm_vblank_get(dev, intel_crtc->pipe);
++	if (ret)
++		goto free_work;
++
+ 	/* We borrow the event spin lock for protecting unpin_work */
+ 	spin_lock_irqsave(&dev->event_lock, flags);
+ 	if (intel_crtc->unpin_work) {
+ 		spin_unlock_irqrestore(&dev->event_lock, flags);
+ 		kfree(work);
++		drm_vblank_put(dev, intel_crtc->pipe);
+ 
+ 		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ 		return -EBUSY;
+@@ -6536,9 +6540,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ 	intel_crtc->unpin_work = work;
+ 	spin_unlock_irqrestore(&dev->event_lock, flags);
+ 
+-	intel_fb = to_intel_framebuffer(fb);
+-	obj = intel_fb->obj;
+-
+ 	mutex_lock(&dev->struct_mutex);
+ 
+ 	/* Reference the objects for the scheduled work. */
+@@ -6547,10 +6548,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ 
+ 	crtc->fb = fb;
+ 
+-	ret = drm_vblank_get(dev, intel_crtc->pipe);
+-	if (ret)
+-		goto cleanup_objs;
+-
+ 	work->pending_flip_obj = obj;
+ 
+ 	work->enable_stall_check = true;
+@@ -6572,7 +6569,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ 
+ cleanup_pending:
+ 	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+-cleanup_objs:
++	crtc->fb = old_fb;
+ 	drm_gem_object_unreference(&work->old_fb_obj->base);
+ 	drm_gem_object_unreference(&obj->base);
+ 	mutex_unlock(&dev->struct_mutex);
+@@ -6581,6 +6578,8 @@ cleanup_objs:
+ 	intel_crtc->unpin_work = NULL;
+ 	spin_unlock_irqrestore(&dev->event_lock, flags);
+ 
++	drm_vblank_put(dev, intel_crtc->pipe);
++free_work:
+ 	kfree(work);
+ 
+ 	return ret;
+diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h
+index 490d3b3..4093097 100644
+--- a/drivers/net/atl1e/atl1e.h
++++ b/drivers/net/atl1e/atl1e.h
+@@ -439,7 +439,6 @@ struct atl1e_adapter {
+ 	struct atl1e_hw        hw;
+ 	struct atl1e_hw_stats  hw_stats;
+ 
+-	bool have_msi;
+ 	u32 wol;
+ 	u16 link_speed;
+ 	u16 link_duplex;
+diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
+index 86a9122..b0132bb 100644
+--- a/drivers/net/atl1e/atl1e_main.c
++++ b/drivers/net/atl1e/atl1e_main.c
+@@ -1848,37 +1848,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
+ 	struct net_device *netdev = adapter->netdev;
+ 
+ 	free_irq(adapter->pdev->irq, netdev);
+-
+-	if (adapter->have_msi)
+-		pci_disable_msi(adapter->pdev);
+ }
+ 
+ static int atl1e_request_irq(struct atl1e_adapter *adapter)
+ {
+ 	struct pci_dev    *pdev   = adapter->pdev;
+ 	struct net_device *netdev = adapter->netdev;
+-	int flags = 0;
+ 	int err = 0;
+ 
+-	adapter->have_msi = true;
+-	err = pci_enable_msi(adapter->pdev);
+-	if (err) {
+-		netdev_dbg(adapter->netdev,
+-			   "Unable to allocate MSI interrupt Error: %d\n", err);
+-		adapter->have_msi = false;
+-	} else
+-		netdev->irq = pdev->irq;
+-
+-
+-	if (!adapter->have_msi)
+-		flags |= IRQF_SHARED;
+-	err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
+-			netdev->name, netdev);
++	err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED,
++			  netdev->name, netdev);
+ 	if (err) {
+ 		netdev_dbg(adapter->netdev,
+ 			   "Unable to allocate interrupt Error: %d\n", err);
+-		if (adapter->have_msi)
+-			pci_disable_msi(adapter->pdev);
+ 		return err;
+ 	}
+ 	netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 6f8b268..dd433a7 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2017,12 +2017,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+ 		return -EINVAL;
+ 	}
+ 
++	write_unlock_bh(&bond->lock);
+ 	/* unregister rx_handler early so bond_handle_frame wouldn't be called
+ 	 * for this slave anymore.
+ 	 */
+ 	netdev_rx_handler_unregister(slave_dev);
+-	write_unlock_bh(&bond->lock);
+-	synchronize_net();
+ 	write_lock_bh(&bond->lock);
+ 
+ 	if (!bond->params.fail_over_mac) {
+diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
+index e5efe3a..e5d0eed 100644
+--- a/drivers/net/davinci_emac.c
++++ b/drivers/net/davinci_emac.c
+@@ -1049,7 +1049,7 @@ static void emac_tx_handler(void *token, int len, int status)
+ 	struct net_device	*ndev = skb->dev;
+ 
+ 	if (unlikely(netif_queue_stopped(ndev)))
+-		netif_start_queue(ndev);
++		netif_wake_queue(ndev);
+ 	ndev->stats.tx_packets++;
+ 	ndev->stats.tx_bytes += len;
+ 	dev_kfree_skb_any(skb);
+diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
+index bcd9ba6..99593f0 100644
+--- a/drivers/net/ks8851.c
++++ b/drivers/net/ks8851.c
+@@ -489,7 +489,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ 	for (; rxfc != 0; rxfc--) {
+ 		rxh = ks8851_rdreg32(ks, KS_RXFHSR);
+ 		rxstat = rxh & 0xffff;
+-		rxlen = rxh >> 16;
++		rxlen = (rxh >> 16) & 0xfff;
+ 
+ 		netif_dbg(ks, rx_status, ks->netdev,
+ 			  "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 6696e56..023b57e 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -552,6 +552,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
+ 	if (unlikely(len < ETH_HLEN))
+ 		goto err;
+ 
++	err = -EMSGSIZE;
++	if (unlikely(count > UIO_MAXIOV))
++		goto err;
++
+ 	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, len, vnet_hdr.hdr_len,
+ 				noblock, &err);
+ 	if (!skb)
+diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
+index 236d00e..0055daf 100644
+--- a/drivers/net/pch_gbe/pch_gbe_main.c
++++ b/drivers/net/pch_gbe/pch_gbe_main.c
+@@ -1509,9 +1509,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
+ 			skb_put(skb, length);
+ 			skb->protocol = eth_type_trans(skb, netdev);
+ 			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
+-				skb->ip_summed = CHECKSUM_NONE;
+-			else
+ 				skb->ip_summed = CHECKSUM_UNNECESSARY;
++			else
++				skb->ip_summed = CHECKSUM_NONE;
+ 
+ 			napi_gro_receive(&adapter->napi, skb);
+ 			(*work_done)++;
+diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
+index 5f93956..7f7aae2 100644
+--- a/drivers/net/sky2.c
++++ b/drivers/net/sky2.c
+@@ -992,7 +992,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
+ 		sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
+ 		sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
+ 
+-		tp = space - 2048/8;
++		tp = space - 8192/8;
+ 		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
+ 		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
+ 	} else {
+diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
+index a79a166..8cc863e 100644
+--- a/drivers/net/sky2.h
++++ b/drivers/net/sky2.h
+@@ -2064,7 +2064,7 @@ enum {
+ 	GM_IS_RX_FF_OR	= 1<<1,	/* Receive FIFO Overrun */
+ 	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */
+ 
+-#define GMAC_DEF_MSK     GM_IS_TX_FF_UR
++#define GMAC_DEF_MSK     (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
+ };
+ 
+ /*	GMAC_LINK_CTRL	16 bit	GMAC Link Control Reg (YUKON only) */
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index de0de3e..2f4775f 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -719,8 +719,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
+ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ 	struct usbnet *dev = netdev_priv(netdev);
++	int ret;
++
++	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
++		return -EINVAL;
+ 
+-	int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
++	ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+ 	check_warn_return(ret, "Failed to set mac rx frame length");
+ 
+ 	return usbnet_change_mtu(netdev, new_mtu);
+@@ -964,7 +968,7 @@ static int smsc75xx_reset(struct usbnet *dev)
+ 
+ 	netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf);
+ 
+-	ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
++	ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+ 	check_warn_return(ret, "Failed to set max rx frame length");
+ 
+ 	ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
+@@ -1108,8 +1112,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 			else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
+ 				dev->net->stats.rx_frame_errors++;
+ 		} else {
+-			/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
+-			if (unlikely(size > (ETH_FRAME_LEN + 12))) {
++			/* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
++			if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
+ 				netif_dbg(dev, rx_err, dev->net,
+ 					"size err rx_cmd_a=0x%08x", rx_cmd_a);
+ 				return 0;
+diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
+index 47d44bc..5deeb14 100644
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -1390,8 +1390,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 	struct b43_dmaring *ring;
+ 	struct b43_dmadesc_generic *desc;
+ 	struct b43_dmadesc_meta *meta;
++	static const struct b43_txstatus fake; /* filled with 0 */
++	const struct b43_txstatus *txstat;
+ 	int slot, firstused;
+ 	bool frame_succeed;
++	int skip;
++	static u8 err_out1, err_out2;
+ 
+ 	ring = parse_cookie(dev, status->cookie, &slot);
+ 	if (unlikely(!ring))
+@@ -1404,13 +1408,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 	firstused = ring->current_slot - ring->used_slots + 1;
+ 	if (firstused < 0)
+ 		firstused = ring->nr_slots + firstused;
++
++	skip = 0;
+ 	if (unlikely(slot != firstused)) {
+ 		/* This possibly is a firmware bug and will result in
+-		 * malfunction, memory leaks and/or stall of DMA functionality. */
+-		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
+-		       "Expected %d, but got %d\n",
+-		       ring->index, firstused, slot);
+-		return;
++		 * malfunction, memory leaks and/or stall of DMA functionality.
++		 */
++		if (slot == next_slot(ring, next_slot(ring, firstused))) {
++			/* If a single header/data pair was missed, skip over
++			 * the first two slots in an attempt to recover.
++			 */
++			slot = firstused;
++			skip = 2;
++			if (!err_out1) {
++				/* Report the error once. */
++				b43dbg(dev->wl,
++				       "Skip on DMA ring %d slot %d.\n",
++				       ring->index, slot);
++				err_out1 = 1;
++			}
++		} else {
++			/* More than a single header/data pair were missed.
++			 * Report this error once.
++			 */
++			if (!err_out2)
++				b43dbg(dev->wl,
++				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
++				       ring->index, firstused, slot);
++			err_out2 = 1;
++			return;
++		}
+ 	}
+ 
+ 	ops = ring->ops;
+@@ -1424,11 +1451,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 			       slot, firstused, ring->index);
+ 			break;
+ 		}
++
+ 		if (meta->skb) {
+ 			struct b43_private_tx_info *priv_info =
+-				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
++			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+ 
+-			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
++			unmap_descbuffer(ring, meta->dmaaddr,
++					 meta->skb->len, 1);
+ 			kfree(priv_info->bouncebuffer);
+ 			priv_info->bouncebuffer = NULL;
+ 		} else {
+@@ -1440,8 +1469,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 			struct ieee80211_tx_info *info;
+ 
+ 			if (unlikely(!meta->skb)) {
+-				/* This is a scatter-gather fragment of a frame, so
+-				 * the skb pointer must not be NULL. */
++				/* This is a scatter-gather fragment of a frame,
++				 * so the skb pointer must not be NULL.
++				 */
+ 				b43dbg(dev->wl, "TX status unexpected NULL skb "
+ 				       "at slot %d (first=%d) on ring %d\n",
+ 				       slot, firstused, ring->index);
+@@ -1452,9 +1482,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 
+ 			/*
+ 			 * Call back to inform the ieee80211 subsystem about
+-			 * the status of the transmission.
++			 * the status of the transmission. When skipping over
++			 * a missed TX status report, use a status structure
++			 * filled with zeros to indicate that the frame was not
++			 * sent (frame_count 0) and not acknowledged
+ 			 */
+-			frame_succeed = b43_fill_txstatus_report(dev, info, status);
++			if (unlikely(skip))
++				txstat = &fake;
++			else
++				txstat = status;
++
++			frame_succeed = b43_fill_txstatus_report(dev, info,
++								 txstat);
+ #ifdef CONFIG_B43_DEBUG
+ 			if (frame_succeed)
+ 				ring->nr_succeed_tx_packets++;
+@@ -1482,12 +1521,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 		/* Everything unmapped and free'd. So it's not used anymore. */
+ 		ring->used_slots--;
+ 
+-		if (meta->is_last_fragment) {
++		if (meta->is_last_fragment && !skip) {
+ 			/* This is the last scatter-gather
+ 			 * fragment of the frame. We are done. */
+ 			break;
+ 		}
+ 		slot = next_slot(ring, slot);
++		if (skip > 0)
++			--skip;
+ 	}
+ 	if (ring->stopped) {
+ 		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
+diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
+index 3f1559e..45dfc2b 100644
+--- a/drivers/net/wireless/mwifiex/init.c
++++ b/drivers/net/wireless/mwifiex/init.c
+@@ -561,6 +561,14 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
+ 		return ret;
+ 	}
+ 
++	/* cancel current command */
++	if (adapter->curr_cmd) {
++		dev_warn(adapter->dev, "curr_cmd is still in processing\n");
++		del_timer(&adapter->cmd_timer);
++		mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
++		adapter->curr_cmd = NULL;
++	}
++
+ 	/* shut down mwifiex */
+ 	dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
+ 
+diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
+index c72128f..42cad5c 100644
+--- a/drivers/staging/comedi/drivers/s626.c
++++ b/drivers/staging/comedi/drivers/s626.c
+@@ -1882,7 +1882,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ 	case TRIG_NONE:
+ 		/*  continous acquisition */
+ 		devpriv->ai_continous = 1;
+-		devpriv->ai_sample_count = 0;
++		devpriv->ai_sample_count = 1;
+ 		break;
+ 	}
+ 
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index 66825c9..ab23201 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -92,7 +92,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll)
+ static struct vcs_poll_data *
+ vcs_poll_data_get(struct file *file)
+ {
+-	struct vcs_poll_data *poll = file->private_data;
++	struct vcs_poll_data *poll = file->private_data, *kill = NULL;
+ 
+ 	if (poll)
+ 		return poll;
+@@ -121,10 +121,12 @@ vcs_poll_data_get(struct file *file)
+ 		file->private_data = poll;
+ 	} else {
+ 		/* someone else raced ahead of us */
+-		vcs_poll_data_free(poll);
++		kill = poll;
+ 		poll = file->private_data;
+ 	}
+ 	spin_unlock(&file->f_lock);
++	if (kill)
++		vcs_poll_data_free(kill);
+ 
+ 	return poll;
+ }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index cb436fe1..151ca5e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1960,8 +1960,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		if (event_trb != ep_ring->dequeue &&
+ 				event_trb != td->last_trb)
+ 			td->urb->actual_length =
+-				td->urb->transfer_buffer_length
+-				- TRB_LEN(le32_to_cpu(event->transfer_len));
++				td->urb->transfer_buffer_length -
++				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 		else
+ 			td->urb->actual_length = 0;
+ 
+@@ -1993,7 +1993,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		/* Maybe the event was for the data stage? */
+ 			td->urb->actual_length =
+ 				td->urb->transfer_buffer_length -
+-				TRB_LEN(le32_to_cpu(event->transfer_len));
++				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 			xhci_dbg(xhci, "Waiting for status "
+ 					"stage event\n");
+ 			return 0;
+@@ -2029,7 +2029,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 	/* handle completion code */
+ 	switch (trb_comp_code) {
+ 	case COMP_SUCCESS:
+-		if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
++		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
+ 			frame->status = 0;
+ 			break;
+ 		}
+@@ -2076,7 +2076,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
+ 		}
+ 		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+-			TRB_LEN(le32_to_cpu(event->transfer_len));
++			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 
+ 		if (trb_comp_code != COMP_STOP_INVAL) {
+ 			frame->actual_length = len;
+@@ -2134,7 +2134,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 	case COMP_SUCCESS:
+ 		/* Double check that the HW transferred everything. */
+ 		if (event_trb != td->last_trb ||
+-				TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
++		    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ 			xhci_warn(xhci, "WARN Successful completion "
+ 					"on short TX\n");
+ 			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+@@ -2162,18 +2162,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 				"%d bytes untransferred\n",
+ 				td->urb->ep->desc.bEndpointAddress,
+ 				td->urb->transfer_buffer_length,
+-				TRB_LEN(le32_to_cpu(event->transfer_len)));
++				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+ 	/* Fast path - was this the last TRB in the TD for this URB? */
+ 	if (event_trb == td->last_trb) {
+-		if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
++		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ 			td->urb->actual_length =
+ 				td->urb->transfer_buffer_length -
+-				TRB_LEN(le32_to_cpu(event->transfer_len));
++				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 			if (td->urb->transfer_buffer_length <
+ 					td->urb->actual_length) {
+ 				xhci_warn(xhci, "HC gave bad length "
+ 						"of %d bytes left\n",
+-					  TRB_LEN(le32_to_cpu(event->transfer_len)));
++					  EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+ 				td->urb->actual_length = 0;
+ 				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ 					*status = -EREMOTEIO;
+@@ -2217,7 +2217,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		if (trb_comp_code != COMP_STOP_INVAL)
+ 			td->urb->actual_length +=
+ 				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+-				TRB_LEN(le32_to_cpu(event->transfer_len));
++				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 	}
+ 
+ 	return finish_td(xhci, td, event_trb, event, ep, status, false);
+@@ -2283,7 +2283,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 	 * transfer type
+ 	 */
+ 	case COMP_SUCCESS:
+-		if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
++		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+ 			break;
+ 		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+ 			trb_comp_code = COMP_SHORT_TX;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 8b94412..94724b0 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -831,6 +831,10 @@ struct xhci_transfer_event {
+ 	__le32	flags;
+ };
+ 
++/* Transfer event TRB length bit mask */
++/* bits 0:23 */
++#define	EVENT_TRB_LEN(p)		((p) & 0xffffff)
++
+ /** Transfer Event bit fields **/
+ #define	TRB_TO_EP_ID(p)	(((p) >> 16) & 0x1f)
+ 
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 149198f..132f114 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -646,6 +646,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+ 	{ USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
+ 	{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
++	{ USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) },
+ 	{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
+ 	{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
+ 	{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 97e0a6b..809c03a 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -584,6 +584,13 @@
+ #define CONTEC_COM1USBH_PID	0x8311	/* COM-1(USB)H */
+ 
+ /*
++ * Mitsubishi Electric Corp. (http://www.meau.com)
++ * Submitted by Konstantin Holoborodko
++ */
++#define MITSUBISHI_VID		0x06D3
++#define MITSUBISHI_FXUSB_PID	0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */
++
++/*
+  * Definitions for B&B Electronics products.
+  */
+ #define BANDB_VID		0x0856	/* B&B Electronics Vendor ID */
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 77e8e5b..97e4cb5 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -576,6 +576,7 @@ struct block_device *bdgrab(struct block_device *bdev)
+ 	ihold(bdev->bd_inode);
+ 	return bdev;
+ }
++EXPORT_SYMBOL(bdgrab);
+ 
+ long nr_blockdev_pages(void)
+ {
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 7e20a65..01220b7 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3786,7 +3786,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
+ 	spin_lock(&block_rsv->lock);
+ 	spin_lock(&sinfo->lock);
+ 
+-	block_rsv->size = num_bytes;
++	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
+ 
+ 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
+ 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index e0113aa..2041de7 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -288,9 +288,9 @@ struct ext4_group_desc
+  */
+ 
+ struct flex_groups {
+-	atomic_t free_inodes;
+-	atomic_t free_blocks;
+-	atomic_t used_dirs;
++	atomic64_t	free_blocks;
++	atomic_t	free_inodes;
++	atomic_t	used_dirs;
+ };
+ 
+ #define EXT4_BG_INODE_UNINIT	0x0001 /* Inode table/bitmap not in use */
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 29272de..dd732c7 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -345,8 +345,8 @@ static int find_group_flex(struct super_block *sb, struct inode *parent,
+ 	ext4_group_t ngroups = ext4_get_groups_count(sb);
+ 	int flex_size = ext4_flex_bg_size(sbi);
+ 	ext4_group_t best_flex = parent_fbg_group;
+-	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
+-	int flexbg_free_blocks;
++	ext4_fsblk_t blocks_per_flex = sbi->s_blocks_per_group * flex_size;
++	ext4_fsblk_t flexbg_free_blocks;
+ 	int flex_freeb_ratio;
+ 	ext4_group_t n_fbg_groups;
+ 	ext4_group_t i;
+@@ -355,7 +355,7 @@ static int find_group_flex(struct super_block *sb, struct inode *parent,
+ 		sbi->s_log_groups_per_flex;
+ 
+ find_close_to_parent:
+-	flexbg_free_blocks = atomic_read(&flex_group[best_flex].free_blocks);
++	flexbg_free_blocks = atomic64_read(&flex_group[best_flex].free_blocks);
+ 	flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
+ 	if (atomic_read(&flex_group[best_flex].free_inodes) &&
+ 	    flex_freeb_ratio > free_block_ratio)
+@@ -370,7 +370,7 @@ find_close_to_parent:
+ 		if (i == parent_fbg_group || i == parent_fbg_group - 1)
+ 			continue;
+ 
+-		flexbg_free_blocks = atomic_read(&flex_group[i].free_blocks);
++		flexbg_free_blocks = atomic64_read(&flex_group[i].free_blocks);
+ 		flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
+ 
+ 		if (flex_freeb_ratio > free_block_ratio &&
+@@ -380,14 +380,14 @@ find_close_to_parent:
+ 		}
+ 
+ 		if ((atomic_read(&flex_group[best_flex].free_inodes) == 0) ||
+-		    ((atomic_read(&flex_group[i].free_blocks) >
+-		      atomic_read(&flex_group[best_flex].free_blocks)) &&
++		    ((atomic64_read(&flex_group[i].free_blocks) >
++		      atomic64_read(&flex_group[best_flex].free_blocks)) &&
+ 		     atomic_read(&flex_group[i].free_inodes)))
+ 			best_flex = i;
+ 	}
+ 
+ 	if (!atomic_read(&flex_group[best_flex].free_inodes) ||
+-	    !atomic_read(&flex_group[best_flex].free_blocks))
++	    !atomic64_read(&flex_group[best_flex].free_blocks))
+ 		return -1;
+ 
+ found_flexbg:
+@@ -406,8 +406,8 @@ out:
+ }
+ 
+ struct orlov_stats {
++	__u64 free_blocks;
+ 	__u32 free_inodes;
+-	__u32 free_blocks;
+ 	__u32 used_dirs;
+ };
+ 
+@@ -424,7 +424,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+ 
+ 	if (flex_size > 1) {
+ 		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
+-		stats->free_blocks = atomic_read(&flex_group[g].free_blocks);
++		stats->free_blocks = atomic64_read(&flex_group[g].free_blocks);
+ 		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
+ 		return;
+ 	}
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 31bbdb5..35959f6 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2814,8 +2814,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+ 	if (sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group = ext4_flex_group(sbi,
+ 							  ac->ac_b_ex.fe_group);
+-		atomic_sub(ac->ac_b_ex.fe_len,
+-			   &sbi->s_flex_groups[flex_group].free_blocks);
++		atomic64_sub(ac->ac_b_ex.fe_len,
++			     &sbi->s_flex_groups[flex_group].free_blocks);
+ 	}
+ 
+ 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+@@ -4614,7 +4614,7 @@ do_more:
+ 
+ 	if (sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+-		atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
++		atomic64_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
+ 	}
+ 
+ 	ext4_mb_unload_buddy(&e4b);
+@@ -4745,8 +4745,8 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+ 
+ 	if (sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+-		atomic_add(blocks_freed,
+-			   &sbi->s_flex_groups[flex_group].free_blocks);
++		atomic64_add(blocks_freed,
++			     &sbi->s_flex_groups[flex_group].free_blocks);
+ 	}
+ 
+ 	ext4_mb_unload_buddy(&e4b);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 244100f..d2661aac 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -929,8 +929,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
+ 	    sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group;
+ 		flex_group = ext4_flex_group(sbi, input->group);
+-		atomic_add(input->free_blocks_count,
+-			   &sbi->s_flex_groups[flex_group].free_blocks);
++		atomic64_add(input->free_blocks_count,
++			     &sbi->s_flex_groups[flex_group].free_blocks);
+ 		atomic_add(EXT4_INODES_PER_GROUP(sb),
+ 			   &sbi->s_flex_groups[flex_group].free_inodes);
+ 	}
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index c6a3363..e05cd34 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1992,8 +1992,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ 		flex_group = ext4_flex_group(sbi, i);
+ 		atomic_add(ext4_free_inodes_count(sb, gdp),
+ 			   &sbi->s_flex_groups[flex_group].free_inodes);
+-		atomic_add(ext4_free_blks_count(sb, gdp),
+-			   &sbi->s_flex_groups[flex_group].free_blocks);
++		atomic64_add(ext4_free_blks_count(sb, gdp),
++			     &sbi->s_flex_groups[flex_group].free_blocks);
+ 		atomic_add(ext4_used_dirs_count(sb, gdp),
+ 			   &sbi->s_flex_groups[flex_group].used_dirs);
+ 	}
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 3720caa..894e326 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3440,19 +3440,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
+  */
+ #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
+ 
+-static void buf_to_pages(const void *buf, size_t buflen,
+-		struct page **pages, unsigned int *pgbase)
+-{
+-	const void *p = buf;
+-
+-	*pgbase = offset_in_page(buf);
+-	p -= *pgbase;
+-	while (p < buf + buflen) {
+-		*(pages++) = virt_to_page(p);
+-		p += PAGE_CACHE_SIZE;
+-	}
+-}
+-
+ static int buf_to_pages_noslab(const void *buf, size_t buflen,
+ 		struct page **pages, unsigned int *pgbase)
+ {
+@@ -3549,9 +3536,19 @@ out:
+ 	nfs4_set_cached_acl(inode, acl);
+ }
+ 
++/*
++ * The getxattr API returns the required buffer length when called with a
++ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
++ * the required buf.  On a NULL buf, we send a page of data to the server
++ * guessing that the ACL request can be serviced by a page. If so, we cache
++ * up to the page of ACL data, and the 2nd call to getxattr is serviced by
++ * the cache. If not so, we throw away the page, and cache the required
++ * length. The next getxattr call will then produce another round trip to
++ * the server, this time with the input buf of the required size.
++ */
+ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+ {
+-	struct page *pages[NFS4ACL_MAXPAGES];
++	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+ 	struct nfs_getaclargs args = {
+ 		.fh = NFS_FH(inode),
+ 		.acl_pages = pages,
+@@ -3566,41 +3563,61 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ 		.rpc_argp = &args,
+ 		.rpc_resp = &res,
+ 	};
+-	struct page *localpage = NULL;
+-	int ret;
+-
+-	if (buflen < PAGE_SIZE) {
+-		/* As long as we're doing a round trip to the server anyway,
+-		 * let's be prepared for a page of acl data. */
+-		localpage = alloc_page(GFP_KERNEL);
+-		resp_buf = page_address(localpage);
+-		if (localpage == NULL)
+-			return -ENOMEM;
+-		args.acl_pages[0] = localpage;
+-		args.acl_pgbase = 0;
+-		args.acl_len = PAGE_SIZE;
+-	} else {
+-		resp_buf = buf;
+-		buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
++	int ret = -ENOMEM, npages, i;
++	size_t acl_len = 0;
++
++	npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	/* As long as we're doing a round trip to the server anyway,
++	 * let's be prepared for a page of acl data. */
++	if (npages == 0)
++		npages = 1;
++
++	for (i = 0; i < npages; i++) {
++		pages[i] = alloc_page(GFP_KERNEL);
++		if (!pages[i])
++			goto out_free;
++	}
++	if (npages > 1) {
++		/* for decoding across pages */
++		res.acl_scratch = alloc_page(GFP_KERNEL);
++		if (!res.acl_scratch)
++			goto out_free;
+ 	}
+-	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
++	args.acl_len = npages * PAGE_SIZE;
++	args.acl_pgbase = 0;
++	/* Let decode_getfacl know not to fail if the ACL data is larger than
++	 * the page we send as a guess */
++	if (buf == NULL)
++		res.acl_flags |= NFS4_ACL_LEN_REQUEST;
++	resp_buf = page_address(pages[0]);
++
++	dprintk("%s  buf %p buflen %ld npages %d args.acl_len %ld\n",
++		__func__, buf, buflen, npages, args.acl_len);
++	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
++			     &msg, &args.seq_args, &res.seq_res, 0);
+ 	if (ret)
+ 		goto out_free;
+-	if (res.acl_len > args.acl_len)
+-		nfs4_write_cached_acl(inode, NULL, res.acl_len);
++
++	acl_len = res.acl_len - res.acl_data_offset;
++	if (acl_len > args.acl_len)
++		nfs4_write_cached_acl(inode, NULL, acl_len);
+ 	else
+-		nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
++		nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
++				      acl_len);
+ 	if (buf) {
+ 		ret = -ERANGE;
+-		if (res.acl_len > buflen)
++		if (acl_len > buflen)
+ 			goto out_free;
+-		if (localpage)
+-			memcpy(buf, resp_buf, res.acl_len);
++		_copy_from_pages(buf, pages, res.acl_data_offset,
++				res.acl_len);
+ 	}
+-	ret = res.acl_len;
++	ret = acl_len;
+ out_free:
+-	if (localpage)
+-		__free_page(localpage);
++	for (i = 0; i < npages; i++)
++		if (pages[i])
++			__free_page(pages[i]);
++	if (res.acl_scratch)
++		__free_page(res.acl_scratch);
+ 	return ret;
+ }
+ 
+@@ -3631,6 +3648,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
+ 		nfs_zap_acl_cache(inode);
+ 	ret = nfs4_read_cached_acl(inode, buf, buflen);
+ 	if (ret != -ENOENT)
++		/* -ENOENT is returned if there is no ACL or if there is an ACL
++		 * but no cached acl data, just the acl length */
+ 		return ret;
+ 	return nfs4_get_acl_uncached(inode, buf, buflen);
+ }
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 5fcc67b..4204e96 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -2374,11 +2374,12 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+ 	encode_compound_hdr(xdr, req, &hdr);
+ 	encode_sequence(xdr, &args->seq_args, &hdr);
+ 	encode_putfh(xdr, args->fh, &hdr);
+-	replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
++	replen = hdr.replen + op_decode_hdr_maxsz + 1;
+ 	encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
+ 
+ 	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
+ 		args->acl_pages, args->acl_pgbase, args->acl_len);
++
+ 	encode_nops(&hdr);
+ }
+ 
+@@ -4714,17 +4715,18 @@ decode_restorefh(struct xdr_stream *xdr)
+ }
+ 
+ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+-		size_t *acl_len)
++			 struct nfs_getaclres *res)
+ {
+-	__be32 *savep;
++	__be32 *savep, *bm_p;
+ 	uint32_t attrlen,
+ 		 bitmap[2] = {0};
+ 	struct kvec *iov = req->rq_rcv_buf.head;
+ 	int status;
+ 
+-	*acl_len = 0;
++	res->acl_len = 0;
+ 	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ 		goto out;
++	bm_p = xdr->p;
+ 	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ 		goto out;
+ 	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+@@ -4736,18 +4738,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+ 		size_t hdrlen;
+ 		u32 recvd;
+ 
++		/* The bitmap (xdr len + bitmaps) and the attr xdr len words
++		 * are stored with the acl data to handle the problem of
++		 * variable length bitmaps.*/
++		xdr->p = bm_p;
++		res->acl_data_offset = be32_to_cpup(bm_p) + 2;
++		res->acl_data_offset <<= 2;
++
+ 		/* We ignore &savep and don't do consistency checks on
+ 		 * the attr length.  Let userspace figure it out.... */
+ 		hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
++		attrlen += res->acl_data_offset;
+ 		recvd = req->rq_rcv_buf.len - hdrlen;
+ 		if (attrlen > recvd) {
+-			dprintk("NFS: server cheating in getattr"
+-					" acl reply: attrlen %u > recvd %u\n",
++			if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
++				/* getxattr interface called with a NULL buf */
++				res->acl_len = attrlen;
++				goto out;
++			}
++			dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
+ 					attrlen, recvd);
+ 			return -EINVAL;
+ 		}
+ 		xdr_read_pages(xdr, attrlen);
+-		*acl_len = attrlen;
++		res->acl_len = attrlen;
+ 	} else
+ 		status = -EOPNOTSUPP;
+ 
+@@ -5673,6 +5687,10 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ 	struct compound_hdr hdr;
+ 	int status;
+ 
++	if (res->acl_scratch != NULL) {
++		void *p = page_address(res->acl_scratch);
++		xdr_set_scratch_buffer(xdr, p, PAGE_SIZE);
++	}
+ 	status = decode_compound_hdr(xdr, &hdr);
+ 	if (status)
+ 		goto out;
+@@ -5682,7 +5700,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ 	status = decode_putfh(xdr);
+ 	if (status)
+ 		goto out;
+-	status = decode_getacl(xdr, rqstp, &res->acl_len);
++	status = decode_getacl(xdr, rqstp, res);
+ 
+ out:
+ 	return status;
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index ecdd18a..59ac3f4 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -262,7 +262,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ 		iattr->ia_valid |= ATTR_SIZE;
+ 	}
+ 	if (bmval[0] & FATTR4_WORD0_ACL) {
+-		int nace;
++		u32 nace;
+ 		struct nfs4_ace *ace;
+ 
+ 		READ_BUF(4); len += 4;
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index 567b3db..7cbc585 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -917,6 +917,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ 		ino = parent_sd->s_ino;
+ 		if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
+ 			filp->f_pos++;
++		else
++			return 0;
+ 	}
+ 	if (filp->f_pos == 1) {
+ 		if (parent_sd->s_parent)
+@@ -925,6 +927,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ 			ino = parent_sd->s_ino;
+ 		if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
+ 			filp->f_pos++;
++		else
++			return 0;
+ 	}
+ 	mutex_lock(&sysfs_mutex);
+ 	for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
+@@ -955,10 +959,21 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ 	return 0;
+ }
+ 
++static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct inode *inode = file->f_path.dentry->d_inode;
++	loff_t ret;
++
++	mutex_lock(&inode->i_mutex);
++	ret = generic_file_llseek(file, offset, whence);
++	mutex_unlock(&inode->i_mutex);
++
++	return ret;
++}
+ 
+ const struct file_operations sysfs_dir_operations = {
+ 	.read		= generic_read_dir,
+ 	.readdir	= sysfs_readdir,
+ 	.release	= sysfs_dir_release,
+-	.llseek		= generic_file_llseek,
++	.llseek		= sysfs_dir_llseek,
+ };
+diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
+index 555c0ae..743f7a5 100644
+--- a/include/asm-generic/signal.h
++++ b/include/asm-generic/signal.h
+@@ -99,6 +99,10 @@ typedef unsigned long old_sigset_t;
+ 
+ #include <asm-generic/signal-defs.h>
+ 
++#ifdef SA_RESTORER
++#define __ARCH_HAS_SA_RESTORER
++#endif
++
+ struct sigaction {
+ 	__sighandler_t sa_handler;
+ 	unsigned long sa_flags;
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 82d5476..8663a26 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -736,6 +736,13 @@ static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+ {
+ 	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
+ }
++
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
++
++#else
++
++static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
++
+ #endif
+ 
+ #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 0012fc3..9733df5 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -591,8 +591,13 @@ struct nfs_getaclargs {
+ 	struct nfs4_sequence_args 	seq_args;
+ };
+ 
++/* getxattr ACL interface flags */
++#define NFS4_ACL_LEN_REQUEST	0x0001	/* zero length getxattr buffer */
+ struct nfs_getaclres {
+ 	size_t				acl_len;
++	size_t				acl_data_offset;
++	int				acl_flags;
++	struct page *			acl_scratch;
+ 	struct nfs4_sequence_res	seq_res;
+ };
+ 
+diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
+index a20970e..af70af3 100644
+--- a/include/linux/sunrpc/xdr.h
++++ b/include/linux/sunrpc/xdr.h
+@@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
+ 			     struct xdr_array2_desc *desc);
+ extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
+ 			     struct xdr_array2_desc *desc);
++extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
++			     size_t len);
+ 
+ /*
+  * Provide some simple tools for XDR buffer overflow-checking etc.
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index d3ec89f..6b762c6 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -130,7 +130,7 @@ struct thermal_zone_device {
+ /* Adding event notification support elements */
+ #define THERMAL_GENL_FAMILY_NAME                "thermal_event"
+ #define THERMAL_GENL_VERSION                    0x01
+-#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_group"
++#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_grp"
+ 
+ enum events {
+ 	THERMAL_AUX0,
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 0386710..b0c0887 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -437,7 +437,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
+ 		if (force_default || ka->sa.sa_handler != SIG_IGN)
+ 			ka->sa.sa_handler = SIG_DFL;
+ 		ka->sa.sa_flags = 0;
+-#ifdef SA_RESTORER
++#ifdef __ARCH_HAS_SA_RESTORER
+ 		ka->sa.sa_restorer = NULL;
+ #endif
+ 		sigemptyset(&ka->sa.sa_mask);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 97bf540..b3ae845 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2527,11 +2527,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+ 	return -EINVAL;
+ }
+ 
+-static void set_tracer_flags(unsigned int mask, int enabled)
++/* Some tracers require overwrite to stay enabled */
++int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
++{
++	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
++		return -1;
++
++	return 0;
++}
++
++int set_tracer_flag(unsigned int mask, int enabled)
+ {
+ 	/* do nothing if flag is already set */
+ 	if (!!(trace_flags & mask) == !!enabled)
+-		return;
++		return 0;
++
++	/* Give the tracer a chance to approve the change */
++	if (current_trace->flag_changed)
++		if (current_trace->flag_changed(current_trace, mask, !!enabled))
++			return -EINVAL;
+ 
+ 	if (enabled)
+ 		trace_flags |= mask;
+@@ -2543,6 +2557,8 @@ static void set_tracer_flags(unsigned int mask, int enabled)
+ 
+ 	if (mask == TRACE_ITER_OVERWRITE)
+ 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
++
++	return 0;
+ }
+ 
+ static ssize_t
+@@ -2552,7 +2568,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+ 	char buf[64];
+ 	char *cmp;
+ 	int neg = 0;
+-	int ret;
++	int ret = -ENODEV;
+ 	int i;
+ 
+ 	if (cnt >= sizeof(buf))
+@@ -2569,21 +2585,23 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+ 		cmp += 2;
+ 	}
+ 
++	mutex_lock(&trace_types_lock);
++
+ 	for (i = 0; trace_options[i]; i++) {
+ 		if (strcmp(cmp, trace_options[i]) == 0) {
+-			set_tracer_flags(1 << i, !neg);
++			ret = set_tracer_flag(1 << i, !neg);
+ 			break;
+ 		}
+ 	}
+ 
+ 	/* If no option could be set, test the specific tracer options */
+-	if (!trace_options[i]) {
+-		mutex_lock(&trace_types_lock);
++	if (!trace_options[i])
+ 		ret = set_tracer_option(current_trace, cmp, neg);
+-		mutex_unlock(&trace_types_lock);
+-		if (ret)
+-			return ret;
+-	}
++
++	mutex_unlock(&trace_types_lock);
++
++	if (ret < 0)
++		return ret;
+ 
+ 	*ppos += cnt;
+ 
+@@ -2881,6 +2899,9 @@ static int tracing_set_tracer(const char *buf)
+ 		goto out;
+ 
+ 	trace_branch_disable();
++
++	current_trace->enabled = false;
++
+ 	if (current_trace && current_trace->reset)
+ 		current_trace->reset(tr);
+ 	if (current_trace && current_trace->use_max_tr) {
+@@ -2910,6 +2931,7 @@ static int tracing_set_tracer(const char *buf)
+ 			goto out;
+ 	}
+ 
++	current_trace->enabled = true;
+ 	trace_branch_enable(tr);
+  out:
+ 	mutex_unlock(&trace_types_lock);
+@@ -4180,7 +4202,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 
+ 	if (val != 0 && val != 1)
+ 		return -EINVAL;
+-	set_tracer_flags(1 << index, val);
++
++	mutex_lock(&trace_types_lock);
++	ret = set_tracer_flag(1 << index, val);
++	mutex_unlock(&trace_types_lock);
++
++	if (ret < 0)
++		return ret;
+ 
+ 	*ppos += cnt;
+ 
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index f807407..123ee28 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -271,10 +271,14 @@ struct tracer {
+ 	enum print_line_t	(*print_line)(struct trace_iterator *iter);
+ 	/* If you handled the flag setting, return 0 */
+ 	int			(*set_flag)(u32 old_flags, u32 bit, int set);
++	/* Return 0 if OK with change, else return non-zero */
++	int			(*flag_changed)(struct tracer *tracer,
++						u32 mask, int set);
+ 	struct tracer		*next;
+ 	struct tracer_flags	*flags;
+ 	int			print_max;
+ 	int			use_max_tr;
++	bool			enabled;
+ };
+ 
+ 
+@@ -776,6 +780,9 @@ extern struct list_head ftrace_events;
+ extern const char *__start___trace_bprintk_fmt[];
+ extern const char *__stop___trace_bprintk_fmt[];
+ 
++int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
++int set_tracer_flag(unsigned int mask, int enabled);
++
+ #undef FTRACE_ENTRY
+ #define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
+ 	extern struct ftrace_event_call					\
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index c77424b..984aad8 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -32,7 +32,7 @@ enum {
+ 
+ static int trace_type __read_mostly;
+ 
+-static int save_lat_flag;
++static int save_flags;
+ 
+ static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
+ static int start_irqsoff_tracer(struct trace_array *tr, int graph);
+@@ -544,8 +544,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
+ 
+ static void __irqsoff_tracer_init(struct trace_array *tr)
+ {
+-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+-	trace_flags |= TRACE_ITER_LATENCY_FMT;
++	save_flags = trace_flags;
++
++	/* non overwrite screws up the latency tracers */
++	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
++	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+ 
+ 	tracing_max_latency = 0;
+ 	irqsoff_trace = tr;
+@@ -559,10 +562,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
+ 
+ static void irqsoff_tracer_reset(struct trace_array *tr)
+ {
++	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
++	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
++
+ 	stop_irqsoff_tracer(tr, is_graph());
+ 
+-	if (!save_lat_flag)
+-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
++	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
++	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+ }
+ 
+ static void irqsoff_tracer_start(struct trace_array *tr)
+@@ -595,6 +601,7 @@ static struct tracer irqsoff_tracer __read_mostly =
+ 	.print_line     = irqsoff_print_line,
+ 	.flags		= &tracer_flags,
+ 	.set_flag	= irqsoff_set_flag,
++	.flag_changed	= trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ 	.selftest    = trace_selftest_startup_irqsoff,
+ #endif
+@@ -628,6 +635,7 @@ static struct tracer preemptoff_tracer __read_mostly =
+ 	.print_line     = irqsoff_print_line,
+ 	.flags		= &tracer_flags,
+ 	.set_flag	= irqsoff_set_flag,
++	.flag_changed	= trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ 	.selftest    = trace_selftest_startup_preemptoff,
+ #endif
+@@ -663,6 +671,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
+ 	.print_line     = irqsoff_print_line,
+ 	.flags		= &tracer_flags,
+ 	.set_flag	= irqsoff_set_flag,
++	.flag_changed	= trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ 	.selftest    = trace_selftest_startup_preemptirqsoff,
+ #endif
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index f029dd4..1beb25e 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
+ static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+ static void wakeup_graph_return(struct ftrace_graph_ret *trace);
+ 
+-static int save_lat_flag;
++static int save_flags;
+ 
+ #define TRACE_DISPLAY_GRAPH     1
+ 
+@@ -526,8 +526,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
+ 
+ static int __wakeup_tracer_init(struct trace_array *tr)
+ {
+-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+-	trace_flags |= TRACE_ITER_LATENCY_FMT;
++	save_flags = trace_flags;
++
++	/* non overwrite screws up the latency tracers */
++	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
++	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+ 
+ 	tracing_max_latency = 0;
+ 	wakeup_trace = tr;
+@@ -549,12 +552,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
+ 
+ static void wakeup_tracer_reset(struct trace_array *tr)
+ {
++	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
++	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
++
+ 	stop_wakeup_tracer(tr);
+ 	/* make sure we put back any tasks we are tracing */
+ 	wakeup_reset(tr);
+ 
+-	if (!save_lat_flag)
+-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
++	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
++	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+ }
+ 
+ static void wakeup_tracer_start(struct trace_array *tr)
+@@ -580,6 +586,7 @@ static struct tracer wakeup_tracer __read_mostly =
+ 	.print_line	= wakeup_print_line,
+ 	.flags		= &tracer_flags,
+ 	.set_flag	= wakeup_set_flag,
++	.flag_changed	= trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ 	.selftest    = trace_selftest_startup_wakeup,
+ #endif
+@@ -601,6 +608,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
+ 	.print_line	= wakeup_print_line,
+ 	.flags		= &tracer_flags,
+ 	.set_flag	= wakeup_set_flag,
++	.flag_changed	= trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ 	.selftest    = trace_selftest_startup_wakeup,
+ #endif
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index e0a3e51..a739dd1 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -453,19 +453,20 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
+ 
+ 	zone->present_pages += onlined_pages;
+ 	zone->zone_pgdat->node_present_pages += onlined_pages;
+-	if (need_zonelists_rebuild)
+-		build_all_zonelists(zone);
+-	else
+-		zone_pcp_update(zone);
++	if (onlined_pages) {
++		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
++		if (need_zonelists_rebuild)
++			build_all_zonelists(zone);
++		else
++			zone_pcp_update(zone);
++	}
+ 
+ 	mutex_unlock(&zonelists_mutex);
+ 
+ 	init_per_zone_wmark_min();
+ 
+-	if (onlined_pages) {
++	if (onlined_pages)
+ 		kswapd_run(zone_to_nid(zone));
+-		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
+-	}
+ 
+ 	vm_total_pages = nr_free_pagecache_pages();
+ 
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 917ecb9..1e93a91 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -108,13 +108,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+ 	grp = rtnl_dereference(real_dev->vlgrp);
+ 	BUG_ON(!grp);
+ 
+-	/* Take it out of our own structures, but be sure to interlock with
+-	 * HW accelerating devices or SW vlan input packet processing if
+-	 * VLAN is not 0 (leave it there for 802.1p).
+-	 */
+-	if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
+-		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+-
+ 	grp->nr_vlans--;
+ 
+ 	if (vlan->flags & VLAN_FLAG_GVRP)
+@@ -139,6 +132,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+ 		call_rcu(&grp->rcu, vlan_rcu_free);
+ 	}
+ 
++	/* Take it out of our own structures, but be sure to interlock with
++	 * HW accelerating devices or SW vlan input packet processing if
++	 * VLAN is not 0 (leave it there for 802.1p).
++	 */
++	if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
++		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
++
+ 	/* Get rid of the vlan's reference to real_dev */
+ 	dev_put(real_dev);
+ }
+diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
+index fa22ba2..ad7d8b2 100644
+--- a/net/batman-adv/icmp_socket.c
++++ b/net/batman-adv/icmp_socket.c
+@@ -136,10 +136,9 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
+ 
+ 	spin_unlock_bh(&socket_client->lock);
+ 
+-	error = __copy_to_user(buf, &socket_packet->icmp_packet,
+-			       socket_packet->icmp_len);
++	packet_len = min(count, socket_packet->icmp_len);
++	error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);
+ 
+-	packet_len = socket_packet->icmp_len;
+ 	kfree(socket_packet);
+ 
+ 	if (error)
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index cb4fb78..7b1d362 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -378,6 +378,7 @@ static void __sco_sock_close(struct sock *sk)
+ 			sco_chan_del(sk, ECONNRESET);
+ 		break;
+ 
++	case BT_CONNECT2:
+ 	case BT_CONNECT:
+ 	case BT_DISCONN:
+ 		sco_chan_del(sk, ECONNRESET);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index ed1f0ca..e5eba56 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3070,6 +3070,7 @@ int netdev_rx_handler_register(struct net_device *dev,
+ 	if (dev->rx_handler)
+ 		return -EBUSY;
+ 
++	/* Note: rx_handler_data must be set before rx_handler */
+ 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
+ 	rcu_assign_pointer(dev->rx_handler, rx_handler);
+ 
+@@ -3090,6 +3091,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
+ 
+ 	ASSERT_RTNL();
+ 	rcu_assign_pointer(dev->rx_handler, NULL);
++	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
++	 * section has a guarantee to see a non NULL rx_handler_data
++	 * as well.
++	 */
++	synchronize_net();
+ 	rcu_assign_pointer(dev->rx_handler_data, NULL);
+ }
+ EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 8a56d24..ab44f9d 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2244,11 +2244,8 @@ void tcp_enter_loss(struct sock *sk, int how)
+ 	if (tcp_is_reno(tp))
+ 		tcp_reset_reno_sack(tp);
+ 
+-	if (!how) {
+-		/* Push undo marker, if it was plain RTO and nothing
+-		 * was retransmitted. */
+-		tp->undo_marker = tp->snd_una;
+-	} else {
++	tp->undo_marker = tp->snd_una;
++	if (how) {
+ 		tp->sacked_out = 0;
+ 		tp->fackets_out = 0;
+ 	}
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index e0b8bd1..0d9b959 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1579,8 +1579,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+ 			goto send_now;
+ 	}
+ 
+-	/* Ok, it looks like it is advisable to defer.  */
+-	tp->tso_deferred = 1 | (jiffies << 1);
++	/* Ok, it looks like it is advisable to defer.
++	 * Do not rearm the timer if already set to not break TCP ACK clocking.
++	 */
++	if (!tp->tso_deferred)
++		tp->tso_deferred = 1 | (jiffies << 1);
+ 
+ 	return 1;
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index e845c0c..93c4721 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4553,26 +4553,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
+ 
+ static int __net_init addrconf_init_net(struct net *net)
+ {
+-	int err;
++	int err = -ENOMEM;
+ 	struct ipv6_devconf *all, *dflt;
+ 
+-	err = -ENOMEM;
+-	all = &ipv6_devconf;
+-	dflt = &ipv6_devconf_dflt;
++	all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
++	if (all == NULL)
++		goto err_alloc_all;
+ 
+-	if (!net_eq(net, &init_net)) {
+-		all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
+-		if (all == NULL)
+-			goto err_alloc_all;
++	dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
++	if (dflt == NULL)
++		goto err_alloc_dflt;
+ 
+-		dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
+-		if (dflt == NULL)
+-			goto err_alloc_dflt;
+-	} else {
+-		/* these will be inherited by all namespaces */
+-		dflt->autoconf = ipv6_defaults.autoconf;
+-		dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
+-	}
++	/* these will be inherited by all namespaces */
++	dflt->autoconf = ipv6_defaults.autoconf;
++	dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
+ 
+ 	net->ipv6.devconf_all = all;
+ 	net->ipv6.devconf_dflt = dflt;
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index cc61697..8ad05f8 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -2584,8 +2584,10 @@ bed:
+ 				    NULL, NULL, NULL);
+ 
+ 		/* Check if the we got some results */
+-		if (!self->cachedaddr)
+-			return -EAGAIN;		/* Didn't find any devices */
++		if (!self->cachedaddr) {
++			err = -EAGAIN;		/* Didn't find any devices */
++			goto out;
++		}
+ 		daddr = self->cachedaddr;
+ 		/* Cleanup */
+ 		self->cachedaddr = 0;
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 482fa57..874f8ff 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -134,6 +134,7 @@ int genl_register_mc_group(struct genl_family *family,
+ 	int err = 0;
+ 
+ 	BUG_ON(grp->name[0] == '\0');
++	BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);
+ 
+ 	genl_lock();
+ 
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index d7824ec..3ee3fe3 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -135,6 +135,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *
+ 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
+ 	task->tk_waitqueue = queue;
+ 	queue->qlen++;
++	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
++	smp_wmb();
+ 	rpc_set_queued(task);
+ 
+ 	dprintk("RPC: %5u added to queue %p \"%s\"\n",
+@@ -369,8 +371,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
+  */
+ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
+ {
+-	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
+-		__rpc_do_wake_up_task(queue, task);
++	if (RPC_IS_QUEUED(task)) {
++		smp_rmb();
++		if (task->tk_waitqueue == queue)
++			__rpc_do_wake_up_task(queue, task);
++	}
+ }
+ 
+ /*
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index f008c14..671e482 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
+  * Copies data into an arbitrary memory location from an array of pages
+  * The copy is assumed to be non-overlapping.
+  */
+-static void
++void
+ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
+ {
+ 	struct page **pgfrom;
+@@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
+ 
+ 	} while ((len -= copy) != 0);
+ }
++EXPORT_SYMBOL_GPL(_copy_from_pages);
+ 
+ /*
+  * xdr_shrink_bufhead
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 0722a25..afbdd0c 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -371,7 +371,7 @@ static void unix_sock_destructor(struct sock *sk)
+ #endif
+ }
+ 
+-static int unix_release_sock(struct sock *sk, int embrion)
++static void unix_release_sock(struct sock *sk, int embrion)
+ {
+ 	struct unix_sock *u = unix_sk(sk);
+ 	struct dentry *dentry;
+@@ -444,8 +444,6 @@ static int unix_release_sock(struct sock *sk, int embrion)
+ 
+ 	if (unix_tot_inflight)
+ 		unix_gc();		/* Garbage collect fds */
+-
+-	return 0;
+ }
+ 
+ static void init_peercred(struct sock *sk)
+@@ -682,9 +680,10 @@ static int unix_release(struct socket *sock)
+ 	if (!sk)
+ 		return 0;
+ 
++	unix_release_sock(sk, 0);
+ 	sock->sk = NULL;
+ 
+-	return unix_release_sock(sk, 0);
++	return 0;
+ }
+ 
+ static int unix_autobind(struct socket *sock)
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 373e14f..fb37356 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -91,7 +91,7 @@ int x25_parse_address_block(struct sk_buff *skb,
+ 	int needed;
+ 	int rc;
+ 
+-	if (skb->len < 1) {
++	if (!pskb_may_pull(skb, 1)) {
+ 		/* packet has no address block */
+ 		rc = 0;
+ 		goto empty;
+@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
+ 	len = *skb->data;
+ 	needed = 1 + (len >> 4) + (len & 0x0f);
+ 
+-	if (skb->len < needed) {
++	if (!pskb_may_pull(skb, needed)) {
+ 		/* packet is too short to hold the addresses it claims
+ 		   to hold */
+ 		rc = -1;
+@@ -952,14 +952,27 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
+ 	 *
+ 	 *	Facilities length is mandatory in call request packets
+ 	 */
+-	if (skb->len < 1)
++	if (!pskb_may_pull(skb, 1))
+ 		goto out_clear_request;
+ 	len = skb->data[0] + 1;
+-	if (skb->len < len)
++	if (!pskb_may_pull(skb, len))
+ 		goto out_clear_request;
+ 	skb_pull(skb,len);
+ 
+ 	/*
++	 *	Ensure that the amount of call user data is valid.
++	 */
++	if (skb->len > X25_MAX_CUD_LEN)
++		goto out_clear_request;
++
++	/*
++	 *	Get all the call user data so it can be used in
++	 *	x25_find_listener and skb_copy_from_linear_data up ahead.
++	 */
++	if (!pskb_may_pull(skb, skb->len))
++		goto out_clear_request;
++
++	/*
+ 	 *	Find a listener for the particular address/cud pair.
+ 	 */
+ 	sk = x25_find_listener(&source_addr,skb);
+@@ -1167,6 +1180,9 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
+ 	 *	byte of the user data is the logical value of the Q Bit.
+ 	 */
+ 	if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
++		if (!pskb_may_pull(skb, 1))
++			goto out_kfree_skb;
++
+ 		qbit = skb->data[0];
+ 		skb_pull(skb, 1);
+ 	}
+@@ -1245,7 +1261,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 	struct x25_sock *x25 = x25_sk(sk);
+ 	struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
+ 	size_t copied;
+-	int qbit;
++	int qbit, header_len = x25->neighbour->extended ?
++		X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
++
+ 	struct sk_buff *skb;
+ 	unsigned char *asmptr;
+ 	int rc = -ENOTCONN;
+@@ -1266,6 +1284,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 
+ 		skb = skb_dequeue(&x25->interrupt_in_queue);
+ 
++		if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
++			goto out_free_dgram;
++
+ 		skb_pull(skb, X25_STD_MIN_LEN);
+ 
+ 		/*
+@@ -1286,10 +1307,12 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		if (!skb)
+ 			goto out;
+ 
++		if (!pskb_may_pull(skb, header_len))
++			goto out_free_dgram;
++
+ 		qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT;
+ 
+-		skb_pull(skb, x25->neighbour->extended ?
+-				X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
++		skb_pull(skb, header_len);
+ 
+ 		if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) {
+ 			asmptr  = skb_push(skb, 1);
+diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
+index 9005f6d..60749c5 100644
+--- a/net/x25/x25_dev.c
++++ b/net/x25/x25_dev.c
+@@ -32,6 +32,9 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
+ 	unsigned short frametype;
+ 	unsigned int lci;
+ 
++	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
++		return 0;
++
+ 	frametype = skb->data[2];
+ 	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
+ 
+@@ -115,6 +118,9 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
+ 		goto drop;
+ 	}
+ 
++	if (!pskb_may_pull(skb, 1))
++		return 0;
++
+ 	switch (skb->data[0]) {
+ 
+ 	case X25_IFACE_DATA:
+diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
+index f77e4e7..36384a1 100644
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -44,7 +44,7 @@
+ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
+ 		struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
+ {
+-	unsigned char *p = skb->data;
++	unsigned char *p;
+ 	unsigned int len;
+ 
+ 	*vc_fac_mask = 0;
+@@ -60,14 +60,16 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
+ 	memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae));
+ 	memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae));
+ 
+-	if (skb->len < 1)
++	if (!pskb_may_pull(skb, 1))
+ 		return 0;
+ 
+-	len = *p++;
++	len = skb->data[0];
+ 
+-	if (len >= skb->len)
++	if (!pskb_may_pull(skb, 1 + len))
+ 		return -1;
+ 
++	p = skb->data + 1;
++
+ 	while (len > 0) {
+ 		switch (*p & X25_FAC_CLASS_MASK) {
+ 		case X25_FAC_CLASS_A:
+diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
+index 15de65f..36ab913 100644
+--- a/net/x25/x25_in.c
++++ b/net/x25/x25_in.c
+@@ -107,6 +107,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
+ 			/*
+ 			 *	Parse the data in the frame.
+ 			 */
++			if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
++				goto out_clear;
+ 			skb_pull(skb, X25_STD_MIN_LEN);
+ 
+ 			len = x25_parse_address_block(skb, &source_addr,
+@@ -127,9 +129,11 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
+ 			 *	Copy any Call User Data.
+ 			 */
+ 			if (skb->len > 0) {
+-				skb_copy_from_linear_data(skb,
+-					      x25->calluserdata.cuddata,
+-					      skb->len);
++				if (skb->len > X25_MAX_CUD_LEN)
++					goto out_clear;
++
++				skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
++					skb->len);
+ 				x25->calluserdata.cudlength = skb->len;
+ 			}
+ 			if (!sock_flag(sk, SOCK_DEAD))
+@@ -137,6 +141,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
+ 			break;
+ 		}
+ 		case X25_CLEAR_REQUEST:
++			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
++				goto out_clear;
++
+ 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+ 			x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
+ 			break;
+@@ -164,6 +171,9 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
+ 	switch (frametype) {
+ 
+ 		case X25_CLEAR_REQUEST:
++			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
++				goto out_clear;
++
+ 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+ 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+ 			break;
+@@ -177,6 +187,11 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp
+ 	}
+ 
+ 	return 0;
++
++out_clear:
++	x25_write_internal(sk, X25_CLEAR_REQUEST);
++	x25_start_t23timer(sk);
++	return 0;
+ }
+ 
+ /*
+@@ -206,6 +221,9 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
+ 			break;
+ 
+ 		case X25_CLEAR_REQUEST:
++			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
++				goto out_clear;
++
+ 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+ 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+ 			break;
+@@ -304,6 +322,12 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
+ 	}
+ 
+ 	return queued;
++
++out_clear:
++	x25_write_internal(sk, X25_CLEAR_REQUEST);
++	x25->state = X25_STATE_2;
++	x25_start_t23timer(sk);
++	return 0;
+ }
+ 
+ /*
+@@ -313,13 +337,13 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp
+  */
+ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
+ {
++	struct x25_sock *x25 = x25_sk(sk);
++
+ 	switch (frametype) {
+ 
+ 		case X25_RESET_REQUEST:
+ 			x25_write_internal(sk, X25_RESET_CONFIRMATION);
+ 		case X25_RESET_CONFIRMATION: {
+-			struct x25_sock *x25 = x25_sk(sk);
+-
+ 			x25_stop_timer(sk);
+ 			x25->condition = 0x00;
+ 			x25->va        = 0;
+@@ -331,6 +355,9 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
+ 			break;
+ 		}
+ 		case X25_CLEAR_REQUEST:
++			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
++				goto out_clear;
++
+ 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+ 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
+ 			break;
+@@ -340,6 +367,12 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
+ 	}
+ 
+ 	return 0;
++
++out_clear:
++	x25_write_internal(sk, X25_CLEAR_REQUEST);
++	x25->state = X25_STATE_2;
++	x25_start_t23timer(sk);
++	return 0;
+ }
+ 
+ /* Higher level upcall for a LAPB frame */
+diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
+index 2130692..0a9e074 100644
+--- a/net/x25/x25_link.c
++++ b/net/x25/x25_link.c
+@@ -90,6 +90,9 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
+ 			break;
+ 
+ 		case X25_DIAGNOSTIC:
++			if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
++				break;
++
+ 			printk(KERN_WARNING "x25: diagnostic #%d - "
+ 			       "%02X %02X %02X\n",
+ 			       skb->data[3], skb->data[4],
+diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
+index dc20cf1..faf98d8 100644
+--- a/net/x25/x25_subr.c
++++ b/net/x25/x25_subr.c
+@@ -271,7 +271,11 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
+ 	       int *d, int *m)
+ {
+ 	struct x25_sock *x25 = x25_sk(sk);
+-	unsigned char *frame = skb->data;
++	unsigned char *frame;
++
++	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
++		return X25_ILLEGAL;
++	frame = skb->data;
+ 
+ 	*ns = *nr = *q = *d = *m = 0;
+ 
+@@ -296,6 +300,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
+ 		if (frame[2] == X25_RR  ||
+ 		    frame[2] == X25_RNR ||
+ 		    frame[2] == X25_REJ) {
++			if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
++				return X25_ILLEGAL;
++			frame = skb->data;
++
+ 			*nr = (frame[3] >> 1) & 0x7F;
+ 			return frame[2];
+ 		}
+@@ -310,6 +318,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
+ 
+ 	if (x25->neighbour->extended) {
+ 		if ((frame[2] & 0x01) == X25_DATA) {
++			if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
++				return X25_ILLEGAL;
++			frame = skb->data;
++
+ 			*q  = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
+ 			*d  = (frame[0] & X25_D_BIT) == X25_D_BIT;
+ 			*m  = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
+diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
+index 9f614b4..272407c 100644
+--- a/virt/kvm/irq_comm.c
++++ b/virt/kvm/irq_comm.c
+@@ -318,6 +318,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
+ 	 */
+ 	hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
+ 		if (ei->type == KVM_IRQ_ROUTING_MSI ||
++		    ue->type == KVM_IRQ_ROUTING_MSI ||
+ 		    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
+ 			return r;
+ 
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 6b39ba9..88dde44 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1616,18 +1616,22 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ 
+ 	r = kvm_arch_vcpu_setup(vcpu);
+ 	if (r)
+-		return r;
++		goto vcpu_destroy;
+ 
+ 	mutex_lock(&kvm->lock);
++	if (!kvm_vcpu_compatible(vcpu)) {
++		r = -EINVAL;
++		goto unlock_vcpu_destroy;
++	}
+ 	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+ 		r = -EINVAL;
+-		goto vcpu_destroy;
++		goto unlock_vcpu_destroy;
+ 	}
+ 
+ 	kvm_for_each_vcpu(r, v, kvm)
+ 		if (v->vcpu_id == id) {
+ 			r = -EEXIST;
+-			goto vcpu_destroy;
++			goto unlock_vcpu_destroy;
+ 		}
+ 
+ 	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
+@@ -1637,7 +1641,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ 	r = create_vcpu_fd(vcpu);
+ 	if (r < 0) {
+ 		kvm_put_kvm(kvm);
+-		goto vcpu_destroy;
++		goto unlock_vcpu_destroy;
+ 	}
+ 
+ 	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+@@ -1651,8 +1655,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ 	mutex_unlock(&kvm->lock);
+ 	return r;
+ 
+-vcpu_destroy:
++unlock_vcpu_destroy:
+ 	mutex_unlock(&kvm->lock);
++vcpu_destroy:
+ 	kvm_arch_vcpu_destroy(vcpu);
+ 	return r;
+ }

Added: genpatches-2.6/trunk/3.0/2700_ThinkPad-30-brightness-control-fix.patch
===================================================================
--- genpatches-2.6/trunk/3.0/2700_ThinkPad-30-brightness-control-fix.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.0/2700_ThinkPad-30-brightness-control-fix.patch	2013-04-08 07:17:58 UTC (rev 2333)
@@ -0,0 +1,81 @@
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index cb96296..6c242ed 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -193,6 +193,13 @@  static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+ 	return 0;
+ }
+ 
++static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
++{
++	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
++	acpi_osi_setup("!Windows 2012");
++	return 0;
++}
++
+ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ 	{
+ 	.callback = dmi_disable_osi_vista,
+@@ -269,6 +276,61 @@  static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ 	},
+ 
+ 	/*
++	 * The following Lenovo models have a broken workaround in the
++	 * acpi_video backlight implementation to meet the Windows 8
++	 * requirement of 101 backlight levels. Reverting to pre-Win8
++	 * behavior fixes the problem.
++	 */
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad L430",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L430"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad T430s",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad T530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T530"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad W530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad X1 Carbon",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad X230",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
++		},
++	},
++
++	/*
+ 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+ 	 * Linux ignores it, except for the machines enumerated below.
+ 	 */
+

Modified: genpatches-2.6/trunk/3.4/0000_README
===================================================================
--- genpatches-2.6/trunk/3.4/0000_README	2013-04-05 21:11:21 UTC (rev 2332)
+++ genpatches-2.6/trunk/3.4/0000_README	2013-04-08 07:17:58 UTC (rev 2333)
@@ -191,6 +191,10 @@
 From:   http://www.kernel.org
 Desc:   Linux 3.4.38
 
+Patch:  1038_linux-3.4.39.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.4.39
+
 Patch:  1700_correct-bnx2-firware-ver-mips.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=424609
 Desc:   Correct firmware version for bnx2 on mips
@@ -203,6 +207,10 @@
 From:   http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=commitdiff_plain;h=0829a19a6142af09eb4f9509cd1d3666270e87bd
 Desc:   Fix build warnings for lpfc when debugfs is not defined. Bug #432604
 
+Patch:  2700_ThinkPad-30-brightness-control-fix.patch
+From:   Seth Forshee <seth.forshee@canonical.com>
+Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads
+
 Patch:  4200_fbcondecor-0.9.6.patch
 From:   http://dev.gentoo.org/~spock
 Desc:   Bootsplash successor by Michal Januszewski ported by Alexxy

Added: genpatches-2.6/trunk/3.4/1038_linux-3.4.39.patch
===================================================================
--- genpatches-2.6/trunk/3.4/1038_linux-3.4.39.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.4/1038_linux-3.4.39.patch	2013-04-08 07:17:58 UTC (rev 2333)
@@ -0,0 +1,3168 @@
+diff --git a/Makefile b/Makefile
+index 255e00b..5de9b43 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 4
+-SUBLEVEL = 38
++SUBLEVEL = 39
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+ 
+diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
+index 43ba0fb..559ee24 100644
+--- a/arch/arm/include/asm/signal.h
++++ b/arch/arm/include/asm/signal.h
+@@ -127,6 +127,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
+index 031805b..7f26faf 100644
+--- a/arch/arm/mach-cns3xxx/core.c
++++ b/arch/arm/mach-cns3xxx/core.c
+@@ -22,19 +22,9 @@
+ 
+ static struct map_desc cns3xxx_io_desc[] __initdata = {
+ 	{
+-		.virtual	= CNS3XXX_TC11MP_TWD_BASE_VIRT,
+-		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
+-		.length		= SZ_4K,
+-		.type		= MT_DEVICE,
+-	}, {
+-		.virtual	= CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
+-		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
+-		.length		= SZ_4K,
+-		.type		= MT_DEVICE,
+-	}, {
+-		.virtual	= CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
+-		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
+-		.length		= SZ_4K,
++		.virtual	= CNS3XXX_TC11MP_SCU_BASE_VIRT,
++		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE),
++		.length		= SZ_8K,
+ 		.type		= MT_DEVICE,
+ 	}, {
+ 		.virtual	= CNS3XXX_TIMER1_2_3_BASE_VIRT,
+diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
+index 191c8e5..b1021aa 100644
+--- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
++++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
+@@ -94,10 +94,10 @@
+ #define RTC_INTR_STS_OFFSET			0x34
+ 
+ #define CNS3XXX_MISC_BASE			0x76000000	/* Misc Control */
+-#define CNS3XXX_MISC_BASE_VIRT			0xFFF07000	/* Misc Control */
++#define CNS3XXX_MISC_BASE_VIRT			0xFB000000	/* Misc Control */
+ 
+ #define CNS3XXX_PM_BASE				0x77000000	/* Power Management Control */
+-#define CNS3XXX_PM_BASE_VIRT			0xFFF08000
++#define CNS3XXX_PM_BASE_VIRT			0xFB001000
+ 
+ #define PM_CLK_GATE_OFFSET			0x00
+ #define PM_SOFT_RST_OFFSET			0x04
+@@ -109,7 +109,7 @@
+ #define PM_PLL_HM_PD_OFFSET			0x1C
+ 
+ #define CNS3XXX_UART0_BASE			0x78000000	/* UART 0 */
+-#define CNS3XXX_UART0_BASE_VIRT			0xFFF09000
++#define CNS3XXX_UART0_BASE_VIRT			0xFB002000
+ 
+ #define CNS3XXX_UART1_BASE			0x78400000	/* UART 1 */
+ #define CNS3XXX_UART1_BASE_VIRT			0xFFF0A000
+@@ -130,7 +130,7 @@
+ #define CNS3XXX_I2S_BASE_VIRT			0xFFF10000
+ 
+ #define CNS3XXX_TIMER1_2_3_BASE			0x7C800000	/* Timer */
+-#define CNS3XXX_TIMER1_2_3_BASE_VIRT		0xFFF10800
++#define CNS3XXX_TIMER1_2_3_BASE_VIRT		0xFB003000
+ 
+ #define TIMER1_COUNTER_OFFSET			0x00
+ #define TIMER1_AUTO_RELOAD_OFFSET		0x04
+@@ -227,16 +227,16 @@
+  * Testchip peripheral and fpga gic regions
+  */
+ #define CNS3XXX_TC11MP_SCU_BASE			0x90000000	/* IRQ, Test chip */
+-#define CNS3XXX_TC11MP_SCU_BASE_VIRT		0xFF000000
++#define CNS3XXX_TC11MP_SCU_BASE_VIRT		0xFB004000
+ 
+ #define CNS3XXX_TC11MP_GIC_CPU_BASE		0x90000100	/* Test chip interrupt controller CPU interface */
+-#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT	0xFF000100
++#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT	(CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100)
+ 
+ #define CNS3XXX_TC11MP_TWD_BASE			0x90000600
+-#define CNS3XXX_TC11MP_TWD_BASE_VIRT		0xFF000600
++#define CNS3XXX_TC11MP_TWD_BASE_VIRT		(CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600)
+ 
+ #define CNS3XXX_TC11MP_GIC_DIST_BASE		0x90001000	/* Test chip interrupt controller distributor */
+-#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT	0xFF001000
++#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT	(CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000)
+ 
+ #define CNS3XXX_TC11MP_L220_BASE		0x92002000	/* L220 registers */
+ #define CNS3XXX_TC11MP_L220_BASE_VIRT		0xFF002000
+diff --git a/arch/avr32/include/asm/signal.h b/arch/avr32/include/asm/signal.h
+index 8790dfc..e6952a0 100644
+--- a/arch/avr32/include/asm/signal.h
++++ b/arch/avr32/include/asm/signal.h
+@@ -128,6 +128,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/cris/include/asm/signal.h b/arch/cris/include/asm/signal.h
+index ea6af9a..057fea2 100644
+--- a/arch/cris/include/asm/signal.h
++++ b/arch/cris/include/asm/signal.h
+@@ -122,6 +122,7 @@ struct sigaction {
+ 	void (*sa_restorer)(void);
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h
+index fd8b66e..8695707 100644
+--- a/arch/h8300/include/asm/signal.h
++++ b/arch/h8300/include/asm/signal.h
+@@ -121,6 +121,7 @@ struct sigaction {
+ 	void (*sa_restorer)(void);
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
+index b2eeb0d..802d561 100644
+--- a/arch/m32r/include/asm/signal.h
++++ b/arch/m32r/include/asm/signal.h
+@@ -123,6 +123,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
+index 93fe83e..a20ae63 100644
+--- a/arch/m68k/include/asm/signal.h
++++ b/arch/m68k/include/asm/signal.h
+@@ -119,6 +119,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
+index 1865d72..eecaa76 100644
+--- a/arch/mn10300/include/asm/signal.h
++++ b/arch/mn10300/include/asm/signal.h
+@@ -131,6 +131,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
+index 3eb13be..ec63a0a 100644
+--- a/arch/powerpc/include/asm/signal.h
++++ b/arch/powerpc/include/asm/signal.h
+@@ -109,6 +109,7 @@ struct sigaction {
+ 	__sigrestore_t sa_restorer;
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h
+index cdf5cb2..c872626 100644
+--- a/arch/s390/include/asm/signal.h
++++ b/arch/s390/include/asm/signal.h
+@@ -131,6 +131,7 @@ struct sigaction {
+         void (*sa_restorer)(void);
+         sigset_t sa_mask;               /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+         struct sigaction sa;
+diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
+index aa42fe3..d07beb3 100644
+--- a/arch/sparc/include/asm/signal.h
++++ b/arch/sparc/include/asm/signal.h
+@@ -192,6 +192,7 @@ struct __old_sigaction {
+ 	unsigned long		sa_flags;
+ 	void			(*sa_restorer)(void);  /* not used by Linux/SPARC yet */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ typedef struct sigaltstack {
+ 	void			__user *ss_sp;
+diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
+index bff23f4..fd107ab 100644
+--- a/arch/tile/kernel/setup.c
++++ b/arch/tile/kernel/setup.c
+@@ -912,15 +912,8 @@ void __cpuinit setup_cpu(int boot)
+ 
+ #ifdef CONFIG_BLK_DEV_INITRD
+ 
+-/*
+- * Note that the kernel can potentially support other compression
+- * techniques than gz, though we don't do so by default.  If we ever
+- * decide to do so we can either look for other filename extensions,
+- * or just allow a file with this name to be compressed with an
+- * arbitrary compressor (somewhat counterintuitively).
+- */
+ static int __initdata set_initramfs_file;
+-static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
++static char __initdata initramfs_file[128] = "initramfs";
+ 
+ static int __init setup_initramfs_file(char *str)
+ {
+@@ -934,9 +927,9 @@ static int __init setup_initramfs_file(char *str)
+ early_param("initramfs_file", setup_initramfs_file);
+ 
+ /*
+- * We look for an "initramfs.cpio.gz" file in the hvfs.
+- * If there is one, we allocate some memory for it and it will be
+- * unpacked to the initramfs.
++ * We look for a file called "initramfs" in the hvfs.  If there is one, we
++ * allocate some memory for it and it will be unpacked to the initramfs.
++ * If it's compressed, the initd code will uncompress it first.
+  */
+ static void __init load_hv_initrd(void)
+ {
+@@ -946,10 +939,16 @@ static void __init load_hv_initrd(void)
+ 
+ 	fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
+ 	if (fd == HV_ENOENT) {
+-		if (set_initramfs_file)
++		if (set_initramfs_file) {
+ 			pr_warning("No such hvfs initramfs file '%s'\n",
+ 				   initramfs_file);
+-		return;
++			return;
++		} else {
++			/* Try old backwards-compatible name. */
++			fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
++			if (fd == HV_ENOENT)
++				return;
++		}
+ 	}
+ 	BUG_ON(fd < 0);
+ 	stat = hv_fs_fstat(fd);
+diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
+index 598457c..6cbc795 100644
+--- a/arch/x86/include/asm/signal.h
++++ b/arch/x86/include/asm/signal.h
+@@ -125,6 +125,8 @@ typedef unsigned long sigset_t;
+ extern void do_notify_resume(struct pt_regs *, void *, __u32);
+ # endif /* __KERNEL__ */
+ 
++#define __ARCH_HAS_SA_RESTORER
++
+ #ifdef __i386__
+ # ifdef __KERNEL__
+ struct old_sigaction {
+diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
+index 633ba73..75edf8a 100644
+--- a/arch/xtensa/include/asm/signal.h
++++ b/arch/xtensa/include/asm/signal.h
+@@ -133,6 +133,7 @@ struct sigaction {
+ 	void (*sa_restorer)(void);
+ 	sigset_t sa_mask;		/* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+ 
+ struct k_sigaction {
+ 	struct sigaction sa;
+diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
+index 887f68f..db30542 100644
+--- a/drivers/block/aoe/aoecmd.c
++++ b/drivers/block/aoe/aoecmd.c
+@@ -30,8 +30,9 @@ new_skb(ulong len)
+ {
+ 	struct sk_buff *skb;
+ 
+-	skb = alloc_skb(len, GFP_ATOMIC);
++	skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
+ 	if (skb) {
++		skb_reserve(skb, MAX_HEADER);
+ 		skb_reset_mac_header(skb);
+ 		skb_reset_network_header(skb);
+ 		skb->protocol = __constant_htons(ETH_P_AOE);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 5e3be05..179b5b4 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -908,6 +908,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ 		lo->lo_flags |= LO_FLAGS_PARTSCAN;
+ 	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
+ 		ioctl_by_bdev(bdev, BLKRRPART, 0);
++
++	/* Grab the block_device to prevent its destruction after we
++	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
++	 */
++	bdgrab(bdev);
+ 	return 0;
+ 
+ out_clr:
+@@ -1004,8 +1009,10 @@ static int loop_clr_fd(struct loop_device *lo)
+ 	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
+ 	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
+ 	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+-	if (bdev)
++	if (bdev) {
++		bdput(bdev);
+ 		invalidate_bdev(bdev);
++	}
+ 	set_capacity(lo->lo_disk, 0);
+ 	loop_sysfs_exit(lo);
+ 	if (bdev) {
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 73d8c92..4fd1dea 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -422,6 +422,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
+ 	return err;
+ }
+ 
++static int dispatch_other_io(struct xen_blkif *blkif,
++			     struct blkif_request *req,
++			     struct pending_req *pending_req)
++{
++	free_req(pending_req);
++	make_response(blkif, req->u.other.id, req->operation,
++		      BLKIF_RSP_EOPNOTSUPP);
++	return -EIO;
++}
++
+ static void xen_blk_drain_io(struct xen_blkif *blkif)
+ {
+ 	atomic_set(&blkif->drain, 1);
+@@ -543,17 +553,30 @@ __do_block_io_op(struct xen_blkif *blkif)
+ 
+ 		/* Apply all sanity checks to /private copy/ of request. */
+ 		barrier();
+-		if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
++
++		switch (req.operation) {
++		case BLKIF_OP_READ:
++		case BLKIF_OP_WRITE:
++		case BLKIF_OP_WRITE_BARRIER:
++		case BLKIF_OP_FLUSH_DISKCACHE:
++			if (dispatch_rw_block_io(blkif, &req, pending_req))
++				goto done;
++			break;
++		case BLKIF_OP_DISCARD:
+ 			free_req(pending_req);
+ 			if (dispatch_discard_io(blkif, &req))
+-				break;
+-		} else if (dispatch_rw_block_io(blkif, &req, pending_req))
++				goto done;
+ 			break;
++		default:
++			if (dispatch_other_io(blkif, &req, pending_req))
++				goto done;
++			break;
++		}
+ 
+ 		/* Yield point for this unbounded loop. */
+ 		cond_resched();
+ 	}
+-
++done:
+ 	return more_to_do;
+ }
+ 
+@@ -623,6 +646,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+ 		goto fail_response;
+ 	}
+ 
++	preq.dev           = req->u.rw.handle;
+ 	preq.sector_number = req->u.rw.sector_number;
+ 	preq.nr_sects      = 0;
+ 
+@@ -719,13 +743,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+ 		bio->bi_end_io  = end_block_io_op;
+ 	}
+ 
+-	/*
+-	 * We set it one so that the last submit_bio does not have to call
+-	 * atomic_inc.
+-	 */
+ 	atomic_set(&pending_req->pendcnt, nbio);
+-
+-	/* Get a reference count for the disk queue and start sending I/O */
+ 	blk_start_plug(&plug);
+ 
+ 	for (i = 0; i < nbio; i++)
+@@ -753,6 +771,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+  fail_put_bio:
+ 	for (i = 0; i < nbio; i++)
+ 		bio_put(biolist[i]);
++	atomic_set(&pending_req->pendcnt, 1);
+ 	__end_block_io_op(pending_req, -EINVAL);
+ 	msleep(1); /* back off a bit */
+ 	return -EIO;
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index 9ad3b5e..fc2a486 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -76,11 +76,18 @@ struct blkif_x86_32_request_discard {
+ 	uint64_t       nr_sectors;
+ } __attribute__((__packed__));
+ 
++struct blkif_x86_32_request_other {
++	uint8_t        _pad1;
++	blkif_vdev_t   _pad2;
++	uint64_t       id;           /* private guest value, echoed in resp  */
++} __attribute__((__packed__));
++
+ struct blkif_x86_32_request {
+ 	uint8_t        operation;    /* BLKIF_OP_???                         */
+ 	union {
+ 		struct blkif_x86_32_request_rw rw;
+ 		struct blkif_x86_32_request_discard discard;
++		struct blkif_x86_32_request_other other;
+ 	} u;
+ } __attribute__((__packed__));
+ 
+@@ -112,11 +119,19 @@ struct blkif_x86_64_request_discard {
+ 	uint64_t       nr_sectors;
+ } __attribute__((__packed__));
+ 
++struct blkif_x86_64_request_other {
++	uint8_t        _pad1;
++	blkif_vdev_t   _pad2;
++	uint32_t       _pad3;        /* offsetof(blkif_..,u.discard.id)==8   */
++	uint64_t       id;           /* private guest value, echoed in resp  */
++} __attribute__((__packed__));
++
+ struct blkif_x86_64_request {
+ 	uint8_t        operation;    /* BLKIF_OP_???                         */
+ 	union {
+ 		struct blkif_x86_64_request_rw rw;
+ 		struct blkif_x86_64_request_discard discard;
++		struct blkif_x86_64_request_other other;
+ 	} u;
+ } __attribute__((__packed__));
+ 
+@@ -262,6 +277,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
+ 		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ 		break;
+ 	default:
++		/*
++		 * Don't know how to translate this op. Only get the
++		 * ID so failure can be reported to the frontend.
++		 */
++		dst->u.other.id = src->u.other.id;
+ 		break;
+ 	}
+ }
+@@ -293,6 +313,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
+ 		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
+ 		break;
+ 	default:
++		/*
++		 * Don't know how to translate this op. Only get the
++		 * ID so failure can be reported to the frontend.
++		 */
++		dst->u.other.id = src->u.other.id;
+ 		break;
+ 	}
+ }
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 1ae7039..681be14 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -72,8 +72,10 @@ static struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x03F0, 0x311D) },
+ 
+ 	/* Atheros AR3012 with sflash firmware*/
++	{ USB_DEVICE(0x0CF3, 0x0036) },
+ 	{ USB_DEVICE(0x0CF3, 0x3004) },
+ 	{ USB_DEVICE(0x0CF3, 0x311D) },
++	{ USB_DEVICE(0x0CF3, 0x817a) },
+ 	{ USB_DEVICE(0x13d3, 0x3375) },
+ 	{ USB_DEVICE(0x04CA, 0x3005) },
+ 	{ USB_DEVICE(0x13d3, 0x3362) },
+@@ -93,8 +95,10 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
+ static struct usb_device_id ath3k_blist_tbl[] = {
+ 
+ 	/* Atheros AR3012 with sflash firmware*/
++	{ USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 27f9d9f..56fede1 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -136,8 +136,10 @@ static struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+ 
+ 	/* Atheros 3012 with sflash firmware */
++	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index bfd8f43..2cbb675 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -122,6 +122,9 @@ struct efivar_attribute {
+ 	ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
+ };
+ 
++static struct efivars __efivars;
++static struct efivar_operations ops;
++
+ #define PSTORE_EFI_ATTRIBUTES \
+ 	(EFI_VARIABLE_NON_VOLATILE | \
+ 	 EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+@@ -942,6 +945,53 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ 	return count;
+ }
+ 
++static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
++{
++	struct efivar_entry *entry, *n;
++	struct efivars *efivars = &__efivars;
++	unsigned long strsize1, strsize2;
++	bool found = false;
++
++	strsize1 = utf16_strsize(variable_name, 1024);
++	list_for_each_entry_safe(entry, n, &efivars->list, list) {
++		strsize2 = utf16_strsize(entry->var.VariableName, 1024);
++		if (strsize1 == strsize2 &&
++			!memcmp(variable_name, &(entry->var.VariableName),
++				strsize2) &&
++			!efi_guidcmp(entry->var.VendorGuid,
++				*vendor)) {
++			found = true;
++			break;
++		}
++	}
++	return found;
++}
++
++/*
++ * Returns the size of variable_name, in bytes, including the
++ * terminating NULL character, or variable_name_size if no NULL
++ * character is found among the first variable_name_size bytes.
++ */
++static unsigned long var_name_strnsize(efi_char16_t *variable_name,
++				       unsigned long variable_name_size)
++{
++	unsigned long len;
++	efi_char16_t c;
++
++	/*
++	 * The variable name is, by definition, a NULL-terminated
++	 * string, so make absolutely sure that variable_name_size is
++	 * the value we expect it to be. If not, return the real size.
++	 */
++	for (len = 2; len <= variable_name_size; len += sizeof(c)) {
++		c = variable_name[(len / sizeof(c)) - 1];
++		if (!c)
++			break;
++	}
++
++	return min(len, variable_name_size);
++}
++
+ /*
+  * Let's not leave out systab information that snuck into
+  * the efivars driver
+@@ -1129,6 +1179,28 @@ void unregister_efivars(struct efivars *efivars)
+ }
+ EXPORT_SYMBOL_GPL(unregister_efivars);
+ 
++/*
++ * Print a warning when duplicate EFI variables are encountered and
++ * disable the sysfs workqueue since the firmware is buggy.
++ */
++static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
++			     unsigned long len16)
++{
++	size_t i, len8 = len16 / sizeof(efi_char16_t);
++	char *s8;
++
++	s8 = kzalloc(len8, GFP_KERNEL);
++	if (!s8)
++		return;
++
++	for (i = 0; i < len8; i++)
++		s8[i] = s16[i];
++
++	printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
++	       s8, vendor_guid);
++	kfree(s8);
++}
++
+ int register_efivars(struct efivars *efivars,
+ 		     const struct efivar_operations *ops,
+ 		     struct kobject *parent_kobj)
+@@ -1169,6 +1241,24 @@ int register_efivars(struct efivars *efivars,
+ 						&vendor_guid);
+ 		switch (status) {
+ 		case EFI_SUCCESS:
++			variable_name_size = var_name_strnsize(variable_name,
++							       variable_name_size);
++
++			/*
++			 * Some firmware implementations return the
++			 * same variable name on multiple calls to
++			 * get_next_variable(). Terminate the loop
++			 * immediately as there is no guarantee that
++			 * we'll ever see a different variable name,
++			 * and may end up looping here forever.
++			 */
++			if (variable_is_present(variable_name, &vendor_guid)) {
++				dup_variable_bug(variable_name, &vendor_guid,
++						 variable_name_size);
++				status = EFI_NOT_FOUND;
++				break;
++			}
++
+ 			efivar_create_sysfs_entry(efivars,
+ 						  variable_name_size,
+ 						  variable_name,
+@@ -1205,9 +1295,6 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(register_efivars);
+ 
+-static struct efivars __efivars;
+-static struct efivar_operations ops;
+-
+ /*
+  * For now we register the efi subsystem with the firmware subsystem
+  * and the vars subsystem with the efi subsystem.  In the future, it
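
The var_name_strnsize() helper added above guards against firmware that reports a bogus variable_name_size. A self-contained userspace sketch of the same bounded scan, using plain uint16_t in place of efi_char16_t; the 1024-byte cap is only an example value.

/* Bounded UTF-16 name size: bytes including the NUL, capped at size. */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t char16;

static unsigned long name_strnsize(const char16 *name, unsigned long size)
{
	unsigned long len;

	for (len = 2; len <= size; len += sizeof(char16)) {
		if (name[(len / sizeof(char16)) - 1] == 0)
			break;              /* include the NUL we just saw */
	}
	return len < size ? len : size;     /* never trust size from firmware */
}

int main(void)
{
	char16 boot[] = { 'B', 'o', 'o', 't', 0 };

	/* Firmware claims 1024 bytes; the real size is 5 chars = 10 bytes. */
	printf("%lu\n", name_strnsize(boot, 1024));   /* prints 10 */
	return 0;
}
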
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 7ccf896..84867a8 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7567,8 +7567,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	struct intel_framebuffer *intel_fb;
+-	struct drm_i915_gem_object *obj;
++	struct drm_framebuffer *old_fb = crtc->fb;
++	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ 	struct intel_unpin_work *work;
+ 	unsigned long flags;
+@@ -7580,8 +7580,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ 
+ 	work->event = event;
+ 	work->dev = crtc->dev;
+-	intel_fb = to_intel_framebuffer(crtc->fb);
+-	work->old_fb_obj = intel_fb->obj;
++	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+ 	INIT_WORK(&work->work, intel_unpin_work_fn);
+ 
+ 	ret = drm_vblank_get(dev, intel_crtc->pipe);
+@@ -7601,9 +7600,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ 	intel_crtc->unpin_work = work;
+ 	spin_unlock_irqrestore(&dev->event_lock, flags);
+ 
+-	intel_fb = to_intel_framebuffer(fb);
+-	obj = intel_fb->obj;
+-
+ 	mutex_lock(&dev->struct_mutex);
+ 
+ 	/* Reference the objects for the scheduled work. */
+@@ -7634,6 +7630,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ 
+ cleanup_pending:
+ 	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
++	crtc->fb = old_fb;
+ 	drm_gem_object_unreference(&work->old_fb_obj->base);
+ 	drm_gem_object_unreference(&obj->base);
+ 	mutex_unlock(&dev->struct_mutex);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 02f4664..14d2239 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -644,6 +644,9 @@
+ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008		0x3008
+ #define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN	0x3001
+ 
++#define USB_VENDOR_ID_REALTEK		0x0bda
++#define USB_DEVICE_ID_REALTEK_READER	0x0152
++
+ #define USB_VENDOR_ID_ROCCAT		0x1e7d
+ #define USB_DEVICE_ID_ROCCAT_ARVO	0x30d4
+ #define USB_DEVICE_ID_ROCCAT_ISKU	0x319c
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 9fea98f..5c4112e 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -77,6 +77,7 @@ static const struct hid_blacklist {
+ 	{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
++	{ USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
+ 	{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
+ 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 57ed244..0569843 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2270,18 +2270,16 @@ static int device_change_notifier(struct notifier_block *nb,
+ 
+ 		/* allocate a protection domain if a device is added */
+ 		dma_domain = find_protection_domain(devid);
+-		if (dma_domain)
+-			goto out;
+-		dma_domain = dma_ops_domain_alloc();
+-		if (!dma_domain)
+-			goto out;
+-		dma_domain->target_dev = devid;
+-
+-		spin_lock_irqsave(&iommu_pd_list_lock, flags);
+-		list_add_tail(&dma_domain->list, &iommu_pd_list);
+-		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+-
+-		dev_data = get_dev_data(dev);
++		if (!dma_domain) {
++			dma_domain = dma_ops_domain_alloc();
++			if (!dma_domain)
++				goto out;
++			dma_domain->target_dev = devid;
++
++			spin_lock_irqsave(&iommu_pd_list_lock, flags);
++			list_add_tail(&dma_domain->list, &iommu_pd_list);
++			spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
++		}
+ 
+ 		dev->archdata.dma_ops = &amd_iommu_dma_ops;
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index dadf337..d9f646f 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1956,12 +1956,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+ 		return -EINVAL;
+ 	}
+ 
++	write_unlock_bh(&bond->lock);
+ 	/* unregister rx_handler early so bond_handle_frame wouldn't be called
+ 	 * for this slave anymore.
+ 	 */
+ 	netdev_rx_handler_unregister(slave_dev);
+-	write_unlock_bh(&bond->lock);
+-	synchronize_net();
+ 	write_lock_bh(&bond->lock);
+ 
+ 	if (!bond->params.fail_over_mac) {
+@@ -3398,6 +3397,28 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+ 
+ /*-------------------------- Device entry points ----------------------------*/
+ 
++static void bond_work_init_all(struct bonding *bond)
++{
++	INIT_DELAYED_WORK(&bond->mcast_work,
++			  bond_resend_igmp_join_requests_delayed);
++	INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
++	INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
++	if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
++		INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
++	else
++		INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
++	INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
++}
++
++static void bond_work_cancel_all(struct bonding *bond)
++{
++	cancel_delayed_work_sync(&bond->mii_work);
++	cancel_delayed_work_sync(&bond->arp_work);
++	cancel_delayed_work_sync(&bond->alb_work);
++	cancel_delayed_work_sync(&bond->ad_work);
++	cancel_delayed_work_sync(&bond->mcast_work);
++}
++
+ static int bond_open(struct net_device *bond_dev)
+ {
+ 	struct bonding *bond = netdev_priv(bond_dev);
+@@ -3420,41 +3441,27 @@ static int bond_open(struct net_device *bond_dev)
+ 	}
+ 	read_unlock(&bond->lock);
+ 
+-	INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
++	bond_work_init_all(bond);
+ 
+ 	if (bond_is_lb(bond)) {
+ 		/* bond_alb_initialize must be called before the timer
+ 		 * is started.
+ 		 */
+-		if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
+-			/* something went wrong - fail the open operation */
++		if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
+ 			return -ENOMEM;
+-		}
+-
+-		INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
+ 		queue_delayed_work(bond->wq, &bond->alb_work, 0);
+ 	}
+ 
+-	if (bond->params.miimon) {  /* link check interval, in milliseconds. */
+-		INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
++	if (bond->params.miimon)  /* link check interval, in milliseconds. */
+ 		queue_delayed_work(bond->wq, &bond->mii_work, 0);
+-	}
+ 
+ 	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
+-		if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+-			INIT_DELAYED_WORK(&bond->arp_work,
+-					  bond_activebackup_arp_mon);
+-		else
+-			INIT_DELAYED_WORK(&bond->arp_work,
+-					  bond_loadbalance_arp_mon);
+-
+ 		queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ 		if (bond->params.arp_validate)
+ 			bond->recv_probe = bond_arp_rcv;
+ 	}
+ 
+ 	if (bond->params.mode == BOND_MODE_8023AD) {
+-		INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
+ 		queue_delayed_work(bond->wq, &bond->ad_work, 0);
+ 		/* register to receive LACPDUs */
+ 		bond->recv_probe = bond_3ad_lacpdu_recv;
+@@ -3469,34 +3476,10 @@ static int bond_close(struct net_device *bond_dev)
+ 	struct bonding *bond = netdev_priv(bond_dev);
+ 
+ 	write_lock_bh(&bond->lock);
+-
+ 	bond->send_peer_notif = 0;
+-
+ 	write_unlock_bh(&bond->lock);
+ 
+-	if (bond->params.miimon) {  /* link check interval, in milliseconds. */
+-		cancel_delayed_work_sync(&bond->mii_work);
+-	}
+-
+-	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
+-		cancel_delayed_work_sync(&bond->arp_work);
+-	}
+-
+-	switch (bond->params.mode) {
+-	case BOND_MODE_8023AD:
+-		cancel_delayed_work_sync(&bond->ad_work);
+-		break;
+-	case BOND_MODE_TLB:
+-	case BOND_MODE_ALB:
+-		cancel_delayed_work_sync(&bond->alb_work);
+-		break;
+-	default:
+-		break;
+-	}
+-
+-	if (delayed_work_pending(&bond->mcast_work))
+-		cancel_delayed_work_sync(&bond->mcast_work);
+-
++	bond_work_cancel_all(bond);
+ 	if (bond_is_lb(bond)) {
+ 		/* Must be called only after all
+ 		 * slaves have been released
+@@ -4375,26 +4358,6 @@ static void bond_setup(struct net_device *bond_dev)
+ 	bond_dev->features |= bond_dev->hw_features;
+ }
+ 
+-static void bond_work_cancel_all(struct bonding *bond)
+-{
+-	if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
+-		cancel_delayed_work_sync(&bond->mii_work);
+-
+-	if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
+-		cancel_delayed_work_sync(&bond->arp_work);
+-
+-	if (bond->params.mode == BOND_MODE_ALB &&
+-	    delayed_work_pending(&bond->alb_work))
+-		cancel_delayed_work_sync(&bond->alb_work);
+-
+-	if (bond->params.mode == BOND_MODE_8023AD &&
+-	    delayed_work_pending(&bond->ad_work))
+-		cancel_delayed_work_sync(&bond->ad_work);
+-
+-	if (delayed_work_pending(&bond->mcast_work))
+-		cancel_delayed_work_sync(&bond->mcast_work);
+-}
+-
+ /*
+ * Destroy a bonding device.
+ * Must be under rtnl_lock when this function is called.
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index 6734737..c40c0a8 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -183,6 +183,11 @@ int bond_create_slave_symlinks(struct net_device *master,
+ 	sprintf(linkname, "slave_%s", slave->name);
+ 	ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
+ 				linkname);
++
++	/* free the master link created earlier in case of error */
++	if (ret)
++		sysfs_remove_link(&(slave->dev.kobj), "master");
++
+ 	return ret;
+ 
+ }
+@@ -513,6 +518,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ 	int new_value, ret = count;
+ 	struct bonding *bond = to_bond(d);
+ 
++	if (!rtnl_trylock())
++		return restart_syscall();
+ 	if (sscanf(buf, "%d", &new_value) != 1) {
+ 		pr_err("%s: no arp_interval value specified.\n",
+ 		       bond->dev->name);
+@@ -520,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ 		goto out;
+ 	}
+ 	if (new_value < 0) {
+-		pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
++		pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
+ 		       bond->dev->name, new_value, INT_MAX);
+ 		ret = -EINVAL;
+ 		goto out;
+@@ -535,18 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ 	pr_info("%s: Setting ARP monitoring interval to %d.\n",
+ 		bond->dev->name, new_value);
+ 	bond->params.arp_interval = new_value;
+-	if (bond->params.miimon) {
+-		pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+-			bond->dev->name, bond->dev->name);
+-		bond->params.miimon = 0;
+-		if (delayed_work_pending(&bond->mii_work)) {
+-			cancel_delayed_work(&bond->mii_work);
+-			flush_workqueue(bond->wq);
++	if (new_value) {
++		if (bond->params.miimon) {
++			pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
++				bond->dev->name, bond->dev->name);
++			bond->params.miimon = 0;
+ 		}
+-	}
+-	if (!bond->params.arp_targets[0]) {
+-		pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+-			bond->dev->name);
++		if (!bond->params.arp_targets[0])
++			pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
++				bond->dev->name);
+ 	}
+ 	if (bond->dev->flags & IFF_UP) {
+ 		/* If the interface is up, we may need to fire off
+@@ -554,19 +558,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ 		 * timer will get fired off when the open function
+ 		 * is called.
+ 		 */
+-		if (!delayed_work_pending(&bond->arp_work)) {
+-			if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+-				INIT_DELAYED_WORK(&bond->arp_work,
+-						  bond_activebackup_arp_mon);
+-			else
+-				INIT_DELAYED_WORK(&bond->arp_work,
+-						  bond_loadbalance_arp_mon);
+-
++		if (!new_value) {
++			cancel_delayed_work_sync(&bond->arp_work);
++		} else {
++			cancel_delayed_work_sync(&bond->mii_work);
+ 			queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ 		}
+ 	}
+-
+ out:
++	rtnl_unlock();
+ 	return ret;
+ }
+ static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
+@@ -706,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
+ 	}
+ 	if (new_value < 0) {
+ 		pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+-		       bond->dev->name, new_value, 1, INT_MAX);
++		       bond->dev->name, new_value, 0, INT_MAX);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	} else {
+@@ -761,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d,
+ 		goto out;
+ 	}
+ 	if (new_value < 0) {
+-		pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+-		       bond->dev->name, new_value, 1, INT_MAX);
++		pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
++		       bond->dev->name, new_value, 0, INT_MAX);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	} else {
+@@ -962,6 +962,8 @@ static ssize_t bonding_store_miimon(struct device *d,
+ 	int new_value, ret = count;
+ 	struct bonding *bond = to_bond(d);
+ 
++	if (!rtnl_trylock())
++		return restart_syscall();
+ 	if (sscanf(buf, "%d", &new_value) != 1) {
+ 		pr_err("%s: no miimon value specified.\n",
+ 		       bond->dev->name);
+@@ -970,50 +972,43 @@ static ssize_t bonding_store_miimon(struct device *d,
+ 	}
+ 	if (new_value < 0) {
+ 		pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
+-		       bond->dev->name, new_value, 1, INT_MAX);
++		       bond->dev->name, new_value, 0, INT_MAX);
+ 		ret = -EINVAL;
+ 		goto out;
+-	} else {
+-		pr_info("%s: Setting MII monitoring interval to %d.\n",
+-			bond->dev->name, new_value);
+-		bond->params.miimon = new_value;
+-		if (bond->params.updelay)
+-			pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+-				bond->dev->name,
+-				bond->params.updelay * bond->params.miimon);
+-		if (bond->params.downdelay)
+-			pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+-				bond->dev->name,
+-				bond->params.downdelay * bond->params.miimon);
+-		if (bond->params.arp_interval) {
+-			pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+-				bond->dev->name);
+-			bond->params.arp_interval = 0;
+-			if (bond->params.arp_validate) {
+-				bond->params.arp_validate =
+-					BOND_ARP_VALIDATE_NONE;
+-			}
+-			if (delayed_work_pending(&bond->arp_work)) {
+-				cancel_delayed_work(&bond->arp_work);
+-				flush_workqueue(bond->wq);
+-			}
+-		}
+-
+-		if (bond->dev->flags & IFF_UP) {
+-			/* If the interface is up, we may need to fire off
+-			 * the MII timer. If the interface is down, the
+-			 * timer will get fired off when the open function
+-			 * is called.
+-			 */
+-			if (!delayed_work_pending(&bond->mii_work)) {
+-				INIT_DELAYED_WORK(&bond->mii_work,
+-						  bond_mii_monitor);
+-				queue_delayed_work(bond->wq,
+-						   &bond->mii_work, 0);
+-			}
++	}
++	pr_info("%s: Setting MII monitoring interval to %d.\n",
++		bond->dev->name, new_value);
++	bond->params.miimon = new_value;
++	if (bond->params.updelay)
++		pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
++			bond->dev->name,
++			bond->params.updelay * bond->params.miimon);
++	if (bond->params.downdelay)
++		pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
++			bond->dev->name,
++			bond->params.downdelay * bond->params.miimon);
++	if (new_value && bond->params.arp_interval) {
++		pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
++			bond->dev->name);
++		bond->params.arp_interval = 0;
++		if (bond->params.arp_validate)
++			bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
++	}
++	if (bond->dev->flags & IFF_UP) {
++		/* If the interface is up, we may need to fire off
++		 * the MII timer. If the interface is down, the
++		 * timer will get fired off when the open function
++		 * is called.
++		 */
++		if (!new_value) {
++			cancel_delayed_work_sync(&bond->mii_work);
++		} else {
++			cancel_delayed_work_sync(&bond->arp_work);
++			queue_delayed_work(bond->wq, &bond->mii_work, 0);
+ 		}
+ 	}
+ out:
++	rtnl_unlock();
+ 	return ret;
+ }
+ static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
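
Both sysfs store handlers above now take the lock with rtnl_trylock() and bail out via restart_syscall() instead of blocking. A rough userspace analogue of that try-or-retry shape, built on a pthread mutex; the names and return value are placeholders, not the kernel's.

/* Never block inside the store handler: if the big lock is contended,
 * tell the caller to retry instead of sleeping with other locks held. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static int store_setting(int new_value, int *setting)
{
	if (pthread_mutex_trylock(&big_lock) != 0)
		return -1;                  /* contended: caller should retry */

	*setting = new_value;               /* update under the lock */
	pthread_mutex_unlock(&big_lock);
	return 0;
}

int main(void)
{
	int miimon = 0;

	printf("store -> %d, miimon = %d\n", store_setting(100, &miimon), miimon);
	return 0;
}
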
+diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
+index a227586..ee280b2 100644
+--- a/drivers/net/can/sja1000/plx_pci.c
++++ b/drivers/net/can/sja1000/plx_pci.c
+@@ -329,7 +329,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
+ 	 */
+ 	if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
+ 	    REG_CR_BASICCAN_INITIAL &&
+-	    (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
++	    (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+ 	    (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
+ 		flag = 1;
+ 
+@@ -341,7 +341,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
+ 	 * See states on p. 23 of the Datasheet.
+ 	 */
+ 	if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
+-	    priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
++	    priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
+ 	    priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
+ 		return flag;
+ 
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 5e10472..c2309ec 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -91,7 +91,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
+ 	 */
+ 	spin_lock_irqsave(&priv->cmdreg_lock, flags);
+ 	priv->write_reg(priv, REG_CMR, val);
+-	priv->read_reg(priv, REG_SR);
++	priv->read_reg(priv, SJA1000_REG_SR);
+ 	spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
+ }
+ 
+@@ -496,7 +496,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ 
+ 	while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
+ 		n++;
+-		status = priv->read_reg(priv, REG_SR);
++		status = priv->read_reg(priv, SJA1000_REG_SR);
+ 		/* check for absent controller due to hw unplug */
+ 		if (status == 0xFF && sja1000_is_absent(priv))
+ 			return IRQ_NONE;
+@@ -515,7 +515,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ 			/* receive interrupt */
+ 			while (status & SR_RBS) {
+ 				sja1000_rx(dev);
+-				status = priv->read_reg(priv, REG_SR);
++				status = priv->read_reg(priv, SJA1000_REG_SR);
+ 				/* check for absent controller */
+ 				if (status == 0xFF && sja1000_is_absent(priv))
+ 					return IRQ_NONE;
+diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
+index 23fff06..2a79543 100644
+--- a/drivers/net/can/sja1000/sja1000.h
++++ b/drivers/net/can/sja1000/sja1000.h
+@@ -56,7 +56,7 @@
+ /* SJA1000 registers - manual section 6.4 (Pelican Mode) */
+ #define REG_MOD		0x00
+ #define REG_CMR		0x01
+-#define REG_SR		0x02
++#define SJA1000_REG_SR		0x02
+ #define REG_IR		0x03
+ #define REG_IER		0x04
+ #define REG_ALC		0x0B
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+index 829b5ad..edfdf6b 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+@@ -438,7 +438,6 @@ struct atl1e_adapter {
+ 	struct atl1e_hw        hw;
+ 	struct atl1e_hw_stats  hw_stats;
+ 
+-	bool have_msi;
+ 	u32 wol;
+ 	u16 link_speed;
+ 	u16 link_duplex;
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index 93ff2b2..f964151 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1870,37 +1870,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
+ 	struct net_device *netdev = adapter->netdev;
+ 
+ 	free_irq(adapter->pdev->irq, netdev);
+-
+-	if (adapter->have_msi)
+-		pci_disable_msi(adapter->pdev);
+ }
+ 
+ static int atl1e_request_irq(struct atl1e_adapter *adapter)
+ {
+ 	struct pci_dev    *pdev   = adapter->pdev;
+ 	struct net_device *netdev = adapter->netdev;
+-	int flags = 0;
+ 	int err = 0;
+ 
+-	adapter->have_msi = true;
+-	err = pci_enable_msi(adapter->pdev);
+-	if (err) {
+-		netdev_dbg(adapter->netdev,
+-			   "Unable to allocate MSI interrupt Error: %d\n", err);
+-		adapter->have_msi = false;
+-	} else
+-		netdev->irq = pdev->irq;
+-
+-
+-	if (!adapter->have_msi)
+-		flags |= IRQF_SHARED;
+-	err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
+-			netdev->name, netdev);
++	err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED,
++			  netdev->name, netdev);
+ 	if (err) {
+ 		netdev_dbg(adapter->netdev,
+ 			   "Unable to allocate interrupt Error: %d\n", err);
+-		if (adapter->have_msi)
+-			pci_disable_msi(adapter->pdev);
+ 		return err;
+ 	}
+ 	netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 15ed91f..3551ad8 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -13590,8 +13590,11 @@ static void __devinit tg3_read_vpd(struct tg3 *tp)
+ 		if (j + len > block_end)
+ 			goto partno;
+ 
+-		memcpy(tp->fw_ver, &vpd_data[j], len);
+-		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
++		if (len >= sizeof(tp->fw_ver))
++			len = sizeof(tp->fw_ver) - 1;
++		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
++		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
++			 &vpd_data[j]);
+ 	}
+ 
+ partno:
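
The tg3 hunk above replaces an unbounded memcpy()/strncat() pair with a bounded snprintf(). A small sketch of why the "%.*s" form is safe when the source is not NUL-terminated at len; the buffer size and VPD string below are invented.

/* Bounded copy of a non-NUL-terminated source into a small buffer. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char fw_ver[16];
	const char vpd_data[] = "5719-v1.24XXXXXXXXXXXXXXXX"; /* no NUL at len */
	int len = 10;                       /* length parsed from the VPD block */

	if (len >= (int)sizeof(fw_ver))
		len = sizeof(fw_ver) - 1;
	memset(fw_ver, 0, sizeof(fw_ver));
	snprintf(fw_ver, sizeof(fw_ver), "%.*s bc ", len, vpd_data);

	printf("'%s'\n", fw_ver);           /* '5719-v1.24 bc ' */
	return 0;
}
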
+diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
+index 36499d5..a9628b6 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
+ 		tmp = readl(reg);
+ }
+ 
++/*
++ * Sleep, either by using msleep() or if we are suspending, then
++ * use mdelay() to sleep.
++ */
++static void dm9000_msleep(board_info_t *db, unsigned int ms)
++{
++	if (db->in_suspend)
++		mdelay(ms);
++	else
++		msleep(ms);
++}
++
++/* Read a word from phyxcer */
++static int
++dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
++{
++	board_info_t *db = netdev_priv(dev);
++	unsigned long flags;
++	unsigned int reg_save;
++	int ret;
++
++	mutex_lock(&db->addr_lock);
++
++	spin_lock_irqsave(&db->lock, flags);
++
++	/* Save previous register address */
++	reg_save = readb(db->io_addr);
++
++	/* Fill the phyxcer register into REG_0C */
++	iow(db, DM9000_EPAR, DM9000_PHY | reg);
++
++	/* Issue phyxcer read command */
++	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
++
++	writeb(reg_save, db->io_addr);
++	spin_unlock_irqrestore(&db->lock, flags);
++
++	dm9000_msleep(db, 1);		/* Wait read complete */
++
++	spin_lock_irqsave(&db->lock, flags);
++	reg_save = readb(db->io_addr);
++
++	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */
++
++	/* The read data keeps on REG_0D & REG_0E */
++	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
++
++	/* restore the previous address */
++	writeb(reg_save, db->io_addr);
++	spin_unlock_irqrestore(&db->lock, flags);
++
++	mutex_unlock(&db->addr_lock);
++
++	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
++	return ret;
++}
++
++/* Write a word to phyxcer */
++static void
++dm9000_phy_write(struct net_device *dev,
++		 int phyaddr_unused, int reg, int value)
++{
++	board_info_t *db = netdev_priv(dev);
++	unsigned long flags;
++	unsigned long reg_save;
++
++	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
++	mutex_lock(&db->addr_lock);
++
++	spin_lock_irqsave(&db->lock, flags);
++
++	/* Save previous register address */
++	reg_save = readb(db->io_addr);
++
++	/* Fill the phyxcer register into REG_0C */
++	iow(db, DM9000_EPAR, DM9000_PHY | reg);
++
++	/* Fill the written data into REG_0D & REG_0E */
++	iow(db, DM9000_EPDRL, value);
++	iow(db, DM9000_EPDRH, value >> 8);
++
++	/* Issue phyxcer write command */
++	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
++
++	writeb(reg_save, db->io_addr);
++	spin_unlock_irqrestore(&db->lock, flags);
++
++	dm9000_msleep(db, 1);		/* Wait write complete */
++
++	spin_lock_irqsave(&db->lock, flags);
++	reg_save = readb(db->io_addr);
++
++	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */
++
++	/* restore the previous address */
++	writeb(reg_save, db->io_addr);
++
++	spin_unlock_irqrestore(&db->lock, flags);
++	mutex_unlock(&db->addr_lock);
++}
++
+ /* dm9000_set_io
+  *
+  * select the specified set of io routines to use with the
+@@ -794,6 +895,9 @@ dm9000_init_dm9000(struct net_device *dev)
+ 
+ 	iow(db, DM9000_GPCR, GPCR_GEP_CNTL);	/* Let GPIO0 output */
+ 
++	dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
++	dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
++
+ 	ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
+ 
+ 	/* if wol is needed, then always set NCR_WAKEEN otherwise we end
+@@ -1200,109 +1304,6 @@ dm9000_open(struct net_device *dev)
+ 	return 0;
+ }
+ 
+-/*
+- * Sleep, either by using msleep() or if we are suspending, then
+- * use mdelay() to sleep.
+- */
+-static void dm9000_msleep(board_info_t *db, unsigned int ms)
+-{
+-	if (db->in_suspend)
+-		mdelay(ms);
+-	else
+-		msleep(ms);
+-}
+-
+-/*
+- *   Read a word from phyxcer
+- */
+-static int
+-dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+-{
+-	board_info_t *db = netdev_priv(dev);
+-	unsigned long flags;
+-	unsigned int reg_save;
+-	int ret;
+-
+-	mutex_lock(&db->addr_lock);
+-
+-	spin_lock_irqsave(&db->lock,flags);
+-
+-	/* Save previous register address */
+-	reg_save = readb(db->io_addr);
+-
+-	/* Fill the phyxcer register into REG_0C */
+-	iow(db, DM9000_EPAR, DM9000_PHY | reg);
+-
+-	iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);	/* Issue phyxcer read command */
+-
+-	writeb(reg_save, db->io_addr);
+-	spin_unlock_irqrestore(&db->lock,flags);
+-
+-	dm9000_msleep(db, 1);		/* Wait read complete */
+-
+-	spin_lock_irqsave(&db->lock,flags);
+-	reg_save = readb(db->io_addr);
+-
+-	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer read command */
+-
+-	/* The read data keeps on REG_0D & REG_0E */
+-	ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+-
+-	/* restore the previous address */
+-	writeb(reg_save, db->io_addr);
+-	spin_unlock_irqrestore(&db->lock,flags);
+-
+-	mutex_unlock(&db->addr_lock);
+-
+-	dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+-	return ret;
+-}
+-
+-/*
+- *   Write a word to phyxcer
+- */
+-static void
+-dm9000_phy_write(struct net_device *dev,
+-		 int phyaddr_unused, int reg, int value)
+-{
+-	board_info_t *db = netdev_priv(dev);
+-	unsigned long flags;
+-	unsigned long reg_save;
+-
+-	dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+-	mutex_lock(&db->addr_lock);
+-
+-	spin_lock_irqsave(&db->lock,flags);
+-
+-	/* Save previous register address */
+-	reg_save = readb(db->io_addr);
+-
+-	/* Fill the phyxcer register into REG_0C */
+-	iow(db, DM9000_EPAR, DM9000_PHY | reg);
+-
+-	/* Fill the written data into REG_0D & REG_0E */
+-	iow(db, DM9000_EPDRL, value);
+-	iow(db, DM9000_EPDRH, value >> 8);
+-
+-	iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);	/* Issue phyxcer write command */
+-
+-	writeb(reg_save, db->io_addr);
+-	spin_unlock_irqrestore(&db->lock, flags);
+-
+-	dm9000_msleep(db, 1);		/* Wait write complete */
+-
+-	spin_lock_irqsave(&db->lock,flags);
+-	reg_save = readb(db->io_addr);
+-
+-	iow(db, DM9000_EPCR, 0x0);	/* Clear phyxcer write command */
+-
+-	/* restore the previous address */
+-	writeb(reg_save, db->io_addr);
+-
+-	spin_unlock_irqrestore(&db->lock, flags);
+-	mutex_unlock(&db->addr_lock);
+-}
+-
+ static void
+ dm9000_shutdown(struct net_device *dev)
+ {
+@@ -1501,7 +1502,12 @@ dm9000_probe(struct platform_device *pdev)
+ 	db->flags |= DM9000_PLATF_SIMPLE_PHY;
+ #endif
+ 
+-	dm9000_reset(db);
++	/* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
++	 * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
++	 * while probe stage.
++	 */
++
++	iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
+ 
+ 	/* try multiple times, DM9000 sometimes gets the read wrong */
+ 	for (i = 0; i < 8; i++) {
+diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h
+index 55688bd..9ce058a 100644
+--- a/drivers/net/ethernet/davicom/dm9000.h
++++ b/drivers/net/ethernet/davicom/dm9000.h
+@@ -69,7 +69,9 @@
+ #define NCR_WAKEEN          (1<<6)
+ #define NCR_FCOL            (1<<4)
+ #define NCR_FDX             (1<<3)
+-#define NCR_LBK             (3<<1)
++
++#define NCR_RESERVED        (3<<1)
++#define NCR_MAC_LBK         (1<<1)
+ #define NCR_RST	            (1<<0)
+ 
+ #define NSR_SPEED           (1<<7)
+@@ -167,5 +169,12 @@
+ #define ISR_LNKCHNG		(1<<5)
+ #define ISR_UNDERRUN		(1<<4)
+ 
++/* Davicom MII registers.
++ */
++
++#define MII_DM_DSPCR		0x1b    /* DSP Control Register */
++
++#define DSPCR_INIT_PARAM	0xE100	/* DSP init parameter */
++
+ #endif /* _DM9000X_H_ */
+ 
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 2b78ddd..0dc9a35 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -1066,7 +1066,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
+ 		sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
+ 		sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
+ 
+-		tp = space - 2048/8;
++		tp = space - 8192/8;
+ 		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
+ 		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
+ 	} else {
+diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
+index 3c896ce..a0f229e 100644
+--- a/drivers/net/ethernet/marvell/sky2.h
++++ b/drivers/net/ethernet/marvell/sky2.h
+@@ -2069,7 +2069,7 @@ enum {
+ 	GM_IS_RX_FF_OR	= 1<<1,	/* Receive FIFO Overrun */
+ 	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */
+ 
+-#define GMAC_DEF_MSK     GM_IS_TX_FF_UR
++#define GMAC_DEF_MSK     (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
+ };
+ 
+ /*	GMAC_LINK_CTRL	16 bit	GMAC Link Control Reg (YUKON only) */
+diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
+index 5e313e9..2a417c3 100644
+--- a/drivers/net/ethernet/micrel/ks8851.c
++++ b/drivers/net/ethernet/micrel/ks8851.c
+@@ -547,7 +547,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ 	for (; rxfc != 0; rxfc--) {
+ 		rxh = ks8851_rdreg32(ks, KS_RXFHSR);
+ 		rxstat = rxh & 0xffff;
+-		rxlen = rxh >> 16;
++		rxlen = (rxh >> 16) & 0xfff;
+ 
+ 		netif_dbg(ks, rx_status, ks->netdev,
+ 			  "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
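
The ks8851 change masks the frame length down to the 12 bits the header word actually defines instead of taking the whole upper half-word. A tiny illustration with made-up register values.

/* Extract a 12-bit length from bits 16..27 of a 32-bit status word. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rxh    = 0xF05C0001;       /* reserved bits set by hardware */
	uint16_t rxstat = rxh & 0xffff;
	uint16_t rxlen  = (rxh >> 16) & 0xfff;

	printf("stat 0x%04x, len 0x%04x\n", rxstat, rxlen); /* len 0x005c, not 0xf05c */
	return 0;
}
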
+diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+index 1e38d50..8b1c2eb 100644
+--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -1740,9 +1740,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
+ 
+ 			skb->protocol = eth_type_trans(skb, netdev);
+ 			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
+-				skb->ip_summed = CHECKSUM_NONE;
+-			else
+ 				skb->ip_summed = CHECKSUM_UNNECESSARY;
++			else
++				skb->ip_summed = CHECKSUM_NONE;
+ 
+ 			napi_gro_receive(&adapter->napi, skb);
+ 			(*work_done)++;
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 6685bbb..310e353 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -249,7 +249,7 @@ void cpsw_tx_handler(void *token, int len, int status)
+ 	struct cpsw_priv	*priv = netdev_priv(ndev);
+ 
+ 	if (unlikely(netif_queue_stopped(ndev)))
+-		netif_start_queue(ndev);
++		netif_wake_queue(ndev);
+ 	priv->stats.tx_packets++;
+ 	priv->stats.tx_bytes += len;
+ 	dev_kfree_skb_any(skb);
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index 08aff1a..43fada5 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1052,7 +1052,7 @@ static void emac_tx_handler(void *token, int len, int status)
+ 	atomic_dec(&priv->cur_tx);
+ 
+ 	if (unlikely(netif_queue_stopped(ndev)))
+-		netif_start_queue(ndev);
++		netif_wake_queue(ndev);
+ 	ndev->stats.tx_packets++;
+ 	ndev->stats.tx_bytes += len;
+ 	dev_kfree_skb_any(skb);
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 00103a8..5caba55 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -725,8 +725,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
+ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ 	struct usbnet *dev = netdev_priv(netdev);
++	int ret;
++
++	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
++		return -EINVAL;
+ 
+-	int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
++	ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+ 	check_warn_return(ret, "Failed to set mac rx frame length");
+ 
+ 	return usbnet_change_mtu(netdev, new_mtu);
+@@ -979,7 +983,7 @@ static int smsc75xx_reset(struct usbnet *dev)
+ 
+ 	netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf);
+ 
+-	ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
++	ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+ 	check_warn_return(ret, "Failed to set max rx frame length");
+ 
+ 	ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
+@@ -1123,8 +1127,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 			else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
+ 				dev->net->stats.rx_frame_errors++;
+ 		} else {
+-			/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
+-			if (unlikely(size > (ETH_FRAME_LEN + 12))) {
++			/* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
++			if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
+ 				netif_dbg(dev, rx_err, dev->net,
+ 					"size err rx_cmd_a=0x%08x", rx_cmd_a);
+ 				return 0;
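
The smsc75xx hunks validate the requested MTU and program the MAC with MTU + ETH_HLEN rather than a fixed 1514. A minimal sketch of that check, with an assumed single-packet limit standing in for the device's real constant.

/* Reject oversized MTUs; program the MAC for the full frame length. */
#include <stdio.h>

#define ETH_HLEN                14
#define MAX_SINGLE_PACKET_SIZE  9000    /* assumed device limit */

static int change_mtu(int new_mtu)
{
	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -1;                  /* -EINVAL in the driver */

	/* program the MAC for the full frame, not just the payload */
	printf("rx max frame length = %d\n", new_mtu + ETH_HLEN);
	return 0;
}

int main(void)
{
	change_mtu(1500);                   /* rx max frame length = 1514 */
	change_mtu(65000);                  /* rejected */
	return 0;
}
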
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+index 9284bca..7c86415 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+@@ -938,6 +938,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
+ 					  AR_PHY_CL_TAB_1,
+ 					  AR_PHY_CL_TAB_2 };
+ 
++	/* Use chip chainmask only for calibration */
+ 	ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+ 
+ 	if (rtt) {
+@@ -1085,6 +1086,9 @@ skip_tx_iqcal:
+ 		ar9003_hw_rtt_disable(ah);
+ 	}
+ 
++	/* Revert chainmask to runtime parameters */
++	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
++
+ 	/* Initialize list pointers */
+ 	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+ 	ah->supp_cals = IQ_MISMATCH_CAL;
+diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
+index 65f831f..bb2848a 100644
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 	const struct b43_dma_ops *ops;
+ 	struct b43_dmaring *ring;
+ 	struct b43_dmadesc_meta *meta;
++	static const struct b43_txstatus fake; /* filled with 0 */
++	const struct b43_txstatus *txstat;
+ 	int slot, firstused;
+ 	bool frame_succeed;
++	int skip;
++	static u8 err_out1, err_out2;
+ 
+ 	ring = parse_cookie(dev, status->cookie, &slot);
+ 	if (unlikely(!ring))
+@@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 	firstused = ring->current_slot - ring->used_slots + 1;
+ 	if (firstused < 0)
+ 		firstused = ring->nr_slots + firstused;
++
++	skip = 0;
+ 	if (unlikely(slot != firstused)) {
+ 		/* This possibly is a firmware bug and will result in
+-		 * malfunction, memory leaks and/or stall of DMA functionality. */
+-		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
+-		       "Expected %d, but got %d\n",
+-		       ring->index, firstused, slot);
+-		return;
++		 * malfunction, memory leaks and/or stall of DMA functionality.
++		 */
++		if (slot == next_slot(ring, next_slot(ring, firstused))) {
++			/* If a single header/data pair was missed, skip over
++			 * the first two slots in an attempt to recover.
++			 */
++			slot = firstused;
++			skip = 2;
++			if (!err_out1) {
++				/* Report the error once. */
++				b43dbg(dev->wl,
++				       "Skip on DMA ring %d slot %d.\n",
++				       ring->index, slot);
++				err_out1 = 1;
++			}
++		} else {
++			/* More than a single header/data pair were missed.
++			 * Report this error once.
++			 */
++			if (!err_out2)
++				b43dbg(dev->wl,
++				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
++				       ring->index, firstused, slot);
++			err_out2 = 1;
++			return;
++		}
+ 	}
+ 
+ 	ops = ring->ops;
+@@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 			       slot, firstused, ring->index);
+ 			break;
+ 		}
++
+ 		if (meta->skb) {
+ 			struct b43_private_tx_info *priv_info =
+-				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
++			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+ 
+-			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
++			unmap_descbuffer(ring, meta->dmaaddr,
++					 meta->skb->len, 1);
+ 			kfree(priv_info->bouncebuffer);
+ 			priv_info->bouncebuffer = NULL;
+ 		} else {
+@@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 			struct ieee80211_tx_info *info;
+ 
+ 			if (unlikely(!meta->skb)) {
+-				/* This is a scatter-gather fragment of a frame, so
+-				 * the skb pointer must not be NULL. */
++				/* This is a scatter-gather fragment of a frame,
++				 * so the skb pointer must not be NULL.
++				 */
+ 				b43dbg(dev->wl, "TX status unexpected NULL skb "
+ 				       "at slot %d (first=%d) on ring %d\n",
+ 				       slot, firstused, ring->index);
+@@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 
+ 			/*
+ 			 * Call back to inform the ieee80211 subsystem about
+-			 * the status of the transmission.
++			 * the status of the transmission. When skipping over
++			 * a missed TX status report, use a status structure
++			 * filled with zeros to indicate that the frame was not
++			 * sent (frame_count 0) and not acknowledged
+ 			 */
+-			frame_succeed = b43_fill_txstatus_report(dev, info, status);
++			if (unlikely(skip))
++				txstat = &fake;
++			else
++				txstat = status;
++
++			frame_succeed = b43_fill_txstatus_report(dev, info,
++								 txstat);
+ #ifdef CONFIG_B43_DEBUG
+ 			if (frame_succeed)
+ 				ring->nr_succeed_tx_packets++;
+@@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ 		/* Everything unmapped and free'd. So it's not used anymore. */
+ 		ring->used_slots--;
+ 
+-		if (meta->is_last_fragment) {
++		if (meta->is_last_fragment && !skip) {
+ 			/* This is the last scatter-gather
+ 			 * fragment of the frame. We are done. */
+ 			break;
+ 		}
+ 		slot = next_slot(ring, slot);
++		if (skip > 0)
++			--skip;
+ 	}
+ 	if (ring->stopped) {
+ 		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
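
The b43 recovery above keys off the reported slot being exactly two ahead of the first used slot, i.e. one header/data descriptor pair was missed. A toy model of that test using the same wrap-around slot arithmetic; the ring size and slot numbers are arbitrary.

/* Skip-over test: recover when exactly one descriptor pair was missed. */
#include <stdio.h>

#define NR_SLOTS 8

static int next_slot(int slot)
{
	return (slot + 1) % NR_SLOTS;       /* wraps like the driver's ring */
}

int main(void)
{
	int firstused = 6, reported = 0;    /* 0 == (6 + 2) % 8 */
	int skip = 0, slot = reported;

	if (slot != firstused && slot == next_slot(next_slot(firstused))) {
		slot = firstused;           /* process from firstused instead */
		skip = 2;                   /* and don't report the two missed slots */
	}
	printf("start at %d, skip %d\n", slot, skip);
	return 0;
}
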
+diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
+index 1081188..6be2f73 100644
+--- a/drivers/net/wireless/b43/phy_n.c
++++ b/drivers/net/wireless/b43/phy_n.c
+@@ -1320,7 +1320,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
+ 	u16 clip_off[2] = { 0xFFFF, 0xFFFF };
+ 
+ 	u8 vcm_final = 0;
+-	s8 offset[4];
++	s32 offset[4];
+ 	s32 results[8][4] = { };
+ 	s32 results_min[4] = { };
+ 	s32 poll_results[4] = { };
+@@ -1371,7 +1371,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
+ 		}
+ 		for (i = 0; i < 4; i++) {
+ 			s32 curr;
+-			s32 mind = 40;
++			s32 mind = 0x100000;
+ 			s32 minpoll = 249;
+ 			u8 minvcm = 0;
+ 			if (2 * core != i)
+@@ -1487,7 +1487,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
+ 	u8 regs_save_radio[2];
+ 	u16 regs_save_phy[2];
+ 
+-	s8 offset[4];
++	s32 offset[4];
+ 	u8 core;
+ 	u8 rail;
+ 
+@@ -1554,7 +1554,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
+ 	}
+ 
+ 	for (i = 0; i < 4; i++) {
+-		s32 mind = 40;
++		s32 mind = 0x100000;
+ 		u8 minvcm = 0;
+ 		s32 minpoll = 249;
+ 		s32 curr;
+diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
+index 54bb483..f9d9aed 100644
+--- a/drivers/net/wireless/mwifiex/init.c
++++ b/drivers/net/wireless/mwifiex/init.c
+@@ -584,6 +584,14 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
+ 		return ret;
+ 	}
+ 
++	/* cancel current command */
++	if (adapter->curr_cmd) {
++		dev_warn(adapter->dev, "curr_cmd is still in processing\n");
++		del_timer(&adapter->cmd_timer);
++		mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
++		adapter->curr_cmd = NULL;
++	}
++
+ 	/* shut down mwifiex */
+ 	dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
+ 
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 6ce8484..2b6faa0 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -853,6 +853,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
+ 	if (unlikely(!_urb)) {
+ 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ 			 "Can't allocate urb. Drop skb!\n");
++		kfree_skb(skb);
+ 		return;
+ 	}
+ 	urb_list = &rtlusb->tx_pending[ep_num];
+diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
+index c72128f..42cad5c 100644
+--- a/drivers/staging/comedi/drivers/s626.c
++++ b/drivers/staging/comedi/drivers/s626.c
+@@ -1882,7 +1882,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ 	case TRIG_NONE:
+ 		/*  continous acquisition */
+ 		devpriv->ai_continous = 1;
+-		devpriv->ai_sample_count = 0;
++		devpriv->ai_sample_count = 1;
+ 		break;
+ 	}
+ 
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 3d7e1ee..ed7cd37 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -159,7 +159,7 @@ struct atmel_uart_port {
+ };
+ 
+ static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
+-static unsigned long atmel_ports_in_use;
++static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
+ 
+ #ifdef SUPPORT_SYSRQ
+ static struct console atmel_console;
+@@ -1785,15 +1785,14 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		/* port id not found in platform data nor device-tree aliases:
+ 		 * auto-enumerate it */
+-		ret = find_first_zero_bit(&atmel_ports_in_use,
+-				sizeof(atmel_ports_in_use));
++		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
+ 
+-	if (ret > ATMEL_MAX_UART) {
++	if (ret >= ATMEL_MAX_UART) {
+ 		ret = -ENODEV;
+ 		goto err;
+ 	}
+ 
+-	if (test_and_set_bit(ret, &atmel_ports_in_use)) {
++	if (test_and_set_bit(ret, atmel_ports_in_use)) {
+ 		/* port already in use */
+ 		ret = -EBUSY;
+ 		goto err;
+@@ -1867,7 +1866,7 @@ static int __devexit atmel_serial_remove(struct platform_device *pdev)
+ 
+ 	/* "port" is allocated statically, so we shouldn't free it */
+ 
+-	clear_bit(port->line, &atmel_ports_in_use);
++	clear_bit(port->line, atmel_ports_in_use);
+ 
+ 	clk_put(atmel_port->clk);
+ 
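
The atmel_serial fix switches the port bookkeeping to a proper bitmap, searches it over the number of ports rather than sizeof(unsigned long), and treats an index equal to ATMEL_MAX_UART as "no free port" (>=, not >). A userspace sketch of the corrected bounds handling, with a plain 32-bit mask standing in for DECLARE_BITMAP().

/* Claim the first free port id, bounded by the bit count. */
#include <stdint.h>
#include <stdio.h>

#define MAX_UART 7

static uint32_t ports_in_use;           /* one bit per port */

static int claim_port(void)
{
	int id;

	for (id = 0; id < MAX_UART; id++)   /* bound by bit count, not sizeof */
		if (!(ports_in_use & (1u << id)))
			break;

	if (id >= MAX_UART)                 /* ">=": MAX_UART itself is invalid */
		return -1;

	ports_in_use |= 1u << id;
	return id;
}

int main(void)
{
	for (int i = 0; i < 9; i++)
		printf("claim -> %d\n", claim_port());  /* 0..6, then -1, -1 */
	return 0;
}
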
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index fa7268a..6abb92c 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll)
+ static struct vcs_poll_data *
+ vcs_poll_data_get(struct file *file)
+ {
+-	struct vcs_poll_data *poll = file->private_data;
++	struct vcs_poll_data *poll = file->private_data, *kill = NULL;
+ 
+ 	if (poll)
+ 		return poll;
+@@ -122,10 +122,12 @@ vcs_poll_data_get(struct file *file)
+ 		file->private_data = poll;
+ 	} else {
+ 		/* someone else raced ahead of us */
+-		vcs_poll_data_free(poll);
++		kill = poll;
+ 		poll = file->private_data;
+ 	}
+ 	spin_unlock(&file->f_lock);
++	if (kill)
++		vcs_poll_data_free(kill);
+ 
+ 	return poll;
+ }
+diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
+index e5e44f8..b0af333 100644
+--- a/drivers/usb/gadget/udc-core.c
++++ b/drivers/usb/gadget/udc-core.c
+@@ -265,7 +265,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
+ 		udc->driver->disconnect(udc->gadget);
+ 		usb_gadget_disconnect(udc->gadget);
+ 		udc->driver->unbind(udc->gadget);
+-		usb_gadget_udc_stop(udc->gadget, udc->driver);
++		usb_gadget_udc_stop(udc->gadget, NULL);
+ 	} else {
+ 		usb_gadget_stop(udc->gadget, udc->driver);
+ 	}
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 87ee86d..a3c9374 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2027,8 +2027,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		if (event_trb != ep_ring->dequeue &&
+ 				event_trb != td->last_trb)
+ 			td->urb->actual_length =
+-				td->urb->transfer_buffer_length
+-				- TRB_LEN(le32_to_cpu(event->transfer_len));
++				td->urb->transfer_buffer_length -
++				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 		else
+ 			td->urb->actual_length = 0;
+ 
+@@ -2060,7 +2060,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		/* Maybe the event was for the data stage? */
+ 			td->urb->actual_length =
+ 				td->urb->transfer_buffer_length -
+-				TRB_LEN(le32_to_cpu(event->transfer_len));
++				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 			xhci_dbg(xhci, "Waiting for status "
+ 					"stage event\n");
+ 			return 0;
+@@ -2096,7 +2096,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 	/* handle completion code */
+ 	switch (trb_comp_code) {
+ 	case COMP_SUCCESS:
+-		if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
++		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
+ 			frame->status = 0;
+ 			break;
+ 		}
+@@ -2141,7 +2141,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
+ 		}
+ 		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+-			TRB_LEN(le32_to_cpu(event->transfer_len));
++			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 
+ 		if (trb_comp_code != COMP_STOP_INVAL) {
+ 			frame->actual_length = len;
+@@ -2199,7 +2199,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 	case COMP_SUCCESS:
+ 		/* Double check that the HW transferred everything. */
+ 		if (event_trb != td->last_trb ||
+-				TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
++		    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ 			xhci_warn(xhci, "WARN Successful completion "
+ 					"on short TX\n");
+ 			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+@@ -2227,18 +2227,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 				"%d bytes untransferred\n",
+ 				td->urb->ep->desc.bEndpointAddress,
+ 				td->urb->transfer_buffer_length,
+-				TRB_LEN(le32_to_cpu(event->transfer_len)));
++				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+ 	/* Fast path - was this the last TRB in the TD for this URB? */
+ 	if (event_trb == td->last_trb) {
+-		if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
++		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ 			td->urb->actual_length =
+ 				td->urb->transfer_buffer_length -
+-				TRB_LEN(le32_to_cpu(event->transfer_len));
++				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 			if (td->urb->transfer_buffer_length <
+ 					td->urb->actual_length) {
+ 				xhci_warn(xhci, "HC gave bad length "
+ 						"of %d bytes left\n",
+-					  TRB_LEN(le32_to_cpu(event->transfer_len)));
++					  EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+ 				td->urb->actual_length = 0;
+ 				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ 					*status = -EREMOTEIO;
+@@ -2280,7 +2280,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ 		if (trb_comp_code != COMP_STOP_INVAL)
+ 			td->urb->actual_length +=
+ 				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+-				TRB_LEN(le32_to_cpu(event->transfer_len));
++				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ 	}
+ 
+ 	return finish_td(xhci, td, event_trb, event, ep, status, false);
+@@ -2366,7 +2366,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 	 * transfer type
+ 	 */
+ 	case COMP_SUCCESS:
+-		if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
++		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+ 			break;
+ 		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+ 			trb_comp_code = COMP_SHORT_TX;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 81d839f..15aaf58 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -968,6 +968,10 @@ struct xhci_transfer_event {
+ 	__le32	flags;
+ };
+ 
++/* Transfer event TRB length bit mask */
++/* bits 0:23 */
++#define	EVENT_TRB_LEN(p)		((p) & 0xffffff)
++
+ /** Transfer Event bit fields **/
+ #define	TRB_TO_EP_ID(p)	(((p) >> 16) & 0x1f)
+ 
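
EVENT_TRB_LEN() added above exists because the transfer-event TRB reports its residual length in bits 0..23, while the ring-TRB TRB_LEN() macro masks fewer bits (17 in xhci.h). A standalone illustration with an invented transfer_len value.

/* The event field packs a completion code above a 24-bit length. */
#include <stdint.h>
#include <stdio.h>

#define TRB_LEN(p)        ((p) & 0x1ffff)     /* ring TRB: 17-bit length */
#define EVENT_TRB_LEN(p)  ((p) & 0xffffff)    /* event TRB: bits 0..23 */

int main(void)
{
	uint32_t transfer_len = (2u << 24) | 0x030000;  /* code + length */

	printf("event length %u\n", EVENT_TRB_LEN(transfer_len)); /* 196608 */
	printf("wrong mask   %u\n", TRB_LEN(transfer_len));       /* truncated */
	return 0;
}
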
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 87ef150..07a4fb0 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -648,6 +648,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+ 	{ USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
+ 	{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
++	{ USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) },
+ 	{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
+ 	{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
+ 	{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 9d359e1..e79861e 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -584,6 +584,13 @@
+ #define CONTEC_COM1USBH_PID	0x8311	/* COM-1(USB)H */
+ 
+ /*
++ * Mitsubishi Electric Corp. (http://www.meau.com)
++ * Submitted by Konstantin Holoborodko
++ */
++#define MITSUBISHI_VID		0x06D3
++#define MITSUBISHI_FXUSB_PID	0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */
++
++/*
+  * Definitions for B&B Electronics products.
+  */
+ #define BANDB_VID		0x0856	/* B&B Electronics Vendor ID */
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index a6e1e06..2f3879c 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -604,6 +604,7 @@ struct block_device *bdgrab(struct block_device *bdev)
+ 	ihold(bdev->bd_inode);
+ 	return bdev;
+ }
++EXPORT_SYMBOL(bdgrab);
+ 
+ long nr_blockdev_pages(void)
+ {
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 49fd7b6..fef1f21 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4217,7 +4217,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
+ 	spin_lock(&sinfo->lock);
+ 	spin_lock(&block_rsv->lock);
+ 
+-	block_rsv->size = num_bytes;
++	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
+ 
+ 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
+ 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
+@@ -4486,14 +4486,49 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
+ 		 * If the inodes csum_bytes is the same as the original
+ 		 * csum_bytes then we know we haven't raced with any free()ers
+ 		 * so we can just reduce our inodes csum bytes and carry on.
+-		 * Otherwise we have to do the normal free thing to account for
+-		 * the case that the free side didn't free up its reserve
+-		 * because of this outstanding reservation.
+ 		 */
+-		if (BTRFS_I(inode)->csum_bytes == csum_bytes)
++		if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
+ 			calc_csum_metadata_size(inode, num_bytes, 0);
+-		else
+-			to_free = calc_csum_metadata_size(inode, num_bytes, 0);
++		} else {
++			u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
++			u64 bytes;
++
++			/*
++			 * This is tricky, but first we need to figure out how much we
++			 * free'd from any free-ers that occured during this
++			 * reservation, so we reset ->csum_bytes to the csum_bytes
++			 * before we dropped our lock, and then call the free for the
++			 * number of bytes that were freed while we were trying our
++			 * reservation.
++			 */
++			bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
++			BTRFS_I(inode)->csum_bytes = csum_bytes;
++			to_free = calc_csum_metadata_size(inode, bytes, 0);
++
++
++			/*
++			 * Now we need to see how much we would have freed had we not
++			 * been making this reservation and our ->csum_bytes were not
++			 * artificially inflated.
++			 */
++			BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
++			bytes = csum_bytes - orig_csum_bytes;
++			bytes = calc_csum_metadata_size(inode, bytes, 0);
++
++			/*
++			 * Now reset ->csum_bytes to what it should be.  If bytes is
++			 * more than to_free then we would have free'd more space had we
++			 * not had an artificially high ->csum_bytes, so we need to free
++			 * the remainder.  If bytes is the same or less then we don't
++			 * need to do anything, the other free-ers did the correct
++			 * thing.
++			 */
++			BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
++			if (bytes > to_free)
++				to_free = bytes - to_free;
++			else
++				to_free = 0;
++		}
+ 		spin_unlock(&BTRFS_I(inode)->lock);
+ 		if (dropped)
+ 			to_free += btrfs_calc_trans_metadata_size(root, dropped);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index c9018a0..d64fda5 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1238,6 +1238,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+ 				GFP_NOFS);
+ }
+ 
++int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
++{
++	unsigned long index = start >> PAGE_CACHE_SHIFT;
++	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
++	struct page *page;
++
++	while (index <= end_index) {
++		page = find_get_page(inode->i_mapping, index);
++		BUG_ON(!page); /* Pages should be in the extent_io_tree */
++		clear_page_dirty_for_io(page);
++		page_cache_release(page);
++		index++;
++	}
++	return 0;
++}
++
++int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
++{
++	unsigned long index = start >> PAGE_CACHE_SHIFT;
++	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
++	struct page *page;
++
++	while (index <= end_index) {
++		page = find_get_page(inode->i_mapping, index);
++		BUG_ON(!page); /* Pages should be in the extent_io_tree */
++		account_page_redirty(page);
++		__set_page_dirty_nobuffers(page);
++		page_cache_release(page);
++		index++;
++	}
++	return 0;
++}
++
+ /*
+  * helper function to set both pages and extents in the tree writeback
+  */
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index b516c3b..2edf912 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -312,6 +312,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
+ 		      unsigned long *map_len);
+ int extent_range_uptodate(struct extent_io_tree *tree,
+ 			  u64 start, u64 end);
++int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
++int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
+ int extent_clear_unlock_delalloc(struct inode *inode,
+ 				struct extent_io_tree *tree,
+ 				u64 start, u64 end, struct page *locked_page,
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 0df0d1f..9e51325 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -349,6 +349,7 @@ static noinline int compress_file_range(struct inode *inode,
+ 	int i;
+ 	int will_compress;
+ 	int compress_type = root->fs_info->compress_type;
++	int redirty = 0;
+ 
+ 	/* if this is a small write inside eof, kick off a defrag */
+ 	if ((end - start + 1) < 16 * 1024 &&
+@@ -411,6 +412,17 @@ again:
+ 		if (BTRFS_I(inode)->force_compress)
+ 			compress_type = BTRFS_I(inode)->force_compress;
+ 
++		/*
++		 * we need to call clear_page_dirty_for_io on each
++		 * page in the range.  Otherwise applications with the file
++		 * mmap'd can wander in and change the page contents while
++		 * we are compressing them.
++		 *
++		 * If the compression fails for any reason, we set the pages
++		 * dirty again later on.
++		 */
++		extent_range_clear_dirty_for_io(inode, start, end);
++		redirty = 1;
+ 		ret = btrfs_compress_pages(compress_type,
+ 					   inode->i_mapping, start,
+ 					   total_compressed, pages,
+@@ -552,6 +564,8 @@ cleanup_and_bail_uncompressed:
+ 			__set_page_dirty_nobuffers(locked_page);
+ 			/* unlocked later on in the async handlers */
+ 		}
++		if (redirty)
++			extent_range_redirty_for_io(inode, start, end);
+ 		add_async_extent(async_cow, start, end - start + 1,
+ 				 0, NULL, 0, BTRFS_COMPRESS_NONE);
+ 		*num_added += 1;
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 2f3d6f9..682e5da 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -383,7 +383,6 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
+ 	eb = path->nodes[0];
+ 	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
+ 	item_size = btrfs_item_size_nr(eb, path->slots[0]);
+-	btrfs_release_path(path);
+ 
+ 	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ 		do {
+@@ -398,7 +397,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
+ 				ret < 0 ? -1 : ref_level,
+ 				ret < 0 ? -1 : ref_root);
+ 		} while (ret != 1);
++		btrfs_release_path(path);
+ 	} else {
++		btrfs_release_path(path);
+ 		swarn.path = path;
+ 		iterate_extent_inodes(fs_info, found_key.objectid,
+ 					extent_item_pos, 1,
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 3e1018a..d33733e 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -600,7 +600,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
+ 	brelse(bitmap_bh);
+ 	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
+ 	       ", computed = %llu, %llu\n",
+-	       EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
++	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
+ 	       desc_count, bitmap_count);
+ 	return bitmap_count;
+ #else
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 47d1c8c..d918b55 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -316,9 +316,9 @@ struct ext4_group_desc
+  */
+ 
+ struct flex_groups {
+-	atomic_t free_inodes;
+-	atomic_t free_clusters;
+-	atomic_t used_dirs;
++	atomic64_t	free_clusters;
++	atomic_t	free_inodes;
++	atomic_t	used_dirs;
+ };
+ 
+ #define EXT4_BG_INODE_UNINIT	0x0001 /* Inode table/bitmap not in use */
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 902544e..e42b468 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -305,8 +305,8 @@ error_return:
+ }
+ 
+ struct orlov_stats {
++	__u64 free_clusters;
+ 	__u32 free_inodes;
+-	__u32 free_clusters;
+ 	__u32 used_dirs;
+ };
+ 
+@@ -323,7 +323,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+ 
+ 	if (flex_size > 1) {
+ 		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
+-		stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
++		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
+ 		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
+ 		return;
+ 	}
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 3122ece..6c32dd8 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2813,8 +2813,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+ 	if (sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group = ext4_flex_group(sbi,
+ 							  ac->ac_b_ex.fe_group);
+-		atomic_sub(ac->ac_b_ex.fe_len,
+-			   &sbi->s_flex_groups[flex_group].free_clusters);
++		atomic64_sub(ac->ac_b_ex.fe_len,
++			     &sbi->s_flex_groups[flex_group].free_clusters);
+ 	}
+ 
+ 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+@@ -3433,7 +3433,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ 			win = offs;
+ 
+ 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
+-			EXT4_B2C(sbi, win);
++			EXT4_NUM_B2C(sbi, win);
+ 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
+ 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+ 	}
+@@ -4577,7 +4577,7 @@ do_more:
+ 			EXT4_BLOCKS_PER_GROUP(sb);
+ 		count -= overflow;
+ 	}
+-	count_clusters = EXT4_B2C(sbi, count);
++	count_clusters = EXT4_NUM_B2C(sbi, count);
+ 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
+ 	if (!bitmap_bh) {
+ 		err = -EIO;
+@@ -4667,8 +4667,8 @@ do_more:
+ 
+ 	if (sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+-		atomic_add(count_clusters,
+-			   &sbi->s_flex_groups[flex_group].free_clusters);
++		atomic64_add(count_clusters,
++			     &sbi->s_flex_groups[flex_group].free_clusters);
+ 	}
+ 
+ 	ext4_mb_unload_buddy(&e4b);
+@@ -4808,12 +4808,12 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
+ 	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
+ 	ext4_unlock_group(sb, block_group);
+ 	percpu_counter_add(&sbi->s_freeclusters_counter,
+-			   EXT4_B2C(sbi, blocks_freed));
++			   EXT4_NUM_B2C(sbi, blocks_freed));
+ 
+ 	if (sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+-		atomic_add(EXT4_B2C(sbi, blocks_freed),
+-			   &sbi->s_flex_groups[flex_group].free_clusters);
++		atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
++			     &sbi->s_flex_groups[flex_group].free_clusters);
+ 	}
+ 
+ 	ext4_mb_unload_buddy(&e4b);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 231cacb..3fc0e8b 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1112,7 +1112,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
+ 		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
+ 		ext4_inode_table_set(sb, gdp, group_data->inode_table);
+ 		ext4_free_group_clusters_set(sb, gdp,
+-					     EXT4_B2C(sbi, group_data->free_blocks_count));
++			EXT4_NUM_B2C(sbi, group_data->free_blocks_count));
+ 		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
+ 		gdp->bg_flags = cpu_to_le16(*bg_flags);
+ 		gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+@@ -1210,7 +1210,7 @@ static void ext4_update_super(struct super_block *sb,
+ 
+ 	/* Update the free space counts */
+ 	percpu_counter_add(&sbi->s_freeclusters_counter,
+-			   EXT4_B2C(sbi, free_blocks));
++			   EXT4_NUM_B2C(sbi, free_blocks));
+ 	percpu_counter_add(&sbi->s_freeinodes_counter,
+ 			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
+ 
+@@ -1219,8 +1219,8 @@ static void ext4_update_super(struct super_block *sb,
+ 	    sbi->s_log_groups_per_flex) {
+ 		ext4_group_t flex_group;
+ 		flex_group = ext4_flex_group(sbi, group_data[0].group);
+-		atomic_add(EXT4_B2C(sbi, free_blocks),
+-			   &sbi->s_flex_groups[flex_group].free_clusters);
++		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
++			     &sbi->s_flex_groups[flex_group].free_clusters);
+ 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
+ 			   &sbi->s_flex_groups[flex_group].free_inodes);
+ 	}
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 288f4c6..88bb68d 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1907,8 +1907,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ 		flex_group = ext4_flex_group(sbi, i);
+ 		atomic_add(ext4_free_inodes_count(sb, gdp),
+ 			   &sbi->s_flex_groups[flex_group].free_inodes);
+-		atomic_add(ext4_free_group_clusters(sb, gdp),
+-			   &sbi->s_flex_groups[flex_group].free_clusters);
++		atomic64_add(ext4_free_group_clusters(sb, gdp),
++			     &sbi->s_flex_groups[flex_group].free_clusters);
+ 		atomic_add(ext4_used_dirs_count(sb, gdp),
+ 			   &sbi->s_flex_groups[flex_group].used_dirs);
+ 	}
+diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c
+index 737d839..6fc7b5c 100644
+--- a/fs/nfs/blocklayout/blocklayoutdm.c
++++ b/fs/nfs/blocklayout/blocklayoutdm.c
+@@ -55,7 +55,8 @@ static void dev_remove(struct net *net, dev_t dev)
+ 
+ 	bl_pipe_msg.bl_wq = &nn->bl_wq;
+ 	memset(msg, 0, sizeof(*msg));
+-	msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS);
++	msg->len = sizeof(bl_msg) + bl_msg.totallen;
++	msg->data = kzalloc(msg->len, GFP_NOFS);
+ 	if (!msg->data)
+ 		goto out;
+ 
+@@ -66,7 +67,6 @@ static void dev_remove(struct net *net, dev_t dev)
+ 	memcpy(msg->data, &bl_msg, sizeof(bl_msg));
+ 	dataptr = (uint8_t *) msg->data;
+ 	memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request));
+-	msg->len = sizeof(bl_msg) + bl_msg.totallen;
+ 
+ 	add_wait_queue(&nn->bl_wq, &wq);
+ 	if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) {
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 967d68e..ace6745 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -263,7 +263,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ 		iattr->ia_valid |= ATTR_SIZE;
+ 	}
+ 	if (bmval[0] & FATTR4_WORD0_ACL) {
+-		int nace;
++		u32 nace;
+ 		struct nfs4_ace *ace;
+ 
+ 		READ_BUF(4); len += 4;
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index a545d81..45024ef 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -1002,6 +1002,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ 		ino = parent_sd->s_ino;
+ 		if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
+ 			filp->f_pos++;
++		else
++			return 0;
+ 	}
+ 	if (filp->f_pos == 1) {
+ 		if (parent_sd->s_parent)
+@@ -1010,6 +1012,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ 			ino = parent_sd->s_ino;
+ 		if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
+ 			filp->f_pos++;
++		else
++			return 0;
+ 	}
+ 	mutex_lock(&sysfs_mutex);
+ 	for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
+@@ -1040,10 +1044,21 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ 	return 0;
+ }
+ 
++static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct inode *inode = file->f_path.dentry->d_inode;
++	loff_t ret;
++
++	mutex_lock(&inode->i_mutex);
++	ret = generic_file_llseek(file, offset, whence);
++	mutex_unlock(&inode->i_mutex);
++
++	return ret;
++}
+ 
+ const struct file_operations sysfs_dir_operations = {
+ 	.read		= generic_read_dir,
+ 	.readdir	= sysfs_readdir,
+ 	.release	= sysfs_dir_release,
+-	.llseek		= generic_file_llseek,
++	.llseek		= sysfs_dir_llseek,
+ };
+diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
+index 555c0ae..743f7a5 100644
+--- a/include/asm-generic/signal.h
++++ b/include/asm-generic/signal.h
+@@ -99,6 +99,10 @@ typedef unsigned long old_sigset_t;
+ 
+ #include <asm-generic/signal-defs.h>
+ 
++#ifdef SA_RESTORER
++#define __ARCH_HAS_SA_RESTORER
++#endif
++
+ struct sigaction {
+ 	__sighandler_t sa_handler;
+ 	unsigned long sa_flags;
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 796f1ff..1662047 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -108,7 +108,7 @@ struct thermal_zone_device {
+ /* Adding event notification support elements */
+ #define THERMAL_GENL_FAMILY_NAME                "thermal_event"
+ #define THERMAL_GENL_VERSION                    0x01
+-#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_group"
++#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_grp"
+ 
+ enum events {
+ 	THERMAL_AUX0,
+diff --git a/include/linux/unix_diag.h b/include/linux/unix_diag.h
+index b1d2bf1..0ff4d32 100644
+--- a/include/linux/unix_diag.h
++++ b/include/linux/unix_diag.h
+@@ -38,9 +38,11 @@ enum {
+ 	UNIX_DIAG_RQLEN,
+ 	UNIX_DIAG_MEMINFO,
+ 
+-	UNIX_DIAG_MAX,
++	__UNIX_DIAG_MAX,
+ };
+ 
++#define UNIX_DIAG_MAX (__UNIX_DIAG_MAX - 1)
++
+ struct unix_diag_vfs {
+ 	__u32	udiag_vfs_ino;
+ 	__u32	udiag_vfs_dev;
+diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
+index ee338bf..2af8fdb 100644
+--- a/include/xen/interface/io/blkif.h
++++ b/include/xen/interface/io/blkif.h
+@@ -138,11 +138,21 @@ struct blkif_request_discard {
+ 	uint8_t        _pad3;
+ } __attribute__((__packed__));
+ 
++struct blkif_request_other {
++	uint8_t      _pad1;
++	blkif_vdev_t _pad2;        /* only for read/write requests         */
++#ifdef CONFIG_X86_64
++	uint32_t     _pad3;        /* offsetof(blkif_req..,u.other.id)==8*/
++#endif
++	uint64_t     id;           /* private guest value, echoed in resp  */
++} __attribute__((__packed__));
++
+ struct blkif_request {
+ 	uint8_t        operation;    /* BLKIF_OP_???                         */
+ 	union {
+ 		struct blkif_request_rw rw;
+ 		struct blkif_request_discard discard;
++		struct blkif_request_other other;
+ 	} u;
+ } __attribute__((__packed__));
+ 
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 9f70f45..32b10d4 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -482,7 +482,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
+ 		if (force_default || ka->sa.sa_handler != SIG_IGN)
+ 			ka->sa.sa_handler = SIG_DFL;
+ 		ka->sa.sa_flags = 0;
+-#ifdef SA_RESTORER
++#ifdef __ARCH_HAS_SA_RESTORER
+ 		ka->sa.sa_restorer = NULL;
+ #endif
+ 		sigemptyset(&ka->sa.sa_mask);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 681a759..8beda39 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2743,11 +2743,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+ 	return -EINVAL;
+ }
+ 
+-static void set_tracer_flags(unsigned int mask, int enabled)
++/* Some tracers require overwrite to stay enabled */
++int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
++{
++	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
++		return -1;
++
++	return 0;
++}
++
++int set_tracer_flag(unsigned int mask, int enabled)
+ {
+ 	/* do nothing if flag is already set */
+ 	if (!!(trace_flags & mask) == !!enabled)
+-		return;
++		return 0;
++
++	/* Give the tracer a chance to approve the change */
++	if (current_trace->flag_changed)
++		if (current_trace->flag_changed(current_trace, mask, !!enabled))
++			return -EINVAL;
+ 
+ 	if (enabled)
+ 		trace_flags |= mask;
+@@ -2759,6 +2773,8 @@ static void set_tracer_flags(unsigned int mask, int enabled)
+ 
+ 	if (mask == TRACE_ITER_OVERWRITE)
+ 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
++
++	return 0;
+ }
+ 
+ static ssize_t
+@@ -2768,7 +2784,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+ 	char buf[64];
+ 	char *cmp;
+ 	int neg = 0;
+-	int ret;
++	int ret = -ENODEV;
+ 	int i;
+ 
+ 	if (cnt >= sizeof(buf))
+@@ -2785,21 +2801,23 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+ 		cmp += 2;
+ 	}
+ 
++	mutex_lock(&trace_types_lock);
++
+ 	for (i = 0; trace_options[i]; i++) {
+ 		if (strcmp(cmp, trace_options[i]) == 0) {
+-			set_tracer_flags(1 << i, !neg);
++			ret = set_tracer_flag(1 << i, !neg);
+ 			break;
+ 		}
+ 	}
+ 
+ 	/* If no option could be set, test the specific tracer options */
+-	if (!trace_options[i]) {
+-		mutex_lock(&trace_types_lock);
++	if (!trace_options[i])
+ 		ret = set_tracer_option(current_trace, cmp, neg);
+-		mutex_unlock(&trace_types_lock);
+-		if (ret)
+-			return ret;
+-	}
++
++	mutex_unlock(&trace_types_lock);
++
++	if (ret < 0)
++		return ret;
+ 
+ 	*ppos += cnt;
+ 
+@@ -3123,6 +3141,9 @@ static int tracing_set_tracer(const char *buf)
+ 		goto out;
+ 
+ 	trace_branch_disable();
++
++	current_trace->enabled = false;
++
+ 	if (current_trace && current_trace->reset)
+ 		current_trace->reset(tr);
+ 	if (current_trace && current_trace->use_max_tr) {
+@@ -3152,6 +3173,7 @@ static int tracing_set_tracer(const char *buf)
+ 			goto out;
+ 	}
+ 
++	current_trace->enabled = true;
+ 	trace_branch_enable(tr);
+  out:
+ 	mutex_unlock(&trace_types_lock);
+@@ -4486,7 +4508,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 
+ 	if (val != 0 && val != 1)
+ 		return -EINVAL;
+-	set_tracer_flags(1 << index, val);
++
++	mutex_lock(&trace_types_lock);
++	ret = set_tracer_flag(1 << index, val);
++	mutex_unlock(&trace_types_lock);
++
++	if (ret < 0)
++		return ret;
+ 
+ 	*ppos += cnt;
+ 
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index f95d65d..7360674 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -278,10 +278,14 @@ struct tracer {
+ 	enum print_line_t	(*print_line)(struct trace_iterator *iter);
+ 	/* If you handled the flag setting, return 0 */
+ 	int			(*set_flag)(u32 old_flags, u32 bit, int set);
++	/* Return 0 if OK with change, else return non-zero */
++	int			(*flag_changed)(struct tracer *tracer,
++						u32 mask, int set);
+ 	struct tracer		*next;
+ 	struct tracer_flags	*flags;
+ 	int			print_max;
+ 	int			use_max_tr;
++	bool			enabled;
+ };
+ 
+ 
+@@ -826,6 +830,9 @@ extern struct list_head ftrace_events;
+ extern const char *__start___trace_bprintk_fmt[];
+ extern const char *__stop___trace_bprintk_fmt[];
+ 
++int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
++int set_tracer_flag(unsigned int mask, int enabled);
++
+ #undef FTRACE_ENTRY
+ #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
+ 	extern struct ftrace_event_call					\
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 99d20e9..8dd139a 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -32,7 +32,7 @@ enum {
+ 
+ static int trace_type __read_mostly;
+ 
+-static int save_lat_flag;
++static int save_flags;
+ 
+ static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
+ static int start_irqsoff_tracer(struct trace_array *tr, int graph);
+@@ -557,8 +557,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
+ 
+ static void __irqsoff_tracer_init(struct trace_array *tr)
+ {
+-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+-	trace_flags |= TRACE_ITER_LATENCY_FMT;
++	save_flags = trace_flags;
++
++	/* non overwrite screws up the latency tracers */
++	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
++	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+ 
+ 	tracing_max_latency = 0;
+ 	irqsoff_trace = tr;
+@@ -572,10 +575,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
+ 
+ static void irqsoff_tracer_reset(struct trace_array *tr)
+ {
++	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
++	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
++
+ 	stop_irqsoff_tracer(tr, is_graph());
+ 
+-	if (!save_lat_flag)
+-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
++	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
++	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+ }
+ 
+ static void irqsoff_tracer_start(struct trace_array *tr)
+@@ -608,6 +614,7 @@ static struct tracer irqsoff_tracer __read_mostly =
+ 	.print_line     = irqsoff_print_line,
+ 	.flags		= &tracer_flags,
+ 	.set_flag	= irqsoff_set_flag,
++	.flag_changed	= trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ 	.selftest    = trace_selftest_startup_irqsoff,
+ #endif
+@@ -641,6 +648,7 @@ static struct tracer preemptoff_tracer __read_mostly =
+ 	.print_line     = irqsoff_print_line,
+ 	.flags		= &tracer_flags,
+ 	.set_flag	= irqsoff_set_flag,
++	.flag_changed	= trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ 	.selftest    = trace_selftest_startup_preemptoff,
+ #endif
+@@ -676,6 +684,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
+ 	.print_line     = irqsoff_print_line,
+ 	.flags		= &tracer_flags,
+ 	.set_flag	= irqsoff_set_flag,
++	.flag_changed	= trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ 	.selftest    = trace_selftest_startup_preemptirqsoff,
+ #endif
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index ff791ea..9eadedc 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
+ static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+ static void wakeup_graph_return(struct ftrace_graph_ret *trace);
+ 
+-static int save_lat_flag;
++static int save_flags;
+ 
+ #define TRACE_DISPLAY_GRAPH     1
+ 
+@@ -539,8 +539,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
+ 
+ static int __wakeup_tracer_init(struct trace_array *tr)
+ {
+-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+-	trace_flags |= TRACE_ITER_LATENCY_FMT;
++	save_flags = trace_flags;
++
++	/* non overwrite screws up the latency tracers */
++	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
++	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+ 
+ 	tracing_max_latency = 0;
+ 	wakeup_trace = tr;
+@@ -562,12 +565,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
+ 
+ static void wakeup_tracer_reset(struct trace_array *tr)
+ {
++	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
++	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
++
+ 	stop_wakeup_tracer(tr);
+ 	/* make sure we put back any tasks we are tracing */
+ 	wakeup_reset(tr);
+ 
+-	if (!save_lat_flag)
+-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
++	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
++	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+ }
+ 
+ static void wakeup_tracer_start(struct trace_array *tr)
+@@ -593,6 +599,7 @@ static struct tracer wakeup_tracer __read_mostly =
+ 	.print_line	= wakeup_print_line,
+ 	.flags		= &tracer_flags,
+ 	.set_flag	= wakeup_set_flag,
++	.flag_changed	= trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ 	.selftest    = trace_selftest_startup_wakeup,
+ #endif
+@@ -614,6 +621,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
+ 	.print_line	= wakeup_print_line,
+ 	.flags		= &tracer_flags,
+ 	.set_flag	= wakeup_set_flag,
++	.flag_changed	= trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ 	.selftest    = trace_selftest_startup_wakeup,
+ #endif
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index cf4a49c..e22b8ad 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+ 
+ 	grp = &vlan_info->grp;
+ 
+-	/* Take it out of our own structures, but be sure to interlock with
+-	 * HW accelerating devices or SW vlan input packet processing if
+-	 * VLAN is not 0 (leave it there for 802.1p).
+-	 */
+-	if (vlan_id)
+-		vlan_vid_del(real_dev, vlan_id);
+-
+ 	grp->nr_vlan_devs--;
+ 
+ 	if (vlan->flags & VLAN_FLAG_GVRP)
+@@ -108,6 +101,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+ 	if (grp->nr_vlan_devs == 0)
+ 		vlan_gvrp_uninit_applicant(real_dev);
+ 
++	/* Take it out of our own structures, but be sure to interlock with
++	 * HW accelerating devices or SW vlan input packet processing if
++	 * VLAN is not 0 (leave it there for 802.1p).
++	 */
++	if (vlan_id)
++		vlan_vid_del(real_dev, vlan_id);
++
+ 	/* Get rid of the vlan's reference to real_dev */
+ 	dev_put(real_dev);
+ }
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index f6ab129..70c9ef2 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -378,6 +378,7 @@ static void __sco_sock_close(struct sock *sk)
+ 			sco_chan_del(sk, ECONNRESET);
+ 		break;
+ 
++	case BT_CONNECT2:
+ 	case BT_CONNECT:
+ 	case BT_DISCONN:
+ 		sco_chan_del(sk, ECONNRESET);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index dfa2f49..9e2e29b 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1482,7 +1482,6 @@ void net_enable_timestamp(void)
+ 		return;
+ 	}
+ #endif
+-	WARN_ON(in_interrupt());
+ 	static_key_slow_inc(&netstamp_needed);
+ }
+ EXPORT_SYMBOL(net_enable_timestamp);
+@@ -3125,6 +3124,7 @@ int netdev_rx_handler_register(struct net_device *dev,
+ 	if (dev->rx_handler)
+ 		return -EBUSY;
+ 
++	/* Note: rx_handler_data must be set before rx_handler */
+ 	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
+ 	rcu_assign_pointer(dev->rx_handler, rx_handler);
+ 
+@@ -3145,6 +3145,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
+ 
+ 	ASSERT_RTNL();
+ 	RCU_INIT_POINTER(dev->rx_handler, NULL);
++	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
++	 * section has a guarantee to see a non NULL rx_handler_data
++	 * as well.
++	 */
++	synchronize_net();
+ 	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
+ }
+ EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index fc2fc72..0a1f159 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2260,11 +2260,8 @@ void tcp_enter_loss(struct sock *sk, int how)
+ 	if (tcp_is_reno(tp))
+ 		tcp_reset_reno_sack(tp);
+ 
+-	if (!how) {
+-		/* Push undo marker, if it was plain RTO and nothing
+-		 * was retransmitted. */
+-		tp->undo_marker = tp->snd_una;
+-	} else {
++	tp->undo_marker = tp->snd_una;
++	if (how) {
+ 		tp->sacked_out = 0;
+ 		tp->fackets_out = 0;
+ 	}
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 952f7dd..4dca494 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1587,8 +1587,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+ 			goto send_now;
+ 	}
+ 
+-	/* Ok, it looks like it is advisable to defer.  */
+-	tp->tso_deferred = 1 | (jiffies << 1);
++	/* Ok, it looks like it is advisable to defer.
++	 * Do not rearm the timer if already set to not break TCP ACK clocking.
++	 */
++	if (!tp->tso_deferred)
++		tp->tso_deferred = 1 | (jiffies << 1);
+ 
+ 	return 1;
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 81e0ad2..541a719 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4686,26 +4686,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
+ 
+ static int __net_init addrconf_init_net(struct net *net)
+ {
+-	int err;
++	int err = -ENOMEM;
+ 	struct ipv6_devconf *all, *dflt;
+ 
+-	err = -ENOMEM;
+-	all = &ipv6_devconf;
+-	dflt = &ipv6_devconf_dflt;
++	all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
++	if (all == NULL)
++		goto err_alloc_all;
+ 
+-	if (!net_eq(net, &init_net)) {
+-		all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
+-		if (all == NULL)
+-			goto err_alloc_all;
++	dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
++	if (dflt == NULL)
++		goto err_alloc_dflt;
+ 
+-		dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
+-		if (dflt == NULL)
+-			goto err_alloc_dflt;
+-	} else {
+-		/* these will be inherited by all namespaces */
+-		dflt->autoconf = ipv6_defaults.autoconf;
+-		dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
+-	}
++	/* these will be inherited by all namespaces */
++	dflt->autoconf = ipv6_defaults.autoconf;
++	dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
+ 
+ 	net->ipv6.devconf_all = all;
+ 	net->ipv6.devconf_dflt = dflt;
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index d813575..37aceed 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -111,6 +111,27 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
+ 	    ipv6_addr_loopback(&hdr->daddr))
+ 		goto err;
+ 
++	/* RFC4291 Errata ID: 3480
++	 * Interface-Local scope spans only a single interface on a
++	 * node and is useful only for loopback transmission of
++	 * multicast.  Packets with interface-local scope received
++	 * from another node must be discarded.
++	 */
++	if (!(skb->pkt_type == PACKET_LOOPBACK ||
++	      dev->flags & IFF_LOOPBACK) &&
++	    ipv6_addr_is_multicast(&hdr->daddr) &&
++	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
++		goto err;
++
++	/* RFC4291 2.7
++	 * Nodes must not originate a packet to a multicast address whose scope
++	 * field contains the reserved value 0; if such a packet is received, it
++	 * must be silently dropped.
++	 */
++	if (ipv6_addr_is_multicast(&hdr->daddr) &&
++	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
++		goto err;
++
+ 	/*
+ 	 * RFC4291 2.7
+ 	 * Multicast addresses must not be used as source addresses in IPv6
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index bb14c34..d6c291c 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -2584,8 +2584,10 @@ bed:
+ 				    NULL, NULL, NULL);
+ 
+ 		/* Check if the we got some results */
+-		if (!self->cachedaddr)
+-			return -EAGAIN;		/* Didn't find any devices */
++		if (!self->cachedaddr) {
++			err = -EAGAIN;		/* Didn't find any devices */
++			goto out;
++		}
+ 		daddr = self->cachedaddr;
+ 		/* Cleanup */
+ 		self->cachedaddr = 0;
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 9f40441..73d3f0c 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family,
+ 	int err = 0;
+ 
+ 	BUG_ON(grp->name[0] == '\0');
++	BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);
+ 
+ 	genl_lock();
+ 
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 85b9235..72d89e1 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -143,6 +143,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
+ 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
+ 	task->tk_waitqueue = queue;
+ 	queue->qlen++;
++	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
++	smp_wmb();
+ 	rpc_set_queued(task);
+ 
+ 	dprintk("RPC: %5u added to queue %p \"%s\"\n",
+@@ -399,8 +401,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
+  */
+ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
+ {
+-	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
+-		__rpc_do_wake_up_task(queue, task);
++	if (RPC_IS_QUEUED(task)) {
++		smp_rmb();
++		if (task->tk_waitqueue == queue)
++			__rpc_do_wake_up_task(queue, task);
++	}
+ }
+ 
+ /*
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 109e30b..fa5289a 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -374,7 +374,7 @@ static void unix_sock_destructor(struct sock *sk)
+ #endif
+ }
+ 
+-static int unix_release_sock(struct sock *sk, int embrion)
++static void unix_release_sock(struct sock *sk, int embrion)
+ {
+ 	struct unix_sock *u = unix_sk(sk);
+ 	struct path path;
+@@ -443,8 +443,6 @@ static int unix_release_sock(struct sock *sk, int embrion)
+ 
+ 	if (unix_tot_inflight)
+ 		unix_gc();		/* Garbage collect fds */
+-
+-	return 0;
+ }
+ 
+ static void init_peercred(struct sock *sk)
+@@ -694,9 +692,10 @@ static int unix_release(struct socket *sock)
+ 	if (!sk)
+ 		return 0;
+ 
++	unix_release_sock(sk, 0);
+ 	sock->sk = NULL;
+ 
+-	return unix_release_sock(sk, 0);
++	return 0;
+ }
+ 
+ static int unix_autobind(struct socket *sock)

Added: genpatches-2.6/trunk/3.4/2700_ThinkPad-30-brightness-control-fix.patch
===================================================================
--- genpatches-2.6/trunk/3.4/2700_ThinkPad-30-brightness-control-fix.patch	                        (rev 0)
+++ genpatches-2.6/trunk/3.4/2700_ThinkPad-30-brightness-control-fix.patch	2013-04-08 07:17:58 UTC (rev 2333)
@@ -0,0 +1,81 @@
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index cb96296..6c242ed 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -193,6 +193,13 @@  static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+ 	return 0;
+ }
+ 
++static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
++{
++	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
++	acpi_osi_setup("!Windows 2012");
++	return 0;
++}
++
+ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ 	{
+ 	.callback = dmi_disable_osi_vista,
+@@ -269,6 +276,61 @@  static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ 	},
+ 
+ 	/*
++	 * The following Lenovo models have a broken workaround in the
++	 * acpi_video backlight implementation to meet the Windows 8
++	 * requirement of 101 backlight levels. Reverting to pre-Win8
++	 * behavior fixes the problem.
++	 */
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad L430",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L430"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad T430s",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad T530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T530"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad W530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad X1 Carbon",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad X230",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
++		},
++	},
++
++	/*
+ 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+ 	 * Linux ignores it, except for the machines enumerated below.
+ 	 */
+


