From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Thu, 13 Dec 2018 11:38:04 +0000 (UTC)
Message-ID: <1544701063.edceebae5b074eeabc237ce5ebc7c0b97dece0f0.mpagano@gentoo>

commit:     edceebae5b074eeabc237ce5ebc7c0b97dece0f0
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Dec 13 11:37:43 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Dec 13 11:37:43 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=edceebae

proj/linux-patches: Linux patch 4.14.88

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1087_linux-4.14.88.patch | 2403 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2407 insertions(+)

diff --git a/0000_README b/0000_README
index b64e7d4..cd0b9dc 100644
--- a/0000_README
+++ b/0000_README
@@ -391,6 +391,10 @@ Patch:  1086_4.14.87.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.87
 
+Patch:  1087_4.14.88.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.88
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1087_linux-4.14.88.patch b/1087_linux-4.14.88.patch
new file mode 100644
index 0000000..4c1347a
--- /dev/null
+++ b/1087_linux-4.14.88.patch
@@ -0,0 +1,2403 @@
+diff --git a/Makefile b/Makefile
+index 322484348f3e..3fdee40861a1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 87
++SUBLEVEL = 88
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
+index b2aa9b32bff2..2c118a6ab358 100644
+--- a/arch/arm/probes/kprobes/opt-arm.c
++++ b/arch/arm/probes/kprobes/opt-arm.c
+@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
+ 	}
+ 
+ 	/* Copy arch-dep-instance from template. */
+-	memcpy(code, &optprobe_template_entry,
++	memcpy(code, (unsigned char *)optprobe_template_entry,
+ 			TMPL_END_IDX * sizeof(kprobe_opcode_t));
+ 
+ 	/* Adjust buffer according to instruction. */
+diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
+index f7e965f63274..ddd4a3932127 100644
+--- a/drivers/dma/cppi41.c
++++ b/drivers/dma/cppi41.c
+@@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan)
+ 
+ 	desc_phys = lower_32_bits(c->desc_phys);
+ 	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+-	if (!cdd->chan_busy[desc_num])
++	if (!cdd->chan_busy[desc_num]) {
++		struct cppi41_channel *cc, *_ct;
++
++		/*
++		 * channels might still be in the pending list if
++		 * cppi41_dma_issue_pending() is called after
++		 * cppi41_runtime_suspend() is called
++		 */
++		list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
++			if (cc != c)
++				continue;
++			list_del(&cc->node);
++			break;
++		}
+ 		return 0;
++	}
+ 
+ 	ret = cppi41_tear_down_chan(c);
+ 	if (ret)
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index f43e6dafe446..0f389e008ce6 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -1064,12 +1064,12 @@ static void dwc_issue_pending(struct dma_chan *chan)
+ /*
+  * Program FIFO size of channels.
+  *
+- * By default full FIFO (1024 bytes) is assigned to channel 0. Here we
++ * By default full FIFO (512 bytes) is assigned to channel 0. Here we
+  * slice FIFO on equal parts between channels.
+  */
+ static void idma32_fifo_partition(struct dw_dma *dw)
+ {
+-	u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
++	u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
+ 		    IDMA32C_FP_UPDATE;
+ 	u64 fifo_partition = 0;
+ 
+@@ -1082,7 +1082,7 @@ static void idma32_fifo_partition(struct dw_dma *dw)
+ 	/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
+ 	fifo_partition |= value << 32;
+ 
+-	/* Program FIFO Partition registers - 128 bytes for each channel */
++	/* Program FIFO Partition registers - 64 bytes per channel */
+ 	idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
+ 	idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
+ }
+diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
+index 9532d86a82f7..d99c8d8da9a0 100644
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -35,8 +35,8 @@
+ #define GPIO_MOCKUP_MAX_RANGES	(GPIO_MOCKUP_MAX_GC * 2)
+ 
+ enum {
+-	GPIO_MOCKUP_DIR_OUT = 0,
+-	GPIO_MOCKUP_DIR_IN = 1,
++	GPIO_MOCKUP_DIR_IN = 0,
++	GPIO_MOCKUP_DIR_OUT = 1,
+ };
+ 
+ /*
+@@ -112,7 +112,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
+ {
+ 	struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
+ 
+-	return chip->lines[offset].dir;
++	return !chip->lines[offset].dir;
+ }
+ 
+ static int gpio_mockup_name_lines(struct device *dev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+index 3b3326daf32b..0f5dc97ae920 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+@@ -52,6 +52,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
++MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
++MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
+ 
+ static const u32 golden_settings_tonga_a11[] =
+ {
+@@ -219,13 +222,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
+ 		chip_name = "tonga";
+ 		break;
+ 	case CHIP_POLARIS11:
+-		chip_name = "polaris11";
++		if (((adev->pdev->device == 0x67ef) &&
++		     ((adev->pdev->revision == 0xe0) ||
++		      (adev->pdev->revision == 0xe5))) ||
++		    ((adev->pdev->device == 0x67ff) &&
++		     ((adev->pdev->revision == 0xcf) ||
++		      (adev->pdev->revision == 0xef) ||
++		      (adev->pdev->revision == 0xff))))
++			chip_name = "polaris11_k";
++		else if ((adev->pdev->device == 0x67ef) &&
++			 (adev->pdev->revision == 0xe2))
++			chip_name = "polaris11_k";
++		else
++			chip_name = "polaris11";
+ 		break;
+ 	case CHIP_POLARIS10:
+-		chip_name = "polaris10";
++		if ((adev->pdev->device == 0x67df) &&
++		    ((adev->pdev->revision == 0xe1) ||
++		     (adev->pdev->revision == 0xf7)))
++			chip_name = "polaris10_k";
++		else
++			chip_name = "polaris10";
+ 		break;
+ 	case CHIP_POLARIS12:
+-		chip_name = "polaris12";
++		if (((adev->pdev->device == 0x6987) &&
++		     ((adev->pdev->revision == 0xc0) ||
++		      (adev->pdev->revision == 0xc3))) ||
++		    ((adev->pdev->device == 0x6981) &&
++		     ((adev->pdev->revision == 0x00) ||
++		      (adev->pdev->revision == 0x01) ||
++		      (adev->pdev->revision == 0x10))))
++			chip_name = "polaris12_k";
++		else
++			chip_name = "polaris12";
+ 		break;
+ 	case CHIP_FIJI:
+ 	case CHIP_CARRIZO:
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 3fc8c0d67592..fcc688df694c 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -266,6 +266,9 @@
+ 
+ #define USB_VENDOR_ID_CIDC		0x1677
+ 
++#define I2C_VENDOR_ID_CIRQUE		0x0488
++#define I2C_PRODUCT_ID_CIRQUE_121F	0x121F
++
+ #define USB_VENDOR_ID_CJTOUCH		0x24b8
+ #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020	0x0020
+ #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040	0x0040
+@@ -1001,6 +1004,7 @@
+ #define USB_VENDOR_ID_SYMBOL		0x05e0
+ #define USB_DEVICE_ID_SYMBOL_SCANNER_1	0x0800
+ #define USB_DEVICE_ID_SYMBOL_SCANNER_2	0x1300
++#define USB_DEVICE_ID_SYMBOL_SCANNER_3	0x1200
+ 
+ #define USB_VENDOR_ID_SYNAPTICS		0x06cb
+ #define USB_DEVICE_ID_SYNAPTICS_TP	0x0001
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index bb984cc9753b..d146a9b545ee 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -325,6 +325,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM,
+ 		USB_DEVICE_ID_ELECOM_BM084),
+ 	  HID_BATTERY_QUIRK_IGNORE },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL,
++		USB_DEVICE_ID_SYMBOL_SCANNER_3),
++	  HID_BATTERY_QUIRK_IGNORE },
+ 	{}
+ };
+ 
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index c3b9bd5dba75..07d92d4a9f7c 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1474,6 +1474,12 @@ static const struct hid_device_id mt_devices[] = {
+ 		MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
+ 			USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+ 
++	/* Cirque devices */
++	{ .driver_data = MT_CLS_WIN_8_DUAL,
++		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
++			I2C_VENDOR_ID_CIRQUE,
++			I2C_PRODUCT_ID_CIRQUE_121F) },
++
+ 	/* CJTouch panels */
+ 	{ .driver_data = MT_CLS_NSMU,
+ 		MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 752c52f7353d..43eaf54736f4 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -444,61 +444,16 @@ void vmbus_free_channels(void)
+ 	}
+ }
+ 
+-/*
+- * vmbus_process_offer - Process the offer by creating a channel/device
+- * associated with this offer
+- */
+-static void vmbus_process_offer(struct vmbus_channel *newchannel)
++/* Note: the function can run concurrently for primary/sub channels. */
++static void vmbus_add_channel_work(struct work_struct *work)
+ {
+-	struct vmbus_channel *channel;
+-	bool fnew = true;
++	struct vmbus_channel *newchannel =
++		container_of(work, struct vmbus_channel, add_channel_work);
++	struct vmbus_channel *primary_channel = newchannel->primary_channel;
+ 	unsigned long flags;
+ 	u16 dev_type;
+ 	int ret;
+ 
+-	/* Make sure this is a new offer */
+-	mutex_lock(&vmbus_connection.channel_mutex);
+-
+-	/*
+-	 * Now that we have acquired the channel_mutex,
+-	 * we can release the potentially racing rescind thread.
+-	 */
+-	atomic_dec(&vmbus_connection.offer_in_progress);
+-
+-	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+-		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
+-			newchannel->offermsg.offer.if_type) &&
+-			!uuid_le_cmp(channel->offermsg.offer.if_instance,
+-				newchannel->offermsg.offer.if_instance)) {
+-			fnew = false;
+-			break;
+-		}
+-	}
+-
+-	if (fnew)
+-		list_add_tail(&newchannel->listentry,
+-			      &vmbus_connection.chn_list);
+-
+-	mutex_unlock(&vmbus_connection.channel_mutex);
+-
+-	if (!fnew) {
+-		/*
+-		 * Check to see if this is a sub-channel.
+-		 */
+-		if (newchannel->offermsg.offer.sub_channel_index != 0) {
+-			/*
+-			 * Process the sub-channel.
+-			 */
+-			newchannel->primary_channel = channel;
+-			spin_lock_irqsave(&channel->lock, flags);
+-			list_add_tail(&newchannel->sc_list, &channel->sc_list);
+-			channel->num_sc++;
+-			spin_unlock_irqrestore(&channel->lock, flags);
+-		} else {
+-			goto err_free_chan;
+-		}
+-	}
+-
+ 	dev_type = hv_get_dev_type(newchannel);
+ 
+ 	init_vp_index(newchannel, dev_type);
+@@ -516,21 +471,22 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
+ 	/*
+ 	 * This state is used to indicate a successful open
+ 	 * so that when we do close the channel normally, we
+-	 * can cleanup properly
++	 * can cleanup properly.
+ 	 */
+ 	newchannel->state = CHANNEL_OPEN_STATE;
+ 
+-	if (!fnew) {
+-		if (channel->sc_creation_callback != NULL)
+-			channel->sc_creation_callback(newchannel);
++	if (primary_channel != NULL) {
++		/* newchannel is a sub-channel. */
++
++		if (primary_channel->sc_creation_callback != NULL)
++			primary_channel->sc_creation_callback(newchannel);
++
+ 		newchannel->probe_done = true;
+ 		return;
+ 	}
+ 
+ 	/*
+-	 * Start the process of binding this offer to the driver
+-	 * We need to set the DeviceObject field before calling
+-	 * vmbus_child_dev_add()
++	 * Start the process of binding the primary channel to the driver
+ 	 */
+ 	newchannel->device_obj = vmbus_device_create(
+ 		&newchannel->offermsg.offer.if_type,
+@@ -559,13 +515,28 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
+ 
+ err_deq_chan:
+ 	mutex_lock(&vmbus_connection.channel_mutex);
+-	list_del(&newchannel->listentry);
++
++	/*
++	 * We need to set the flag, otherwise
++	 * vmbus_onoffer_rescind() can be blocked.
++	 */
++	newchannel->probe_done = true;
++
++	if (primary_channel == NULL) {
++		list_del(&newchannel->listentry);
++	} else {
++		spin_lock_irqsave(&primary_channel->lock, flags);
++		list_del(&newchannel->sc_list);
++		spin_unlock_irqrestore(&primary_channel->lock, flags);
++	}
++
+ 	mutex_unlock(&vmbus_connection.channel_mutex);
+ 
+ 	if (newchannel->target_cpu != get_cpu()) {
+ 		put_cpu();
+ 		smp_call_function_single(newchannel->target_cpu,
+-					 percpu_channel_deq, newchannel, true);
++					 percpu_channel_deq,
++					 newchannel, true);
+ 	} else {
+ 		percpu_channel_deq(newchannel);
+ 		put_cpu();
+@@ -573,14 +544,104 @@ err_deq_chan:
+ 
+ 	vmbus_release_relid(newchannel->offermsg.child_relid);
+ 
+-err_free_chan:
+ 	free_channel(newchannel);
+ }
+ 
++/*
++ * vmbus_process_offer - Process the offer by creating a channel/device
++ * associated with this offer
++ */
++static void vmbus_process_offer(struct vmbus_channel *newchannel)
++{
++	struct vmbus_channel *channel;
++	struct workqueue_struct *wq;
++	unsigned long flags;
++	bool fnew = true;
++
++	mutex_lock(&vmbus_connection.channel_mutex);
++
++	/*
++	 * Now that we have acquired the channel_mutex,
++	 * we can release the potentially racing rescind thread.
++	 */
++	atomic_dec(&vmbus_connection.offer_in_progress);
++
++	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
++		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
++				 newchannel->offermsg.offer.if_type) &&
++		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
++				 newchannel->offermsg.offer.if_instance)) {
++			fnew = false;
++			break;
++		}
++	}
++
++	if (fnew)
++		list_add_tail(&newchannel->listentry,
++			      &vmbus_connection.chn_list);
++	else {
++		/*
++		 * Check to see if this is a valid sub-channel.
++		 */
++		if (newchannel->offermsg.offer.sub_channel_index == 0) {
++			mutex_unlock(&vmbus_connection.channel_mutex);
++			/*
++			 * Don't call free_channel(), because newchannel->kobj
++			 * is not initialized yet.
++			 */
++			kfree(newchannel);
++			WARN_ON_ONCE(1);
++			return;
++		}
++		/*
++		 * Process the sub-channel.
++		 */
++		newchannel->primary_channel = channel;
++		spin_lock_irqsave(&channel->lock, flags);
++		list_add_tail(&newchannel->sc_list, &channel->sc_list);
++		spin_unlock_irqrestore(&channel->lock, flags);
++	}
++
++	mutex_unlock(&vmbus_connection.channel_mutex);
++
++	/*
++	 * vmbus_process_offer() mustn't call channel->sc_creation_callback()
++	 * directly for sub-channels, because sc_creation_callback() ->
++	 * vmbus_open() may never get the host's response to the
++	 * OPEN_CHANNEL message (the host may rescind a channel at any time,
++	 * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
++	 * may not wake up the vmbus_open() as it's blocked due to a non-zero
++	 * vmbus_connection.offer_in_progress, and finally we have a deadlock.
++	 *
++	 * The above is also true for primary channels, if the related device
++	 * drivers use sync probing mode by default.
++	 *
++	 * And, usually the handling of primary channels and sub-channels can
++	 * depend on each other, so we should offload them to different
++	 * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
++	 * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
++	 * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
++	 * and waits for all the sub-channels to appear, but the latter
++	 * can't get the rtnl_lock and this blocks the handling of
++	 * sub-channels.
++	 */
++	INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
++	wq = fnew ? vmbus_connection.handle_primary_chan_wq :
++		    vmbus_connection.handle_sub_chan_wq;
++	queue_work(wq, &newchannel->add_channel_work);
++}
++
+ /*
+  * We use this state to statically distribute the channel interrupt load.
+  */
+ static int next_numa_node_id;
++/*
++ * init_vp_index() accesses global variables like next_numa_node_id, and
++ * it can run concurrently for primary channels and sub-channels: see
++ * vmbus_process_offer(), so we need the lock to protect the global
++ * variables.
++ */
++static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);
+ 
+ /*
+  * Starting with Win8, we can statically distribute the incoming
+@@ -618,6 +679,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
+ 		return;
+ 	}
+ 
++	spin_lock(&bind_channel_to_cpu_lock);
++
+ 	/*
+ 	 * Based on the channel affinity policy, we will assign the NUMA
+ 	 * nodes.
+@@ -700,6 +763,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
+ 	channel->target_cpu = cur_cpu;
+ 	channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
+ 
++	spin_unlock(&bind_channel_to_cpu_lock);
++
+ 	free_cpumask_var(available_mask);
+ }
+ 
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 5449fc59b7f5..4b1b70751be3 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -161,6 +161,20 @@ int vmbus_connect(void)
+ 		goto cleanup;
+ 	}
+ 
++	vmbus_connection.handle_primary_chan_wq =
++		create_workqueue("hv_pri_chan");
++	if (!vmbus_connection.handle_primary_chan_wq) {
++		ret = -ENOMEM;
++		goto cleanup;
++	}
++
++	vmbus_connection.handle_sub_chan_wq =
++		create_workqueue("hv_sub_chan");
++	if (!vmbus_connection.handle_sub_chan_wq) {
++		ret = -ENOMEM;
++		goto cleanup;
++	}
++
+ 	INIT_LIST_HEAD(&vmbus_connection.chn_msg_list);
+ 	spin_lock_init(&vmbus_connection.channelmsg_lock);
+ 
+@@ -251,10 +265,14 @@ void vmbus_disconnect(void)
+ 	 */
+ 	vmbus_initiate_unload(false);
+ 
+-	if (vmbus_connection.work_queue) {
+-		drain_workqueue(vmbus_connection.work_queue);
++	if (vmbus_connection.handle_sub_chan_wq)
++		destroy_workqueue(vmbus_connection.handle_sub_chan_wq);
++
++	if (vmbus_connection.handle_primary_chan_wq)
++		destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
++
++	if (vmbus_connection.work_queue)
+ 		destroy_workqueue(vmbus_connection.work_queue);
+-	}
+ 
+ 	if (vmbus_connection.int_page) {
+ 		free_pages((unsigned long)vmbus_connection.int_page, 0);
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 49569f8fe038..a166de6efd99 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -327,7 +327,14 @@ struct vmbus_connection {
+ 	struct list_head chn_list;
+ 	struct mutex channel_mutex;
+ 
++	/*
++	 * An offer message is handled first on the work_queue, and then
++	 * is further handled on handle_primary_chan_wq or
++	 * handle_sub_chan_wq.
++	 */
+ 	struct workqueue_struct *work_queue;
++	struct workqueue_struct *handle_primary_chan_wq;
++	struct workqueue_struct *handle_sub_chan_wq;
+ };
+ 
+ 
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 6fe2d0346073..b97984a5ddad 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -796,7 +796,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
+ 	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
+ 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
+ 		    &entry, sizeof(entry));
+-	entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
++	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
++		 (BIT_ULL(52)-1)) & ~7ULL;
+ 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
+ 		    &entry, sizeof(entry));
+ 	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index aaf3fed97477..e86c1c8ec7f6 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3086,7 +3086,7 @@ static int copy_context_table(struct intel_iommu *iommu,
+ 			}
+ 
+ 			if (old_ce)
+-				iounmap(old_ce);
++				memunmap(old_ce);
+ 
+ 			ret = 0;
+ 			if (devfn < 0x80)
+diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
+index d7def26ccf79..f5573bb9f450 100644
+--- a/drivers/iommu/intel-svm.c
++++ b/drivers/iommu/intel-svm.c
+@@ -589,7 +589,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
+ 			pr_err("%s: Page request without PASID: %08llx %08llx\n",
+ 			       iommu->name, ((unsigned long long *)req)[0],
+ 			       ((unsigned long long *)req)[1]);
+-			goto bad_req;
++			goto no_pasid;
+ 		}
+ 
+ 		if (!svm || svm->pasid != req->pasid) {
+diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
+index 5d0ba5f644c4..777aff1f549f 100644
+--- a/drivers/iommu/ipmmu-vmsa.c
++++ b/drivers/iommu/ipmmu-vmsa.c
+@@ -424,6 +424,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+ 
+ static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
+ {
++	if (!domain->mmu)
++		return;
++
+ 	/*
+ 	 * Disable the context. Flush the TLB as required when modifying the
+ 	 * context registers.
+diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
+index 6e6e978263b0..c834fea5f9b0 100644
+--- a/drivers/media/platform/omap3isp/isp.c
++++ b/drivers/media/platform/omap3isp/isp.c
+@@ -1592,6 +1592,8 @@ static void isp_pm_complete(struct device *dev)
+ 
+ static void isp_unregister_entities(struct isp_device *isp)
+ {
++	media_device_unregister(&isp->media_dev);
++
+ 	omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
+ 	omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
+ 	omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
+@@ -1602,7 +1604,6 @@ static void isp_unregister_entities(struct isp_device *isp)
+ 	omap3isp_stat_unregister_entities(&isp->isp_hist);
+ 
+ 	v4l2_device_unregister(&isp->v4l2_dev);
+-	media_device_unregister(&isp->media_dev);
+ 	media_device_cleanup(&isp->media_dev);
+ }
+ 
+diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
+index b49ca02b399d..09d5f7df6023 100644
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -149,15 +149,15 @@
+ #define	NAND_VERSION_MINOR_SHIFT	16
+ 
+ /* NAND OP_CMDs */
+-#define	PAGE_READ			0x2
+-#define	PAGE_READ_WITH_ECC		0x3
+-#define	PAGE_READ_WITH_ECC_SPARE	0x4
+-#define	PROGRAM_PAGE			0x6
+-#define	PAGE_PROGRAM_WITH_ECC		0x7
+-#define	PROGRAM_PAGE_SPARE		0x9
+-#define	BLOCK_ERASE			0xa
+-#define	FETCH_ID			0xb
+-#define	RESET_DEVICE			0xd
++#define	OP_PAGE_READ			0x2
++#define	OP_PAGE_READ_WITH_ECC		0x3
++#define	OP_PAGE_READ_WITH_ECC_SPARE	0x4
++#define	OP_PROGRAM_PAGE			0x6
++#define	OP_PAGE_PROGRAM_WITH_ECC	0x7
++#define	OP_PROGRAM_PAGE_SPARE		0x9
++#define	OP_BLOCK_ERASE			0xa
++#define	OP_FETCH_ID			0xb
++#define	OP_RESET_DEVICE			0xd
+ 
+ /* Default Value for NAND_DEV_CMD_VLD */
+ #define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
+@@ -629,11 +629,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
+ 
+ 	if (read) {
+ 		if (host->use_ecc)
+-			cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
++			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+ 		else
+-			cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
++			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
+ 	} else {
+-			cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
++		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+ 	}
+ 
+ 	if (host->use_ecc) {
+@@ -1030,7 +1030,7 @@ static int nandc_param(struct qcom_nand_host *host)
+ 	 * in use. we configure the controller to perform a raw read of 512
+ 	 * bytes to read onfi params
+ 	 */
+-	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
++	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
+ 	nandc_set_reg(nandc, NAND_ADDR0, 0);
+ 	nandc_set_reg(nandc, NAND_ADDR1, 0);
+ 	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+@@ -1084,7 +1084,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 
+ 	nandc_set_reg(nandc, NAND_FLASH_CMD,
+-		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
++		      OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
+ 	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
+ 	nandc_set_reg(nandc, NAND_ADDR1, 0);
+ 	nandc_set_reg(nandc, NAND_DEV0_CFG0,
+@@ -1115,7 +1115,7 @@ static int read_id(struct qcom_nand_host *host, int column)
+ 	if (column == -1)
+ 		return 0;
+ 
+-	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
++	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
+ 	nandc_set_reg(nandc, NAND_ADDR0, column);
+ 	nandc_set_reg(nandc, NAND_ADDR1, 0);
+ 	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
+@@ -1136,7 +1136,7 @@ static int reset(struct qcom_nand_host *host)
+ 	struct nand_chip *chip = &host->chip;
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 
+-	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
++	nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
+ 	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+ 
+ 	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
+index 8d89204b90d2..f22dd34f4f83 100644
+--- a/drivers/mtd/spi-nor/cadence-quadspi.c
++++ b/drivers/mtd/spi-nor/cadence-quadspi.c
+@@ -625,9 +625,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
+ 	       reg_base + CQSPI_REG_INDIRECTWR);
+ 
+ 	while (remaining > 0) {
++		size_t write_words, mod_bytes;
++
+ 		write_bytes = remaining > page_size ? page_size : remaining;
+-		iowrite32_rep(cqspi->ahb_base, txbuf,
+-			      DIV_ROUND_UP(write_bytes, 4));
++		write_words = write_bytes / 4;
++		mod_bytes = write_bytes % 4;
++		/* Write 4 bytes at a time then single bytes. */
++		if (write_words) {
++			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
++			txbuf += (write_words * 4);
++		}
++		if (mod_bytes) {
++			unsigned int temp = 0xFFFFFFFF;
++
++			memcpy(&temp, txbuf, mod_bytes);
++			iowrite32(temp, cqspi->ahb_base);
++			txbuf += mod_bytes;
++		}
+ 
+ 		ret = wait_for_completion_timeout(&cqspi->transfer_complete,
+ 						  msecs_to_jiffies
+@@ -638,7 +652,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
+ 			goto failwr;
+ 		}
+ 
+-		txbuf += write_bytes;
+ 		remaining -= write_bytes;
+ 
+ 		if (remaining > 0)
+diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
+index 11662f479e76..771a46083739 100644
+--- a/drivers/net/can/rcar/rcar_can.c
++++ b/drivers/net/can/rcar/rcar_can.c
+@@ -24,6 +24,9 @@
+ 
+ #define RCAR_CAN_DRV_NAME	"rcar_can"
+ 
++#define RCAR_SUPPORTED_CLOCKS	(BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
++				 BIT(CLKR_CLKEXT))
++
+ /* Mailbox configuration:
+  * mailbox 60 - 63 - Rx FIFO mailboxes
+  * mailbox 56 - 59 - Tx FIFO mailboxes
+@@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
+ 		goto fail_clk;
+ 	}
+ 
+-	if (clock_select >= ARRAY_SIZE(clock_names)) {
++	if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
+ 		err = -EINVAL;
+ 		dev_err(&pdev->dev, "invalid CAN clock selected\n");
+ 		goto fail_clk;
+diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
+index 291ca5187f12..9845e07d40cd 100644
+--- a/drivers/net/ethernet/amd/sunlance.c
++++ b/drivers/net/ethernet/amd/sunlance.c
+@@ -1418,7 +1418,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
+ 
+ 			prop = of_get_property(nd, "tpe-link-test?", NULL);
+ 			if (!prop)
+-				goto no_link_test;
++				goto node_put;
+ 
+ 			if (strcmp(prop, "true")) {
+ 				printk(KERN_NOTICE "SunLance: warning: overriding option "
+@@ -1427,6 +1427,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
+ 				       "to ecd@skynet.be\n");
+ 				auxio_set_lte(AUXIO_LTE_ON);
+ 			}
++node_put:
++			of_node_put(nd);
+ no_link_test:
+ 			lp->auto_select = 1;
+ 			lp->tpe = 0;
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index 828e2e56b75e..1b7f4342dab9 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -2187,6 +2187,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
+ #define PMF_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+ 					 E1HVN_MAX)
+ 
++/* Following is the DMAE channel number allocation for the clients.
++ *   MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
++ *   Driver: 0-3 and 8-11 (for PF dmae operations)
++ *           4 and 12 (for stats requests)
++ */
++#define BNX2X_FW_DMAE_C                 13 /* Channel for FW DMAE operations */
++
+ /* PCIE link and speed */
+ #define PCICFG_LINK_WIDTH		0x1f00000
+ #define PCICFG_LINK_WIDTH_SHIFT		20
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+index 8baf9d3eb4b1..453bfd83a070 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+@@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
+ 	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
+ 	rdata->path_id		= BP_PATH(bp);
+ 	rdata->network_cos_mode	= start_params->network_cos_mode;
++	rdata->dmae_cmd_id	= BNX2X_FW_DMAE_C;
+ 
+ 	rdata->vxlan_dst_port	= cpu_to_le16(start_params->vxlan_dst_port);
+ 	rdata->geneve_dst_port	= cpu_to_le16(start_params->geneve_dst_port);
+diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
+index 66928a922824..415fd93e9930 100644
+--- a/drivers/net/ethernet/faraday/ftmac100.c
++++ b/drivers/net/ethernet/faraday/ftmac100.c
+@@ -870,11 +870,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
+ 	struct net_device *netdev = dev_id;
+ 	struct ftmac100 *priv = netdev_priv(netdev);
+ 
+-	if (likely(netif_running(netdev))) {
+-		/* Disable interrupts for polling */
+-		ftmac100_disable_all_int(priv);
++	/* Disable interrupts for polling */
++	ftmac100_disable_all_int(priv);
++	if (likely(netif_running(netdev)))
+ 		napi_schedule(&priv->napi);
+-	}
+ 
+ 	return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 5c7134ccc1fd..14c53ed5cca6 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -457,8 +457,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
+ 
+ 		for (j = 0; j < rx_pool->size; j++) {
+ 			if (rx_pool->rx_buff[j].skb) {
+-				dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
+-				rx_pool->rx_buff[i].skb = NULL;
++				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
++				rx_pool->rx_buff[j].skb = NULL;
+ 			}
+ 		}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
+index 6dabd983e7e0..94f4dc4a77e9 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
++++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
+@@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
+ static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
+ 				  int align, u32 skip_mask, u32 *puid)
+ {
+-	u32 uid;
++	u32 uid = 0;
+ 	u32 res;
+ 	struct mlx4_zone_allocator *zone_alloc = zone->allocator;
+ 	struct mlx4_zone_entry *curr_node;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+index c68da1986e51..aaeb446bba62 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+@@ -541,8 +541,8 @@ struct slave_list {
+ struct resource_allocator {
+ 	spinlock_t alloc_lock; /* protect quotas */
+ 	union {
+-		int res_reserved;
+-		int res_port_rsvd[MLX4_MAX_PORTS];
++		unsigned int res_reserved;
++		unsigned int res_port_rsvd[MLX4_MAX_PORTS];
+ 	};
+ 	union {
+ 		int res_free;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
+index c7c0764991c9..20043f82c1d8 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
++++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
+@@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
+ 			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
+ 				     buf);
+ 
++		(*mpt_entry)->lkey = 0;
+ 		err = mlx4_SW2HW_MPT(dev, mailbox, key);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index ef2374699726..16953c4ebd71 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -440,8 +440,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
+ 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ 
+ 	/* Can't have multiple flags set here */
+-	if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
++	if (bitmap_weight((unsigned long *)&pq_flags,
++			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
++		DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+ 		goto err;
++	}
++
++	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
++		DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
++		goto err;
++	}
+ 
+ 	switch (pq_flags) {
+ 	case PQ_FLAGS_RLS:
+@@ -465,8 +473,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
+ 	}
+ 
+ err:
+-	DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+-	return NULL;
++	return &qm_info->start_pq;
+ }
+ 
+ /* save pq index in qm info */
+@@ -490,20 +497,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
+ {
+ 	u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
+ 
++	if (max_tc == 0) {
++		DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
++		       PQ_FLAGS_MCOS);
++		return p_hwfn->qm_info.start_pq;
++	}
++
+ 	if (tc > max_tc)
+ 		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+ 
+-	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
++	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
+ }
+ 
+ u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
+ {
+ 	u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
+ 
++	if (max_vf == 0) {
++		DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
++		       PQ_FLAGS_VFS);
++		return p_hwfn->qm_info.start_pq;
++	}
++
+ 	if (vf > max_vf)
+ 		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+ 
+-	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
++	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
+ }
+ 
+ u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
+index 719cdbfe1695..7746417130bd 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
+@@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
+ 	 */
+ 	do {
+ 		index = p_sb_attn->sb_index;
++		/* finish reading index before the loop condition */
++		dma_rmb();
+ 		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
+ 		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
+ 	} while (index != p_sb_attn->sb_index);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 954f7ce4cf28..ecc2d4296526 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -1561,9 +1561,9 @@ static int qed_drain(struct qed_dev *cdev)
+ 			return -EBUSY;
+ 		}
+ 		rc = qed_mcp_drain(hwfn, ptt);
++		qed_ptt_release(hwfn, ptt);
+ 		if (rc)
+ 			return rc;
+-		qed_ptt_release(hwfn, ptt);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 817451a1efd6..bd455a6cc82c 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -989,8 +989,6 @@ static void team_port_disable(struct team *team,
+ 	team->en_port_count--;
+ 	team_queue_override_port_del(team, port);
+ 	team_adjust_ops(team);
+-	team_notify_peers(team);
+-	team_mcast_rejoin(team);
+ 	team_lower_state_changed(port);
+ }
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+index e7584b842dce..eb5db94f5745 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+@@ -193,6 +193,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
+ 		}
+ 		break;
+ 	case BRCMU_CHSPEC_D11AC_BW_160:
++		ch->bw = BRCMU_CHAN_BW_160;
++		ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
++					 BRCMU_CHSPEC_D11AC_SB_SHIFT);
+ 		switch (ch->sb) {
+ 		case BRCMU_CHAN_SB_LLL:
+ 			ch->control_ch_num -= CH_70MHZ_APART;
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 477f9f2f6626..670224be3c8b 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2698,6 +2698,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
+ 
+ 	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ 
++	tasklet_hrtimer_init(&data->beacon_timer,
++			     mac80211_hwsim_beacon,
++			     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++
+ 	err = ieee80211_register_hw(hw);
+ 	if (err < 0) {
+ 		printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
+@@ -2722,10 +2726,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
+ 				    data->debugfs,
+ 				    data, &hwsim_simulate_radar);
+ 
+-	tasklet_hrtimer_init(&data->beacon_timer,
+-			     mac80211_hwsim_beacon,
+-			     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+-
+ 	spin_lock_bh(&hwsim_radio_lock);
+ 	list_add_tail(&data->list, &hwsim_radios);
+ 	spin_unlock_bh(&hwsim_radio_lock);
+diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
+index 86bc19ae30da..f7b0c39ac339 100644
+--- a/drivers/nvdimm/nd-core.h
++++ b/drivers/nvdimm/nd-core.h
+@@ -105,6 +105,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
+ 		struct nd_mapping *nd_mapping, resource_size_t *overlap);
+ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
+ resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
++int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
++		resource_size_t size);
+ resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
+ 		struct nd_label_id *label_id);
+ int alias_dpa_busy(struct device *dev, void *data);
+diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
+index 2adada1a5855..6d38191ff0da 100644
+--- a/drivers/nvdimm/pfn_devs.c
++++ b/drivers/nvdimm/pfn_devs.c
+@@ -589,14 +589,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
+ 			ALIGN_DOWN(phys, nd_pfn->align));
+ }
+ 
++/*
++ * Check if pmem collides with 'System RAM', or other regions when
++ * section aligned.  Trim it accordingly.
++ */
++static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
++{
++	struct nd_namespace_common *ndns = nd_pfn->ndns;
++	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
++	struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
++	const resource_size_t start = nsio->res.start;
++	const resource_size_t end = start + resource_size(&nsio->res);
++	resource_size_t adjust, size;
++
++	*start_pad = 0;
++	*end_trunc = 0;
++
++	adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
++	size = resource_size(&nsio->res) + adjust;
++	if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
++				IORES_DESC_NONE) == REGION_MIXED
++			|| nd_region_conflict(nd_region, start - adjust, size))
++		*start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
++
++	/* Now check that end of the range does not collide. */
++	adjust = PHYS_SECTION_ALIGN_UP(end) - end;
++	size = resource_size(&nsio->res) + adjust;
++	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
++				IORES_DESC_NONE) == REGION_MIXED
++			|| !IS_ALIGNED(end, nd_pfn->align)
++			|| nd_region_conflict(nd_region, start, size + adjust))
++		*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
++}
++
+ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ {
+ 	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
+ 	struct nd_namespace_common *ndns = nd_pfn->ndns;
+-	u32 start_pad = 0, end_trunc = 0;
++	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ 	resource_size_t start, size;
+-	struct nd_namespace_io *nsio;
+ 	struct nd_region *nd_region;
++	u32 start_pad, end_trunc;
+ 	struct nd_pfn_sb *pfn_sb;
+ 	unsigned long npfns;
+ 	phys_addr_t offset;
+@@ -628,30 +661,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ 
+ 	memset(pfn_sb, 0, sizeof(*pfn_sb));
+ 
+-	/*
+-	 * Check if pmem collides with 'System RAM' when section aligned and
+-	 * trim it accordingly
+-	 */
+-	nsio = to_nd_namespace_io(&ndns->dev);
+-	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+-	size = resource_size(&nsio->res);
+-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+-				IORES_DESC_NONE) == REGION_MIXED) {
+-		start = nsio->res.start;
+-		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+-	}
+-
+-	start = nsio->res.start;
+-	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+-	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+-				IORES_DESC_NONE) == REGION_MIXED
+-			|| !IS_ALIGNED(start + resource_size(&nsio->res),
+-				nd_pfn->align)) {
+-		size = resource_size(&nsio->res);
+-		end_trunc = start + size - phys_pmem_align_down(nd_pfn,
+-				start + size);
+-	}
+-
++	trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
+ 	if (start_pad + end_trunc)
+ 		dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
+ 				dev_name(&ndns->dev), start_pad + end_trunc);
+@@ -662,7 +672,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ 	 * implementation will limit the pfns advertised through
+ 	 * ->direct_access() to those that are included in the memmap.
+ 	 */
+-	start += start_pad;
++	start = nsio->res.start + start_pad;
+ 	size = resource_size(&nsio->res);
+ 	npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+ 			/ PAGE_SIZE);
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index 050deb56ee62..708043d20d0d 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -1112,6 +1112,47 @@ int nvdimm_has_cache(struct nd_region *nd_region)
+ }
+ EXPORT_SYMBOL_GPL(nvdimm_has_cache);
+ 
++struct conflict_context {
++	struct nd_region *nd_region;
++	resource_size_t start, size;
++};
++
++static int region_conflict(struct device *dev, void *data)
++{
++	struct nd_region *nd_region;
++	struct conflict_context *ctx = data;
++	resource_size_t res_end, region_end, region_start;
++
++	if (!is_memory(dev))
++		return 0;
++
++	nd_region = to_nd_region(dev);
++	if (nd_region == ctx->nd_region)
++		return 0;
++
++	res_end = ctx->start + ctx->size;
++	region_start = nd_region->ndr_start;
++	region_end = region_start + nd_region->ndr_size;
++	if (ctx->start >= region_start && ctx->start < region_end)
++		return -EBUSY;
++	if (res_end > region_start && res_end <= region_end)
++		return -EBUSY;
++	return 0;
++}
++
++int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
++		resource_size_t size)
++{
++	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
++	struct conflict_context ctx = {
++		.nd_region = nd_region,
++		.start = start,
++		.size = size,
++	};
++
++	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
++}
++
+ void __exit nd_region_devs_exit(void)
+ {
+ 	ida_destroy(&region_ida);
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
+index b18fe2014cf2..0847d05e138b 100644
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -59,6 +59,7 @@ struct virtio_ccw_device {
+ 	unsigned int revision; /* Transport revision */
+ 	wait_queue_head_t wait_q;
+ 	spinlock_t lock;
++	struct mutex io_lock; /* Serializes I/O requests */
+ 	struct list_head virtqueues;
+ 	unsigned long indicators;
+ 	unsigned long indicators2;
+@@ -299,6 +300,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
+ 	unsigned long flags;
+ 	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
+ 
++	mutex_lock(&vcdev->io_lock);
+ 	do {
+ 		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+ 		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
+@@ -311,7 +313,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
+ 		cpu_relax();
+ 	} while (ret == -EBUSY);
+ 	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
+-	return ret ? ret : vcdev->err;
++	ret = ret ? ret : vcdev->err;
++	mutex_unlock(&vcdev->io_lock);
++	return ret;
+ }
+ 
+ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
+@@ -831,6 +835,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
+ 	int ret;
+ 	struct ccw1 *ccw;
+ 	void *config_area;
++	unsigned long flags;
+ 
+ 	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ 	if (!ccw)
+@@ -849,11 +854,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
+ 	if (ret)
+ 		goto out_free;
+ 
++	spin_lock_irqsave(&vcdev->lock, flags);
+ 	memcpy(vcdev->config, config_area, offset + len);
+-	if (buf)
+-		memcpy(buf, &vcdev->config[offset], len);
+ 	if (vcdev->config_ready < offset + len)
+ 		vcdev->config_ready = offset + len;
++	spin_unlock_irqrestore(&vcdev->lock, flags);
++	if (buf)
++		memcpy(buf, config_area + offset, len);
+ 
+ out_free:
+ 	kfree(config_area);
+@@ -867,6 +874,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
+ 	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ 	struct ccw1 *ccw;
+ 	void *config_area;
++	unsigned long flags;
+ 
+ 	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ 	if (!ccw)
+@@ -879,9 +887,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
+ 	/* Make sure we don't overwrite fields. */
+ 	if (vcdev->config_ready < offset)
+ 		virtio_ccw_get_config(vdev, 0, NULL, offset);
++	spin_lock_irqsave(&vcdev->lock, flags);
+ 	memcpy(&vcdev->config[offset], buf, len);
+ 	/* Write the config area to the host. */
+ 	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
++	spin_unlock_irqrestore(&vcdev->lock, flags);
+ 	ccw->cmd_code = CCW_CMD_WRITE_CONF;
+ 	ccw->flags = 0;
+ 	ccw->count = offset + len;
+@@ -1250,6 +1260,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
+ 	init_waitqueue_head(&vcdev->wait_q);
+ 	INIT_LIST_HEAD(&vcdev->virtqueues);
+ 	spin_lock_init(&vcdev->lock);
++	mutex_init(&vcdev->io_lock);
+ 
+ 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ 	dev_set_drvdata(&cdev->dev, vcdev);
+diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
+index 26841a7b6213..99b400c4190f 100644
+--- a/drivers/staging/lustre/lnet/lnet/config.c
++++ b/drivers/staging/lustre/lnet/lnet/config.c
+@@ -354,8 +354,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
+ 				CERROR("Can't allocate net interface name\n");
+ 				goto failed;
+ 			}
+-			strncpy(ni->ni_interfaces[niface], iface,
+-				strlen(iface));
++			strcpy(ni->ni_interfaces[niface], iface);
+ 			niface++;
+ 			iface = comma;
+ 		} while (iface);
+diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+index c2aadb2d1fea..fa46fe9e1bd9 100644
+--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
++++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+@@ -645,7 +645,7 @@ repeat_fid2path:
+ 		memmove(ptr + strlen(gf->gf_path) + 1, ptr,
+ 			strlen(ori_gf->gf_path));
+ 
+-		strncpy(ptr, gf->gf_path, strlen(gf->gf_path));
++		strcpy(ptr, gf->gf_path);
+ 		ptr += strlen(gf->gf_path);
+ 		*ptr = '/';
+ 	}
+diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
+index 0fa7cb2423d8..0320c089f688 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
++++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
+@@ -2860,9 +2860,7 @@ ia_css_debug_pipe_graph_dump_stage(
+ 			if (l <= ENABLE_LINE_MAX_LENGTH) {
+ 				/* It fits on one line, copy string and init */
+ 				/* other helper strings with empty string */
+-				strcpy_s(enable_info,
+-					sizeof(enable_info),
+-					ei);
++				strscpy(enable_info, ei, sizeof(enable_info));
+ 			} else {
+ 				/* Too big for one line, find last comma */
+ 				p = ENABLE_LINE_MAX_LENGTH;
+diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
+index a077069d6227..7e367452c339 100644
+--- a/drivers/staging/rtl8712/mlme_linux.c
++++ b/drivers/staging/rtl8712/mlme_linux.c
+@@ -158,7 +158,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie)
+ 		p = buff;
+ 		p += sprintf(p, "ASSOCINFO(ReqIEs=");
+ 		len = sec_ie[1] + 2;
+-		len =  (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX - 1;
++		len =  (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
+ 		for (i = 0; i < len; i++)
+ 			p += sprintf(p, "%02x", sec_ie[i]);
+ 		p += sprintf(p, ")");
+diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
+index bf1ac22bae1c..98c7e8b229d1 100644
+--- a/drivers/staging/rtl8712/rtl871x_mlme.c
++++ b/drivers/staging/rtl8712/rtl871x_mlme.c
+@@ -1361,7 +1361,7 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
+ 		     u8 *out_ie, uint in_len)
+ {
+ 	u8 authmode = 0, match;
+-	u8 sec_ie[255], uncst_oui[4], bkup_ie[255];
++	u8 sec_ie[IW_CUSTOM_MAX], uncst_oui[4], bkup_ie[255];
+ 	u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
+ 	uint ielength, cnt, remove_cnt;
+ 	int iEntry;
+diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+index b6d137f505e1..111752f0bc27 100644
+--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
++++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+@@ -1574,7 +1574,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
+ 	if (pstat->aid > 0) {
+ 		DBG_871X("  old AID %d\n", pstat->aid);
+ 	} else {
+-		for (pstat->aid = 1; pstat->aid < NUM_STA; pstat->aid++)
++		for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
+ 			if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
+ 				break;
+ 
+diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
+index fb45770d47aa..fa909fa3c4cd 100644
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -222,17 +222,17 @@ static int mtk8250_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, data);
+ 
+-	pm_runtime_enable(&pdev->dev);
+-	if (!pm_runtime_enabled(&pdev->dev)) {
+-		err = mtk8250_runtime_resume(&pdev->dev);
+-		if (err)
+-			return err;
+-	}
++	err = mtk8250_runtime_resume(&pdev->dev);
++	if (err)
++		return err;
+ 
+ 	data->line = serial8250_register_8250_port(&uart);
+ 	if (data->line < 0)
+ 		return data->line;
+ 
++	pm_runtime_set_active(&pdev->dev);
++	pm_runtime_enable(&pdev->dev);
++
+ 	return 0;
+ }
+ 
+@@ -243,13 +243,11 @@ static int mtk8250_remove(struct platform_device *pdev)
+ 	pm_runtime_get_sync(&pdev->dev);
+ 
+ 	serial8250_unregister_port(data->line);
++	mtk8250_runtime_suspend(&pdev->dev);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 
+-	if (!pm_runtime_status_suspended(&pdev->dev))
+-		mtk8250_runtime_suspend(&pdev->dev);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
+index c448225ef5ca..f2b0d8cee8ef 100644
+--- a/drivers/tty/serial/kgdboc.c
++++ b/drivers/tty/serial/kgdboc.c
+@@ -232,7 +232,7 @@ static void kgdboc_put_char(u8 chr)
+ 
+ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
+ {
+-	int len = strlen(kmessage);
++	size_t len = strlen(kmessage);
+ 
+ 	if (len >= MAX_CONFIG_LEN) {
+ 		printk(KERN_ERR "kgdboc: config string too long\n");
+@@ -254,7 +254,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
+ 
+ 	strcpy(config, kmessage);
+ 	/* Chop out \n char as a result of echo */
+-	if (config[len - 1] == '\n')
++	if (len && config[len - 1] == '\n')
+ 		config[len - 1] = '\0';
+ 
+ 	if (configured == 1)
+diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
+index 6b137194069f..c93a33701d32 100644
+--- a/drivers/tty/tty_port.c
++++ b/drivers/tty/tty_port.c
+@@ -639,7 +639,8 @@ void tty_port_close(struct tty_port *port, struct tty_struct *tty,
+ 	if (tty_port_close_start(port, tty, filp) == 0)
+ 		return;
+ 	tty_port_shutdown(port, tty);
+-	set_bit(TTY_IO_ERROR, &tty->flags);
++	if (!port->console)
++		set_bit(TTY_IO_ERROR, &tty->flags);
+ 	tty_port_close_end(port, tty);
+ 	tty_port_tty_set(port, NULL);
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 638dc6f66d70..a073cb5be013 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2231,7 +2231,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
+ 		/* descriptor may appear anywhere in config */
+ 		err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
+ 				le16_to_cpu(udev->config[0].desc.wTotalLength),
+-				USB_DT_OTG, (void **) &desc);
++				USB_DT_OTG, (void **) &desc, sizeof(*desc));
+ 		if (err || !(desc->bmAttributes & USB_OTG_HNP))
+ 			return 0;
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 808437c5ec49..cf378b1ed373 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -188,6 +188,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Midiman M-Audio Keystation 88es */
+ 	{ USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* SanDisk Ultra Fit and Ultra Flair */
++	{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
++	{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* M-Systems Flash Disk Pioneers */
+ 	{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index f8b50eaf6d1e..7a4e3da549fe 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -833,14 +833,14 @@ EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
+  */
+ 
+ int __usb_get_extra_descriptor(char *buffer, unsigned size,
+-			       unsigned char type, void **ptr)
++			       unsigned char type, void **ptr, size_t minsize)
+ {
+ 	struct usb_descriptor_header *header;
+ 
+ 	while (size >= sizeof(struct usb_descriptor_header)) {
+ 		header = (struct usb_descriptor_header *)buffer;
+ 
+-		if (header->bLength < 2) {
++		if (header->bLength < 2 || header->bLength > size) {
+ 			printk(KERN_ERR
+ 				"%s: bogus descriptor, type %d length %d\n",
+ 				usbcore_name,
+@@ -849,7 +849,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
+ 			return -1;
+ 		}
+ 
+-		if (header->bDescriptorType == type) {
++		if (header->bDescriptorType == type && header->bLength >= minsize) {
+ 			*ptr = header;
+ 			return 0;
+ 		}
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 17467545391b..52e6897fa35a 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -219,7 +219,6 @@ struct ffs_io_data {
+ 
+ 	struct mm_struct *mm;
+ 	struct work_struct work;
+-	struct work_struct cancellation_work;
+ 
+ 	struct usb_ep *ep;
+ 	struct usb_request *req;
+@@ -1074,31 +1073,22 @@ ffs_epfile_open(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
+-static void ffs_aio_cancel_worker(struct work_struct *work)
+-{
+-	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+-						   cancellation_work);
+-
+-	ENTER();
+-
+-	usb_ep_dequeue(io_data->ep, io_data->req);
+-}
+-
+ static int ffs_aio_cancel(struct kiocb *kiocb)
+ {
+ 	struct ffs_io_data *io_data = kiocb->private;
+-	struct ffs_data *ffs = io_data->ffs;
++	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+ 	int value;
+ 
+ 	ENTER();
+ 
+-	if (likely(io_data && io_data->ep && io_data->req)) {
+-		INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
+-		queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
+-		value = -EINPROGRESS;
+-	} else {
++	spin_lock_irq(&epfile->ffs->eps_lock);
++
++	if (likely(io_data && io_data->ep && io_data->req))
++		value = usb_ep_dequeue(io_data->ep, io_data->req);
++	else
+ 		value = -EINVAL;
+-	}
++
++	spin_unlock_irq(&epfile->ffs->eps_lock);
+ 
+ 	return value;
+ }
+diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
+index da3b18038d23..216069c396a0 100644
+--- a/drivers/usb/host/hwa-hc.c
++++ b/drivers/usb/host/hwa-hc.c
+@@ -654,7 +654,7 @@ static int hwahc_security_create(struct hwahc *hwahc)
+ 	top = itr + itr_size;
+ 	result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
+ 			le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
+-			USB_DT_SECURITY, (void **) &secd);
++			USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
+ 	if (result == -1) {
+ 		dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
+ 		return 0;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 4b07b6859b4c..0fbc549cc55c 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -144,6 +144,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		 pdev->device == 0x43bb))
+ 		xhci->quirks |= XHCI_SUSPEND_DELAY;
+ 
++	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
++	    (pdev->device == 0x15e0 || pdev->device == 0x15e1))
++		xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index faf048682194..930eecd86429 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -918,6 +918,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
+ 	unsigned int		delay = XHCI_MAX_HALT_USEC;
+ 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
+ 	u32			command;
++	u32			res;
+ 
+ 	if (!hcd->state)
+ 		return 0;
+@@ -969,11 +970,28 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
+ 	command = readl(&xhci->op_regs->command);
+ 	command |= CMD_CSS;
+ 	writel(command, &xhci->op_regs->command);
++	xhci->broken_suspend = 0;
+ 	if (xhci_handshake(&xhci->op_regs->status,
+ 				STS_SAVE, 0, 10 * 1000)) {
+-		xhci_warn(xhci, "WARN: xHC save state timeout\n");
+-		spin_unlock_irq(&xhci->lock);
+-		return -ETIMEDOUT;
++	/*
++	 * AMD SNPS xHC 3.0 occasionally does not clear the
++	 * SSS bit of USBSTS and when driver tries to poll
++	 * to see if the xHC clears BIT(8) which never happens
++	 * and driver assumes that controller is not responding
++	 * and times out. To workaround this, its good to check
++	 * if SRE and HCE bits are not set (as per xhci
++	 * Section 5.4.2) and bypass the timeout.
++	 */
++		res = readl(&xhci->op_regs->status);
++		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
++		    (((res & STS_SRE) == 0) &&
++				((res & STS_HCE) == 0))) {
++			xhci->broken_suspend = 1;
++		} else {
++			xhci_warn(xhci, "WARN: xHC save state timeout\n");
++			spin_unlock_irq(&xhci->lock);
++			return -ETIMEDOUT;
++		}
+ 	}
+ 	spin_unlock_irq(&xhci->lock);
+ 
+@@ -1026,7 +1044,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+ 
+ 	spin_lock_irq(&xhci->lock);
+-	if (xhci->quirks & XHCI_RESET_ON_RESUME)
++	if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
+ 		hibernated = true;
+ 
+ 	if (!hibernated) {
+@@ -4363,6 +4381,14 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
+ {
+ 	unsigned long long timeout_ns;
+ 
++	/* Prevent U1 if service interval is shorter than U1 exit latency */
++	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
++		if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
++			dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
++			return USB3_LPM_DISABLED;
++		}
++	}
++
+ 	if (xhci->quirks & XHCI_INTEL_HOST)
+ 		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ 	else
+@@ -4419,6 +4445,14 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
+ {
+ 	unsigned long long timeout_ns;
+ 
++	/* Prevent U2 if service interval is shorter than U2 exit latency */
++	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
++		if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
++			dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
++			return USB3_LPM_DISABLED;
++		}
++	}
++
+ 	if (xhci->quirks & XHCI_INTEL_HOST)
+ 		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ 	else
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 74ba20556020..1ccff2d9dee9 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1839,6 +1839,7 @@ struct xhci_hcd {
+ #define XHCI_SUSPEND_DELAY	BIT_ULL(30)
+ #define XHCI_INTEL_USB_ROLE_SW	BIT_ULL(31)
+ #define XHCI_RESET_PLL_ON_DISCONNECT	BIT_ULL(34)
++#define XHCI_SNPS_BROKEN_SUSPEND    BIT_ULL(35)
+ 
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+@@ -1870,6 +1871,8 @@ struct xhci_hcd {
+ 
+ 	/* platform-specific data -- must come last */
+ 	unsigned long		priv[0] __aligned(sizeof(s64));
++	/* Broken Suspend flag for SNPS Suspend resume issue */
++	u8			broken_suspend;
+ };
+ 
+ /* Platform specific overrides to generic XHCI hc_driver ops */
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index 288fe3e69d52..03be7c75c5be 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -64,6 +64,7 @@ static const struct usb_device_id appledisplay_table[] = {
+ 	{ APPLEDISPLAY_DEVICE(0x921c) },
+ 	{ APPLEDISPLAY_DEVICE(0x921d) },
+ 	{ APPLEDISPLAY_DEVICE(0x9222) },
++	{ APPLEDISPLAY_DEVICE(0x9226) },
+ 	{ APPLEDISPLAY_DEVICE(0x9236) },
+ 
+ 	/* Terminating entry */
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index c9de9c41aa97..b044a0800805 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -15,6 +15,7 @@
+ #include <net/sock.h>
+ #include <linux/virtio_vsock.h>
+ #include <linux/vhost.h>
++#include <linux/hashtable.h>
+ 
+ #include <net/af_vsock.h>
+ #include "vhost.h"
+@@ -27,14 +28,14 @@ enum {
+ 
+ /* Used to track all the vhost_vsock instances on the system. */
+ static DEFINE_SPINLOCK(vhost_vsock_lock);
+-static LIST_HEAD(vhost_vsock_list);
++static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
+ 
+ struct vhost_vsock {
+ 	struct vhost_dev dev;
+ 	struct vhost_virtqueue vqs[2];
+ 
+-	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
+-	struct list_head list;
++	/* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
++	struct hlist_node hash;
+ 
+ 	struct vhost_work send_pkt_work;
+ 	spinlock_t send_pkt_list_lock;
+@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
+ 	return VHOST_VSOCK_DEFAULT_HOST_CID;
+ }
+ 
+-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
++/* Callers that dereference the return value must hold vhost_vsock_lock or the
++ * RCU read lock.
++ */
++static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+ {
+ 	struct vhost_vsock *vsock;
+ 
+-	list_for_each_entry(vsock, &vhost_vsock_list, list) {
++	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
+ 		u32 other_cid = vsock->guest_cid;
+ 
+ 		/* Skip instances that have no CID yet */
+@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
+ 	return NULL;
+ }
+ 
+-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+-{
+-	struct vhost_vsock *vsock;
+-
+-	spin_lock_bh(&vhost_vsock_lock);
+-	vsock = __vhost_vsock_get(guest_cid);
+-	spin_unlock_bh(&vhost_vsock_lock);
+-
+-	return vsock;
+-}
+-
+ static void
+ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 			    struct vhost_virtqueue *vq)
+@@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+ 	struct vhost_vsock *vsock;
+ 	int len = pkt->len;
+ 
++	rcu_read_lock();
++
+ 	/* Find the vhost_vsock according to guest context id  */
+ 	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+ 	if (!vsock) {
++		rcu_read_unlock();
+ 		virtio_transport_free_pkt(pkt);
+ 		return -ENODEV;
+ 	}
+@@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+ 	spin_unlock_bh(&vsock->send_pkt_list_lock);
+ 
+ 	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
++
++	rcu_read_unlock();
+ 	return len;
+ }
+ 
+@@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+ 	struct vhost_vsock *vsock;
+ 	struct virtio_vsock_pkt *pkt, *n;
+ 	int cnt = 0;
++	int ret = -ENODEV;
+ 	LIST_HEAD(freeme);
+ 
++	rcu_read_lock();
++
+ 	/* Find the vhost_vsock according to guest context id  */
+ 	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+ 	if (!vsock)
+-		return -ENODEV;
++		goto out;
+ 
+ 	spin_lock_bh(&vsock->send_pkt_list_lock);
+ 	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+@@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+ 			vhost_poll_queue(&tx_vq->poll);
+ 	}
+ 
+-	return 0;
++	ret = 0;
++out:
++	rcu_read_unlock();
++	return ret;
+ }
+ 
+ static struct virtio_vsock_pkt *
+@@ -531,10 +535,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+ 	spin_lock_init(&vsock->send_pkt_list_lock);
+ 	INIT_LIST_HEAD(&vsock->send_pkt_list);
+ 	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+-
+-	spin_lock_bh(&vhost_vsock_lock);
+-	list_add_tail(&vsock->list, &vhost_vsock_list);
+-	spin_unlock_bh(&vhost_vsock_lock);
+ 	return 0;
+ 
+ out:
+@@ -575,9 +575,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+ 	struct vhost_vsock *vsock = file->private_data;
+ 
+ 	spin_lock_bh(&vhost_vsock_lock);
+-	list_del(&vsock->list);
++	if (vsock->guest_cid)
++		hash_del_rcu(&vsock->hash);
+ 	spin_unlock_bh(&vhost_vsock_lock);
+ 
++	/* Wait for other CPUs to finish using vsock */
++	synchronize_rcu();
++
+ 	/* Iterating over all connections for all CIDs to find orphans is
+ 	 * inefficient.  Room for improvement here. */
+ 	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+@@ -618,12 +622,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
+ 
+ 	/* Refuse if CID is already in use */
+ 	spin_lock_bh(&vhost_vsock_lock);
+-	other = __vhost_vsock_get(guest_cid);
++	other = vhost_vsock_get(guest_cid);
+ 	if (other && other != vsock) {
+ 		spin_unlock_bh(&vhost_vsock_lock);
+ 		return -EADDRINUSE;
+ 	}
++
++	if (vsock->guest_cid)
++		hash_del_rcu(&vsock->hash);
++
+ 	vsock->guest_cid = guest_cid;
++	hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+ 	spin_unlock_bh(&vhost_vsock_lock);
+ 
+ 	return 0;
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 925844343038..ca98afda3cdb 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -174,7 +174,7 @@ cifs_bp_rename_retry:
+ 
+ 		cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
+ 		memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
+-		full_path[dfsplen] = '\\';
++		full_path[dfsplen] = dirsep;
+ 		for (i = 0; i < pplen-1; i++)
+ 			if (full_path[dfsplen+1+i] == '/')
+ 				full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 8f6e7c3a10f8..c68b319b07aa 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -468,6 +468,9 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
+ 	struct fscrypt_ctx *ctx = NULL;
+ 	struct bio *bio;
+ 
++	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
++		return ERR_PTR(-EFAULT);
++
+ 	if (f2fs_encrypted_file(inode)) {
+ 		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ 		if (IS_ERR(ctx))
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index 9a40724dbaa6..50818b519df8 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -62,14 +62,16 @@ static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+ 	}
+ }
+ 
+-static bool __written_first_block(struct f2fs_sb_info *sbi,
++static int __written_first_block(struct f2fs_sb_info *sbi,
+ 					struct f2fs_inode *ri)
+ {
+ 	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
+ 
+-	if (is_valid_data_blkaddr(sbi, addr))
+-		return true;
+-	return false;
++	if (!__is_valid_data_blkaddr(addr))
++		return 1;
++	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
++		return -EFAULT;
++	return 0;
+ }
+ 
+ static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+@@ -253,6 +255,7 @@ static int do_read_inode(struct inode *inode)
+ 	struct page *node_page;
+ 	struct f2fs_inode *ri;
+ 	projid_t i_projid;
++	int err;
+ 
+ 	/* Check if ino is within scope */
+ 	if (check_nid_range(sbi, inode->i_ino))
+@@ -307,7 +310,12 @@ static int do_read_inode(struct inode *inode)
+ 	/* get rdev by using inline_info */
+ 	__get_inode_rdev(inode, ri);
+ 
+-	if (__written_first_block(sbi, ri))
++	err = __written_first_block(sbi, ri);
++	if (err < 0) {
++		f2fs_put_page(node_page, 1);
++		return err;
++	}
++	if (!err)
+ 		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ 
+ 	if (!need_inode_block_update(sbi, inode->i_ino))
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index b0fa83a60754..13612a848378 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1365,12 +1365,7 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
+ 				task))
+ 		return;
+ 
+-	if (ff_layout_read_prepare_common(task, hdr))
+-		return;
+-
+-	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
+-			hdr->args.lock_context, FMODE_READ) == -EIO)
+-		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
++	ff_layout_read_prepare_common(task, hdr);
+ }
+ 
+ static void ff_layout_read_call_done(struct rpc_task *task, void *data)
+@@ -1539,12 +1534,7 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
+ 				task))
+ 		return;
+ 
+-	if (ff_layout_write_prepare_common(task, hdr))
+-		return;
+-
+-	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
+-			hdr->args.lock_context, FMODE_WRITE) == -EIO)
+-		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
++	ff_layout_write_prepare_common(task, hdr);
+ }
+ 
+ static void ff_layout_write_call_done(struct rpc_task *task, void *data)
+@@ -1734,6 +1724,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+ 	fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
+ 	if (fh)
+ 		hdr->args.fh = fh;
++
++	if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
++		goto out_failed;
++
+ 	/*
+ 	 * Note that if we ever decide to split across DSes,
+ 	 * then we may need to handle dense-like offsets.
+@@ -1796,6 +1790,9 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
+ 	if (fh)
+ 		hdr->args.fh = fh;
+ 
++	if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
++		goto out_failed;
++
+ 	/*
+ 	 * Note that if we ever decide to split across DSes,
+ 	 * then we may need to handle dense-like offsets.
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
+index 679cb087ef3f..d6515f1584f3 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.h
++++ b/fs/nfs/flexfilelayout/flexfilelayout.h
+@@ -214,6 +214,10 @@ unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
+ 		unsigned int maxnum);
+ struct nfs_fh *
+ nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
++int
++nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
++				u32 mirror_idx,
++				nfs4_stateid *stateid);
+ 
+ struct nfs4_pnfs_ds *
+ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index d62279d3fc5d..9f69e83810ca 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -369,6 +369,25 @@ out:
+ 	return fh;
+ }
+ 
++int
++nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
++				u32 mirror_idx,
++				nfs4_stateid *stateid)
++{
++	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
++
++	if (!ff_layout_mirror_valid(lseg, mirror, false)) {
++		pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
++			__func__, mirror_idx);
++		goto out;
++	}
++
++	nfs4_stateid_copy(stateid, &mirror->stateid);
++	return 1;
++out:
++	return 0;
++}
++
+ /**
+  * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
+  * @lseg: the layout segment we're operating on
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index 0c51f753652d..d1324d3c72b0 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -869,6 +869,13 @@ struct vmbus_channel {
+ 
+ 	bool probe_done;
+ 
++	/*
++	 * We must offload the handling of the primary/sub channels
++	 * from the single-threaded vmbus_connection.work_queue to
++	 * two different workqueue, otherwise we can block
++	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
++	 */
++	struct work_struct add_channel_work;
+ };
+ 
+ static inline bool is_hvsock_channel(const struct vmbus_channel *c)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 4192a1755ccb..8c7ba40cf021 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -407,11 +407,11 @@ struct usb_host_bos {
+ };
+ 
+ int __usb_get_extra_descriptor(char *buffer, unsigned size,
+-	unsigned char type, void **ptr);
++	unsigned char type, void **ptr, size_t min);
+ #define usb_get_extra_descriptor(ifpoint, type, ptr) \
+ 				__usb_get_extra_descriptor((ifpoint)->extra, \
+ 				(ifpoint)->extralen, \
+-				type, (void **)ptr)
++				type, (void **)ptr, sizeof(**(ptr)))
+ 
+ /* ----------------------------------------------------------------------- */
+ 
+diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
+index c704357775fc..2af7bb3ee57d 100644
+--- a/include/sound/pcm_params.h
++++ b/include/sound/pcm_params.h
+@@ -247,11 +247,13 @@ static inline int snd_interval_empty(const struct snd_interval *i)
+ static inline int snd_interval_single(const struct snd_interval *i)
+ {
+ 	return (i->min == i->max || 
+-		(i->min + 1 == i->max && i->openmax));
++		(i->min + 1 == i->max && (i->openmin || i->openmax)));
+ }
+ 
+ static inline int snd_interval_value(const struct snd_interval *i)
+ {
++	if (i->openmin && !i->openmax)
++		return i->max;
+ 	return i->min;
+ }
+ 
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index 20df2fd9b150..b4c768de3344 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -17,6 +17,8 @@
+  * 08/12/11 beckyb	Add highmem support
+  */
+ 
++#define pr_fmt(fmt) "software IO TLB: " fmt
++
+ #include <linux/cache.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/mm.h>
+@@ -177,20 +179,16 @@ static bool no_iotlb_memory;
+ void swiotlb_print_info(void)
+ {
+ 	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+-	unsigned char *vstart, *vend;
+ 
+ 	if (no_iotlb_memory) {
+-		pr_warn("software IO TLB: No low mem\n");
++		pr_warn("No low mem\n");
+ 		return;
+ 	}
+ 
+-	vstart = phys_to_virt(io_tlb_start);
+-	vend = phys_to_virt(io_tlb_end);
+-
+-	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
++	pr_info("mapped [mem %#010llx-%#010llx] (%luMB)\n",
+ 	       (unsigned long long)io_tlb_start,
+ 	       (unsigned long long)io_tlb_end,
+-	       bytes >> 20, vstart, vend - 1);
++	       bytes >> 20);
+ }
+ 
+ /*
+@@ -290,7 +288,7 @@ swiotlb_init(int verbose)
+ 	if (io_tlb_start)
+ 		memblock_free_early(io_tlb_start,
+ 				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+-	pr_warn("Cannot allocate SWIOTLB buffer");
++	pr_warn("Cannot allocate buffer");
+ 	no_iotlb_memory = true;
+ }
+ 
+@@ -332,8 +330,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
+ 		return -ENOMEM;
+ 	}
+ 	if (order != get_order(bytes)) {
+-		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+-		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
++		pr_warn("only able to allocate %ld MB\n",
++			(PAGE_SIZE << order) >> 20);
+ 		io_tlb_nslabs = SLABS_PER_PAGE << order;
+ 	}
+ 	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
+@@ -770,7 +768,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ 
+ err_warn:
+ 	if (warn && printk_ratelimit()) {
+-		pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
++		pr_warn("coherent allocation failed for device %s size=%zu\n",
+ 			dev_name(hwdev), size);
+ 		dump_stack();
+ 	}
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index e7008688769b..71d371f97138 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -838,6 +838,7 @@ static ssize_t read_firmware_show(struct device *dev,
+ 	if (req->fw->size > PAGE_SIZE) {
+ 		pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
+ 		rc = -EINVAL;
++		goto out;
+ 	}
+ 	memcpy(buf, req->fw->data, req->fw->size);
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 2074f424dabf..6be91a1a00d9 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3862,8 +3862,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ 	enum compact_result compact_result;
+ 	int compaction_retries;
+ 	int no_progress_loops;
+-	unsigned long alloc_start = jiffies;
+-	unsigned int stall_timeout = 10 * HZ;
+ 	unsigned int cpuset_mems_cookie;
+ 	int reserve_flags;
+ 
+@@ -3983,14 +3981,6 @@ retry:
+ 	if (!can_direct_reclaim)
+ 		goto nopage;
+ 
+-	/* Make sure we know about allocations which stall for too long */
+-	if (time_after(jiffies, alloc_start + stall_timeout)) {
+-		warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
+-			"page allocation stalls for %ums, order:%u",
+-			jiffies_to_msecs(jiffies-alloc_start), order);
+-		stall_timeout += 10 * HZ;
+-	}
+-
+ 	/* Avoid recursion of direct reclaim */
+ 	if (current->flags & PF_MEMALLOC)
+ 		goto nopage;
+diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
+index e92dfedccc16..fbc132f4670e 100644
+--- a/net/batman-adv/bat_v_elp.c
++++ b/net/batman-adv/bat_v_elp.c
+@@ -338,19 +338,21 @@ out:
+  */
+ int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
+ {
++	static const size_t tvlv_padding = sizeof(__be32);
+ 	struct batadv_elp_packet *elp_packet;
+ 	unsigned char *elp_buff;
+ 	u32 random_seqno;
+ 	size_t size;
+ 	int res = -ENOMEM;
+ 
+-	size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
++	size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
+ 	hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
+ 	if (!hard_iface->bat_v.elp_skb)
+ 		goto out;
+ 
+ 	skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
+-	elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
++	elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
++				BATADV_ELP_HLEN + tvlv_padding);
+ 	elp_packet = (struct batadv_elp_packet *)elp_buff;
+ 
+ 	elp_packet->packet_type = BATADV_ELP;
+diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
+index b6abd19ab23e..c6d37d22bd12 100644
+--- a/net/batman-adv/fragmentation.c
++++ b/net/batman-adv/fragmentation.c
+@@ -274,7 +274,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
+ 	kfree(entry);
+ 
+ 	packet = (struct batadv_frag_packet *)skb_out->data;
+-	size = ntohs(packet->total_size);
++	size = ntohs(packet->total_size) + hdr_size;
+ 
+ 	/* Make room for the rest of the fragments. */
+ 	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index e4912858b72c..222c063244f5 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1032,6 +1032,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 	if (local->open_count == 0)
+ 		ieee80211_clear_tx_pending(local);
+ 
++	sdata->vif.bss_conf.beacon_int = 0;
++
+ 	/*
+ 	 * If the interface goes down while suspended, presumably because
+ 	 * the device was unplugged and that happens before our resume,
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index dddd498e1338..9e19ddbcb06e 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1254,6 +1254,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
+ 		return RX_CONTINUE;
+ 
+ 	if (ieee80211_is_ctl(hdr->frame_control) ||
++	    ieee80211_is_nullfunc(hdr->frame_control) ||
+ 	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
+ 	    is_multicast_ether_addr(hdr->addr1))
+ 		return RX_CONTINUE;
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index bdf131ed5ce8..35912270087c 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -953,6 +953,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
+ 			/* Track when last TDLS packet was ACKed */
+ 			if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
+ 				sta->status_stats.last_tdls_pkt_time = jiffies;
++		} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
++			return;
+ 		} else {
+ 			ieee80211_lost_packet(sta, info);
+ 		}
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index a17a56032a21..6b9bf9c027a2 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -435,8 +435,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
+ 	if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
+ 		info->hw_queue = tx->sdata->vif.cab_queue;
+ 
+-	/* no stations in PS mode */
+-	if (!atomic_read(&ps->num_sta_ps))
++	/* no stations in PS mode and no buffered packets */
++	if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
+ 		return TX_CONTINUE;
+ 
+ 	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 9463af4b32e8..1281b967dbf9 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -1736,6 +1736,7 @@ priv_release_snd_buf(struct rpc_rqst *rqstp)
+ 	for (i=0; i < rqstp->rq_enc_pages_num; i++)
+ 		__free_page(rqstp->rq_enc_pages[i]);
+ 	kfree(rqstp->rq_enc_pages);
++	rqstp->rq_release_snd_buf = NULL;
+ }
+ 
+ static int
+@@ -1744,6 +1745,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
+ 	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
+ 	int first, last, i;
+ 
++	if (rqstp->rq_release_snd_buf)
++		rqstp->rq_release_snd_buf(rqstp);
++
+ 	if (snd_buf->page_len == 0) {
+ 		rqstp->rq_enc_pages_num = 0;
+ 		return 0;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index ab3bf36786b6..966ac384c3f4 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -36,6 +36,7 @@
+ #include <sound/timer.h>
+ #include <sound/minors.h>
+ #include <linux/uio.h>
++#include <linux/delay.h>
+ 
+ #include "pcm_local.h"
+ 
+@@ -91,12 +92,12 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
+  * and this may lead to a deadlock when the code path takes read sem
+  * twice (e.g. one in snd_pcm_action_nonatomic() and another in
+  * snd_pcm_stream_lock()).  As a (suboptimal) workaround, let writer to
+- * spin until it gets the lock.
++ * sleep until all the readers are completed without blocking by writer.
+  */
+-static inline void down_write_nonblock(struct rw_semaphore *lock)
++static inline void down_write_nonfifo(struct rw_semaphore *lock)
+ {
+ 	while (!down_write_trylock(lock))
+-		cond_resched();
++		msleep(1);
+ }
+ 
+ /**
+@@ -1935,7 +1936,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
+ 		res = -ENOMEM;
+ 		goto _nolock;
+ 	}
+-	down_write_nonblock(&snd_pcm_link_rwsem);
++	down_write_nonfifo(&snd_pcm_link_rwsem);
+ 	write_lock_irq(&snd_pcm_link_rwlock);
+ 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
+ 	    substream->runtime->status->state != substream1->runtime->status->state ||
+@@ -1982,7 +1983,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
+ 	struct snd_pcm_substream *s;
+ 	int res = 0;
+ 
+-	down_write_nonblock(&snd_pcm_link_rwsem);
++	down_write_nonfifo(&snd_pcm_link_rwsem);
+ 	write_lock_irq(&snd_pcm_link_rwlock);
+ 	if (!snd_pcm_stream_linked(substream)) {
+ 		res = -EALREADY;
+@@ -2337,7 +2338,8 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
+ 
+ static void pcm_release_private(struct snd_pcm_substream *substream)
+ {
+-	snd_pcm_unlink(substream);
++	if (snd_pcm_stream_linked(substream))
++		snd_pcm_unlink(substream);
+ }
+ 
+ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 4e38905bc47d..d8e80b6f5a6b 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2513,6 +2513,10 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* AMD Hudson */
+ 	{ PCI_DEVICE(0x1022, 0x780d),
+ 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
++	/* AMD Stoney */
++	{ PCI_DEVICE(0x1022, 0x157a),
++	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
++			 AZX_DCAPS_PM_RUNTIME },
+ 	/* AMD Raven */
+ 	{ PCI_DEVICE(0x1022, 0x15e3),
+ 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 66b0a124beae..f6136f041a81 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4863,9 +4863,18 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
+ 		{ 0x19, 0x21a11010 }, /* dock mic */
+ 		{ }
+ 	};
++	/* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise
++	 * the speaker output becomes too low by some reason on Thinkpads with
++	 * ALC298 codec
++	 */
++	static hda_nid_t preferred_pairs[] = {
++		0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
++		0
++	};
+ 	struct alc_spec *spec = codec->spec;
+ 
+ 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->gen.preferred_dacs = preferred_pairs;
+ 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+ 		snd_hda_apply_pincfgs(codec, pincfgs);
+ 	} else if (action == HDA_FIXUP_ACT_INIT) {
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 23d1d23aefec..4169c71f8a32 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -644,9 +644,12 @@ static int usb_audio_probe(struct usb_interface *intf,
+ 
+  __error:
+ 	if (chip) {
++		/* chip->active is inside the chip->card object,
++		 * decrement before memory is possibly returned.
++		 */
++		atomic_dec(&chip->active);
+ 		if (!chip->num_interfaces)
+ 			snd_card_free(chip->card);
+-		atomic_dec(&chip->active);
+ 	}
+ 	mutex_unlock(&register_mutex);
+ 	return err;

