From: "Alice Ferrazzi" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 12 Aug 2020 23:30:02 +0000 (UTC)
Message-ID: <1597274967.78e680436a512219cdb68c6285cf4b96c29d835e.alicef@gentoo>
commit: 78e680436a512219cdb68c6285cf4b96c29d835e
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 12 23:29:20 2020 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Aug 12 23:29:27 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=78e68043
Linux patch 5.4.58
Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
0000_README | 4 +
1057_linux-5.4.58.patch | 2905 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2909 insertions(+)
diff --git a/0000_README b/0000_README
index bdf588b..6dd92dc 100644
--- a/0000_README
+++ b/0000_README
@@ -271,6 +271,10 @@ Patch: 1056_linux-5.4.57.patch
From: http://www.kernel.org
Desc: Linux 5.4.57
+Patch: 1057_linux-5.4.58.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.58
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1057_linux-5.4.58.patch b/1057_linux-5.4.58.patch
new file mode 100644
index 0000000..f79f286
--- /dev/null
+++ b/1057_linux-5.4.58.patch
@@ -0,0 +1,2905 @@
+diff --git a/Makefile b/Makefile
+index dd753ef637fd..29948bc4a0d2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 57
++SUBLEVEL = 58
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
+index b68eeff77806..6db06f58deed 100644
+--- a/arch/powerpc/include/asm/kasan.h
++++ b/arch/powerpc/include/asm/kasan.h
+@@ -27,9 +27,11 @@
+
+ #ifdef CONFIG_KASAN
+ void kasan_early_init(void);
++void kasan_mmu_init(void);
+ void kasan_init(void);
+ #else
+ static inline void kasan_init(void) { }
++static inline void kasan_mmu_init(void) { }
+ #endif
+
+ #endif /* __ASSEMBLY */
+diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
+index 68f7446193d1..b04896a88d79 100644
+--- a/arch/powerpc/mm/init_32.c
++++ b/arch/powerpc/mm/init_32.c
+@@ -175,6 +175,8 @@ void __init MMU_init(void)
+ btext_unmap();
+ #endif
+
++ kasan_mmu_init();
++
+ setup_kup();
+
+ /* Shortly after that, the entire linear mapping will be available */
+diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
+index b01d4b72eccf..1cfe57b51d7e 100644
+--- a/arch/powerpc/mm/kasan/kasan_init_32.c
++++ b/arch/powerpc/mm/kasan/kasan_init_32.c
+@@ -129,7 +129,7 @@ static void __init kasan_remap_early_shadow_ro(void)
+ flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
+ }
+
+-static void __init kasan_mmu_init(void)
++void __init kasan_mmu_init(void)
+ {
+ int ret;
+ struct memblock_region *reg;
+@@ -156,8 +156,6 @@ static void __init kasan_mmu_init(void)
+
+ void __init kasan_init(void)
+ {
+- kasan_mmu_init();
+-
+ kasan_remap_early_shadow_ro();
+
+ clear_page(kasan_early_shadow_page);
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 5e6586af21b7..110dd4c2977f 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2984,6 +2984,12 @@ static void binder_transaction(struct binder_proc *proc,
+ goto err_dead_binder;
+ }
+ e->to_node = target_node->debug_id;
++ if (WARN_ON(proc == target_proc)) {
++ return_error = BR_FAILED_REPLY;
++ return_error_param = -EINVAL;
++ return_error_line = __LINE__;
++ goto err_invalid_target_handle;
++ }
+ if (security_binder_transaction(proc->tsk,
+ target_proc->tsk) < 0) {
+ return_error = BR_FAILED_REPLY;
+@@ -3637,10 +3643,17 @@ static int binder_thread_write(struct binder_proc *proc,
+ struct binder_node *ctx_mgr_node;
+ mutex_lock(&context->context_mgr_node_lock);
+ ctx_mgr_node = context->binder_context_mgr_node;
+- if (ctx_mgr_node)
++ if (ctx_mgr_node) {
++ if (ctx_mgr_node->proc == proc) {
++ binder_user_error("%d:%d context manager tried to acquire desc 0\n",
++ proc->pid, thread->pid);
++ mutex_unlock(&context->context_mgr_node_lock);
++ return -EINVAL;
++ }
+ ret = binder_inc_ref_for_node(
+ proc, ctx_mgr_node,
+ strong, NULL, &rdata);
++ }
+ mutex_unlock(&context->context_mgr_node_lock);
+ }
+ if (ret)
+diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
+index d9fd70280482..7f814da3c2d0 100644
+--- a/drivers/atm/atmtcp.c
++++ b/drivers/atm/atmtcp.c
+@@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf)
+ return -EMEDIUMTYPE;
+ }
+ dev_data = PRIV(dev);
+- if (!dev_data->persist) return 0;
++ if (!dev_data->persist) {
++ atm_dev_put(dev);
++ return 0;
++ }
+ dev_data->persist = 0;
+- if (PRIV(dev)->vcc) return 0;
++ if (PRIV(dev)->vcc) {
++ atm_dev_put(dev);
++ return 0;
++ }
+ kfree(dev_data);
+ atm_dev_put(dev);
+ atm_dev_deregister(dev);
+diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
+index 039e0f91dba8..6945c3c96637 100644
+--- a/drivers/firmware/qemu_fw_cfg.c
++++ b/drivers/firmware/qemu_fw_cfg.c
+@@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
+ /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
+ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
+ fw_cfg_sel_ko, "%d", entry->select);
+- if (err)
+- goto err_register;
++ if (err) {
++ kobject_put(&entry->kobj);
++ return err;
++ }
+
+ /* add raw binary content access */
+ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
+@@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
+
+ err_add_raw:
+ kobject_del(&entry->kobj);
+-err_register:
+ kfree(entry);
+ return err;
+ }
+diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
+index 02a9c1ed165b..fa50ab2523d4 100644
+--- a/drivers/gpu/drm/bochs/bochs_kms.c
++++ b/drivers/gpu/drm/bochs/bochs_kms.c
+@@ -194,6 +194,7 @@ int bochs_kms_init(struct bochs_device *bochs)
+ bochs->dev->mode_config.preferred_depth = 24;
+ bochs->dev->mode_config.prefer_shadow = 0;
+ bochs->dev->mode_config.prefer_shadow_fbdev = 1;
++ bochs->dev->mode_config.fbdev_use_iomem = true;
+ bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
+
+ bochs->dev->mode_config.funcs = &bochs_mode_funcs;
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 8d193a58363d..6b8502bcf0fd 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -390,7 +390,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
+ unsigned int y;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+- memcpy(dst, src, len);
++ if (!fb_helper->dev->mode_config.fbdev_use_iomem)
++ memcpy(dst, src, len);
++ else
++ memcpy_toio((void __iomem *)dst, src, len);
++
+ src += fb->pitches[0];
+ dst += fb->pitches[0];
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index f439f0a5b43a..5cf2381f667e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
+ struct nouveau_framebuffer *fb;
+ struct nouveau_channel *chan;
+ struct nouveau_bo *nvbo;
+- struct drm_mode_fb_cmd2 mode_cmd;
++ struct drm_mode_fb_cmd2 mode_cmd = {};
+ int ret;
+
+ mode_cmd.width = sizes->surface_width;
+@@ -592,6 +592,7 @@ fini:
+ drm_fb_helper_fini(&fbcon->helper);
+ free:
+ kfree(fbcon);
++ drm->fbcon = NULL;
+ return ret;
+ }
+
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index c8296d5e74c3..501c43c5851d 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -1354,6 +1354,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
+ { CHANNELMSG_19, 0, NULL },
+ { CHANNELMSG_20, 0, NULL },
+ { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL },
++ { CHANNELMSG_22, 0, NULL },
++ { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL },
+ };
+
+ /*
+@@ -1365,25 +1367,16 @@ void vmbus_onmessage(void *context)
+ {
+ struct hv_message *msg = context;
+ struct vmbus_channel_message_header *hdr;
+- int size;
+
+ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+- size = msg->header.payload_size;
+
+ trace_vmbus_on_message(hdr);
+
+- if (hdr->msgtype >= CHANNELMSG_COUNT) {
+- pr_err("Received invalid channel message type %d size %d\n",
+- hdr->msgtype, size);
+- print_hex_dump_bytes("", DUMP_PREFIX_NONE,
+- (unsigned char *)msg->u.payload, size);
+- return;
+- }
+-
+- if (channel_message_table[hdr->msgtype].message_handler)
+- channel_message_table[hdr->msgtype].message_handler(hdr);
+- else
+- pr_err("Unhandled channel message type %d\n", hdr->msgtype);
++ /*
++ * vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go
++ * out of bound and the message_handler pointer can not be NULL.
++ */
++ channel_message_table[hdr->msgtype].message_handler(hdr);
+ }
+
+ /*
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 160ff640485b..24c38e44ed3b 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1073,6 +1073,10 @@ void vmbus_on_msg_dpc(unsigned long data)
+ }
+
+ entry = &channel_message_table[hdr->msgtype];
++
++ if (!entry->message_handler)
++ goto msg_handled;
++
+ if (entry->handler_type == VMHT_BLOCKING) {
+ ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (ctx == NULL)
+diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
+index 5427f047faf0..1589179d5eb9 100644
+--- a/drivers/i2c/i2c-core-slave.c
++++ b/drivers/i2c/i2c-core-slave.c
+@@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
+ {
+ int ret;
+
+- if (!client || !slave_cb) {
+- WARN(1, "insufficient data\n");
++ if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n"))
+ return -EINVAL;
+- }
+
+ if (!(client->flags & I2C_CLIENT_SLAVE))
+ dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
+@@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client)
+ {
+ int ret;
+
++ if (IS_ERR_OR_NULL(client))
++ return -EINVAL;
++
+ if (!client->adapter->algo->unreg_slave) {
+ dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
+ return -EOPNOTSUPP;
+diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
+index b3044c9a8120..465c3755cf2e 100644
+--- a/drivers/leds/leds-88pm860x.c
++++ b/drivers/leds/leds-88pm860x.c
+@@ -203,21 +203,33 @@ static int pm860x_led_probe(struct platform_device *pdev)
+ data->cdev.brightness_set_blocking = pm860x_led_set;
+ mutex_init(&data->lock);
+
+- ret = devm_led_classdev_register(chip->dev, &data->cdev);
++ ret = led_classdev_register(chip->dev, &data->cdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
+ return ret;
+ }
+ pm860x_led_set(&data->cdev, 0);
++
++ platform_set_drvdata(pdev, data);
++
+ return 0;
+ }
+
++static int pm860x_led_remove(struct platform_device *pdev)
++{
++ struct pm860x_led *data = platform_get_drvdata(pdev);
++
++ led_classdev_unregister(&data->cdev);
++
++ return 0;
++}
+
+ static struct platform_driver pm860x_led_driver = {
+ .driver = {
+ .name = "88pm860x-led",
+ },
+ .probe = pm860x_led_probe,
++ .remove = pm860x_led_remove,
+ };
+
+ module_platform_driver(pm860x_led_driver);
+diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
+index ed1b303f699f..2b5fb00438a2 100644
+--- a/drivers/leds/leds-da903x.c
++++ b/drivers/leds/leds-da903x.c
+@@ -110,12 +110,23 @@ static int da903x_led_probe(struct platform_device *pdev)
+ led->flags = pdata->flags;
+ led->master = pdev->dev.parent;
+
+- ret = devm_led_classdev_register(led->master, &led->cdev);
++ ret = led_classdev_register(led->master, &led->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register LED %d\n", id);
+ return ret;
+ }
+
++ platform_set_drvdata(pdev, led);
++
++ return 0;
++}
++
++static int da903x_led_remove(struct platform_device *pdev)
++{
++ struct da903x_led *led = platform_get_drvdata(pdev);
++
++ led_classdev_unregister(&led->cdev);
++
+ return 0;
+ }
+
+@@ -124,6 +135,7 @@ static struct platform_driver da903x_led_driver = {
+ .name = "da903x-led",
+ },
+ .probe = da903x_led_probe,
++ .remove = da903x_led_remove,
+ };
+
+ module_platform_driver(da903x_led_driver);
+diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
+index 9504ad405aef..b3edee703193 100644
+--- a/drivers/leds/leds-lm3533.c
++++ b/drivers/leds/leds-lm3533.c
+@@ -694,7 +694,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, led);
+
+- ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev);
++ ret = led_classdev_register(pdev->dev.parent, &led->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
+ return ret;
+@@ -704,13 +704,18 @@ static int lm3533_led_probe(struct platform_device *pdev)
+
+ ret = lm3533_led_setup(led, pdata);
+ if (ret)
+- return ret;
++ goto err_deregister;
+
+ ret = lm3533_ctrlbank_enable(&led->cb);
+ if (ret)
+- return ret;
++ goto err_deregister;
+
+ return 0;
++
++err_deregister:
++ led_classdev_unregister(&led->cdev);
++
++ return ret;
+ }
+
+ static int lm3533_led_remove(struct platform_device *pdev)
+@@ -720,6 +725,7 @@ static int lm3533_led_remove(struct platform_device *pdev)
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&led->cb);
++ led_classdev_unregister(&led->cdev);
+
+ return 0;
+ }
+diff --git a/drivers/leds/leds-lm36274.c b/drivers/leds/leds-lm36274.c
+index 836b60c9a2b8..db842eeb7ca2 100644
+--- a/drivers/leds/leds-lm36274.c
++++ b/drivers/leds/leds-lm36274.c
+@@ -133,7 +133,7 @@ static int lm36274_probe(struct platform_device *pdev)
+ lm36274_data->pdev = pdev;
+ lm36274_data->dev = lmu->dev;
+ lm36274_data->regmap = lmu->regmap;
+- dev_set_drvdata(&pdev->dev, lm36274_data);
++ platform_set_drvdata(pdev, lm36274_data);
+
+ ret = lm36274_parse_dt(lm36274_data);
+ if (ret) {
+@@ -147,8 +147,16 @@ static int lm36274_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- return devm_led_classdev_register(lm36274_data->dev,
+- &lm36274_data->led_dev);
++ return led_classdev_register(lm36274_data->dev, &lm36274_data->led_dev);
++}
++
++static int lm36274_remove(struct platform_device *pdev)
++{
++ struct lm36274 *lm36274_data = platform_get_drvdata(pdev);
++
++ led_classdev_unregister(&lm36274_data->led_dev);
++
++ return 0;
+ }
+
+ static const struct of_device_id of_lm36274_leds_match[] = {
+@@ -159,6 +167,7 @@ MODULE_DEVICE_TABLE(of, of_lm36274_leds_match);
+
+ static struct platform_driver lm36274_driver = {
+ .probe = lm36274_probe,
++ .remove = lm36274_remove,
+ .driver = {
+ .name = "lm36274-leds",
+ },
+diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
+index 082df7f1dd90..67f4235cb28a 100644
+--- a/drivers/leds/leds-wm831x-status.c
++++ b/drivers/leds/leds-wm831x-status.c
+@@ -269,12 +269,23 @@ static int wm831x_status_probe(struct platform_device *pdev)
+ drvdata->cdev.blink_set = wm831x_status_blink_set;
+ drvdata->cdev.groups = wm831x_status_groups;
+
+- ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev);
++ ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
+ return ret;
+ }
+
++ platform_set_drvdata(pdev, drvdata);
++
++ return 0;
++}
++
++static int wm831x_status_remove(struct platform_device *pdev)
++{
++ struct wm831x_status *drvdata = platform_get_drvdata(pdev);
++
++ led_classdev_unregister(&drvdata->cdev);
++
+ return 0;
+ }
+
+@@ -283,6 +294,7 @@ static struct platform_driver wm831x_status_driver = {
+ .name = "wm831x-status",
+ },
+ .probe = wm831x_status_probe,
++ .remove = wm831x_status_remove,
+ };
+
+ module_platform_driver(wm831x_status_driver);
+diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
+index 975aed94f06c..48832f9b215c 100644
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -354,9 +354,6 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
+ uint32_t retlen;
+ int ret = 0;
+
+- if (!(file->f_mode & FMODE_WRITE))
+- return -EPERM;
+-
+ if (length > 4096)
+ return -EINVAL;
+
+@@ -641,6 +638,48 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
+
+ pr_debug("MTD_ioctl\n");
+
++ /*
++ * Check the file mode to require "dangerous" commands to have write
++ * permissions.
++ */
++ switch (cmd) {
++ /* "safe" commands */
++ case MEMGETREGIONCOUNT:
++ case MEMGETREGIONINFO:
++ case MEMGETINFO:
++ case MEMREADOOB:
++ case MEMREADOOB64:
++ case MEMLOCK:
++ case MEMUNLOCK:
++ case MEMISLOCKED:
++ case MEMGETOOBSEL:
++ case MEMGETBADBLOCK:
++ case MEMSETBADBLOCK:
++ case OTPSELECT:
++ case OTPGETREGIONCOUNT:
++ case OTPGETREGIONINFO:
++ case OTPLOCK:
++ case ECCGETLAYOUT:
++ case ECCGETSTATS:
++ case MTDFILEMODE:
++ case BLKPG:
++ case BLKRRPART:
++ break;
++
++ /* "dangerous" commands */
++ case MEMERASE:
++ case MEMERASE64:
++ case MEMWRITEOOB:
++ case MEMWRITEOOB64:
++ case MEMWRITE:
++ if (!(file->f_mode & FMODE_WRITE))
++ return -EPERM;
++ break;
++
++ default:
++ return -ENOTTY;
++ }
++
+ switch (cmd) {
+ case MEMGETREGIONCOUNT:
+ if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
+@@ -688,9 +727,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
+ {
+ struct erase_info *erase;
+
+- if(!(file->f_mode & FMODE_WRITE))
+- return -EPERM;
+-
+ erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
+ if (!erase)
+ ret = -ENOMEM;
+@@ -983,9 +1019,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
+ ret = 0;
+ break;
+ }
+-
+- default:
+- ret = -ENOTTY;
+ }
+
+ return ret;
+@@ -1029,6 +1062,11 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
++ if (!(file->f_mode & FMODE_WRITE)) {
++ ret = -EPERM;
++ break;
++ }
++
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index 40a44dcb3d9b..f414f5651dbd 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -2047,11 +2047,11 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+ /* Save message data locally to prevent them from
+ * being overwritten by next ndo_set_rx_mode call().
+ */
+- spin_lock(&nic->rx_mode_wq_lock);
++ spin_lock_bh(&nic->rx_mode_wq_lock);
+ mode = vf_work->mode;
+ mc = vf_work->mc;
+ vf_work->mc = NULL;
+- spin_unlock(&nic->rx_mode_wq_lock);
++ spin_unlock_bh(&nic->rx_mode_wq_lock);
+
+ __nicvf_set_rx_mode_task(mode, mc, nic);
+ }
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+index 3177dd8ede8e..7a248cc1055a 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+@@ -2090,7 +2090,7 @@ close:
+ free:
+ fsl_mc_object_free(dpcon);
+
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ static void free_dpcon(struct dpaa2_eth_priv *priv,
+@@ -2114,8 +2114,8 @@ alloc_channel(struct dpaa2_eth_priv *priv)
+ return NULL;
+
+ channel->dpcon = setup_dpcon(priv);
+- if (IS_ERR_OR_NULL(channel->dpcon)) {
+- err = PTR_ERR_OR_ZERO(channel->dpcon);
++ if (IS_ERR(channel->dpcon)) {
++ err = PTR_ERR(channel->dpcon);
+ goto err_setup;
+ }
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index ed7e667d7eb2..3e41b20ed8eb 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6194,9 +6194,18 @@ static void igb_reset_task(struct work_struct *work)
+ struct igb_adapter *adapter;
+ adapter = container_of(work, struct igb_adapter, reset_task);
+
++ rtnl_lock();
++ /* If we're already down or resetting, just bail */
++ if (test_bit(__IGB_DOWN, &adapter->state) ||
++ test_bit(__IGB_RESETTING, &adapter->state)) {
++ rtnl_unlock();
++ return;
++ }
++
+ igb_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
+ igb_reinit_locked(adapter);
++ rtnl_unlock();
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 997dc811382a..d01b3a1b40f4 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ return 0;
+ }
+
+-static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
++static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
++ phy_interface_t interface, int speed)
+ {
+ u32 val;
+ int ret;
+
++ if (interface == PHY_INTERFACE_MODE_TRGMII) {
++ mtk_w32(eth, TRGMII_MODE, INTF_MODE);
++ val = 500000000;
++ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
++ if (ret)
++ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
++ return;
++ }
++
+ val = (speed == SPEED_1000) ?
+ INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
+ mtk_w32(eth, val, INTF_MODE);
+@@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
+ state->interface))
+ goto err_phy;
+ } else {
+- if (state->interface !=
+- PHY_INTERFACE_MODE_TRGMII)
+- mtk_gmac0_rgmii_adjust(mac->hw,
+- state->speed);
++ mtk_gmac0_rgmii_adjust(mac->hw,
++ state->interface,
++ state->speed);
+
+ /* mt7623_pad_clk_setup */
+ for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+@@ -2869,6 +2878,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+ eth->netdev[id]->irq = eth->irq[0];
+ eth->netdev[id]->dev.of_node = np;
+
++ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
++
+ return 0;
+
+ free_netdev:
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index ca16ae8c8332..24bb721a12bc 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -531,12 +531,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
+ u32 hash;
+ struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
+
+- /* if VF is present and up then redirect packets
+- * already called with rcu_read_lock_bh
++ /* If VF is present and up then redirect packets to it.
++ * Skip the VF if it is marked down or has no carrier.
++ * If netpoll is in uses, then VF can not be used either.
+ */
+ vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
+ if (vf_netdev && netif_running(vf_netdev) &&
+- !netpoll_tx_running(net))
++ netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
+ return netvsc_vf_xmit(net, vf_netdev, skb);
+
+ /* We will atmost need two pages to describe the rndis
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 66a8b835aa94..7449b97a3c89 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2260,12 +2260,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
+
+ minor = get_free_serial_index();
+ if (minor < 0)
+- goto exit;
++ goto exit2;
+
+ /* register our minor number */
+ serial->parent->dev = tty_port_register_device_attr(&serial->port,
+ tty_drv, minor, &serial->parent->interface->dev,
+ serial->parent, hso_serial_dev_groups);
++ if (IS_ERR(serial->parent->dev))
++ goto exit2;
+
+ /* fill in specific data for later use */
+ serial->minor = minor;
+@@ -2310,6 +2312,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
+ return 0;
+ exit:
+ hso_serial_tty_unregister(serial);
++exit2:
+ hso_serial_common_free(serial);
+ return -1;
+ }
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 1da99abc6ed1..71cc5b63d8ce 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -377,10 +377,6 @@ struct lan78xx_net {
+ struct tasklet_struct bh;
+ struct delayed_work wq;
+
+- struct usb_host_endpoint *ep_blkin;
+- struct usb_host_endpoint *ep_blkout;
+- struct usb_host_endpoint *ep_intr;
+-
+ int msg_enable;
+
+ struct urb *urb_intr;
+@@ -2868,78 +2864,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
+ return NETDEV_TX_OK;
+ }
+
+-static int
+-lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
+-{
+- int tmp;
+- struct usb_host_interface *alt = NULL;
+- struct usb_host_endpoint *in = NULL, *out = NULL;
+- struct usb_host_endpoint *status = NULL;
+-
+- for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
+- unsigned ep;
+-
+- in = NULL;
+- out = NULL;
+- status = NULL;
+- alt = intf->altsetting + tmp;
+-
+- for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
+- struct usb_host_endpoint *e;
+- int intr = 0;
+-
+- e = alt->endpoint + ep;
+- switch (e->desc.bmAttributes) {
+- case USB_ENDPOINT_XFER_INT:
+- if (!usb_endpoint_dir_in(&e->desc))
+- continue;
+- intr = 1;
+- /* FALLTHROUGH */
+- case USB_ENDPOINT_XFER_BULK:
+- break;
+- default:
+- continue;
+- }
+- if (usb_endpoint_dir_in(&e->desc)) {
+- if (!intr && !in)
+- in = e;
+- else if (intr && !status)
+- status = e;
+- } else {
+- if (!out)
+- out = e;
+- }
+- }
+- if (in && out)
+- break;
+- }
+- if (!alt || !in || !out)
+- return -EINVAL;
+-
+- dev->pipe_in = usb_rcvbulkpipe(dev->udev,
+- in->desc.bEndpointAddress &
+- USB_ENDPOINT_NUMBER_MASK);
+- dev->pipe_out = usb_sndbulkpipe(dev->udev,
+- out->desc.bEndpointAddress &
+- USB_ENDPOINT_NUMBER_MASK);
+- dev->ep_intr = status;
+-
+- return 0;
+-}
+-
+ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
+ {
+ struct lan78xx_priv *pdata = NULL;
+ int ret;
+ int i;
+
+- ret = lan78xx_get_endpoints(dev, intf);
+- if (ret) {
+- netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
+- ret);
+- return ret;
+- }
+-
+ dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
+
+ pdata = (struct lan78xx_priv *)(dev->data[0]);
+@@ -3708,6 +3638,7 @@ static void lan78xx_stat_monitor(struct timer_list *t)
+ static int lan78xx_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+ {
++ struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
+ struct lan78xx_net *dev;
+ struct net_device *netdev;
+ struct usb_device *udev;
+@@ -3756,6 +3687,34 @@ static int lan78xx_probe(struct usb_interface *intf,
+
+ mutex_init(&dev->stats.access_lock);
+
++ if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
++ ret = -ENODEV;
++ goto out2;
++ }
++
++ dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
++ ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
++ if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
++ ret = -ENODEV;
++ goto out2;
++ }
++
++ dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
++ ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
++ if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
++ ret = -ENODEV;
++ goto out2;
++ }
++
++ ep_intr = &intf->cur_altsetting->endpoint[2];
++ if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
++ ret = -ENODEV;
++ goto out2;
++ }
++
++ dev->pipe_intr = usb_rcvintpipe(dev->udev,
++ usb_endpoint_num(&ep_intr->desc));
++
+ ret = lan78xx_bind(dev, intf);
+ if (ret < 0)
+ goto out2;
+@@ -3767,23 +3726,7 @@ static int lan78xx_probe(struct usb_interface *intf,
+ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+ netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
+
+- if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
+- ret = -ENODEV;
+- goto out3;
+- }
+-
+- dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
+- dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
+- dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
+-
+- dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+- dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+-
+- dev->pipe_intr = usb_rcvintpipe(dev->udev,
+- dev->ep_intr->desc.bEndpointAddress &
+- USB_ENDPOINT_NUMBER_MASK);
+- period = dev->ep_intr->desc.bInterval;
+-
++ period = ep_intr->desc.bInterval;
+ maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+ buf = kmalloc(maxp, GFP_KERNEL);
+ if (buf) {
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index b49b6f0cee50..f9edc76580d9 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1225,6 +1225,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ struct vxlan_fdb *f;
+
++ rcu_read_lock();
+ hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
+ struct vxlan_rdst *rd;
+
+@@ -1237,12 +1238,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI, rd);
+- if (err < 0)
++ if (err < 0) {
++ rcu_read_unlock();
+ goto out;
++ }
+ skip:
+ *idx += 1;
+ }
+ }
++ rcu_read_unlock();
+ }
+ out:
+ return err;
+@@ -2546,7 +2550,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ ndst = &rt->dst;
+ skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
+
+- tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
++ tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+ err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
+ vni, md, flags, udp_sum);
+@@ -2586,7 +2590,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+
+ skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
+
+- tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
++ tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ ttl = ttl ? : ip6_dst_hoplimit(ndst);
+ skb_scrub_packet(skb, xnet);
+ err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index a13cae190196..ee7669f23cff 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3140,6 +3140,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++ { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
++ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
+ .driver_data = NVME_QUIRK_SINGLE_VECTOR },
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index ac93f5a0398e..b71e753419c2 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -181,13 +181,6 @@
+
+ #define AFI_PEXBIAS_CTRL_0 0x168
+
+-#define RP_PRIV_XP_DL 0x00000494
+-#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1)
+-
+-#define RP_RX_HDR_LIMIT 0x00000e00
+-#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8)
+-#define RP_RX_HDR_LIMIT_PW (0x0e << 8)
+-
+ #define RP_ECTL_2_R1 0x00000e84
+ #define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff
+
+@@ -323,7 +316,6 @@ struct tegra_pcie_soc {
+ bool program_uphy;
+ bool update_clamp_threshold;
+ bool program_deskew_time;
+- bool raw_violation_fixup;
+ bool update_fc_timer;
+ bool has_cache_bars;
+ struct {
+@@ -669,23 +661,6 @@ static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
+ writel(value, port->base + RP_VEND_CTL0);
+ }
+
+- /* Fixup for read after write violation. */
+- if (soc->raw_violation_fixup) {
+- value = readl(port->base + RP_RX_HDR_LIMIT);
+- value &= ~RP_RX_HDR_LIMIT_PW_MASK;
+- value |= RP_RX_HDR_LIMIT_PW;
+- writel(value, port->base + RP_RX_HDR_LIMIT);
+-
+- value = readl(port->base + RP_PRIV_XP_DL);
+- value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD;
+- writel(value, port->base + RP_PRIV_XP_DL);
+-
+- value = readl(port->base + RP_VEND_XP);
+- value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
+- value |= soc->update_fc_threshold;
+- writel(value, port->base + RP_VEND_XP);
+- }
+-
+ if (soc->update_fc_timer) {
+ value = readl(port->base + RP_VEND_XP);
+ value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
+@@ -2511,7 +2486,6 @@ static const struct tegra_pcie_soc tegra20_pcie = {
+ .program_uphy = true,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+- .raw_violation_fixup = false,
+ .update_fc_timer = false,
+ .has_cache_bars = true,
+ .ectl.enable = false,
+@@ -2541,7 +2515,6 @@ static const struct tegra_pcie_soc tegra30_pcie = {
+ .program_uphy = true,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+- .raw_violation_fixup = false,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
+@@ -2554,8 +2527,6 @@ static const struct tegra_pcie_soc tegra124_pcie = {
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x44ac44ac,
+- /* FC threshold is bit[25:18] */
+- .update_fc_threshold = 0x03fc0000,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+@@ -2565,7 +2536,6 @@ static const struct tegra_pcie_soc tegra124_pcie = {
+ .program_uphy = true,
+ .update_clamp_threshold = true,
+ .program_deskew_time = false,
+- .raw_violation_fixup = true,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
+@@ -2589,7 +2559,6 @@ static const struct tegra_pcie_soc tegra210_pcie = {
+ .program_uphy = true,
+ .update_clamp_threshold = true,
+ .program_deskew_time = true,
+- .raw_violation_fixup = false,
+ .update_fc_timer = true,
+ .has_cache_bars = false,
+ .ectl = {
+@@ -2631,7 +2600,6 @@ static const struct tegra_pcie_soc tegra186_pcie = {
+ .program_uphy = false,
+ .update_clamp_threshold = false,
+ .program_deskew_time = false,
+- .raw_violation_fixup = false,
+ .update_fc_timer = false,
+ .has_cache_bars = false,
+ .ectl.enable = false,
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index c6695354b123..19b0cc5ea33f 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -95,6 +95,15 @@ static DEFINE_MUTEX(ashmem_mutex);
+ static struct kmem_cache *ashmem_area_cachep __read_mostly;
+ static struct kmem_cache *ashmem_range_cachep __read_mostly;
+
++/*
++ * A separate lockdep class for the backing shmem inodes to resolve the lockdep
++ * warning about the race between kswapd taking fs_reclaim before inode_lock
++ * and write syscall taking inode_lock and then fs_reclaim.
++ * Note that such race is impossible because ashmem does not support write
++ * syscalls operating on the backing shmem.
++ */
++static struct lock_class_key backing_shmem_inode_class;
++
+ static inline unsigned long range_size(struct ashmem_range *range)
+ {
+ return range->pgend - range->pgstart + 1;
+@@ -396,6 +405,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ if (!asma->file) {
+ char *name = ASHMEM_NAME_DEF;
+ struct file *vmfile;
++ struct inode *inode;
+
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+ name = asma->name;
+@@ -407,6 +417,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+ goto out;
+ }
+ vmfile->f_mode |= FMODE_LSEEK;
++ inode = file_inode(vmfile);
++ lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class);
+ asma->file = vmfile;
+ /*
+ * override mmap operation of the vmfile so that it can't be
+diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
+index 1ec3b237212e..7cee7b4d5270 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
++++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
+@@ -1729,9 +1729,11 @@ int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
+ if ((ndisauthmode == Ndis802_11AuthModeWPA) ||
+ (ndisauthmode == Ndis802_11AuthModeWPAPSK))
+ authmode = _WPA_IE_ID_;
+- if ((ndisauthmode == Ndis802_11AuthModeWPA2) ||
++ else if ((ndisauthmode == Ndis802_11AuthModeWPA2) ||
+ (ndisauthmode == Ndis802_11AuthModeWPA2PSK))
+ authmode = _WPA2_IE_ID_;
++ else
++ authmode = 0x0;
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
+ memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len);
+diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
+index 40145c0338e4..42c0a3c947f1 100644
+--- a/drivers/staging/rtl8712/hal_init.c
++++ b/drivers/staging/rtl8712/hal_init.c
+@@ -33,7 +33,6 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
+ {
+ struct _adapter *adapter = context;
+
+- complete(&adapter->rtl8712_fw_ready);
+ if (!firmware) {
+ struct usb_device *udev = adapter->dvobjpriv.pusbdev;
+ struct usb_interface *usb_intf = adapter->pusb_intf;
+@@ -41,11 +40,13 @@ static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
+ dev_err(&udev->dev, "r8712u: Firmware request failed\n");
+ usb_put_dev(udev);
+ usb_set_intfdata(usb_intf, NULL);
++ complete(&adapter->rtl8712_fw_ready);
+ return;
+ }
+ adapter->fw = firmware;
+ /* firmware available - start netdev */
+ register_netdev(adapter->pnetdev);
++ complete(&adapter->rtl8712_fw_ready);
+ }
+
+ static const char firmware_file[] = "rtlwifi/rtl8712u.bin";
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index a87562f632a7..2fcd65260f4c 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -595,13 +595,17 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
+ if (pnetdev) {
+ struct _adapter *padapter = netdev_priv(pnetdev);
+
+- usb_set_intfdata(pusb_intf, NULL);
+- release_firmware(padapter->fw);
+ /* never exit with a firmware callback pending */
+ wait_for_completion(&padapter->rtl8712_fw_ready);
++ pnetdev = usb_get_intfdata(pusb_intf);
++ usb_set_intfdata(pusb_intf, NULL);
++ if (!pnetdev)
++ goto firmware_load_fail;
++ release_firmware(padapter->fw);
+ if (drvpriv.drv_registered)
+ padapter->surprise_removed = true;
+- unregister_netdev(pnetdev); /* will call netdev_close() */
++ if (pnetdev->reg_state != NETREG_UNINITIALIZED)
++ unregister_netdev(pnetdev); /* will call netdev_close() */
+ flush_scheduled_work();
+ udelay(1);
+ /* Stop driver mlme relation timer */
+@@ -614,6 +618,7 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
+ */
+ usb_put_dev(udev);
+ }
++firmware_load_fail:
+ /* If we didn't unplug usb dongle and remove/insert module, driver
+ * fails on sitesurvey for the first time when device is up.
+ * Reset usb port for sitesurvey fail issue.
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 07741ab9a46a..bbd616324faa 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -55,7 +55,10 @@
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
+ #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
++#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042
+ #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
++#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242
++#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142
+
+ static const char hcd_name[] = "xhci_hcd";
+
+@@ -245,13 +248,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+- pdev->device == 0x1042)
++ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+- pdev->device == 0x1142)
++ pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+- pdev->device == 0x2142)
++ (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI ||
++ pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI))
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index dce20301e367..103c69c692ba 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -2,8 +2,9 @@
+ /*
+ * Native support for the I/O-Warrior USB devices
+ *
+- * Copyright (c) 2003-2005 Code Mercenaries GmbH
+- * written by Christian Lucht <lucht@codemercs.com>
++ * Copyright (c) 2003-2005, 2020 Code Mercenaries GmbH
++ * written by Christian Lucht <lucht@codemercs.com> and
++ * Christoph Jung <jung@codemercs.com>
+ *
+ * based on
+
+@@ -802,14 +803,28 @@ static int iowarrior_probe(struct usb_interface *interface,
+
+ /* we have to check the report_size often, so remember it in the endianness suitable for our machine */
+ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
+- if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
+- ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
+- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
+- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
+- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
+- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
+- /* IOWarrior56 has wMaxPacketSize different from report size */
+- dev->report_size = 7;
++
++ /*
++ * Some devices need the report size to be different than the
++ * endpoint size.
++ */
++ if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) {
++ switch (dev->product_id) {
++ case USB_DEVICE_ID_CODEMERCS_IOW56:
++ case USB_DEVICE_ID_CODEMERCS_IOW56AM:
++ dev->report_size = 7;
++ break;
++
++ case USB_DEVICE_ID_CODEMERCS_IOW28:
++ case USB_DEVICE_ID_CODEMERCS_IOW28L:
++ dev->report_size = 4;
++ break;
++
++ case USB_DEVICE_ID_CODEMERCS_IOW100:
++ dev->report_size = 13;
++ break;
++ }
++ }
+
+ /* create the urb and buffer for reading */
+ dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index d147feae83e6..0f60363c1bbc 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
+ {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
+ {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
+ {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
++ {DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */
+ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
+ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
+ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index bfaa9ec4bc1f..e079b910feb2 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -251,6 +251,10 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
+ p = (void *) (c->vc_origin + t * c->vc_size_row);
+
+ while (count--) {
++ if ((vgacon_scrollback_cur->tail + c->vc_size_row) >
++ vgacon_scrollback_cur->size)
++ vgacon_scrollback_cur->tail = 0;
++
+ scr_memcpyw(vgacon_scrollback_cur->data +
+ vgacon_scrollback_cur->tail,
+ p, c->vc_size_row);
+diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+index 7252d22dd117..bfc5c4c5a26a 100644
+--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
++++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+@@ -833,7 +833,7 @@ static const struct dss_features omap34xx_dss_feats = {
+ };
+
+ static const struct dss_features omap3630_dss_feats = {
+- .fck_div_max = 32,
++ .fck_div_max = 31,
+ .dss_fck_multiplier = 1,
+ .parent_clk_name = "dpll4_ck",
+ .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index e0200406765c..be3d595a607f 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -279,6 +279,7 @@ struct sqe_submit {
+ bool has_user;
+ bool needs_lock;
+ bool needs_fixed_file;
++ u8 opcode;
+ };
+
+ /*
+@@ -505,7 +506,7 @@ static inline void io_queue_async_work(struct io_ring_ctx *ctx,
+ int rw = 0;
+
+ if (req->submit.sqe) {
+- switch (req->submit.sqe->opcode) {
++ switch (req->submit.opcode) {
+ case IORING_OP_WRITEV:
+ case IORING_OP_WRITE_FIXED:
+ rw = !(req->rw.ki_flags & IOCB_DIRECT);
+@@ -1254,23 +1255,15 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
+ }
+
+ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
+- const struct sqe_submit *s, struct iovec **iovec,
++ struct io_kiocb *req, struct iovec **iovec,
+ struct iov_iter *iter)
+ {
+- const struct io_uring_sqe *sqe = s->sqe;
++ const struct io_uring_sqe *sqe = req->submit.sqe;
+ void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ size_t sqe_len = READ_ONCE(sqe->len);
+ u8 opcode;
+
+- /*
+- * We're reading ->opcode for the second time, but the first read
+- * doesn't care whether it's _FIXED or not, so it doesn't matter
+- * whether ->opcode changes concurrently. The first read does care
+- * about whether it is a READ or a WRITE, so we don't trust this read
+- * for that purpose and instead let the caller pass in the read/write
+- * flag.
+- */
+- opcode = READ_ONCE(sqe->opcode);
++ opcode = req->submit.opcode;
+ if (opcode == IORING_OP_READ_FIXED ||
+ opcode == IORING_OP_WRITE_FIXED) {
+ ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
+@@ -1278,7 +1271,7 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
+ return ret;
+ }
+
+- if (!s->has_user)
++ if (!req->submit.has_user)
+ return -EFAULT;
+
+ #ifdef CONFIG_COMPAT
+@@ -1425,7 +1418,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
+ if (unlikely(!(file->f_mode & FMODE_READ)))
+ return -EBADF;
+
+- ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
++ ret = io_import_iovec(req->ctx, READ, req, &iovec, &iter);
+ if (ret < 0)
+ return ret;
+
+@@ -1490,7 +1483,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
+ if (unlikely(!(file->f_mode & FMODE_WRITE)))
+ return -EBADF;
+
+- ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
++ ret = io_import_iovec(req->ctx, WRITE, req, &iovec, &iter);
+ if (ret < 0)
+ return ret;
+
+@@ -2109,15 +2102,14 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ const struct sqe_submit *s, bool force_nonblock)
+ {
+- int ret, opcode;
++ int ret;
+
+ req->user_data = READ_ONCE(s->sqe->user_data);
+
+ if (unlikely(s->index >= ctx->sq_entries))
+ return -EINVAL;
+
+- opcode = READ_ONCE(s->sqe->opcode);
+- switch (opcode) {
++ switch (req->submit.opcode) {
+ case IORING_OP_NOP:
+ ret = io_nop(req, req->user_data);
+ break;
+@@ -2181,10 +2173,10 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ return 0;
+ }
+
+-static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
+- const struct io_uring_sqe *sqe)
++static struct async_list *io_async_list_from_req(struct io_ring_ctx *ctx,
++ struct io_kiocb *req)
+ {
+- switch (sqe->opcode) {
++ switch (req->submit.opcode) {
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ return &ctx->pending_async[READ];
+@@ -2196,12 +2188,10 @@ static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
+ }
+ }
+
+-static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
++static inline bool io_req_needs_user(struct io_kiocb *req)
+ {
+- u8 opcode = READ_ONCE(sqe->opcode);
+-
+- return !(opcode == IORING_OP_READ_FIXED ||
+- opcode == IORING_OP_WRITE_FIXED);
++ return !(req->submit.opcode == IORING_OP_READ_FIXED ||
++ req->submit.opcode == IORING_OP_WRITE_FIXED);
+ }
+
+ static void io_sq_wq_submit_work(struct work_struct *work)
+@@ -2217,7 +2207,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
+ int ret;
+
+ old_cred = override_creds(ctx->creds);
+- async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
++ async_list = io_async_list_from_req(ctx, req);
+
+ allow_kernel_signal(SIGINT);
+ restart:
+@@ -2239,9 +2229,10 @@ restart:
+ }
+
+ ret = 0;
+- if (io_sqe_needs_user(sqe) && !cur_mm) {
++ if (io_req_needs_user(req) && !cur_mm) {
+ if (!mmget_not_zero(ctx->sqo_mm)) {
+ ret = -EFAULT;
++ goto end_req;
+ } else {
+ cur_mm = ctx->sqo_mm;
+ use_mm(cur_mm);
+@@ -2387,11 +2378,9 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
+ return ret;
+ }
+
+-static bool io_op_needs_file(const struct io_uring_sqe *sqe)
++static bool io_op_needs_file(struct io_kiocb *req)
+ {
+- int op = READ_ONCE(sqe->opcode);
+-
+- switch (op) {
++ switch (req->submit.opcode) {
+ case IORING_OP_NOP:
+ case IORING_OP_POLL_REMOVE:
+ case IORING_OP_TIMEOUT:
+@@ -2419,7 +2408,7 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
+ */
+ req->sequence = s->sequence;
+
+- if (!io_op_needs_file(s->sqe))
++ if (!io_op_needs_file(req))
+ return 0;
+
+ if (flags & IOSQE_FIXED_FILE) {
+@@ -2460,7 +2449,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+
+ s->sqe = sqe_copy;
+ memcpy(&req->submit, s, sizeof(*s));
+- list = io_async_list_from_sqe(ctx, s->sqe);
++ list = io_async_list_from_req(ctx, req);
+ if (!io_add_to_prev_work(list, req)) {
+ if (list)
+ atomic_inc(&list->cnt);
+@@ -2582,7 +2571,7 @@ err:
+ req->user_data = s->sqe->user_data;
+
+ #if defined(CONFIG_NET)
+- switch (READ_ONCE(s->sqe->opcode)) {
++ switch (req->submit.opcode) {
+ case IORING_OP_SENDMSG:
+ case IORING_OP_RECVMSG:
+ spin_lock(¤t->fs->lock);
+@@ -2697,6 +2686,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
+ if (head < ctx->sq_entries) {
+ s->index = head;
+ s->sqe = &ctx->sq_sqes[head];
++ s->opcode = READ_ONCE(s->sqe->opcode);
+ s->sequence = ctx->cached_sq_head;
+ ctx->cached_sq_head++;
+ return true;
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 533d0fc3c96b..d6f244559e75 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3530,17 +3530,17 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
+ u32 zzz = 0;
+ int pad;
+
++ /*
++ * svcrdma requires every READ payload to start somewhere
++ * in xdr->pages.
++ */
++ if (xdr->iov == xdr->buf->head) {
++ xdr->iov = NULL;
++ xdr->end = xdr->p;
++ }
++
+ len = maxcount;
+ v = 0;
+-
+- thislen = min_t(long, len, ((void *)xdr->end - (void *)xdr->p));
+- p = xdr_reserve_space(xdr, (thislen+3)&~3);
+- WARN_ON_ONCE(!p);
+- resp->rqstp->rq_vec[v].iov_base = p;
+- resp->rqstp->rq_vec[v].iov_len = thislen;
+- v++;
+- len -= thislen;
+-
+ while (len) {
+ thislen = min_t(long, len, PAGE_SIZE);
+ p = xdr_reserve_space(xdr, (thislen+3)&~3);
+@@ -3559,6 +3559,8 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
+ read->rd_length = maxcount;
+ if (nfserr)
+ return nfserr;
++ if (svc_encode_read_payload(resp->rqstp, starting_len + 8, maxcount))
++ return nfserr_io;
+ xdr_truncate_encode(xdr, starting_len + 8 + ((maxcount+3)&~3));
+
+ tmp = htonl(eof);
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 90dd78f0eb27..f2854570d411 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -204,10 +204,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
+ return error;
+ }
+
+-
++/**
++ * __vfs_setxattr_locked: set an extended attribute while holding the inode
++ * lock
++ *
++ * @dentry - object to perform setxattr on
++ * @name - xattr name to set
++ * @value - value to set @name to
++ * @size - size of @value
++ * @flags - flags to pass into filesystem operations
++ * @delegated_inode - on return, will contain an inode pointer that
++ * a delegation was broken on, NULL if none.
++ */
+ int
+-vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+- size_t size, int flags)
++__vfs_setxattr_locked(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags,
++ struct inode **delegated_inode)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+@@ -216,15 +228,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ if (error)
+ return error;
+
+- inode_lock(inode);
+ error = security_inode_setxattr(dentry, name, value, size, flags);
+ if (error)
+ goto out;
+
++ error = try_break_deleg(inode, delegated_inode);
++ if (error)
++ goto out;
++
+ error = __vfs_setxattr_noperm(dentry, name, value, size, flags);
+
+ out:
++ return error;
++}
++EXPORT_SYMBOL_GPL(__vfs_setxattr_locked);
++
++int
++vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
++ size_t size, int flags)
++{
++ struct inode *inode = dentry->d_inode;
++ struct inode *delegated_inode = NULL;
++ int error;
++
++retry_deleg:
++ inode_lock(inode);
++ error = __vfs_setxattr_locked(dentry, name, value, size, flags,
++ &delegated_inode);
+ inode_unlock(inode);
++
++ if (delegated_inode) {
++ error = break_deleg_wait(&delegated_inode);
++ if (!error)
++ goto retry_deleg;
++ }
+ return error;
+ }
+ EXPORT_SYMBOL_GPL(vfs_setxattr);
+@@ -378,8 +415,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name)
+ }
+ EXPORT_SYMBOL(__vfs_removexattr);
+
++/**
++ * __vfs_removexattr_locked: set an extended attribute while holding the inode
++ * lock
++ *
++ * @dentry - object to perform setxattr on
++ * @name - name of xattr to remove
++ * @delegated_inode - on return, will contain an inode pointer that
++ * a delegation was broken on, NULL if none.
++ */
+ int
+-vfs_removexattr(struct dentry *dentry, const char *name)
++__vfs_removexattr_locked(struct dentry *dentry, const char *name,
++ struct inode **delegated_inode)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+@@ -388,11 +435,14 @@ vfs_removexattr(struct dentry *dentry, const char *name)
+ if (error)
+ return error;
+
+- inode_lock(inode);
+ error = security_inode_removexattr(dentry, name);
+ if (error)
+ goto out;
+
++ error = try_break_deleg(inode, delegated_inode);
++ if (error)
++ goto out;
++
+ error = __vfs_removexattr(dentry, name);
+
+ if (!error) {
+@@ -401,12 +451,32 @@ vfs_removexattr(struct dentry *dentry, const char *name)
+ }
+
+ out:
++ return error;
++}
++EXPORT_SYMBOL_GPL(__vfs_removexattr_locked);
++
++int
++vfs_removexattr(struct dentry *dentry, const char *name)
++{
++ struct inode *inode = dentry->d_inode;
++ struct inode *delegated_inode = NULL;
++ int error;
++
++retry_deleg:
++ inode_lock(inode);
++ error = __vfs_removexattr_locked(dentry, name, &delegated_inode);
+ inode_unlock(inode);
++
++ if (delegated_inode) {
++ error = break_deleg_wait(&delegated_inode);
++ if (!error)
++ goto retry_deleg;
++ }
++
+ return error;
+ }
+ EXPORT_SYMBOL_GPL(vfs_removexattr);
+
+-
+ /*
+ * Extended attribute SET operations
+ */
+diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
+index 3bcbe30339f0..198b9d060008 100644
+--- a/include/drm/drm_mode_config.h
++++ b/include/drm/drm_mode_config.h
+@@ -865,6 +865,18 @@ struct drm_mode_config {
+ */
+ bool prefer_shadow_fbdev;
+
++ /**
++ * @fbdev_use_iomem:
++ *
++ * Set to true if framebuffer reside in iomem.
++ * When set to true memcpy_toio() is used when copying the framebuffer in
++ * drm_fb_helper.drm_fb_helper_dirty_blit_real().
++ *
++ * FIXME: This should be replaced with a per-mapping is_iomem
++ * flag (like ttm does), and then used everywhere in fbdev code.
++ */
++ bool fbdev_use_iomem;
++
+ /**
+ * @quirk_addfb_prefer_xbgr_30bpp:
+ *
+diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
+index b4a017093b69..67d9b5a37460 100644
+--- a/include/linux/hyperv.h
++++ b/include/linux/hyperv.h
+@@ -423,6 +423,8 @@ enum vmbus_channel_message_type {
+ CHANNELMSG_19 = 19,
+ CHANNELMSG_20 = 20,
+ CHANNELMSG_TL_CONNECT_REQUEST = 21,
++ CHANNELMSG_22 = 22,
++ CHANNELMSG_TL_CONNECT_RESULT = 23,
+ CHANNELMSG_COUNT
+ };
+
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index 1afe38eb33f7..82665ff360fd 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -517,6 +517,9 @@ void svc_wake_up(struct svc_serv *);
+ void svc_reserve(struct svc_rqst *rqstp, int space);
+ struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
+ char * svc_print_addr(struct svc_rqst *, char *, size_t);
++int svc_encode_read_payload(struct svc_rqst *rqstp,
++ unsigned int offset,
++ unsigned int length);
+ unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
+ struct page **pages,
+ struct kvec *first, size_t total);
+diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
+index fddad9f5b390..26f282e5e082 100644
+--- a/include/linux/sunrpc/svc_rdma.h
++++ b/include/linux/sunrpc/svc_rdma.h
+@@ -137,6 +137,8 @@ struct svc_rdma_recv_ctxt {
+ unsigned int rc_page_count;
+ unsigned int rc_hdr_count;
+ u32 rc_inv_rkey;
++ unsigned int rc_read_payload_offset;
++ unsigned int rc_read_payload_length;
+ struct page *rc_pages[RPCSVC_MAXPAGES];
+ };
+
+@@ -171,7 +173,9 @@ extern int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma,
+ struct svc_rqst *rqstp,
+ struct svc_rdma_recv_ctxt *head, __be32 *p);
+ extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
+- __be32 *wr_ch, struct xdr_buf *xdr);
++ __be32 *wr_ch, struct xdr_buf *xdr,
++ unsigned int offset,
++ unsigned long length);
+ extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
+ __be32 *rp_ch, bool writelist,
+ struct xdr_buf *xdr);
+@@ -190,6 +194,8 @@ extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt,
+ struct xdr_buf *xdr, __be32 *wr_lst);
+ extern int svc_rdma_sendto(struct svc_rqst *);
++extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
++ unsigned int length);
+
+ /* svc_rdma_transport.c */
+ extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
+diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
+index ea6f46be9cb7..9e1e046de176 100644
+--- a/include/linux/sunrpc/svc_xprt.h
++++ b/include/linux/sunrpc/svc_xprt.h
+@@ -21,6 +21,8 @@ struct svc_xprt_ops {
+ int (*xpo_has_wspace)(struct svc_xprt *);
+ int (*xpo_recvfrom)(struct svc_rqst *);
+ int (*xpo_sendto)(struct svc_rqst *);
++ int (*xpo_read_payload)(struct svc_rqst *, unsigned int,
++ unsigned int);
+ void (*xpo_release_rqst)(struct svc_rqst *);
+ void (*xpo_detach)(struct svc_xprt *);
+ void (*xpo_free)(struct svc_xprt *);
+diff --git a/include/linux/xattr.h b/include/linux/xattr.h
+index 6dad031be3c2..3a71ad716da5 100644
+--- a/include/linux/xattr.h
++++ b/include/linux/xattr.h
+@@ -51,8 +51,10 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
+ ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+ int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
+ int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
++int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **);
+ int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
+ int __vfs_removexattr(struct dentry *, const char *);
++int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **);
+ int vfs_removexattr(struct dentry *, const char *);
+
+ ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index 3f62b347b04a..ab8b3eb53d4b 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -273,6 +273,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
++void __ipv6_sock_ac_close(struct sock *sk);
+ void ipv6_sock_ac_close(struct sock *sk);
+
+ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index aaaf50b25cc9..db1f5aa755f2 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2171,6 +2171,7 @@ __perf_remove_from_context(struct perf_event *event,
+
+ if (!ctx->nr_events && ctx->is_active) {
+ ctx->is_active = 0;
++ ctx->rotate_necessary = 0;
+ if (ctx->task) {
+ WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+ cpuctx->task_ctx = NULL;
+@@ -3047,12 +3048,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
+ if (!ctx->nr_active || !(is_active & EVENT_ALL))
+ return;
+
+- /*
+- * If we had been multiplexing, no rotations are necessary, now no events
+- * are active.
+- */
+- ctx->rotate_necessary = 0;
+-
+ perf_pmu_disable(ctx->pmu);
+ if (is_active & EVENT_PINNED) {
+ list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
+@@ -3062,6 +3057,13 @@ static void ctx_sched_out(struct perf_event_context *ctx,
+ if (is_active & EVENT_FLEXIBLE) {
+ list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
+ group_sched_out(event, cpuctx, ctx);
++
++ /*
++ * Since we cleared EVENT_FLEXIBLE, also clear
++ * rotate_necessary, it will be reset by
++ * ctx_flexible_sched_in() when needed.
++ */
++ ctx->rotate_necessary = 0;
+ }
+ perf_pmu_enable(ctx->pmu);
+ }
+@@ -3800,6 +3802,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx)
+ typeof(*event), group_node);
+ }
+
++ /*
++ * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
++ * finds there are unschedulable events, it will set it again.
++ */
++ ctx->rotate_necessary = 0;
++
+ return event;
+ }
+
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 3f67803123be..12ecacf0c55f 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -816,20 +816,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
+ return -ENOMEM;
+
+ ts->rd = fget(rfd);
++ if (!ts->rd)
++ goto out_free_ts;
++ if (!(ts->rd->f_mode & FMODE_READ))
++ goto out_put_rd;
+ ts->wr = fget(wfd);
+- if (!ts->rd || !ts->wr) {
+- if (ts->rd)
+- fput(ts->rd);
+- if (ts->wr)
+- fput(ts->wr);
+- kfree(ts);
+- return -EIO;
+- }
++ if (!ts->wr)
++ goto out_put_rd;
++ if (!(ts->wr->f_mode & FMODE_WRITE))
++ goto out_put_wr;
+
+ client->trans = ts;
+ client->status = Connected;
+
+ return 0;
++
++out_put_wr:
++ fput(ts->wr);
++out_put_rd:
++ fput(ts->rd);
++out_free_ts:
++ kfree(ts);
++ return -EIO;
+ }
+
+ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
+diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
+index 550c6ca007cc..9c1241292d1d 100644
+--- a/net/appletalk/atalk_proc.c
++++ b/net/appletalk/atalk_proc.c
+@@ -229,6 +229,8 @@ int __init atalk_proc_init(void)
+ sizeof(struct aarp_iter_state), NULL))
+ goto out;
+
++ return 0;
++
+ out:
+ remove_proc_subtree("atalk", init_net.proc_net);
+ return -ENOMEM;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 44385252d7b6..7bf6860fed78 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -2444,7 +2444,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+
+ BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
+
+- if (!num_rsp)
++ if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
+ return;
+
+ if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
+@@ -4067,6 +4067,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
+ struct inquiry_info_with_rssi_and_pscan_mode *info;
+ info = (void *) (skb->data + 1);
+
++ if (skb->len < num_rsp * sizeof(*info) + 1)
++ goto unlock;
++
+ for (; num_rsp; num_rsp--, info++) {
+ u32 flags;
+
+@@ -4088,6 +4091,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
+ } else {
+ struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
+
++ if (skb->len < num_rsp * sizeof(*info) + 1)
++ goto unlock;
++
+ for (; num_rsp; num_rsp--, info++) {
+ u32 flags;
+
+@@ -4108,6 +4114,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
+ }
+ }
+
++unlock:
+ hci_dev_unlock(hdev);
+ }
+
+@@ -4270,7 +4277,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
+
+ BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
+
+- if (!num_rsp)
++ if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
+ return;
+
+ if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 1b851fd82613..47b6d73d30e5 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1751,7 +1751,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
+ while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
+ struct key_vector *local_l = NULL, *local_tp;
+
+- hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
++ hlist_for_each_entry(fa, &l->leaf, fa_list) {
+ struct fib_alias *new_fa;
+
+ if (local_tb->tb_id != fa->tb_id)
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index 4de7e962d3da..c840141876bc 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -15,12 +15,12 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+ {
+ int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
++ bool need_csum, need_recompute_csum, gso_partial;
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ u16 mac_offset = skb->mac_header;
+ __be16 protocol = skb->protocol;
+ u16 mac_len = skb->mac_len;
+ int gre_offset, outer_hlen;
+- bool need_csum, gso_partial;
+
+ if (!skb->encapsulation)
+ goto out;
+@@ -41,6 +41,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+ skb->protocol = skb->inner_protocol;
+
+ need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
++ need_recompute_csum = skb->csum_not_inet;
+ skb->encap_hdr_csum = need_csum;
+
+ features &= skb->dev->hw_enc_features;
+@@ -98,7 +99,15 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+ }
+
+ *(pcsum + 1) = 0;
+- *pcsum = gso_make_checksum(skb, 0);
++ if (need_recompute_csum && !skb_is_gso(skb)) {
++ __wsum csum;
++
++ csum = skb_checksum(skb, gre_offset,
++ skb->len - gre_offset, 0);
++ *pcsum = csum_fold(csum);
++ } else {
++ *pcsum = gso_make_checksum(skb, 0);
++ }
+ } while ((skb = skb->next));
+ out:
+ return segs;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 5040f7ca37ec..ab5358281000 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2944,6 +2944,8 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
+ u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+
+ if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
++ if (!delta)
++ delta = 1;
+ seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+ ca_rtt_us = seq_rtt_us;
+ }
+diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
+index fed91ab7ec46..cf3a88a10ddd 100644
+--- a/net/ipv6/anycast.c
++++ b/net/ipv6/anycast.c
+@@ -183,7 +183,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
+ return 0;
+ }
+
+-void ipv6_sock_ac_close(struct sock *sk)
++void __ipv6_sock_ac_close(struct sock *sk)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct net_device *dev = NULL;
+@@ -191,10 +191,7 @@ void ipv6_sock_ac_close(struct sock *sk)
+ struct net *net = sock_net(sk);
+ int prev_index;
+
+- if (!np->ipv6_ac_list)
+- return;
+-
+- rtnl_lock();
++ ASSERT_RTNL();
+ pac = np->ipv6_ac_list;
+ np->ipv6_ac_list = NULL;
+
+@@ -211,6 +208,16 @@ void ipv6_sock_ac_close(struct sock *sk)
+ sock_kfree_s(sk, pac, sizeof(*pac));
+ pac = next;
+ }
++}
++
++void ipv6_sock_ac_close(struct sock *sk)
++{
++ struct ipv6_pinfo *np = inet6_sk(sk);
++
++ if (!np->ipv6_ac_list)
++ return;
++ rtnl_lock();
++ __ipv6_sock_ac_close(sk);
+ rtnl_unlock();
+ }
+
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 5af97b4f5df3..5352c7e68c42 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -205,6 +205,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+
+ fl6_free_socklist(sk);
+ __ipv6_sock_mc_close(sk);
++ __ipv6_sock_ac_close(sk);
+
+ /*
+ * Sock is moving from IPv6 to IPv4 (sk_prot), so
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 94ade43d5a45..46df6345bb99 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3686,14 +3686,14 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ rt->fib6_src.plen = cfg->fc_src_len;
+ #endif
+ if (nh) {
+- if (!nexthop_get(nh)) {
+- NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+- goto out;
+- }
+ if (rt->fib6_src.plen) {
+ NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
+ goto out;
+ }
++ if (!nexthop_get(nh)) {
++ NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
++ goto out;
++ }
+ rt->nh = nh;
+ fib6_nh = nexthop_fib6_nh(rt->nh);
+ } else {
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 8b70298857e3..c86e404cd65b 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -276,10 +276,6 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
+ ovs_ct_update_key(skb, NULL, key, false, false);
+ }
+
+-#define IN6_ADDR_INITIALIZER(ADDR) \
+- { (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \
+- (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] }
+-
+ int ovs_ct_put_key(const struct sw_flow_key *swkey,
+ const struct sw_flow_key *output, struct sk_buff *skb)
+ {
+@@ -301,24 +297,30 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey,
+
+ if (swkey->ct_orig_proto) {
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+- struct ovs_key_ct_tuple_ipv4 orig = {
+- output->ipv4.ct_orig.src,
+- output->ipv4.ct_orig.dst,
+- output->ct.orig_tp.src,
+- output->ct.orig_tp.dst,
+- output->ct_orig_proto,
+- };
++ struct ovs_key_ct_tuple_ipv4 orig;
++
++ memset(&orig, 0, sizeof(orig));
++ orig.ipv4_src = output->ipv4.ct_orig.src;
++ orig.ipv4_dst = output->ipv4.ct_orig.dst;
++ orig.src_port = output->ct.orig_tp.src;
++ orig.dst_port = output->ct.orig_tp.dst;
++ orig.ipv4_proto = output->ct_orig_proto;
++
+ if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
+ sizeof(orig), &orig))
+ return -EMSGSIZE;
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+- struct ovs_key_ct_tuple_ipv6 orig = {
+- IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src),
+- IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst),
+- output->ct.orig_tp.src,
+- output->ct.orig_tp.dst,
+- output->ct_orig_proto,
+- };
++ struct ovs_key_ct_tuple_ipv6 orig;
++
++ memset(&orig, 0, sizeof(orig));
++ memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
++ sizeof(orig.ipv6_src));
++ memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
++ sizeof(orig.ipv6_dst));
++ orig.src_port = output->ct.orig_tp.src;
++ orig.dst_port = output->ct.orig_tp.dst;
++ orig.ipv6_proto = output->ct_orig_proto;
++
+ if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
+ sizeof(orig), &orig))
+ return -EMSGSIZE;
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index f07970207b54..38a46167523f 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -288,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
+ */
+ ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
+ if (ret < 0)
+- goto error;
++ goto error_attached_to_socket;
+
+ trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
+ atomic_read(&call->usage), here, NULL);
+@@ -308,18 +308,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
+ error_dup_user_ID:
+ write_unlock(&rx->call_lock);
+ release_sock(&rx->sk);
+- ret = -EEXIST;
+-
+-error:
+ __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+- RX_CALL_DEAD, ret);
++ RX_CALL_DEAD, -EEXIST);
+ trace_rxrpc_call(call->debug_id, rxrpc_call_error,
+- atomic_read(&call->usage), here, ERR_PTR(ret));
++ atomic_read(&call->usage), here, ERR_PTR(-EEXIST));
+ rxrpc_release_call(rx, call);
+ mutex_unlock(&call->user_mutex);
+ rxrpc_put_call(call, rxrpc_call_put);
+- _leave(" = %d", ret);
+- return ERR_PTR(ret);
++ _leave(" = -EEXIST");
++ return ERR_PTR(-EEXIST);
++
++ /* We got an error, but the call is attached to the socket and is in
++ * need of release. However, we might now race with recvmsg() when
++ * completing the call queues it. Return 0 from sys_sendmsg() and
++ * leave the error to recvmsg() to deal with.
++ */
++error_attached_to_socket:
++ trace_rxrpc_call(call->debug_id, rxrpc_call_error,
++ atomic_read(&call->usage), here, ERR_PTR(ret));
++ set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
++ __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
++ RX_CALL_DEAD, ret);
++ _leave(" = c=%08x [err]", call->debug_id);
++ return call;
+ }
+
+ /*
+diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
+index 19e141eeed17..8cbe0bf20ed5 100644
+--- a/net/rxrpc/conn_object.c
++++ b/net/rxrpc/conn_object.c
+@@ -212,9 +212,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
+
+ call->peer->cong_cwnd = call->cong_cwnd;
+
+- spin_lock_bh(&conn->params.peer->lock);
+- hlist_del_rcu(&call->error_link);
+- spin_unlock_bh(&conn->params.peer->lock);
++ if (!hlist_unhashed(&call->error_link)) {
++ spin_lock_bh(&call->peer->lock);
++ hlist_del_rcu(&call->error_link);
++ spin_unlock_bh(&call->peer->lock);
++ }
+
+ if (rxrpc_is_client_call(call))
+ return rxrpc_disconnect_client_call(call);
+diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
+index 6896a33ef842..4f48e3bdd4b4 100644
+--- a/net/rxrpc/recvmsg.c
++++ b/net/rxrpc/recvmsg.c
+@@ -541,7 +541,7 @@ try_again:
+ goto error_unlock_call;
+ }
+
+- if (msg->msg_name) {
++ if (msg->msg_name && call->peer) {
+ struct sockaddr_rxrpc *srx = msg->msg_name;
+ size_t len = sizeof(call->peer->srx);
+
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 49d03c8c64da..1a340eb0abf7 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -683,6 +683,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ if (IS_ERR(call))
+ return PTR_ERR(call);
+ /* ... and we have the call lock. */
++ ret = 0;
++ if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
++ goto out_put_unlock;
+ } else {
+ switch (READ_ONCE(call->state)) {
+ case RXRPC_CALL_UNINITIALISED:
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index d11b70552c33..f0dcb6d14bbb 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1634,6 +1634,22 @@ u32 svc_max_payload(const struct svc_rqst *rqstp)
+ }
+ EXPORT_SYMBOL_GPL(svc_max_payload);
+
++/**
++ * svc_encode_read_payload - mark a range of bytes as a READ payload
++ * @rqstp: svc_rqst to operate on
++ * @offset: payload's byte offset in rqstp->rq_res
++ * @length: size of payload, in bytes
++ *
++ * Returns zero on success, or a negative errno if a permanent
++ * error occurred.
++ */
++int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset,
++ unsigned int length)
++{
++ return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length);
++}
++EXPORT_SYMBOL_GPL(svc_encode_read_payload);
++
+ /**
+ * svc_fill_write_vector - Construct data argument for VFS write call
+ * @rqstp: svc_rqst to operate on
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 4260924ad9db..d52abde51f1b 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -279,6 +279,12 @@ out:
+ return len;
+ }
+
++static int svc_sock_read_payload(struct svc_rqst *rqstp, unsigned int offset,
++ unsigned int length)
++{
++ return 0;
++}
++
+ /*
+ * Report socket names for nfsdfs
+ */
+@@ -655,6 +661,7 @@ static const struct svc_xprt_ops svc_udp_ops = {
+ .xpo_create = svc_udp_create,
+ .xpo_recvfrom = svc_udp_recvfrom,
+ .xpo_sendto = svc_udp_sendto,
++ .xpo_read_payload = svc_sock_read_payload,
+ .xpo_release_rqst = svc_release_udp_skb,
+ .xpo_detach = svc_sock_detach,
+ .xpo_free = svc_sock_free,
+@@ -1175,6 +1182,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
+ .xpo_create = svc_tcp_create,
+ .xpo_recvfrom = svc_tcp_recvfrom,
+ .xpo_sendto = svc_tcp_sendto,
++ .xpo_read_payload = svc_sock_read_payload,
+ .xpo_release_rqst = svc_release_skb,
+ .xpo_detach = svc_tcp_sock_detach,
+ .xpo_free = svc_sock_free,
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index b8ee91ffedda..0ce4e75b2981 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -193,6 +193,7 @@ svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
+
+ out:
+ ctxt->rc_page_count = 0;
++ ctxt->rc_read_payload_length = 0;
+ return ctxt;
+
+ out_empty:
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+index a59912e2666d..066af6b2eb01 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
+@@ -481,18 +481,19 @@ static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
+ vec->iov_len);
+ }
+
+-/* Send an xdr_buf's page list by itself. A Write chunk is
+- * just the page list. a Reply chunk is the head, page list,
+- * and tail. This function is shared between the two types
+- * of chunk.
++/* Send an xdr_buf's page list by itself. A Write chunk is just
++ * the page list. A Reply chunk is @xdr's head, page list, and
++ * tail. This function is shared between the two types of chunk.
+ */
+ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
+- struct xdr_buf *xdr)
++ struct xdr_buf *xdr,
++ unsigned int offset,
++ unsigned long length)
+ {
+ info->wi_xdr = xdr;
+- info->wi_next_off = 0;
++ info->wi_next_off = offset - xdr->head[0].iov_len;
+ return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
+- xdr->page_len);
++ length);
+ }
+
+ /**
+@@ -500,6 +501,8 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
+ * @rdma: controlling RDMA transport
+ * @wr_ch: Write chunk provided by client
+ * @xdr: xdr_buf containing the data payload
++ * @offset: payload's byte offset in @xdr
++ * @length: size of payload, in bytes
+ *
+ * Returns a non-negative number of bytes the chunk consumed, or
+ * %-E2BIG if the payload was larger than the Write chunk,
+@@ -509,19 +512,20 @@ static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
+ * %-EIO if rdma_rw initialization failed (DMA mapping, etc).
+ */
+ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
+- struct xdr_buf *xdr)
++ struct xdr_buf *xdr,
++ unsigned int offset, unsigned long length)
+ {
+ struct svc_rdma_write_info *info;
+ int ret;
+
+- if (!xdr->page_len)
++ if (!length)
+ return 0;
+
+ info = svc_rdma_write_info_alloc(rdma, wr_ch);
+ if (!info)
+ return -ENOMEM;
+
+- ret = svc_rdma_send_xdr_pagelist(info, xdr);
++ ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
+ if (ret < 0)
+ goto out_err;
+
+@@ -530,7 +534,7 @@ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
+ goto out_err;
+
+ trace_svcrdma_encode_write(xdr->page_len);
+- return xdr->page_len;
++ return length;
+
+ out_err:
+ svc_rdma_write_info_free(info);
+@@ -570,7 +574,9 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
+ * client did not provide Write chunks.
+ */
+ if (!writelist && xdr->page_len) {
+- ret = svc_rdma_send_xdr_pagelist(info, xdr);
++ ret = svc_rdma_send_xdr_pagelist(info, xdr,
++ xdr->head[0].iov_len,
++ xdr->page_len);
+ if (ret < 0)
+ goto out_err;
+ consumed += xdr->page_len;
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 93ff7967389a..217106c66a13 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -856,7 +856,18 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
+
+ if (wr_lst) {
+ /* XXX: Presume the client sent only one Write chunk */
+- ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
++ unsigned long offset;
++ unsigned int length;
++
++ if (rctxt->rc_read_payload_length) {
++ offset = rctxt->rc_read_payload_offset;
++ length = rctxt->rc_read_payload_length;
++ } else {
++ offset = xdr->head[0].iov_len;
++ length = xdr->page_len;
++ }
++ ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
++ length);
+ if (ret < 0)
+ goto err2;
+ svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
+@@ -891,3 +902,30 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ return -ENOTCONN;
+ }
++
++/**
++ * svc_rdma_read_payload - special processing for a READ payload
++ * @rqstp: svc_rqst to operate on
++ * @offset: payload's byte offset in @xdr
++ * @length: size of payload, in bytes
++ *
++ * Returns zero on success.
++ *
++ * For the moment, just record the xdr_buf location of the READ
++ * payload. svc_rdma_sendto will use that location later when
++ * we actually send the payload.
++ */
++int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
++ unsigned int length)
++{
++ struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
++
++ /* XXX: Just one READ payload slot for now, since our
++ * transport implementation currently supports only one
++ * Write chunk.
++ */
++ rctxt->rc_read_payload_offset = offset;
++ rctxt->rc_read_payload_length = length;
++
++ return 0;
++}
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+index 889220f11a70..89a12676c59d 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
+@@ -81,6 +81,7 @@ static const struct svc_xprt_ops svc_rdma_ops = {
+ .xpo_create = svc_rdma_create,
+ .xpo_recvfrom = svc_rdma_recvfrom,
+ .xpo_sendto = svc_rdma_sendto,
++ .xpo_read_payload = svc_rdma_read_payload,
+ .xpo_release_rqst = svc_rdma_release_rqst,
+ .xpo_detach = svc_rdma_detach,
+ .xpo_free = svc_rdma_free,
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index a34bbca80f49..ec559dbad56e 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -12949,13 +12949,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
+ if (!wdev_running(wdev))
+ return -ENETDOWN;
+ }
+-
+- if (!vcmd->doit)
+- return -EOPNOTSUPP;
+ } else {
+ wdev = NULL;
+ }
+
++ if (!vcmd->doit)
++ return -EOPNOTSUPP;
++
+ if (info->attrs[NL80211_ATTR_VENDOR_DATA]) {
+ data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]);
+ len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]);
+diff --git a/scripts/coccinelle/misc/add_namespace.cocci b/scripts/coccinelle/misc/add_namespace.cocci
+index 99e93a6c2e24..cbf1614163cb 100644
+--- a/scripts/coccinelle/misc/add_namespace.cocci
++++ b/scripts/coccinelle/misc/add_namespace.cocci
+@@ -6,6 +6,7 @@
+ /// add a missing namespace tag to a module source file.
+ ///
+
++virtual nsdeps
+ virtual report
+
+ @has_ns_import@
+@@ -16,10 +17,15 @@ MODULE_IMPORT_NS(ns);
+
+ // Add missing imports, but only adjacent to a MODULE_LICENSE statement.
+ // That ensures we are adding it only to the main module source file.
+-@do_import depends on !has_ns_import@
++@do_import depends on !has_ns_import && nsdeps@
+ declarer name MODULE_LICENSE;
+ expression license;
+ identifier virtual.ns;
+ @@
+ MODULE_LICENSE(license);
+ + MODULE_IMPORT_NS(ns);
++
++// Dummy rule for report mode that would otherwise be empty and make spatch
++// fail ("No rules apply.")
++@script:python depends on report@
++@@
+diff --git a/scripts/nsdeps b/scripts/nsdeps
+index 04cea0921673..e547f33b96a6 100644
+--- a/scripts/nsdeps
++++ b/scripts/nsdeps
+@@ -23,7 +23,7 @@ fi
+
+ generate_deps_for_ns() {
+ $SPATCH --very-quiet --in-place --sp-file \
+- $srctree/scripts/coccinelle/misc/add_namespace.cocci -D ns=$1 $2
++ $srctree/scripts/coccinelle/misc/add_namespace.cocci -D nsdeps -D ns=$1 $2
+ }
+
+ generate_deps() {
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index 838476d780e5..d2054bec4909 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -227,7 +227,7 @@ config IMA_APPRAISE_REQUIRE_POLICY_SIGS
+
+ config IMA_APPRAISE_BOOTPARAM
+ bool "ima_appraise boot parameter"
+- depends on IMA_APPRAISE && !IMA_ARCH_POLICY
++ depends on IMA_APPRAISE
+ default y
+ help
+ This option enables the different "ima_appraise=" modes
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 136ae4e0ee92..23b04c6521b2 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -18,6 +18,12 @@
+ static int __init default_appraise_setup(char *str)
+ {
+ #ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
++ if (arch_ima_get_secureboot()) {
++ pr_info("Secure boot enabled: ignoring ima_appraise=%s boot parameter option",
++ str);
++ return 1;
++ }
++
+ if (strncmp(str, "off", 3) == 0)
+ ima_appraise = 0;
+ else if (strncmp(str, "log", 3) == 0)
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index c21b656b3263..840a192e9337 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -2720,7 +2720,6 @@ static int smk_open_relabel_self(struct inode *inode, struct file *file)
+ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+- struct task_smack *tsp = smack_cred(current_cred());
+ char *data;
+ int rc;
+ LIST_HEAD(list_tmp);
+@@ -2745,11 +2744,21 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf,
+ kfree(data);
+
+ if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) {
++ struct cred *new;
++ struct task_smack *tsp;
++
++ new = prepare_creds();
++ if (!new) {
++ rc = -ENOMEM;
++ goto out;
++ }
++ tsp = smack_cred(new);
+ smk_destroy_label_list(&tsp->smk_relabel);
+ list_splice(&list_tmp, &tsp->smk_relabel);
++ commit_creds(new);
+ return count;
+ }
+-
++out:
+ smk_destroy_label_list(&list_tmp);
+ return rc;
+ }
+diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
+index 17f913657304..c8b9c0b315d8 100644
+--- a/sound/core/seq/oss/seq_oss.c
++++ b/sound/core/seq/oss/seq_oss.c
+@@ -168,10 +168,16 @@ static long
+ odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+ struct seq_oss_devinfo *dp;
++ long rc;
++
+ dp = file->private_data;
+ if (snd_BUG_ON(!dp))
+ return -ENXIO;
+- return snd_seq_oss_ioctl(dp, cmd, arg);
++
++ mutex_lock(&register_mutex);
++ rc = snd_seq_oss_ioctl(dp, cmd, arg);
++ mutex_unlock(&register_mutex);
++ return rc;
+ }
+
+ #ifdef CONFIG_COMPAT
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 07c03c32715a..801abf0fc98b 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2924,6 +2924,10 @@ static int hda_codec_runtime_suspend(struct device *dev)
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+ unsigned int state;
+
++ /* Nothing to do if card registration fails and the component driver never probes */
++ if (!codec->card)
++ return 0;
++
+ cancel_delayed_work_sync(&codec->jackpoll_work);
+ state = hda_call_codec_suspend(codec);
+ if (codec->link_down_at_suspend ||
+@@ -2938,6 +2942,10 @@ static int hda_codec_runtime_resume(struct device *dev)
+ {
+ struct hda_codec *codec = dev_to_hda_codec(dev);
+
++ /* Nothing to do if card registration fails and the component driver never probes */
++ if (!codec->card)
++ return 0;
++
+ codec_display_power(codec, true);
+ snd_hdac_codec_link_up(&codec->core);
+ hda_call_codec_resume(codec);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 95b0fdffc504..7353d2ec359a 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2306,7 +2306,6 @@ static int azx_probe_continue(struct azx *chip)
+
+ if (azx_has_pm_runtime(chip)) {
+ pm_runtime_use_autosuspend(&pci->dev);
+- pm_runtime_allow(&pci->dev);
+ pm_runtime_put_autosuspend(&pci->dev);
+ }
+
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 1e904dd15ab3..6aa39339db0a 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1182,6 +1182,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
++ SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
+ SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
+ {}
+ };
+@@ -4670,7 +4671,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ tmp = FLOAT_ONE;
+ break;
+ case QUIRK_AE5:
+- ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00);
++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
+ tmp = FLOAT_THREE;
+ break;
+ default:
+@@ -4716,7 +4717,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ r3di_gpio_mic_set(codec, R3DI_REAR_MIC);
+ break;
+ case QUIRK_AE5:
+- ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00);
++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
+ break;
+ default:
+ break;
+@@ -4755,7 +4756,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
+ tmp = FLOAT_ONE;
+ break;
+ case QUIRK_AE5:
+- ca0113_mmio_command_set(codec, 0x48, 0x28, 0x3f);
++ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x3f);
+ tmp = FLOAT_THREE;
+ break;
+ default:
+@@ -5747,6 +5748,11 @@ static int ca0132_switch_get(struct snd_kcontrol *kcontrol,
+ return 0;
+ }
+
++ if (nid == ZXR_HEADPHONE_GAIN) {
++ *valp = spec->zxr_gain_set;
++ return 0;
++ }
++
+ return 0;
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f50d71da1226..ea25b8d0350d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6131,6 +6131,11 @@ enum {
+ ALC289_FIXUP_ASUS_GA502,
+ ALC256_FIXUP_ACER_MIC_NO_PRESENCE,
+ ALC285_FIXUP_HP_GPIO_AMP_INIT,
++ ALC269_FIXUP_CZC_B20,
++ ALC269_FIXUP_CZC_TMI,
++ ALC269_FIXUP_CZC_L101,
++ ALC269_FIXUP_LEMOTE_A1802,
++ ALC269_FIXUP_LEMOTE_A190X,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7369,6 +7374,89 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC285_FIXUP_HP_GPIO_LED
+ },
++ [ALC269_FIXUP_CZC_B20] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x12, 0x411111f0 },
++ { 0x14, 0x90170110 }, /* speaker */
++ { 0x15, 0x032f1020 }, /* HP out */
++ { 0x17, 0x411111f0 },
++ { 0x18, 0x03ab1040 }, /* mic */
++ { 0x19, 0xb7a7013f },
++ { 0x1a, 0x0181305f },
++ { 0x1b, 0x411111f0 },
++ { 0x1d, 0x411111f0 },
++ { 0x1e, 0x411111f0 },
++ { }
++ },
++ .chain_id = ALC269_FIXUP_DMIC,
++ },
++ [ALC269_FIXUP_CZC_TMI] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x12, 0x4000c000 },
++ { 0x14, 0x90170110 }, /* speaker */
++ { 0x15, 0x0421401f }, /* HP out */
++ { 0x17, 0x411111f0 },
++ { 0x18, 0x04a19020 }, /* mic */
++ { 0x19, 0x411111f0 },
++ { 0x1a, 0x411111f0 },
++ { 0x1b, 0x411111f0 },
++ { 0x1d, 0x40448505 },
++ { 0x1e, 0x411111f0 },
++ { 0x20, 0x8000ffff },
++ { }
++ },
++ .chain_id = ALC269_FIXUP_DMIC,
++ },
++ [ALC269_FIXUP_CZC_L101] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x12, 0x40000000 },
++ { 0x14, 0x01014010 }, /* speaker */
++ { 0x15, 0x411111f0 }, /* HP out */
++ { 0x16, 0x411111f0 },
++ { 0x18, 0x01a19020 }, /* mic */
++ { 0x19, 0x02a19021 },
++ { 0x1a, 0x0181302f },
++ { 0x1b, 0x0221401f },
++ { 0x1c, 0x411111f0 },
++ { 0x1d, 0x4044c601 },
++ { 0x1e, 0x411111f0 },
++ { }
++ },
++ .chain_id = ALC269_FIXUP_DMIC,
++ },
++ [ALC269_FIXUP_LEMOTE_A1802] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x12, 0x40000000 },
++ { 0x14, 0x90170110 }, /* speaker */
++ { 0x17, 0x411111f0 },
++ { 0x18, 0x03a19040 }, /* mic1 */
++ { 0x19, 0x90a70130 }, /* mic2 */
++ { 0x1a, 0x411111f0 },
++ { 0x1b, 0x411111f0 },
++ { 0x1d, 0x40489d2d },
++ { 0x1e, 0x411111f0 },
++ { 0x20, 0x0003ffff },
++ { 0x21, 0x03214020 },
++ { }
++ },
++ .chain_id = ALC269_FIXUP_DMIC,
++ },
++ [ALC269_FIXUP_LEMOTE_A190X] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x14, 0x99130110 }, /* speaker */
++ { 0x15, 0x0121401f }, /* HP out */
++ { 0x18, 0x01a19c20 }, /* rear mic */
++ { 0x19, 0x99a3092f }, /* front mic */
++ { 0x1b, 0x0201401f }, /* front lineout */
++ { }
++ },
++ .chain_id = ALC269_FIXUP_DMIC,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7658,9 +7746,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
++ SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
++ SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
++ SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
+ SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
++ SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+
+ #if 0
+ /* Below is a quirk table taken from the old code.
+@@ -8945,6 +9038,7 @@ enum {
+ ALC662_FIXUP_LED_GPIO1,
+ ALC662_FIXUP_IDEAPAD,
+ ALC272_FIXUP_MARIO,
++ ALC662_FIXUP_CZC_ET26,
+ ALC662_FIXUP_CZC_P10T,
+ ALC662_FIXUP_SKU_IGNORE,
+ ALC662_FIXUP_HP_RP5800,
+@@ -9014,6 +9108,25 @@ static const struct hda_fixup alc662_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc272_fixup_mario,
+ },
++ [ALC662_FIXUP_CZC_ET26] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ {0x12, 0x403cc000},
++ {0x14, 0x90170110}, /* speaker */
++ {0x15, 0x411111f0},
++ {0x16, 0x411111f0},
++ {0x18, 0x01a19030}, /* mic */
++ {0x19, 0x90a7013f}, /* int-mic */
++ {0x1a, 0x01014020},
++ {0x1b, 0x0121401f},
++ {0x1c, 0x411111f0},
++ {0x1d, 0x411111f0},
++ {0x1e, 0x40478e35},
++ {}
++ },
++ .chained = true,
++ .chain_id = ALC662_FIXUP_SKU_IGNORE
++ },
+ [ALC662_FIXUP_CZC_P10T] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+@@ -9397,6 +9510,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
+ SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
+ SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
++ SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
+ SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
+ SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
+
+diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
+index 798284f511f1..4559a15e6657 100644
+--- a/tools/lib/traceevent/event-parse.c
++++ b/tools/lib/traceevent/event-parse.c
+@@ -2861,6 +2861,7 @@ process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg,
+ if (read_expected(TEP_EVENT_DELIM, ")") < 0)
+ goto out_err;
+
++ free_token(token);
+ type = read_token(&token);
+ *tok = token;
+
+diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
+index 4b02933cab8a..bdc03a2097e8 100644
+--- a/tools/testing/selftests/net/msg_zerocopy.c
++++ b/tools/testing/selftests/net/msg_zerocopy.c
+@@ -125,9 +125,8 @@ static int do_setcpu(int cpu)
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+ if (sched_setaffinity(0, sizeof(mask), &mask))
+- error(1, 0, "setaffinity %d", cpu);
+-
+- if (cfg_verbose)
++ fprintf(stderr, "cpu: unable to pin, may increase variance.\n");
++ else if (cfg_verbose)
+ fprintf(stderr, "cpu: %u\n", cpu);
+
+ return 0;