From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Tue, 10 Nov 2020 13:56:39 +0000 (UTC)
Message-ID: <1605016584.96339a934e3be4fc8007433e7229ad6074feab5a.mpagano@gentoo>

commit:     96339a934e3be4fc8007433e7229ad6074feab5a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Nov 10 13:56:24 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Nov 10 13:56:24 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=96339a93

Linux patch 4.19.156

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1155_linux-4.19.156.patch | 2339 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2343 insertions(+)

diff --git a/0000_README b/0000_README
index e9e0833..54a0fb2 100644
--- a/0000_README
+++ b/0000_README
@@ -659,6 +659,10 @@ Patch:  1154_linux-4.19.155.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.155
 
+Patch:  1155_linux-4.19.156.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.156
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1155_linux-4.19.156.patch b/1155_linux-4.19.156.patch
new file mode 100644
index 0000000..8a1ecce
--- /dev/null
+++ b/1155_linux-4.19.156.patch
@@ -0,0 +1,2339 @@
+diff --git a/Makefile b/Makefile
+index 9fc16d34e1bb9..82891b34e19e0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 155
++SUBLEVEL = 156
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index 705a682084232..85d9ea4a0accc 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -156,6 +156,7 @@ END(EV_Extension)
+ tracesys:
+ 	; save EFA in case tracer wants the PC of traced task
+ 	; using ERET won't work since next-PC has already committed
++	lr  r12, [efa]
+ 	GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r11
+ 	st  r12, [r11, THREAD_FAULT_ADDR]	; thread.fault_address
+ 
+@@ -198,9 +199,15 @@ tracesys_exit:
+ ; Breakpoint TRAP
+ ; ---------------------------------------------
+ trap_with_param:
+-	mov r0, r12	; EFA in case ptracer/gdb wants stop_pc
++
++	; gdb needs this (EFA) for its stop_pc info
++	lr  r0, [efa]
+ 	mov r1, sp
+ 
++	; Now that we have read EFA, it is safe to do "fake" rtie
++	;   and get out of CPU exception mode
++	FAKE_RET_FROM_EXCPN
++
+ 	; Save callee regs in case gdb wants to have a look
+ 	; SP will grow up by size of CALLEE Reg-File
+ 	; NOTE: clobbers r12
+@@ -227,10 +234,6 @@ ENTRY(EV_Trap)
+ 
+ 	EXCEPTION_PROLOGUE
+ 
+-	lr  r12, [efa]
+-
+-	FAKE_RET_FROM_EXCPN
+-
+ 	;============ TRAP 1   :breakpoints
+ 	; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR)
+ 	bmsk.f 0, r9, 7
+@@ -238,6 +241,9 @@ ENTRY(EV_Trap)
+ 
+ 	;============ TRAP  (no param): syscall top level
+ 
++	; First return from Exception to pure K mode (Exception/IRQs re-enabled)
++	FAKE_RET_FROM_EXCPN
++
+ 	; If syscall tracing ongoing, invoke pre-post-hooks
+ 	GET_CURR_THR_INFO_FLAGS   r10
+ 	btst r10, TIF_SYSCALL_TRACE
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index bf40e06f3fb84..0fed32b959232 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -115,7 +115,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ 		int (*consumer_fn) (unsigned int, void *), void *arg)
+ {
+ #ifdef CONFIG_ARC_DW2_UNWIND
+-	int ret = 0;
++	int ret = 0, cnt = 0;
+ 	unsigned int address;
+ 	struct unwind_frame_info frame_info;
+ 
+@@ -135,6 +135,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ 			break;
+ 
+ 		frame_info.regs.r63 = frame_info.regs.r31;
++
++		if (cnt++ > 128) {
++			printk("unwinder looping too long, aborting !\n");
++			return 0;
++		}
+ 	}
+ 
+ 	return address;		/* return the last address it saw */
+diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
+index 5d46bb0139fad..707ad5074878a 100644
+--- a/arch/arm/boot/dts/sun4i-a10.dtsi
++++ b/arch/arm/boot/dts/sun4i-a10.dtsi
+@@ -143,7 +143,7 @@
+ 			trips {
+ 				cpu_alert0: cpu-alert0 {
+ 					/* milliCelsius */
+-					temperature = <850000>;
++					temperature = <85000>;
+ 					hysteresis = <2000>;
+ 					type = "passive";
+ 				};
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+index 6cbdd66921aab..1a3e6e3b04eba 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+@@ -21,6 +21,10 @@
+ 
+ 	aliases {
+ 		ethernet0 = &eth0;
++		/* for dsa slave device */
++		ethernet1 = &switch0port1;
++		ethernet2 = &switch0port2;
++		ethernet3 = &switch0port3;
+ 		serial0 = &uart0;
+ 		serial1 = &uart1;
+ 	};
+@@ -136,25 +140,25 @@
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 
+-			port@0 {
++			switch0port0: port@0 {
+ 				reg = <0>;
+ 				label = "cpu";
+ 				ethernet = <&eth0>;
+ 			};
+ 
+-			port@1 {
++			switch0port1: port@1 {
+ 				reg = <1>;
+ 				label = "wan";
+ 				phy-handle = <&switch0phy0>;
+ 			};
+ 
+-			port@2 {
++			switch0port2: port@2 {
+ 				reg = <2>;
+ 				label = "lan0";
+ 				phy-handle = <&switch0phy1>;
+ 			};
+ 
+-			port@3 {
++			switch0port3: port@3 {
+ 				reg = <3>;
+ 				label = "lan1";
+ 				phy-handle = <&switch0phy2>;
+diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
+index 9490a2845f14b..273687986a263 100644
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -211,8 +211,7 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
+ 	params->hdr.hardware_subarch = boot_params.hdr.hardware_subarch;
+ 
+ 	/* Copying screen_info will do? */
+-	memcpy(&params->screen_info, &boot_params.screen_info,
+-				sizeof(struct screen_info));
++	memcpy(&params->screen_info, &screen_info, sizeof(struct screen_info));
+ 
+ 	/* Fill in memsize later */
+ 	params->screen_info.ext_mem_k = 0;
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index a06547fe6f6b4..85bd46e0a745f 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -876,13 +876,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ 			goto fail;
+ 		}
+ 
++		if (radix_tree_preload(GFP_KERNEL)) {
++			blkg_free(new_blkg);
++			ret = -ENOMEM;
++			goto fail;
++		}
++
+ 		rcu_read_lock();
+ 		spin_lock_irq(q->queue_lock);
+ 
+ 		blkg = blkg_lookup_check(pos, pol, q);
+ 		if (IS_ERR(blkg)) {
+ 			ret = PTR_ERR(blkg);
+-			goto fail_unlock;
++			blkg_free(new_blkg);
++			goto fail_preloaded;
+ 		}
+ 
+ 		if (blkg) {
+@@ -891,10 +898,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ 			blkg = blkg_create(pos, q, new_blkg);
+ 			if (unlikely(IS_ERR(blkg))) {
+ 				ret = PTR_ERR(blkg);
+-				goto fail_unlock;
++				goto fail_preloaded;
+ 			}
+ 		}
+ 
++		radix_tree_preload_end();
++
+ 		if (pos == blkcg)
+ 			goto success;
+ 	}
+@@ -904,6 +913,8 @@ success:
+ 	ctx->body = body;
+ 	return 0;
+ 
++fail_preloaded:
++	radix_tree_preload_end();
+ fail_unlock:
+ 	spin_unlock_irq(q->queue_lock);
+ 	rcu_read_unlock();
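
The blk-cgroup fix above is an instance of the standard radix-tree preload
pattern: reserve tree nodes with radix_tree_preload() while the code may
still sleep, perform the insertion under the spinlock where allocation is
forbidden, and always pair the preload with radix_tree_preload_end(). A
minimal sketch of that pattern (illustrative only; insert_item, my_tree,
my_lock and item are hypothetical names, not from this patch):

	#include <linux/radix-tree.h>
	#include <linux/spinlock.h>

	static int insert_item(struct radix_tree_root *my_tree,
			       spinlock_t *my_lock,
			       unsigned long index, void *item)
	{
		int ret;

		/* May sleep; reserves per-CPU nodes, disables preemption. */
		ret = radix_tree_preload(GFP_KERNEL);
		if (ret)
			return ret;		/* -ENOMEM, nothing to undo */

		spin_lock(my_lock);
		/* Cannot fail with -ENOMEM thanks to the preload. */
		ret = radix_tree_insert(my_tree, index, item);
		spin_unlock(my_lock);

		radix_tree_preload_end();	/* re-enables preemption */
		return ret;
	}
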
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index dd4c7289610ec..cb88f3b43a940 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -1535,7 +1535,7 @@ static ssize_t format1_show(struct device *dev,
+ 					le16_to_cpu(nfit_dcr->dcr->code));
+ 			break;
+ 		}
+-		if (rc != ENXIO)
++		if (rc != -ENXIO)
+ 			break;
+ 	}
+ 	mutex_unlock(&acpi_desc->init_mutex);
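
The one-character nfit fix above corrects a classic sign bug: in-kernel
helpers report failure as a negative errno, so comparing rc against a
positive ENXIO can never match and the loop never terminated early as
intended. A small userspace illustration of the same pitfall (do_probe is
a hypothetical helper):

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical helper following the kernel convention:
	 * returns 0 on success, -ENXIO when the device is absent. */
	static int do_probe(int present)
	{
		return present ? 0 : -ENXIO;
	}

	int main(void)
	{
		int rc = do_probe(0);

		if (rc != ENXIO)	/* BUG: rc is -ENXIO, always true */
			printf("wrong: -ENXIO treated as a hard error\n");
		if (rc == -ENXIO)	/* correct comparison */
			printf("right: device absent, keep scanning\n");
		return 0;
	}
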
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index b3c569412f4e2..4ba9231a6be80 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -931,6 +931,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ 
+ 	drv = dev->driver;
+ 	if (drv) {
++		pm_runtime_get_sync(dev);
++
+ 		while (device_links_busy(dev)) {
+ 			device_unlock(dev);
+ 			if (parent && dev->bus->need_parent_lock)
+@@ -946,11 +948,12 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ 			 * have released the driver successfully while this one
+ 			 * was waiting, so check for that.
+ 			 */
+-			if (dev->driver != drv)
++			if (dev->driver != drv) {
++				pm_runtime_put(dev);
+ 				return;
++			}
+ 		}
+ 
+-		pm_runtime_get_sync(dev);
+ 		pm_runtime_clean_up_links(dev);
+ 
+ 		driver_sysfs_remove(dev);
+diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
+index f7334c42ebd9e..35116cdb5e389 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -175,7 +175,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
+ {
+ 	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
+ 		__skb_trim(skb, 0);
+-		refcount_add(2, &skb->users);
++		refcount_inc(&skb->users);
+ 	} else {
+ 		skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
+ 	}
+diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c
+index 64d24823c65aa..2294fb06bef36 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
++++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
+@@ -368,6 +368,9 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
+ 	if (ret)
+ 		goto out_notcb;
+ 
++	if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
++		goto out_notcb;
++
+ 	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid);
+ 	csk->wr_credits -= DIV_ROUND_UP(len, 16);
+ 	csk->wr_unacked += DIV_ROUND_UP(len, 16);
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
+index a262a64f56256..ba24ac698e8bc 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.c
++++ b/drivers/gpu/drm/i915/i915_gpu_error.c
+@@ -268,6 +268,8 @@ static int compress_page(struct compress *c,
+ 
+ 		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
+ 			return -EIO;
++
++		cond_resched();
+ 	} while (zstream->avail_in);
+ 
+ 	/* Fallback to uncompressed if we increase size? */
+@@ -347,6 +349,7 @@ static int compress_page(struct compress *c,
+ 	if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+ 		memcpy(ptr, src, PAGE_SIZE);
+ 	dst->pages[dst->page_count++] = ptr;
++	cond_resched();
+ 
+ 	return 0;
+ }
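
The i915 change above adds cond_resched() to the error-state compression
loops; capturing a GPU error state can walk a very large number of pages,
and without a voluntary reschedule point such a loop can trigger
soft-lockup or RCU stall warnings. A minimal sketch of the idiom
(work_on_page is a hypothetical stand-in for the real per-page work):

	#include <linux/sched.h>
	#include <linux/mm_types.h>

	static void work_on_page(struct page *page) { }

	static void process_many_pages(struct page **pages,
				       unsigned long count)
	{
		unsigned long i;

		for (i = 0; i < count; i++) {
			work_on_page(pages[i]);
			cond_resched();	/* voluntary preemption point */
		}
	}
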
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
+index 04270a14fcaaf..868dd1ef3b693 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.c
++++ b/drivers/gpu/drm/vc4/vc4_drv.c
+@@ -312,6 +312,7 @@ unbind_all:
+ 	component_unbind_all(dev, drm);
+ gem_destroy:
+ 	vc4_gem_destroy(drm);
++	drm_mode_config_cleanup(drm);
+ 	vc4_bo_cache_destroy(drm);
+ dev_put:
+ 	drm_dev_put(drm);
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index acae87f548a16..0374a1ba10103 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1704,7 +1704,8 @@ static inline int macb_clear_csum(struct sk_buff *skb)
+ 
+ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
+ {
+-	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
++	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
++		      skb_is_nonlinear(*skb);
+ 	int padlen = ETH_ZLEN - (*skb)->len;
+ 	int headroom = skb_headroom(*skb);
+ 	int tailroom = skb_tailroom(*skb);
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 8243501c37574..8db0924ec681c 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -1388,7 +1388,7 @@ static int gfar_probe(struct platform_device *ofdev)
+ 
+ 	if (dev->features & NETIF_F_IP_CSUM ||
+ 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+-		dev->needed_headroom = GMAC_FCB_LEN;
++		dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+ 
+ 	/* Initializing some of the rx/tx queue level parameters */
+ 	for (i = 0; i < priv->num_tx_queues; i++) {
+@@ -2370,20 +2370,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+ 
+ 	/* make space for additional header when fcb is needed */
+-	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
+-		struct sk_buff *skb_new;
+-
+-		skb_new = skb_realloc_headroom(skb, fcb_len);
+-		if (!skb_new) {
++	if (fcb_len) {
++		if (unlikely(skb_cow_head(skb, fcb_len))) {
+ 			dev->stats.tx_errors++;
+ 			dev_kfree_skb_any(skb);
+ 			return NETDEV_TX_OK;
+ 		}
+-
+-		if (skb->sk)
+-			skb_set_owner_w(skb_new, skb->sk);
+-		dev_consume_skb_any(skb);
+-		skb = skb_new;
+ 	}
+ 
+ 	/* total number of fragments in the SKB */
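
The gianfar change above replaces an open-coded skb_realloc_headroom()
sequence with skb_cow_head(), the idiomatic helper for an xmit path: it
reallocates the header portion only when headroom is short or the header
is shared, and it adjusts the skb in place. A condensed sketch of the
calling pattern (sketch_xmit and the fcb_len handling are illustrative,
not the driver's exact code):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t sketch_xmit(struct sk_buff *skb,
				       struct net_device *dev,
				       unsigned int fcb_len)
	{
		u8 *fcb;

		/* Reallocate the header only if headroom is short/shared. */
		if (unlikely(skb_cow_head(skb, fcb_len))) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;	/* consumed, no requeue */
		}

		fcb = skb_push(skb, fcb_len);	/* guaranteed to fit now */
		memset(fcb, 0, fcb_len);
		/* ... fill the control block, hand off to hardware ... */
		return NETDEV_TX_OK;
	}
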
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 998d08ae7431a..47d518e6d5d4f 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -1886,7 +1886,8 @@ static int sfp_probe(struct platform_device *pdev)
+ 			continue;
+ 
+ 		irq = gpiod_to_irq(sfp->gpio[i]);
+-		if (!irq) {
++		if (irq < 0) {
++			irq = 0;
+ 			poll = true;
+ 			continue;
+ 		}
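
The sfp fix above corrects the error check on gpiod_to_irq(): it returns
a positive IRQ number on success and a negative errno on failure, so
testing !irq both missed real errors and could let a negative errno
escape as if it were a valid interrupt. A minimal sketch of the intended
handling (setup_line_irq is a hypothetical wrapper):

	#include <linux/gpio/consumer.h>

	/* Returns a positive IRQ number, or 0 if the caller should poll. */
	static int setup_line_irq(struct gpio_desc *gpiod, bool *poll)
	{
		int irq = gpiod_to_irq(gpiod);

		if (irq < 0) {	/* failure is a negative errno, not 0 */
			*poll = true;
			return 0;
		}
		*poll = false;
		return irq;
	}
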
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index d2612b69257ea..6e0b3dc14aa47 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1268,6 +1268,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)},	/* Telit LE910Cx */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)},	/* Telit LE910Cx */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)},	/* Telit LE910Cx */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)},	/* Telit LN940 series */
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 895c83e0c7b6c..19f95552da4d8 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -218,6 +218,16 @@ static int __init __rmem_cmp(const void *a, const void *b)
+ 	if (ra->base > rb->base)
+ 		return 1;
+ 
++	/*
++	 * Put the dynamic allocations (address == 0, size == 0) before static
++	 * allocations at address 0x0 so that overlap detection works
++	 * correctly.
++	 */
++	if (ra->size < rb->size)
++		return -1;
++	if (ra->size > rb->size)
++		return 1;
++
+ 	return 0;
+ }
+ 
+@@ -235,8 +245,7 @@ static void __init __rmem_check_for_overlap(void)
+ 
+ 		this = &reserved_mem[i];
+ 		next = &reserved_mem[i + 1];
+-		if (!(this->base && next->base))
+-			continue;
++
+ 		if (this->base + this->size > next->base) {
+ 			phys_addr_t this_end, next_end;
+ 
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 9a7e3a3bd5ce8..009a5b2aa3d02 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1722,15 +1722,16 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
+  */
+ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
+ {
+-	struct async_scan_data *data;
++	struct async_scan_data *data = NULL;
+ 	unsigned long flags;
+ 
+ 	if (strncmp(scsi_scan_type, "sync", 4) == 0)
+ 		return NULL;
+ 
++	mutex_lock(&shost->scan_mutex);
+ 	if (shost->async_scan) {
+ 		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
+-		return NULL;
++		goto err;
+ 	}
+ 
+ 	data = kmalloc(sizeof(*data), GFP_KERNEL);
+@@ -1741,7 +1742,6 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
+ 		goto err;
+ 	init_completion(&data->prev_finished);
+ 
+-	mutex_lock(&shost->scan_mutex);
+ 	spin_lock_irqsave(shost->host_lock, flags);
+ 	shost->async_scan = 1;
+ 	spin_unlock_irqrestore(shost->host_lock, flags);
+@@ -1756,6 +1756,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
+ 	return data;
+ 
+  err:
++	mutex_unlock(&shost->scan_mutex);
+ 	kfree(data);
+ 	return NULL;
+ }
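
The scsi_scan change above is a lock-scope fix: shost->async_scan is now
tested with scan_mutex already held, every failure path releases the
mutex through one label, and the success path deliberately returns with
the mutex still held (it is released later when the async scan
completes). A condensed sketch of that shape (struct host and struct
data are hypothetical types):

	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct host { struct mutex scan_mutex; int async_scan; };
	struct data { int dummy; };

	static struct data *prep_scan(struct host *h)
	{
		struct data *d = NULL;

		mutex_lock(&h->scan_mutex);
		if (h->async_scan)	/* now checked under the lock */
			goto err;

		d = kmalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			goto err;

		h->async_scan = 1;
		return d;		/* success: mutex stays held */
	err:
		mutex_unlock(&h->scan_mutex);
		kfree(d);		/* kfree(NULL) is a no-op */
		return NULL;
	}
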
+diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
+index 159169a48debe..cc4f0c5e5ddff 100644
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -47,7 +47,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 */
+ 	baud = tty_termios_baud_rate(termios);
+ 
+-	serial8250_do_set_termios(port, termios, old);
++	serial8250_do_set_termios(port, termios, NULL);
+ 
+ 	tty_termios_encode_baud_rate(termios, baud, baud);
+ 
+diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
+index 1b4008d022bfd..2f7ef64536a04 100644
+--- a/drivers/tty/serial/serial_txx9.c
++++ b/drivers/tty/serial/serial_txx9.c
+@@ -1284,6 +1284,9 @@ static int __init serial_txx9_init(void)
+ 
+ #ifdef ENABLE_SERIAL_TXX9_PCI
+ 	ret = pci_register_driver(&serial_txx9_pci_driver);
++	if (ret) {
++		platform_driver_unregister(&serial_txx9_plat_driver);
++	}
+ #endif
+ 	if (ret == 0)
+ 		goto out;
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 758f522f331e4..13ea0579f104c 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -4574,27 +4574,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
+ 	return rc;
+ }
+ 
+-static int con_font_copy(struct vc_data *vc, struct console_font_op *op)
+-{
+-	int con = op->height;
+-	int rc;
+-
+-
+-	console_lock();
+-	if (vc->vc_mode != KD_TEXT)
+-		rc = -EINVAL;
+-	else if (!vc->vc_sw->con_font_copy)
+-		rc = -ENOSYS;
+-	else if (con < 0 || !vc_cons_allocated(con))
+-		rc = -ENOTTY;
+-	else if (con == vc->vc_num)	/* nothing to do */
+-		rc = 0;
+-	else
+-		rc = vc->vc_sw->con_font_copy(vc, con);
+-	console_unlock();
+-	return rc;
+-}
+-
+ int con_font_op(struct vc_data *vc, struct console_font_op *op)
+ {
+ 	switch (op->op) {
+@@ -4605,7 +4584,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op)
+ 	case KD_FONT_OP_SET_DEFAULT:
+ 		return con_font_default(vc, op);
+ 	case KD_FONT_OP_COPY:
+-		return con_font_copy(vc, op);
++		/* was buggy and never really used */
++		return -EINVAL;
+ 	}
+ 	return -ENOSYS;
+ }
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 4ee8105310989..5ad14cdd97623 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -378,6 +378,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x0926, 0x3333), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+ 
++	/* Kingston DataTraveler 3.0 */
++	{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
+ 	{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
+ 
+diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
+index bbcd3332471dc..3828ed6d38a21 100644
+--- a/drivers/usb/mtu3/mtu3_gadget.c
++++ b/drivers/usb/mtu3/mtu3_gadget.c
+@@ -573,6 +573,7 @@ static int mtu3_gadget_stop(struct usb_gadget *g)
+ 
+ 	spin_unlock_irqrestore(&mtu->lock, flags);
+ 
++	synchronize_irq(mtu->irq);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
+index ebd76ab07b722..36dd688b5795c 100644
+--- a/drivers/usb/serial/cyberjack.c
++++ b/drivers/usb/serial/cyberjack.c
+@@ -357,11 +357,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
+ 	struct device *dev = &port->dev;
+ 	int status = urb->status;
+ 	unsigned long flags;
++	bool resubmitted = false;
+ 
+-	set_bit(0, &port->write_urbs_free);
+ 	if (status) {
+ 		dev_dbg(dev, "%s - nonzero write bulk status received: %d\n",
+ 			__func__, status);
++		set_bit(0, &port->write_urbs_free);
+ 		return;
+ 	}
+ 
+@@ -394,6 +395,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
+ 			goto exit;
+ 		}
+ 
++		resubmitted = true;
++
+ 		dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent);
+ 		dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled);
+ 
+@@ -410,6 +413,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
+ 
+ exit:
+ 	spin_unlock_irqrestore(&priv->lock, flags);
++	if (!resubmitted)
++		set_bit(0, &port->write_urbs_free);
+ 	usb_serial_port_softint(port);
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c773db129bf94..f28344f03141d 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -250,6 +250,7 @@ static void option_instat_callback(struct urb *urb);
+ #define QUECTEL_PRODUCT_EP06			0x0306
+ #define QUECTEL_PRODUCT_EM12			0x0512
+ #define QUECTEL_PRODUCT_RM500Q			0x0800
++#define QUECTEL_PRODUCT_EC200T			0x6026
+ 
+ #define CMOTECH_VENDOR_ID			0x16d8
+ #define CMOTECH_PRODUCT_6001			0x6001
+@@ -1117,6 +1118,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
+ 	  .driver_info = ZLP },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+ 
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+@@ -1189,6 +1191,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff),	/* Telit FT980-KS */
+ 	  .driver_info = NCTRL(2) | RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff),	/* Telit FN980 (PCIe) */
++	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+@@ -1201,6 +1205,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff),	/* Telit LE910Cx (RNDIS) */
++	  .driver_info = NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+@@ -1215,6 +1221,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff),	/* Telit LE910Cx (rmnet) */
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff),	/* Telit LE910Cx (RNDIS) */
++	  .driver_info = NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1260),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x1261),
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 554727d82d432..4d1c12faada89 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1459,6 +1459,21 @@ do {                                                                   \
+ 
+ #define BTRFS_INODE_ROOT_ITEM_INIT	(1 << 31)
+ 
++#define BTRFS_INODE_FLAG_MASK						\
++	(BTRFS_INODE_NODATASUM |					\
++	 BTRFS_INODE_NODATACOW |					\
++	 BTRFS_INODE_READONLY |						\
++	 BTRFS_INODE_NOCOMPRESS |					\
++	 BTRFS_INODE_PREALLOC |						\
++	 BTRFS_INODE_SYNC |						\
++	 BTRFS_INODE_IMMUTABLE |					\
++	 BTRFS_INODE_APPEND |						\
++	 BTRFS_INODE_NODUMP |						\
++	 BTRFS_INODE_NOATIME |						\
++	 BTRFS_INODE_DIRSYNC |						\
++	 BTRFS_INODE_COMPRESS |						\
++	 BTRFS_INODE_ROOT_ITEM_INIT)
++
+ struct btrfs_map_token {
+ 	const struct extent_buffer *eb;
+ 	char *kaddr;
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 82d597b16152c..301111922a1a2 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -138,7 +138,61 @@ static int add_extent_changeset(struct extent_state *state, unsigned bits,
+ 	return ret;
+ }
+ 
+-static void flush_write_bio(struct extent_page_data *epd);
++static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
++				       unsigned long bio_flags)
++{
++	blk_status_t ret = 0;
++	struct bio_vec *bvec = bio_last_bvec_all(bio);
++	struct page *page = bvec->bv_page;
++	struct extent_io_tree *tree = bio->bi_private;
++	u64 start;
++
++	start = page_offset(page) + bvec->bv_offset;
++
++	bio->bi_private = NULL;
++
++	if (tree->ops)
++		ret = tree->ops->submit_bio_hook(tree->private_data, bio,
++					   mirror_num, bio_flags, start);
++	else
++		btrfsic_submit_bio(bio);
++
++	return blk_status_to_errno(ret);
++}
++
++/* Cleanup unsubmitted bios */
++static void end_write_bio(struct extent_page_data *epd, int ret)
++{
++	if (epd->bio) {
++		epd->bio->bi_status = errno_to_blk_status(ret);
++		bio_endio(epd->bio);
++		epd->bio = NULL;
++	}
++}
++
++/*
++ * Submit bio from extent page data via submit_one_bio
++ *
++ * Return 0 if everything is OK.
++ * Return <0 for error.
++ */
++static int __must_check flush_write_bio(struct extent_page_data *epd)
++{
++	int ret = 0;
++
++	if (epd->bio) {
++		ret = submit_one_bio(epd->bio, 0, 0);
++		/*
++		 * Clean up of epd->bio is handled by its endio function.
++		 * And endio is either triggered by successful bio execution
++		 * or the error handler of submit bio hook.
++		 * So at this point, no matter what happened, we don't need
++		 * to clean up epd->bio.
++		 */
++		epd->bio = NULL;
++	}
++	return ret;
++}
+ 
+ int __init extent_io_init(void)
+ {
+@@ -2710,28 +2764,6 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, int offset, int size)
+ 	return bio;
+ }
+ 
+-static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
+-				       unsigned long bio_flags)
+-{
+-	blk_status_t ret = 0;
+-	struct bio_vec *bvec = bio_last_bvec_all(bio);
+-	struct page *page = bvec->bv_page;
+-	struct extent_io_tree *tree = bio->bi_private;
+-	u64 start;
+-
+-	start = page_offset(page) + bvec->bv_offset;
+-
+-	bio->bi_private = NULL;
+-
+-	if (tree->ops)
+-		ret = tree->ops->submit_bio_hook(tree->private_data, bio,
+-					   mirror_num, bio_flags, start);
+-	else
+-		btrfsic_submit_bio(bio);
+-
+-	return blk_status_to_errno(ret);
+-}
+-
+ /*
+  * @opf:	bio REQ_OP_* and REQ_* flags as one value
+  * @tree:	tree so we can call our merge_bio hook
+@@ -3439,6 +3471,9 @@ done:
+  * records are inserted to lock ranges in the tree, and as dirty areas
+  * are found, they are marked writeback.  Then the lock bits are removed
+  * and the end_io handler clears the writeback ranges
++ *
++ * Return 0 if everything goes well.
++ * Return <0 for error.
+  */
+ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
+ 			      struct extent_page_data *epd)
+@@ -3506,6 +3541,7 @@ done:
+ 		end_extent_writepage(page, ret, start, page_end);
+ 	}
+ 	unlock_page(page);
++	ASSERT(ret <= 0);
+ 	return ret;
+ 
+ done_unlocked:
+@@ -3518,18 +3554,34 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+ 		       TASK_UNINTERRUPTIBLE);
+ }
+ 
++static void end_extent_buffer_writeback(struct extent_buffer *eb)
++{
++	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
++	smp_mb__after_atomic();
++	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
++}
++
++/*
++ * Lock eb pages and flush the bio if we can't get the locks
++ *
++ * Return  0 if nothing went wrong
++ * Return >0 is the same as 0, except the bio is not submitted
++ * Return <0 if something went wrong, no page is locked
++ */
+ static noinline_for_stack int
+ lock_extent_buffer_for_io(struct extent_buffer *eb,
+ 			  struct btrfs_fs_info *fs_info,
+ 			  struct extent_page_data *epd)
+ {
+-	int i, num_pages;
++	int i, num_pages, failed_page_nr;
+ 	int flush = 0;
+ 	int ret = 0;
+ 
+ 	if (!btrfs_try_tree_write_lock(eb)) {
++		ret = flush_write_bio(epd);
++		if (ret < 0)
++			return ret;
+ 		flush = 1;
+-		flush_write_bio(epd);
+ 		btrfs_tree_lock(eb);
+ 	}
+ 
+@@ -3538,7 +3590,9 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
+ 		if (!epd->sync_io)
+ 			return 0;
+ 		if (!flush) {
+-			flush_write_bio(epd);
++			ret = flush_write_bio(epd);
++			if (ret < 0)
++				return ret;
+ 			flush = 1;
+ 		}
+ 		while (1) {
+@@ -3579,7 +3633,14 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
+ 
+ 		if (!trylock_page(p)) {
+ 			if (!flush) {
+-				flush_write_bio(epd);
++				int err;
++
++				err = flush_write_bio(epd);
++				if (err < 0) {
++					ret = err;
++					failed_page_nr = i;
++					goto err_unlock;
++				}
+ 				flush = 1;
+ 			}
+ 			lock_page(p);
+@@ -3587,13 +3648,25 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
+ 	}
+ 
+ 	return ret;
+-}
+-
+-static void end_extent_buffer_writeback(struct extent_buffer *eb)
+-{
+-	clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+-	smp_mb__after_atomic();
+-	wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
++err_unlock:
++	/* Unlock already locked pages */
++	for (i = 0; i < failed_page_nr; i++)
++		unlock_page(eb->pages[i]);
++	/*
++	 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
++	 * Also set back EXTENT_BUFFER_DIRTY so future write attempts to this
++	 * eb can be made, undoing everything done before.
++	 */
++	btrfs_tree_lock(eb);
++	spin_lock(&eb->refs_lock);
++	set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
++	end_extent_buffer_writeback(eb);
++	spin_unlock(&eb->refs_lock);
++	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
++				 fs_info->dirty_metadata_batch);
++	btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
++	btrfs_tree_unlock(eb);
++	return ret;
+ }
+ 
+ static void set_btree_ioerr(struct page *page)
+@@ -3869,7 +3942,44 @@ retry:
+ 		index = 0;
+ 		goto retry;
+ 	}
+-	flush_write_bio(&epd);
++	ASSERT(ret <= 0);
++	if (ret < 0) {
++		end_write_bio(&epd, ret);
++		return ret;
++	}
++	/*
++	 * If something went wrong, don't allow any metadata write bio to be
++	 * submitted.
++	 *
++	 * This would prevent use-after-free if we had dirty pages not
++	 * cleaned up, which can still happen by fuzzed images.
++	 *
++	 * - Bad extent tree
++	 *   Allowing existing tree block to be allocated for other trees.
++	 *
++	 * - Log tree operations
++	 *   Existing tree blocks get allocated to log tree, bumps its
++	 *   generation, then get cleaned in tree re-balance.
++	 *   Such tree block will not be written back, since it's clean,
++	 *   thus no WRITTEN flag set.
++	 *   And after log writes back, this tree block is not traced by
++	 *   any dirty extent_io_tree.
++	 *
++	 * - Offending tree block gets re-dirtied from its original owner
++	 *   Since it has bumped generation, no WRITTEN flag, it can be
++	 *   reused without COWing. This tree block will not be traced
++	 *   by btrfs_transaction::dirty_pages.
++	 *
++	 *   Now such dirty tree block will not be cleaned by any dirty
++	 *   extent io tree. Thus we don't want to submit such wild eb
++	 *   if the fs already has error.
++	 */
++	if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
++		ret = flush_write_bio(&epd);
++	} else {
++		ret = -EUCLEAN;
++		end_write_bio(&epd, ret);
++	}
+ 	return ret;
+ }
+ 
+@@ -3966,7 +4076,8 @@ retry:
+ 			 * tmpfs file mapping
+ 			 */
+ 			if (!trylock_page(page)) {
+-				flush_write_bio(epd);
++				ret = flush_write_bio(epd);
++				BUG_ON(ret < 0);
+ 				lock_page(page);
+ 			}
+ 
+@@ -3976,8 +4087,10 @@ retry:
+ 			}
+ 
+ 			if (wbc->sync_mode != WB_SYNC_NONE) {
+-				if (PageWriteback(page))
+-					flush_write_bio(epd);
++				if (PageWriteback(page)) {
++					ret = flush_write_bio(epd);
++					BUG_ON(ret < 0);
++				}
+ 				wait_on_page_writeback(page);
+ 			}
+ 
+@@ -4022,8 +4135,9 @@ retry:
+ 		 * page in our current bio, and thus deadlock, so flush the
+ 		 * write bio here.
+ 		 */
+-		flush_write_bio(epd);
+-		goto retry;
++		ret = flush_write_bio(epd);
++		if (!ret)
++			goto retry;
+ 	}
+ 
+ 	if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
+@@ -4033,17 +4147,6 @@ retry:
+ 	return ret;
+ }
+ 
+-static void flush_write_bio(struct extent_page_data *epd)
+-{
+-	if (epd->bio) {
+-		int ret;
+-
+-		ret = submit_one_bio(epd->bio, 0, 0);
+-		BUG_ON(ret < 0); /* -ENOMEM */
+-		epd->bio = NULL;
+-	}
+-}
+-
+ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
+ {
+ 	int ret;
+@@ -4055,8 +4158,14 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
+ 	};
+ 
+ 	ret = __extent_writepage(page, wbc, &epd);
++	ASSERT(ret <= 0);
++	if (ret < 0) {
++		end_write_bio(&epd, ret);
++		return ret;
++	}
+ 
+-	flush_write_bio(&epd);
++	ret = flush_write_bio(&epd);
++	ASSERT(ret <= 0);
+ 	return ret;
+ }
+ 
+@@ -4064,6 +4173,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
+ 			      int mode)
+ {
+ 	int ret = 0;
++	int flush_ret;
+ 	struct address_space *mapping = inode->i_mapping;
+ 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+ 	struct page *page;
+@@ -4098,7 +4208,8 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
+ 		start += PAGE_SIZE;
+ 	}
+ 
+-	flush_write_bio(&epd);
++	flush_ret = flush_write_bio(&epd);
++	BUG_ON(flush_ret < 0);
+ 	return ret;
+ }
+ 
+@@ -4106,6 +4217,7 @@ int extent_writepages(struct address_space *mapping,
+ 		      struct writeback_control *wbc)
+ {
+ 	int ret = 0;
++	int flush_ret;
+ 	struct extent_page_data epd = {
+ 		.bio = NULL,
+ 		.tree = &BTRFS_I(mapping->host)->io_tree,
+@@ -4114,7 +4226,8 @@ int extent_writepages(struct address_space *mapping,
+ 	};
+ 
+ 	ret = extent_write_cache_pages(mapping, wbc, &epd);
+-	flush_write_bio(&epd);
++	flush_ret = flush_write_bio(&epd);
++	BUG_ON(flush_ret < 0);
+ 	return ret;
+ }
+ 
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index d98ec885b72a1..9023e6b46396a 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -448,6 +448,320 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
+ 	return 0;
+ }
+ 
++__printf(5, 6)
++__cold
++static void chunk_err(const struct btrfs_fs_info *fs_info,
++		      const struct extent_buffer *leaf,
++		      const struct btrfs_chunk *chunk, u64 logical,
++		      const char *fmt, ...)
++{
++	bool is_sb;
++	struct va_format vaf;
++	va_list args;
++	int i;
++	int slot = -1;
++
++	/* Only superblock eb is able to have such small offset */
++	is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);
++
++	if (!is_sb) {
++		/*
++		 * Get the slot number by iterating through all slots, this
++		 * would provide better readability.
++		 */
++		for (i = 0; i < btrfs_header_nritems(leaf); i++) {
++			if (btrfs_item_ptr_offset(leaf, i) ==
++					(unsigned long)chunk) {
++				slot = i;
++				break;
++			}
++		}
++	}
++	va_start(args, fmt);
++	vaf.fmt = fmt;
++	vaf.va = &args;
++
++	if (is_sb)
++		btrfs_crit(fs_info,
++		"corrupt superblock syschunk array: chunk_start=%llu, %pV",
++			   logical, &vaf);
++	else
++		btrfs_crit(fs_info,
++	"corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
++			   BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
++			   logical, &vaf);
++	va_end(args);
++}
++
++/*
++ * The common chunk check which could also work on super block sys chunk array.
++ *
++ * Return -EUCLEAN if anything is corrupted.
++ * Return 0 if everything is OK.
++ */
++int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
++			    struct extent_buffer *leaf,
++			    struct btrfs_chunk *chunk, u64 logical)
++{
++	u64 length;
++	u64 stripe_len;
++	u16 num_stripes;
++	u16 sub_stripes;
++	u64 type;
++	u64 features;
++	bool mixed = false;
++
++	length = btrfs_chunk_length(leaf, chunk);
++	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
++	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
++	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
++	type = btrfs_chunk_type(leaf, chunk);
++
++	if (!num_stripes) {
++		chunk_err(fs_info, leaf, chunk, logical,
++			  "invalid chunk num_stripes, have %u", num_stripes);
++		return -EUCLEAN;
++	}
++	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
++		chunk_err(fs_info, leaf, chunk, logical,
++		"invalid chunk logical, have %llu should aligned to %u",
++			  logical, fs_info->sectorsize);
++		return -EUCLEAN;
++	}
++	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
++		chunk_err(fs_info, leaf, chunk, logical,
++			  "invalid chunk sectorsize, have %u expect %u",
++			  btrfs_chunk_sector_size(leaf, chunk),
++			  fs_info->sectorsize);
++		return -EUCLEAN;
++	}
++	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
++		chunk_err(fs_info, leaf, chunk, logical,
++			  "invalid chunk length, have %llu", length);
++		return -EUCLEAN;
++	}
++	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
++		chunk_err(fs_info, leaf, chunk, logical,
++			  "invalid chunk stripe length: %llu",
++			  stripe_len);
++		return -EUCLEAN;
++	}
++	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
++	    type) {
++		chunk_err(fs_info, leaf, chunk, logical,
++			  "unrecognized chunk type: 0x%llx",
++			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
++			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
++			  btrfs_chunk_type(leaf, chunk));
++		return -EUCLEAN;
++	}
++
++	if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
++	    (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
++		chunk_err(fs_info, leaf, chunk, logical,
++		"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
++			  type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
++		return -EUCLEAN;
++	}
++	if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
++		chunk_err(fs_info, leaf, chunk, logical,
++	"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
++			  type, BTRFS_BLOCK_GROUP_TYPE_MASK);
++		return -EUCLEAN;
++	}
++
++	if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
++	    (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
++		chunk_err(fs_info, leaf, chunk, logical,
++			  "system chunk with data or metadata type: 0x%llx",
++			  type);
++		return -EUCLEAN;
++	}
++
++	features = btrfs_super_incompat_flags(fs_info->super_copy);
++	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
++		mixed = true;
++
++	if (!mixed) {
++		if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
++		    (type & BTRFS_BLOCK_GROUP_DATA)) {
++			chunk_err(fs_info, leaf, chunk, logical,
++			"mixed chunk type in non-mixed mode: 0x%llx", type);
++			return -EUCLEAN;
++		}
++	}
++
++	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
++	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
++	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
++	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
++	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
++	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) {
++		chunk_err(fs_info, leaf, chunk, logical,
++			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
++			num_stripes, sub_stripes,
++			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
++		return -EUCLEAN;
++	}
++
++	return 0;
++}
++
++__printf(4, 5)
++__cold
++static void dev_item_err(const struct btrfs_fs_info *fs_info,
++			 const struct extent_buffer *eb, int slot,
++			 const char *fmt, ...)
++{
++	struct btrfs_key key;
++	struct va_format vaf;
++	va_list args;
++
++	btrfs_item_key_to_cpu(eb, &key, slot);
++	va_start(args, fmt);
++
++	vaf.fmt = fmt;
++	vaf.va = &args;
++
++	btrfs_crit(fs_info,
++	"corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV",
++		btrfs_header_level(eb) == 0 ? "leaf" : "node",
++		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
++		key.objectid, &vaf);
++	va_end(args);
++}
++
++static int check_dev_item(struct btrfs_fs_info *fs_info,
++			  struct extent_buffer *leaf,
++			  struct btrfs_key *key, int slot)
++{
++	struct btrfs_dev_item *ditem;
++
++	if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
++		dev_item_err(fs_info, leaf, slot,
++			     "invalid objectid: has=%llu expect=%llu",
++			     key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
++		return -EUCLEAN;
++	}
++	ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
++	if (btrfs_device_id(leaf, ditem) != key->offset) {
++		dev_item_err(fs_info, leaf, slot,
++			     "devid mismatch: key has=%llu item has=%llu",
++			     key->offset, btrfs_device_id(leaf, ditem));
++		return -EUCLEAN;
++	}
++
++	/*
++	 * For device total_bytes, we don't have reliable way to check it, as
++	 * it can be 0 for device removal. Device size check can only be done
++	 * by dev extents check.
++	 */
++	if (btrfs_device_bytes_used(leaf, ditem) >
++	    btrfs_device_total_bytes(leaf, ditem)) {
++		dev_item_err(fs_info, leaf, slot,
++			     "invalid bytes used: have %llu expect [0, %llu]",
++			     btrfs_device_bytes_used(leaf, ditem),
++			     btrfs_device_total_bytes(leaf, ditem));
++		return -EUCLEAN;
++	}
++	/*
++	 * Remaining members like io_align/type/gen/dev_group aren't really
++	 * utilized.  Skip them to make later usage of them easier.
++	 */
++	return 0;
++}
++
++/* Inode item error output has the same format as dir_item_err() */
++#define inode_item_err(fs_info, eb, slot, fmt, ...)			\
++	dir_item_err(fs_info, eb, slot, fmt, __VA_ARGS__)
++
++static int check_inode_item(struct btrfs_fs_info *fs_info,
++			    struct extent_buffer *leaf,
++			    struct btrfs_key *key, int slot)
++{
++	struct btrfs_inode_item *iitem;
++	u64 super_gen = btrfs_super_generation(fs_info->super_copy);
++	u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
++	u32 mode;
++
++	if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
++	     key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
++	    key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
++	    key->objectid != BTRFS_FREE_INO_OBJECTID) {
++		generic_err(fs_info, leaf, slot,
++	"invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
++			    key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
++			    BTRFS_FIRST_FREE_OBJECTID,
++			    BTRFS_LAST_FREE_OBJECTID,
++			    BTRFS_FREE_INO_OBJECTID);
++		return -EUCLEAN;
++	}
++	if (key->offset != 0) {
++		inode_item_err(fs_info, leaf, slot,
++			"invalid key offset: has %llu expect 0",
++			key->offset);
++		return -EUCLEAN;
++	}
++	iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
++
++	/* Here we use super block generation + 1 to handle log tree */
++	if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
++		inode_item_err(fs_info, leaf, slot,
++			"invalid inode generation: has %llu expect (0, %llu]",
++			       btrfs_inode_generation(leaf, iitem),
++			       super_gen + 1);
++		return -EUCLEAN;
++	}
++	/* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
++	if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
++		inode_item_err(fs_info, leaf, slot,
++			"invalid inode transid: has %llu expect [0, %llu]",
++			       btrfs_inode_transid(leaf, iitem), super_gen + 1);
++		return -EUCLEAN;
++	}
++
++	/*
++	 * For size and nbytes it's better not to be too strict, as for dir
++	 * item its size/nbytes can easily get wrong, but doesn't affect
++	 * anything in the fs. So here we skip the check.
++	 */
++	mode = btrfs_inode_mode(leaf, iitem);
++	if (mode & ~valid_mask) {
++		inode_item_err(fs_info, leaf, slot,
++			       "unknown mode bit detected: 0x%x",
++			       mode & ~valid_mask);
++		return -EUCLEAN;
++	}
++
++	/*
++	 * S_IFMT is not bit mapped so we can't completely rely on is_power_of_2,
++	 * but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG.
++	 * Only needs to check BLK, LNK and SOCK
++	 */
++	if (!is_power_of_2(mode & S_IFMT)) {
++		if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
++			inode_item_err(fs_info, leaf, slot,
++			"invalid mode: has 0%o expect valid S_IF* bit(s)",
++				       mode & S_IFMT);
++			return -EUCLEAN;
++		}
++	}
++	if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
++		inode_item_err(fs_info, leaf, slot,
++		       "invalid nlink: has %u expect no more than 1 for dir",
++			btrfs_inode_nlink(leaf, iitem));
++		return -EUCLEAN;
++	}
++	if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
++		inode_item_err(fs_info, leaf, slot,
++			       "unknown flags detected: 0x%llx",
++			       btrfs_inode_flags(leaf, iitem) &
++			       ~BTRFS_INODE_FLAG_MASK);
++		return -EUCLEAN;
++	}
++	return 0;
++}
++
+ /*
+  * Common point to switch the item-specific validation.
+  */
+@@ -456,6 +770,7 @@ static int check_leaf_item(struct btrfs_fs_info *fs_info,
+ 			   struct btrfs_key *key, int slot)
+ {
+ 	int ret = 0;
++	struct btrfs_chunk *chunk;
+ 
+ 	switch (key->type) {
+ 	case BTRFS_EXTENT_DATA_KEY:
+@@ -472,6 +787,17 @@ static int check_leaf_item(struct btrfs_fs_info *fs_info,
+ 	case BTRFS_BLOCK_GROUP_ITEM_KEY:
+ 		ret = check_block_group_item(fs_info, leaf, key, slot);
+ 		break;
++	case BTRFS_CHUNK_ITEM_KEY:
++		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
++		ret = btrfs_check_chunk_valid(fs_info, leaf, chunk,
++					      key->offset);
++		break;
++	case BTRFS_DEV_ITEM_KEY:
++		ret = check_dev_item(fs_info, leaf, key, slot);
++		break;
++	case BTRFS_INODE_ITEM_KEY:
++		ret = check_inode_item(fs_info, leaf, key, slot);
++		break;
+ 	}
+ 	return ret;
+ }
+diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
+index ff043275b7844..4df45e8a66595 100644
+--- a/fs/btrfs/tree-checker.h
++++ b/fs/btrfs/tree-checker.h
+@@ -25,4 +25,8 @@ int btrfs_check_leaf_relaxed(struct btrfs_fs_info *fs_info,
+ 			     struct extent_buffer *leaf);
+ int btrfs_check_node(struct btrfs_fs_info *fs_info, struct extent_buffer *node);
+ 
++int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
++			    struct extent_buffer *leaf,
++			    struct btrfs_chunk *chunk, u64 logical);
++
+ #endif
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 0879e3dc39c8c..05daa2b816c31 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -28,6 +28,7 @@
+ #include "math.h"
+ #include "dev-replace.h"
+ #include "sysfs.h"
++#include "tree-checker.h"
+ 
+ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
+ 	[BTRFS_RAID_RAID10] = {
+@@ -4605,15 +4606,6 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
+ 	btrfs_set_fs_incompat(info, RAID56);
+ }
+ 
+-#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
+-			- sizeof(struct btrfs_chunk))		\
+-			/ sizeof(struct btrfs_stripe) + 1)
+-
+-#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
+-				- 2 * sizeof(struct btrfs_disk_key)	\
+-				- 2 * sizeof(struct btrfs_chunk))	\
+-				/ sizeof(struct btrfs_stripe) + 1)
+-
+ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ 			       u64 start, u64 type)
+ {
+@@ -6370,99 +6362,6 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
+ 	return dev;
+ }
+ 
+-/* Return -EIO if any error, otherwise return 0. */
+-static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
+-				   struct extent_buffer *leaf,
+-				   struct btrfs_chunk *chunk, u64 logical)
+-{
+-	u64 length;
+-	u64 stripe_len;
+-	u16 num_stripes;
+-	u16 sub_stripes;
+-	u64 type;
+-	u64 features;
+-	bool mixed = false;
+-
+-	length = btrfs_chunk_length(leaf, chunk);
+-	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+-	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+-	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+-	type = btrfs_chunk_type(leaf, chunk);
+-
+-	if (!num_stripes) {
+-		btrfs_err(fs_info, "invalid chunk num_stripes: %u",
+-			  num_stripes);
+-		return -EIO;
+-	}
+-	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
+-		btrfs_err(fs_info, "invalid chunk logical %llu", logical);
+-		return -EIO;
+-	}
+-	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
+-		btrfs_err(fs_info, "invalid chunk sectorsize %u",
+-			  btrfs_chunk_sector_size(leaf, chunk));
+-		return -EIO;
+-	}
+-	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
+-		btrfs_err(fs_info, "invalid chunk length %llu", length);
+-		return -EIO;
+-	}
+-	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
+-		btrfs_err(fs_info, "invalid chunk stripe length: %llu",
+-			  stripe_len);
+-		return -EIO;
+-	}
+-	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
+-	    type) {
+-		btrfs_err(fs_info, "unrecognized chunk type: %llu",
+-			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
+-			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
+-			  btrfs_chunk_type(leaf, chunk));
+-		return -EIO;
+-	}
+-
+-	if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
+-		btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
+-		return -EIO;
+-	}
+-
+-	if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
+-	    (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
+-		btrfs_err(fs_info,
+-			"system chunk with data or metadata type: 0x%llx", type);
+-		return -EIO;
+-	}
+-
+-	features = btrfs_super_incompat_flags(fs_info->super_copy);
+-	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+-		mixed = true;
+-
+-	if (!mixed) {
+-		if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
+-		    (type & BTRFS_BLOCK_GROUP_DATA)) {
+-			btrfs_err(fs_info,
+-			"mixed chunk type in non-mixed mode: 0x%llx", type);
+-			return -EIO;
+-		}
+-	}
+-
+-	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
+-	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
+-	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
+-	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
+-	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
+-	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
+-	     num_stripes != 1)) {
+-		btrfs_err(fs_info,
+-			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
+-			num_stripes, sub_stripes,
+-			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+-		return -EIO;
+-	}
+-
+-	return 0;
+-}
+-
+ static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
+ 					u64 devid, u8 *uuid, bool error)
+ {
+@@ -6493,9 +6392,15 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
+ 	length = btrfs_chunk_length(leaf, chunk);
+ 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+ 
+-	ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
+-	if (ret)
+-		return ret;
++	/*
++	 * Only need to verify chunk item if we're reading from sys chunk array,
++	 * as chunk item in tree block is already verified by tree-checker.
++	 */
++	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
++		ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
++		if (ret)
++			return ret;
++	}
+ 
+ 	read_lock(&map_tree->map_tree.lock);
+ 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index 8e8bf3246de18..65cd023b097c1 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -257,6 +257,15 @@ struct btrfs_fs_devices {
+ 
+ #define BTRFS_BIO_INLINE_CSUM_SIZE	64
+ 
++#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
++			- sizeof(struct btrfs_chunk))		\
++			/ sizeof(struct btrfs_stripe) + 1)
++
++#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
++				- 2 * sizeof(struct btrfs_disk_key)	\
++				- 2 * sizeof(struct btrfs_chunk))	\
++				/ sizeof(struct btrfs_stripe) + 1)
++
+ /*
+  * we need the mirror number and stripe index to be passed around
+  * the call chain while we are processing end_io (especially errors).
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index ccdd8c821abd7..c20d71d86812a 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -870,7 +870,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+ out_free:
+ 	kfree(gl->gl_lksb.sb_lvbptr);
+ 	kmem_cache_free(cachep, gl);
+-	atomic_dec(&sdp->sd_glock_disposal);
++	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
++		wake_up(&sdp->sd_glock_wait);
+ 
+ out:
+ 	return ret;
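
The gfs2 fix above matters because another task sleeps on sd_glock_wait
until sd_glock_disposal drops to zero; a bare atomic_dec() on this error
path could take the counter to zero without ever issuing the wake-up,
leaving the waiter stuck. The usual pairing is atomic_dec_and_test() plus
wake_up(), sketched here with illustrative names:

	#include <linux/atomic.h>
	#include <linux/wait.h>

	/* Whoever drops the count to zero must deliver the wake-up. */
	static void put_resource(atomic_t *count, wait_queue_head_t *wq)
	{
		if (atomic_dec_and_test(count))
			wake_up(wq);
	}

	/* Waiter side: wait_event(*wq, atomic_read(count) == 0); */
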
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index 15fd0277ffa69..5901b00059294 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -1115,10 +1115,6 @@ static inline bool arch_has_pfn_modify_check(void)
+ 
+ #endif /* !__ASSEMBLY__ */
+ 
+-#ifndef io_remap_pfn_range
+-#define io_remap_pfn_range remap_pfn_range
+-#endif
+-
+ #ifndef has_transparent_hugepage
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ #define has_transparent_hugepage() 1
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 83828c118b6b7..f6ecf41aea83d 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2561,6 +2561,15 @@ static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
+ 	return VM_FAULT_NOPAGE;
+ }
+ 
++#ifndef io_remap_pfn_range
++static inline int io_remap_pfn_range(struct vm_area_struct *vma,
++				     unsigned long addr, unsigned long pfn,
++				     unsigned long size, pgprot_t prot)
++{
++	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
++}
++#endif
++
+ static inline vm_fault_t vmf_error(int err)
+ {
+ 	if (err == -ENOMEM)
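
The mm.h change above turns io_remap_pfn_range() from a plain alias of
remap_pfn_range() into a static inline that applies pgprot_decrypted()
first, so MMIO mappings on memory-encryption platforms (such as AMD SME)
are not accidentally mapped encrypted. A typical call site in a driver's
mmap handler might look like this sketch (sketch_mmap and my_mmio_phys
are assumptions, not from this patch):

	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Hypothetical device register base, set up at probe time. */
	static phys_addr_t my_mmio_phys;

	static int sketch_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long pfn = my_mmio_phys >> PAGE_SHIFT;
		unsigned long size = vma->vm_end - vma->vm_start;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		/* io_remap_pfn_range() now clears the encryption bit, too. */
		return io_remap_pfn_range(vma, vma->vm_start, pfn, size,
					  vma->vm_page_prot);
	}
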
+diff --git a/include/net/dsa.h b/include/net/dsa.h
+index 461e8a7661b78..d28df7adf948a 100644
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -196,6 +196,7 @@ struct dsa_port {
+ 	unsigned int		index;
+ 	const char		*name;
+ 	const struct dsa_port	*cpu_dp;
++	const char		*mac;
+ 	struct device_node	*dn;
+ 	unsigned int		ageing_time;
+ 	u8			stp_state;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index a35d742b0ba82..8b94eb6437c18 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9047,6 +9047,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
+ 			if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
+ 				int fpos = token == IF_SRC_FILE ? 2 : 1;
+ 
++				kfree(filename);
+ 				filename = match_strdup(&args[fpos]);
+ 				if (!filename) {
+ 					ret = -ENOMEM;
+@@ -9093,16 +9094,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
+ 				 */
+ 				ret = -EOPNOTSUPP;
+ 				if (!event->ctx->task)
+-					goto fail_free_name;
++					goto fail;
+ 
+ 				/* look up the path and grab its inode */
+ 				ret = kern_path(filename, LOOKUP_FOLLOW,
+ 						&filter->path);
+ 				if (ret)
+-					goto fail_free_name;
+-
+-				kfree(filename);
+-				filename = NULL;
++					goto fail;
+ 
+ 				ret = -EINVAL;
+ 				if (!filter->path.dentry ||
+@@ -9122,13 +9120,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
+ 	if (state != IF_STATE_ACTION)
+ 		goto fail;
+ 
++	kfree(filename);
+ 	kfree(orig);
+ 
+ 	return 0;
+ 
+-fail_free_name:
+-	kfree(filename);
+ fail:
++	kfree(filename);
+ 	free_filters_list(filters);
+ 	kfree(orig);
+ 
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 3ed29bf8eb291..f2c92c1001949 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1965,14 +1965,9 @@ static __latent_entropy struct task_struct *copy_process(
+ 	/* ok, now we should be set up.. */
+ 	p->pid = pid_nr(pid);
+ 	if (clone_flags & CLONE_THREAD) {
+-		p->exit_signal = -1;
+ 		p->group_leader = current->group_leader;
+ 		p->tgid = current->tgid;
+ 	} else {
+-		if (clone_flags & CLONE_PARENT)
+-			p->exit_signal = current->group_leader->exit_signal;
+-		else
+-			p->exit_signal = (clone_flags & CSIGNAL);
+ 		p->group_leader = p;
+ 		p->tgid = p->pid;
+ 	}
+@@ -2017,9 +2012,14 @@ static __latent_entropy struct task_struct *copy_process(
+ 	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
+ 		p->real_parent = current->real_parent;
+ 		p->parent_exec_id = current->parent_exec_id;
++		if (clone_flags & CLONE_THREAD)
++			p->exit_signal = -1;
++		else
++			p->exit_signal = current->group_leader->exit_signal;
+ 	} else {
+ 		p->real_parent = current;
+ 		p->parent_exec_id = current->self_exec_id;
++		p->exit_signal = (clone_flags & CSIGNAL);
+ 	}
+ 
+ 	klp_copy_process(p);
+diff --git a/kernel/futex.c b/kernel/futex.c
+index eabb9180ffa89..52f641c00a65b 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2417,10 +2417,22 @@ retry:
+ 		}
+ 
+ 		/*
+-		 * Since we just failed the trylock; there must be an owner.
++		 * The trylock just failed, so either there is an owner or
++		 * there is a higher priority waiter than this one.
+ 		 */
+ 		newowner = rt_mutex_owner(&pi_state->pi_mutex);
+-		BUG_ON(!newowner);
++		/*
++		 * If the higher priority waiter has not yet taken over the
++		 * rtmutex then newowner is NULL. We can't return here with
++		 * that state because it's inconsistent vs. the user space
++		 * state. So drop the locks and try again. It's a valid
++		 * situation and not any different from the other retry
++		 * conditions.
++		 */
++		if (unlikely(!newowner)) {
++			err = -EAGAIN;
++			goto handle_err;
++		}
+ 	} else {
+ 		WARN_ON_ONCE(argowner != current);
+ 		if (oldowner == current) {
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index b786eda90bb56..2eed853ab9cc5 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -863,7 +863,8 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
+ 	/* Move the work from worker->delayed_work_list. */
+ 	WARN_ON_ONCE(list_empty(&work->node));
+ 	list_del_init(&work->node);
+-	kthread_insert_work(worker, work, &worker->work_list);
++	if (!work->canceling)
++		kthread_insert_work(worker, work, &worker->work_list);
+ 
+ 	spin_unlock(&worker->lock);
+ }
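The one-line guard above closes a race between a delayed-work timer firing and a concurrent cancel tearing the work down. A pthread model of the same discipline, with hypothetical field names; the point is that the requeue decision and the canceling flag live under one lock:

#include <pthread.h>

struct dwork {
	pthread_mutex_t lock;
	int canceling;		/* nonzero while a cancel is in flight */
	int queued;
};

static void timer_fn(struct dwork *w)
{
	pthread_mutex_lock(&w->lock);
	w->queued = 0;			/* left the delayed list */
	if (!w->canceling)
		w->queued = 1;		/* requeue only if no canceler */
	pthread_mutex_unlock(&w->lock);
}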
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 6a56921181393..a02a25acf2056 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -385,16 +385,17 @@ static bool task_participate_group_stop(struct task_struct *task)
+ 
+ void task_join_group_stop(struct task_struct *task)
+ {
++	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
++	struct signal_struct *sig = current->signal;
++
++	if (sig->group_stop_count) {
++		sig->group_stop_count++;
++		mask |= JOBCTL_STOP_CONSUME;
++	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
++		return;
++
+ 	/* Have the new thread join an on-going signal group stop */
+-	unsigned long jobctl = current->jobctl;
+-	if (jobctl & JOBCTL_STOP_PENDING) {
+-		struct signal_struct *sig = current->signal;
+-		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
+-		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
+-		if (task_set_jobctl_pending(task, signr | gstop)) {
+-			sig->group_stop_count++;
+-		}
+-	}
++	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
+ }
+ 
+ /*
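Restated, the rewritten task_join_group_stop() has two live cases: a stop in progress (group_stop_count nonzero: join it and consume a share) and an already-completed stop (STOPPED flag set: just mark the stop pending so the new thread stops too). A userspace restatement as a sketch; the bit layouts below are illustrative, not the kernel's exact positions:

#define STOP_SIGMASK	0xffffUL
#define STOP_PENDING	(1UL << 16)
#define STOP_CONSUME	(1UL << 17)
#define STOP_STOPPED	0x1

struct shared_sig { unsigned int group_stop_count; unsigned int flags; };

/* Returns the jobctl bits the new thread should set, or 0 if there is
 * no group stop to join. */
static unsigned long join_group_stop(struct shared_sig *sig,
				     unsigned long leader_jobctl)
{
	unsigned long mask = leader_jobctl & STOP_SIGMASK;

	if (sig->group_stop_count) {
		sig->group_stop_count++;	/* one more task must stop */
		mask |= STOP_CONSUME;
	} else if (!(sig->flags & STOP_STOPPED)) {
		return 0;			/* nothing in progress */
	}
	return mask | STOP_PENDING;
}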
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 1442f6152abc2..645048bb1e869 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -521,10 +521,18 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ 	if (!bt->msg_data)
+ 		goto err;
+ 
+-	ret = -ENOENT;
+-
+-	dir = debugfs_lookup(buts->name, blk_debugfs_root);
+-	if (!dir)
++#ifdef CONFIG_BLK_DEBUG_FS
++	/*
++	 * When tracing whole-disk make_request (multiqueue) block devices,
++	 * reuse the existing debugfs directory created by the block layer on
++	 * init. For request-based block devices, partitions of block devices,
++	 * and scsi-generic block devices, we create a temporary new debugfs
++	 * directory that will be removed once the trace ends.
++	 */
++	if (q->mq_ops && bdev && bdev == bdev->bd_contains)
++		dir = q->debugfs_dir;
++	else
++#endif
+ 		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
+ 	if (!dir)
+ 		goto err;
+@@ -583,8 +591,6 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ 
+ 	ret = 0;
+ err:
+-	if (dir && !bt->dir)
+-		dput(dir);
+ 	if (ret)
+ 		blk_trace_free(bt);
+ 	return ret;
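The comment above encodes a simple ownership rule: a directory merely borrowed from the block layer must not be removed at teardown, which is also why the dput() in the error path goes away. A sketch of that rule with hypothetical types:

struct queue { void *debugfs_dir; int is_mq; };

/* Return the directory to trace into; *owned is set when the caller
 * created it and must remove it once the trace ends. */
static void *blk_trace_dir(struct queue *q, int whole_disk,
			   void *(*make_dir)(const char *),
			   const char *name, int *owned)
{
	if (q->is_mq && whole_disk) {
		*owned = 0;		/* borrowed from the block layer */
		return q->debugfs_dir;
	}
	*owned = 1;			/* private: remove after tracing */
	return make_dir(name);
}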
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index eef05eb3b2841..87ce9736043da 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -444,14 +444,16 @@ struct rb_event_info {
+ 
+ /*
+  * Used for which event context the event is in.
+- *  NMI     = 0
+- *  IRQ     = 1
+- *  SOFTIRQ = 2
+- *  NORMAL  = 3
++ *  TRANSITION = 0
++ *  NMI     = 1
++ *  IRQ     = 2
++ *  SOFTIRQ = 3
++ *  NORMAL  = 4
+  *
+  * See trace_recursive_lock() comment below for more details.
+  */
+ enum {
++	RB_CTX_TRANSITION,
+ 	RB_CTX_NMI,
+ 	RB_CTX_IRQ,
+ 	RB_CTX_SOFTIRQ,
+@@ -2620,10 +2622,10 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+  * a bit of overhead in something as critical as function tracing,
+  * we use a bitmask trick.
+  *
+- *  bit 0 =  NMI context
+- *  bit 1 =  IRQ context
+- *  bit 2 =  SoftIRQ context
+- *  bit 3 =  normal context.
++ *  bit 1 =  NMI context
++ *  bit 2 =  IRQ context
++ *  bit 3 =  SoftIRQ context
++ *  bit 4 =  normal context.
+  *
+  * This works because this is the order of contexts that can
+  * preempt other contexts. A SoftIRQ never preempts an IRQ
+@@ -2646,6 +2648,30 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+  * The least significant bit can be cleared this way, and it
+  * just so happens that it is the same bit corresponding to
+  * the current context.
++ *
++ * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
++ * is set when a recursion is detected at the current context, and if
++ * the TRANSITION bit is already set, it will fail the recursion.
++ * This is needed because there's a lag between the changing of
++ * interrupt context and updating the preempt count. In this case,
++ * a false positive will be found. To handle this, one extra recursion
++ * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
++ * bit is already set, then it is considered a recursion and the function
++ * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
++ *
++ * On the trace_recursive_unlock(), the TRANSITION bit will be the first
++ * to be cleared, even if it wasn't the context that set it. That is,
++ * if an interrupt comes in while NORMAL bit is set and the ring buffer
++ * is called before preempt_count() is updated, since the check will
++ * be on the NORMAL bit, the TRANSITION bit will then be set. If an
++ * NMI then comes in, it will set the NMI bit, but when the NMI code
++ * does the trace_recursive_unlock(), it will clear the TRANSITION bit
++ * and leave the NMI bit set. But this is fine, because the interrupt
++ * code that set the TRANSITION bit will then clear the NMI bit when it
++ * calls trace_recursive_unlock(). If another NMI comes in, it will
++ * set the TRANSITION bit and continue.
++ *
++ * Note: The TRANSITION bit only handles a single transition between contexts.
+  */
+ 
+ static __always_inline int
+@@ -2661,8 +2687,16 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
+ 		bit = pc & NMI_MASK ? RB_CTX_NMI :
+ 			pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
+ 
+-	if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
+-		return 1;
++	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
++		/*
++		 * It is possible that this was called by transitioning
++		 * between interrupt context, and preempt_count() has not
++		 * been updated yet. In this case, use the TRANSITION bit.
++		 */
++		bit = RB_CTX_TRANSITION;
++		if (val & (1 << (bit + cpu_buffer->nest)))
++			return 1;
++	}
+ 
+ 	val |= (1 << (bit + cpu_buffer->nest));
+ 	cpu_buffer->current_context = val;
+@@ -2677,8 +2711,8 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
+ 		cpu_buffer->current_context - (1 << cpu_buffer->nest);
+ }
+ 
+-/* The recursive locking above uses 4 bits */
+-#define NESTED_BITS 4
++/* The recursive locking above uses 5 bits */
++#define NESTED_BITS 5
+ 
+ /**
+  * ring_buffer_nest_start - Allow to trace while nested
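The long comment above is the whole algorithm; a userspace model makes the two moves concrete. TRANSITION sits at bit 0 so the unlock idiom val &= val - 1 (clear the lowest set bit) releases it before any real context bit, exactly as described. The nest offset and the preempt_count() probing are omitted here, and all names are this sketch's, not the kernel's:

enum { CTX_TRANSITION, CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static unsigned int ctx;	/* per-cpu in the kernel */

static int recursive_lock(int bit)	/* bit = detected context */
{
	if (ctx & (1u << bit)) {
		/* May be a window where preempt_count() lags the real
		 * context: allow exactly one extra recursion. */
		bit = CTX_TRANSITION;
		if (ctx & (1u << bit))
			return 1;	/* genuine recursion: reject */
	}
	ctx |= 1u << bit;
	return 0;
}

static void recursive_unlock(void)
{
	ctx &= ctx - 1;	/* clears the lowest set bit: TRANSITION first */
}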
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 6bf617ff03694..c3cc6aaa6f79d 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2819,7 +2819,7 @@ static char *get_trace_buf(void)
+ 
+ 	/* Interrupts must see nesting incremented before we use the buffer */
+ 	barrier();
+-	return &buffer->buffer[buffer->nesting][0];
++	return &buffer->buffer[buffer->nesting - 1][0];
+ }
+ 
+ static void put_trace_buf(void)
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index ee0c6a313ed1a..01475411fd1b2 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -534,6 +534,12 @@ enum {
+ 
+ 	TRACE_GRAPH_DEPTH_START_BIT,
+ 	TRACE_GRAPH_DEPTH_END_BIT,
++
++	/*
++	 * When transitioning between contexts, the preempt_count() may
++	 * not be correct. Allow for a single recursion to cover this case.
++	 */
++	TRACE_TRANSITION_BIT,
+ };
+ 
+ #define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
+@@ -588,14 +594,27 @@ static __always_inline int trace_test_and_set_recursion(int start, int max)
+ 		return 0;
+ 
+ 	bit = trace_get_context_bit() + start;
+-	if (unlikely(val & (1 << bit)))
+-		return -1;
++	if (unlikely(val & (1 << bit))) {
++		/*
++		 * It could be that preempt_count has not been updated during
++		 * a switch between contexts. Allow for a single recursion.
++		 */
++		bit = TRACE_TRANSITION_BIT;
++		if (trace_recursion_test(bit))
++			return -1;
++		trace_recursion_set(bit);
++		barrier();
++		return bit + 1;
++	}
++
++	/* Normal check passed, clear the transition to allow it again */
++	trace_recursion_clear(TRACE_TRANSITION_BIT);
+ 
+ 	val |= 1 << bit;
+ 	current->trace_recursion = val;
+ 	barrier();
+ 
+-	return bit;
++	return bit + 1;
+ }
+ 
+ static __always_inline void trace_clear_recursion(int bit)
+@@ -605,6 +624,7 @@ static __always_inline void trace_clear_recursion(int bit)
+ 	if (!bit)
+ 		return;
+ 
++	bit--;
+ 	bit = 1 << bit;
+ 	val &= ~bit;
+ 
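Note the off-by-one convention the hunks above introduce: trace_test_and_set_recursion() now returns bit + 1 so that 0 stays free as the "nothing held" sentinel, and the clear side does bit-- first; the matching buffer->nesting - 1 consumer appears in the trace.c hunk above. A self-contained sketch of that convention (the barrier() calls are omitted, and the TRANSITION position is hypothetical):

static unsigned long recursion;	/* per-task in the kernel */
#define TRANSITION_BIT	10	/* hypothetical position */

static int test_and_set(int bit)
{
	if (recursion & (1UL << bit)) {
		if (recursion & (1UL << TRANSITION_BIT))
			return -1;	/* second trip: real recursion */
		bit = TRANSITION_BIT;	/* absorb one transition */
	} else {
		/* normal pass: re-arm the transition allowance */
		recursion &= ~(1UL << TRANSITION_BIT);
	}
	recursion |= 1UL << bit;
	return bit + 1;			/* 0 reserved for "none held" */
}

static void clear_held(int bit)
{
	if (!bit)
		return;			/* nothing was taken */
	recursion &= ~(1UL << (bit - 1));
}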
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index 11e9daa4a568a..330ba2b081ede 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -492,8 +492,13 @@ trace_selftest_function_recursion(void)
+ 	unregister_ftrace_function(&test_rec_probe);
+ 
+ 	ret = -1;
+-	if (trace_selftest_recursion_cnt != 1) {
+-		pr_cont("*callback not called once (%d)* ",
++	/*
++	 * Recursion allows for transitions between contexts,
++	 * and may call the callback twice.
++	 */
++	if (trace_selftest_recursion_cnt != 1 &&
++	    trace_selftest_recursion_cnt != 2) {
++		pr_cont("*callback not called once (or twice) (%d)* ",
+ 			trace_selftest_recursion_cnt);
+ 		goto out;
+ 	}
+diff --git a/lib/crc32test.c b/lib/crc32test.c
+index 97d6a57cefcc5..61ddce2cff777 100644
+--- a/lib/crc32test.c
++++ b/lib/crc32test.c
+@@ -683,7 +683,6 @@ static int __init crc32c_test(void)
+ 
+ 	/* reduce OS noise */
+ 	local_irq_save(flags);
+-	local_irq_disable();
+ 
+ 	nsec = ktime_get_ns();
+ 	for (i = 0; i < 100; i++) {
+@@ -694,7 +693,6 @@ static int __init crc32c_test(void)
+ 	nsec = ktime_get_ns() - nsec;
+ 
+ 	local_irq_restore(flags);
+-	local_irq_enable();
+ 
+ 	pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
+ 
+@@ -768,7 +766,6 @@ static int __init crc32_test(void)
+ 
+ 	/* reduce OS noise */
+ 	local_irq_save(flags);
+-	local_irq_disable();
+ 
+ 	nsec = ktime_get_ns();
+ 	for (i = 0; i < 100; i++) {
+@@ -783,7 +780,6 @@ static int __init crc32_test(void)
+ 	nsec = ktime_get_ns() - nsec;
+ 
+ 	local_irq_restore(flags);
+-	local_irq_enable();
+ 
+ 	pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
+ 		 CRC_LE_BITS, CRC_BE_BITS);
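The removed calls were redundant and subtly wrong: local_irq_save() already disables interrupts, and local_irq_restore() puts back exactly the saved state, so a trailing local_irq_enable() could turn IRQs on in a caller that had them off. A userspace analogue of the save/restore pairing, using POSIX signal masks:

#include <pthread.h>
#include <signal.h>

static void timed_section(void (*work)(void))
{
	sigset_t all, saved;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, &saved);	/* save + block in one call */
	work();						/* low-noise region */
	pthread_sigmask(SIG_SETMASK, &saved, NULL);	/* restore state as it was */
	/* An unconditional SIG_UNBLOCK here would clobber the caller's
	 * mask -- the same bug as local_irq_enable() after restore. */
}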
+diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c
+index 0e2deac97da0d..e02f9df24d1ee 100644
+--- a/lib/fonts/font_10x18.c
++++ b/lib/fonts/font_10x18.c
+@@ -8,7 +8,7 @@
+ 
+ #define FONTDATAMAX 9216
+ 
+-static struct font_data fontdata_10x18 = {
++static const struct font_data fontdata_10x18 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, 0x00, /* 0000000000 */
+diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c
+index 87da8acd07db0..6e3c4b7691c85 100644
+--- a/lib/fonts/font_6x10.c
++++ b/lib/fonts/font_6x10.c
+@@ -3,7 +3,7 @@
+ 
+ #define FONTDATAMAX 2560
+ 
+-static struct font_data fontdata_6x10 = {
++static const struct font_data fontdata_6x10 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, /* 00000000 */
+diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c
+index 5e975dfa10a53..2d22a24e816f0 100644
+--- a/lib/fonts/font_6x11.c
++++ b/lib/fonts/font_6x11.c
+@@ -9,7 +9,7 @@
+ 
+ #define FONTDATAMAX (11*256)
+ 
+-static struct font_data fontdata_6x11 = {
++static const struct font_data fontdata_6x11 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, /* 00000000 */
+diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c
+index 86d298f385058..9cc7ae2e03f7d 100644
+--- a/lib/fonts/font_7x14.c
++++ b/lib/fonts/font_7x14.c
+@@ -8,7 +8,7 @@
+ 
+ #define FONTDATAMAX 3584
+ 
+-static struct font_data fontdata_7x14 = {
++static const struct font_data fontdata_7x14 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, /* 0000000 */
+diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c
+index 37cedd36ca5ef..bab25dc59e8dd 100644
+--- a/lib/fonts/font_8x16.c
++++ b/lib/fonts/font_8x16.c
+@@ -10,7 +10,7 @@
+ 
+ #define FONTDATAMAX 4096
+ 
+-static struct font_data fontdata_8x16 = {
++static const struct font_data fontdata_8x16 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, /* 00000000 */
+diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c
+index 8ab695538395d..109d0572368f4 100644
+--- a/lib/fonts/font_8x8.c
++++ b/lib/fonts/font_8x8.c
+@@ -9,7 +9,7 @@
+ 
+ #define FONTDATAMAX 2048
+ 
+-static struct font_data fontdata_8x8 = {
++static const struct font_data fontdata_8x8 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, /* 00000000 */
+diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c
+index 069b3e80c4344..fb395f0d40317 100644
+--- a/lib/fonts/font_acorn_8x8.c
++++ b/lib/fonts/font_acorn_8x8.c
+@@ -5,7 +5,7 @@
+ 
+ #define FONTDATAMAX 2048
+ 
+-static struct font_data acorndata_8x8 = {
++static const struct font_data acorndata_8x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 00 */  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
+ /* 01 */  0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */
+diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c
+index 1449876c6a270..592774a90917b 100644
+--- a/lib/fonts/font_mini_4x6.c
++++ b/lib/fonts/font_mini_4x6.c
+@@ -43,7 +43,7 @@ __END__;
+ 
+ #define FONTDATAMAX 1536
+ 
+-static struct font_data fontdata_mini_4x6 = {
++static const struct font_data fontdata_mini_4x6 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/*{*/
+ 	  	/*   Char 0: ' '  */
+diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
+index 32d65551e7ed2..a6f95ebce9507 100644
+--- a/lib/fonts/font_pearl_8x8.c
++++ b/lib/fonts/font_pearl_8x8.c
+@@ -14,7 +14,7 @@
+ 
+ #define FONTDATAMAX 2048
+ 
+-static struct font_data fontdata_pearl8x8 = {
++static const struct font_data fontdata_pearl8x8 = {
+    { 0, 0, FONTDATAMAX, 0 }, {
+    /* 0 0x00 '^@' */
+    0x00, /* 00000000 */
+diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c
+index 641a6b4dca424..a5b65bd496045 100644
+--- a/lib/fonts/font_sun12x22.c
++++ b/lib/fonts/font_sun12x22.c
+@@ -3,7 +3,7 @@
+ 
+ #define FONTDATAMAX 11264
+ 
+-static struct font_data fontdata_sun12x22 = {
++static const struct font_data fontdata_sun12x22 = {
+ 	{ 0, 0, FONTDATAMAX, 0 }, {
+ 	/* 0 0x00 '^@' */
+ 	0x00, 0x00, /* 000000000000 */
+diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c
+index 193fe6d988e08..e577e76a6a7c0 100644
+--- a/lib/fonts/font_sun8x16.c
++++ b/lib/fonts/font_sun8x16.c
+@@ -3,7 +3,7 @@
+ 
+ #define FONTDATAMAX 4096
+ 
+-static struct font_data fontdata_sun8x16 = {
++static const struct font_data fontdata_sun8x16 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,
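Every font hunk above applies the same one-word fix: the glyph tables are never written, so marking them const lets the linker place them in read-only memory. A minimal sketch of the structure being constified; the field meanings are approximate, and initializing a flexible array member is a GCC extension, used the same way in-tree:

struct font_data {
	unsigned int extra[4];	/* header words: size bookkeeping etc. */
	unsigned char data[];	/* glyph bitmap rows follow */
};

static const struct font_data example_font = {
	{ 0, 0, 16, 0 }, {
	0x7e, 0x81,		/* two example rows of one glyph */
	}
};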
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 68c46da82aac0..3cd27c1c729f6 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -496,7 +496,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ 	unsigned long flags = qp->flags;
+ 	int ret;
+ 	bool has_unmovable = false;
+-	pte_t *pte;
++	pte_t *pte, *mapped_pte;
+ 	spinlock_t *ptl;
+ 
+ 	ptl = pmd_trans_huge_lock(pmd, vma);
+@@ -510,7 +510,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ 	if (pmd_trans_unstable(pmd))
+ 		return 0;
+ 
+-	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
++	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ 	for (; addr != end; pte++, addr += PAGE_SIZE) {
+ 		if (!pte_present(*pte))
+ 			continue;
+@@ -542,7 +542,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ 		} else
+ 			break;
+ 	}
+-	pte_unmap_unlock(pte - 1, ptl);
++	pte_unmap_unlock(mapped_pte, ptl);
+ 	cond_resched();
+ 
+ 	if (has_unmovable)
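The fix above keeps the pointer that was actually mapped so the unmap argument is exact; deriving it as pte - 1 points before the mapping when the loop breaks on its very first entry. A pointer-only model of why, with hypothetical names:

/* 'base' plays the role of the mapped PTE page; 'unmap' must receive
 * exactly what was mapped. */
static void walk(int *base, int n, void (*unmap)(int *))
{
	int *cur, *mapped;

	mapped = cur = base;		/* remember the mapping */
	for (; n > 0; cur++, n--) {
		if (*cur < 0)
			break;		/* may fire with cur == base */
	}
	unmap(mapped);			/* cur - 1 would precede base here */
}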
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index b036c55f5cb1d..7c10bc4dacd31 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -261,6 +261,7 @@ static int dsa_port_setup(struct dsa_port *dp)
+ 	int err = 0;
+ 
+ 	memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));
++	dp->mac = of_get_mac_address(dp->dn);
+ 
+ 	if (dp->type != DSA_PORT_TYPE_UNUSED)
+ 		err = devlink_port_register(ds->devlink, &dp->devlink_port,
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 8ee28b6016d82..d03c67e761dfa 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1313,7 +1313,10 @@ int dsa_slave_create(struct dsa_port *port)
+ 	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
+ 	slave_dev->hw_features |= NETIF_F_HW_TC;
+ 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
+-	eth_hw_addr_inherit(slave_dev, master);
++	if (port->mac && is_valid_ether_addr(port->mac))
++		ether_addr_copy(slave_dev->dev_addr, port->mac);
++	else
++		eth_hw_addr_inherit(slave_dev, master);
+ 	slave_dev->priv_flags |= IFF_NO_QUEUE;
+ 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
+ 	slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
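Together the three DSA hunks thread a per-port MAC from the device tree (of_get_mac_address() on the port node) through to slave creation, falling back to inheriting the master's address. The selection rule restated as a sketch, with a hand-rolled validity check (unicast and non-zero, which is what is_valid_ether_addr() tests):

#include <string.h>

static int valid_ether(const unsigned char a[6])
{
	static const unsigned char zero[6];

	return !(a[0] & 1) && memcmp(a, zero, 6) != 0;
}

static void pick_dev_addr(unsigned char dst[6],
			  const unsigned char *port_mac,	/* may be NULL */
			  const unsigned char master_mac[6])
{
	if (port_mac && valid_ether(port_mac))
		memcpy(dst, port_mac, 6);	/* from the device tree */
	else
		memcpy(dst, master_mac, 6);	/* inherit from the master */
}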
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 567517e448115..bb4f1d0da1533 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -1615,12 +1615,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
+ 			break;
+ 
+ 		case SCTP_CMD_INIT_FAILED:
+-			sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
++			sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
+ 			break;
+ 
+ 		case SCTP_CMD_ASSOC_FAILED:
+ 			sctp_cmd_assoc_failed(commands, asoc, event_type,
+-					      subtype, chunk, cmd->obj.u32);
++					      subtype, chunk, cmd->obj.u16);
+ 			break;
+ 
+ 		case SCTP_CMD_INIT_COUNTER_INC:
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index 49bb9fc0aa068..ce0f067d0faf7 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -93,6 +93,11 @@ out_sk_rht:
+ static void __net_exit tipc_exit_net(struct net *net)
+ {
+ 	tipc_net_stop(net);
++
++	/* Make sure tipc_net_finalize_work has stopped
++	 * before releasing the resources.
++	 */
++	flush_scheduled_work();
+ 	tipc_bcast_stop(net);
+ 	tipc_nametbl_stop(net);
+ 	tipc_sk_rht_destroy(net);
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index c88dc8ee3144b..02374459c4179 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -629,7 +629,7 @@ struct sock *__vsock_create(struct net *net,
+ 		vsk->owner = get_cred(psk->owner);
+ 		vsk->connect_timeout = psk->connect_timeout;
+ 	} else {
+-		vsk->trusted = capable(CAP_NET_ADMIN);
++		vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
+ 		vsk->owner = get_current_cred();
+ 		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
+ 	}
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 7743e7bc6bf22..e4d2fcc89c306 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -334,6 +334,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ 	switch (subs->stream->chip->usb_id) {
+ 	case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
+ 	case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
++	case USB_ID(0x22f0, 0x0006): /* Allen&Heath Qu-16 */
+ 		ep = 0x81;
+ 		ifnum = 3;
+ 		goto add_sync_ep_from_ifnum;
+@@ -343,6 +344,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ 		ifnum = 2;
+ 		goto add_sync_ep_from_ifnum;
+ 	case USB_ID(0x2466, 0x8003): /* Fractal Audio Axe-Fx II */
++	case USB_ID(0x0499, 0x172a): /* Yamaha MODX */
+ 		ep = 0x86;
+ 		ifnum = 2;
+ 		goto add_sync_ep_from_ifnum;
+@@ -350,6 +352,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ 		ep = 0x81;
+ 		ifnum = 2;
+ 		goto add_sync_ep_from_ifnum;
++	case USB_ID(0x1686, 0xf029): /* Zoom UAC-2 */
++		ep = 0x82;
++		ifnum = 2;
++		goto add_sync_ep_from_ifnum;
+ 	case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
+ 	case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
+ 		ep = 0x81;
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index e6dea1c7112be..3effe2e86197d 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1466,6 +1466,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 	case 0x278b:  /* Rotel? */
+ 	case 0x292b:  /* Gustard/Ess based devices */
+ 	case 0x2ab6:  /* T+A devices */
++	case 0x3353:  /* Khadas devices */
+ 	case 0x3842:  /* EVGA */
+ 	case 0xc502:  /* HiBy devices */
+ 		if (fp->dsd_raw)
+diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
+index dc58254a2b696..8c01b2cfdb1ad 100644
+--- a/tools/perf/util/util.h
++++ b/tools/perf/util/util.h
+@@ -22,7 +22,7 @@ static inline void *zalloc(size_t size)
+ 	return calloc(1, size);
+ }
+ 
+-#define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
++#define zfree(ptr) ({ free((void *)*ptr); *ptr = NULL; })
+ 
+ struct dirent;
+ struct nsinfo;
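The added cast matters because zfree() gets applied to pointers declared const char *: free() takes a plain void *, so without the cast the expansion fails on const-qualified targets. A self-contained usage sketch (the ({ }) statement expression is a GCC extension, as in the original macro):

#include <stdlib.h>
#include <string.h>

#define zfree(ptr) ({ free((void *)*ptr); *ptr = NULL; })

int main(void)
{
	const char *name = strdup("perf");	/* const target, heap value */

	zfree(&name);	/* frees despite the const, then NULLs the pointer */
	return name == NULL ? 0 : 1;
}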

