public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 19 Dec 2018 19:09:54 +0000 (UTC)	[thread overview]
Message-ID: <1545246549.ddb74622bdf265fef8705b51a9ada6be46b68fac.mpagano@gentoo> (raw)

commit:     ddb74622bdf265fef8705b51a9ada6be46b68fac
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 19 19:09:09 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Dec 19 19:09:09 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ddb74622

proj/linux-patches: Linux patch 4.19.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1010_linux-4.19.11.patch | 1600 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1604 insertions(+)

diff --git a/0000_README b/0000_README
index 59408e2..72bf8ce 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-4.19.10.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.19.10
 
+Patch:  1010_linux-4.19.11.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.19.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-4.19.11.patch b/1010_linux-4.19.11.patch
new file mode 100644
index 0000000..3742acb
--- /dev/null
+++ b/1010_linux-4.19.11.patch
@@ -0,0 +1,1600 @@
+diff --git a/Makefile b/Makefile
+index 36d9de42def3..676155d4dc3e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
+index 4adb85e66be3..93762244be7f 100644
+--- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
++++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
+@@ -31,7 +31,7 @@
+ 
+ 	wifi_pwrseq: wifi-pwrseq {
+ 		compatible = "mmc-pwrseq-simple";
+-		reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
++		reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
+index c318bcbc6ba7..89e6fd547c75 100644
+--- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
++++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
+@@ -26,7 +26,7 @@
+ 
+ 	wifi_pwrseq: wifi-pwrseq {
+ 		compatible = "mmc-pwrseq-simple";
+-		reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
++		reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts
+index 76b56eafaab9..f714a20649d7 100644
+--- a/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts
++++ b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts
+@@ -387,6 +387,11 @@
+ 			hpd-gpio = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>;
+ 
+ 			ports {
++				port@0 {
++					endpoint {
++						remote-endpoint = <&mdp_dtv_out>;
++					};
++				};
+ 				port@1 {
+ 					endpoint {
+ 						remote-endpoint = <&hdmi_con>;
+diff --git a/arch/arm/mach-mmp/cputype.h b/arch/arm/mach-mmp/cputype.h
+index 446edaeb78a7..a96abcf521b4 100644
+--- a/arch/arm/mach-mmp/cputype.h
++++ b/arch/arm/mach-mmp/cputype.h
+@@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void)
+ #define cpu_is_pxa910()	(0)
+ #endif
+ 
+-#ifdef CONFIG_CPU_MMP2
++#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
+ static inline int cpu_is_mmp2(void)
+ {
+-	return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
++	return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
++		(((mmp_chip_id & 0xfff) == 0x410) ||
++		 ((mmp_chip_id & 0xfff) == 0x610));
+ }
+ #else
+ #define cpu_is_mmp2()	(0)
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index 072c51fb07d7..c389f2bef938 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -587,9 +587,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
+ 						   prot,
+ 						   __builtin_return_address(0));
+ 		if (addr) {
+-			memset(addr, 0, size);
+ 			if (!coherent)
+ 				__dma_flush_area(page_to_virt(page), iosize);
++			memset(addr, 0, size);
+ 		} else {
+ 			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+ 			dma_release_from_contiguous(dev, page,
+diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
+index 33b34a58fc62..5b9dce17f0c9 100644
+--- a/arch/powerpc/kernel/legacy_serial.c
++++ b/arch/powerpc/kernel/legacy_serial.c
+@@ -372,6 +372,8 @@ void __init find_legacy_serial_ports(void)
+ 
+ 	/* Now find out if one of these is out firmware console */
+ 	path = of_get_property(of_chosen, "linux,stdout-path", NULL);
++	if (path == NULL)
++		path = of_get_property(of_chosen, "stdout-path", NULL);
+ 	if (path != NULL) {
+ 		stdout = of_find_node_by_path(path);
+ 		if (stdout)
+@@ -595,8 +597,10 @@ static int __init check_legacy_serial_console(void)
+ 	/* We are getting a weird phandle from OF ... */
+ 	/* ... So use the full path instead */
+ 	name = of_get_property(of_chosen, "linux,stdout-path", NULL);
++	if (name == NULL)
++		name = of_get_property(of_chosen, "stdout-path", NULL);
+ 	if (name == NULL) {
+-		DBG(" no linux,stdout-path !\n");
++		DBG(" no stdout-path !\n");
+ 		return -ENODEV;
+ 	}
+ 	prom_stdout = of_find_node_by_path(name);
+diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
+index dab616a33b8d..f2197654be07 100644
+--- a/arch/powerpc/kernel/msi.c
++++ b/arch/powerpc/kernel/msi.c
+@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
+ {
+ 	struct pci_controller *phb = pci_bus_to_host(dev->bus);
+ 
+-	phb->controller_ops.teardown_msi_irqs(dev);
++	/*
++	 * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
++	 * so check the pointer again.
++	 */
++	if (phb->controller_ops.teardown_msi_irqs)
++		phb->controller_ops.teardown_msi_irqs(dev);
+ }
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 9298f0f3817a..b84f61bc5e7a 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -223,9 +223,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+ 
+ # Avoid indirect branches in kernel to deal with Spectre
+ ifdef CONFIG_RETPOLINE
+-ifeq ($(RETPOLINE_CFLAGS),)
+-  $(error You are building kernel with non-retpoline compiler, please update your compiler.)
+-endif
+   KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
+ endif
+ 
+@@ -303,6 +300,13 @@ ifndef CC_HAVE_ASM_GOTO
+ 	@echo Compiler lacks asm-goto support.
+ 	@exit 1
+ endif
++ifdef CONFIG_RETPOLINE
++ifeq ($(RETPOLINE_CFLAGS),)
++	@echo "You are building kernel with non-retpoline compiler." >&2
++	@echo "Please update your compiler." >&2
++	@false
++endif
++endif
+ 
+ archclean:
+ 	$(Q)rm -rf $(objtree)/arch/i386
+diff --git a/block/bio.c b/block/bio.c
+index c4ef8aa46452..55a5386fd431 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1262,7 +1262,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+ 		if (ret)
+ 			goto cleanup;
+ 	} else {
+-		zero_fill_bio(bio);
++		if (bmd->is_our_pages)
++			zero_fill_bio(bio);
+ 		iov_iter_advance(iter, bio->bi_iter.bi_size);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index 8816c697b205..387f1cf1dc20 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ 			case CHIP_TOPAZ:
+ 				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
+ 				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
+-				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
++				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
++				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
++				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
+ 					info->is_kicker = true;
+ 					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ 				} else
+@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ 				if (type == CGS_UCODE_ID_SMU) {
+ 					if (((adev->pdev->device == 0x67ef) &&
+ 					     ((adev->pdev->revision == 0xe0) ||
+-					      (adev->pdev->revision == 0xe2) ||
+ 					      (adev->pdev->revision == 0xe5))) ||
+ 					    ((adev->pdev->device == 0x67ff) &&
+ 					     ((adev->pdev->revision == 0xcf) ||
+@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ 					      (adev->pdev->revision == 0xff)))) {
+ 						info->is_kicker = true;
+ 						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+-					} else
++					} else if ((adev->pdev->device == 0x67ef) &&
++						   (adev->pdev->revision == 0xe2)) {
++						info->is_kicker = true;
++						strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
++					} else {
+ 						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
++					}
+ 				} else if (type == CGS_UCODE_ID_SMU_SK) {
+ 					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+ 				}
+@@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
+ 					      (adev->pdev->revision == 0xe7) ||
+ 					      (adev->pdev->revision == 0xef))) ||
+ 					    ((adev->pdev->device == 0x6fdf) &&
+-					     (adev->pdev->revision == 0xef))) {
++					     ((adev->pdev->revision == 0xef) ||
++					      (adev->pdev->revision == 0xff)))) {
+ 						info->is_kicker = true;
+ 						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+-					} else
++					} else if ((adev->pdev->device == 0x67df) &&
++						   ((adev->pdev->revision == 0xe1) ||
++						    (adev->pdev->revision == 0xf7))) {
++						info->is_kicker = true;
++						strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
++					} else {
+ 						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
++					}
+ 				} else if (type == CGS_UCODE_ID_SMU_SK) {
+ 					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+ 				}
+ 				break;
+ 			case CHIP_POLARIS12:
+-				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
++				if (((adev->pdev->device == 0x6987) &&
++				     ((adev->pdev->revision == 0xc0) ||
++				      (adev->pdev->revision == 0xc3))) ||
++				    ((adev->pdev->device == 0x6981) &&
++				     ((adev->pdev->revision == 0x00) ||
++				      (adev->pdev->revision == 0x01) ||
++				      (adev->pdev->revision == 0x10)))) {
++					info->is_kicker = true;
++					strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
++				} else {
++					strcpy(fw_name, "amdgpu/polaris12_smc.bin");
++				}
+ 				break;
+ 			case CHIP_VEGAM:
+ 				strcpy(fw_name, "amdgpu/vegam_smc.bin");
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 0f41d8647376..8e26e1ca14c6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -761,7 +761,13 @@ static const struct pci_device_id pciidlist[] = {
+ 	{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ 	{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ 	{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
++	{0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
++	{0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
++	{0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ 	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
++	{0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
++	{0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
++	{0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ 	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ 	/* Vega 12 */
+ 	{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 29ac74f40dce..1427675d0e5a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -326,7 +326,13 @@ static const struct kfd_deviceid supported_devices[] = {
+ 	{ 0x6864, &vega10_device_info },	/* Vega10 */
+ 	{ 0x6867, &vega10_device_info },	/* Vega10 */
+ 	{ 0x6868, &vega10_device_info },	/* Vega10 */
++	{ 0x6869, &vega10_device_info },	/* Vega10 */
++	{ 0x686A, &vega10_device_info },	/* Vega10 */
++	{ 0x686B, &vega10_device_info },	/* Vega10 */
+ 	{ 0x686C, &vega10_vf_device_info },	/* Vega10  vf*/
++	{ 0x686D, &vega10_device_info },	/* Vega10 */
++	{ 0x686E, &vega10_device_info },	/* Vega10 */
++	{ 0x686F, &vega10_device_info },	/* Vega10 */
+ 	{ 0x687F, &vega10_device_info },	/* Vega10 */
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+index 62f36ba2435b..c1a99dfe4913 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
+ #define PPSMC_MSG_AgmResetPsm                 ((uint16_t) 0x403)
+ #define PPSMC_MSG_ReadVftCell                 ((uint16_t) 0x404)
+ 
++#define PPSMC_MSG_ApplyAvfsCksOffVoltage      ((uint16_t) 0x415)
++
+ #define PPSMC_MSG_GFX_CU_PG_ENABLE            ((uint16_t) 0x280)
+ #define PPSMC_MSG_GFX_CU_PG_DISABLE           ((uint16_t) 0x281)
+ #define PPSMC_MSG_GetCurrPkgPwr               ((uint16_t) 0x282)
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+index 1276f168ff68..5b67f575cd34 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+@@ -1984,6 +1984,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+ 
+ 	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ 
++	/* Apply avfs cks-off voltages to avoid the overshoot
++	 * when switching to the highest sclk frequency
++	 */
++	if (data->apply_avfs_cks_off_voltage)
++		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+index 99d5e4f98f49..a6edd5df33b0 100644
+--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
++++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
+ MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+ MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
++MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
+ MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
++MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
+ MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
+diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
+index 481896fb712a..85e6736f0a32 100644
+--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
++++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
+@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
+ 		plane->bpp = skl_pixel_formats[fmt].bpp;
+ 		plane->drm_format = skl_pixel_formats[fmt].drm_format;
+ 	} else {
+-		plane->tiled = !!(val & DISPPLANE_TILED);
++		plane->tiled = val & DISPPLANE_TILED;
+ 		fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
+ 		plane->bpp = bdw_pixel_formats[fmt].bpp;
+ 		plane->drm_format = bdw_pixel_formats[fmt].drm_format;
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 75ea87ebf9b0..6937ef0b4bfc 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -442,8 +442,13 @@ static u64 execlists_update_context(struct i915_request *rq)
+ 	 * may not be visible to the HW prior to the completion of the UC
+ 	 * register write and that we may begin execution from the context
+ 	 * before its image is complete leading to invalid PD chasing.
++	 *
++	 * Furthermore, Braswell, at least, wants a full mb to be sure that
++	 * the writes are coherent in memory (visible to the GPU) prior to
++	 * execution, and not just visible to other CPUs (as is the result of
++	 * wmb).
+ 	 */
+-	wmb();
++	mb();
+ 	return ce->lrc_desc;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
+index ae2aee7ed9e1..e741d26185df 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
+@@ -1962,7 +1962,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+ 	u32 *dump_addr = NULL;
+ 	u32 status = 0;
+ 	struct dpu_debug_bus_entry *head;
+-	phys_addr_t phys = 0;
++	dma_addr_t dma = 0;
+ 	int list_size;
+ 	int i;
+ 	u32 offset;
+@@ -2000,7 +2000,7 @@ static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+ 	if (in_mem) {
+ 		if (!(*dump_mem))
+ 			*dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+-				list_size, &phys, GFP_KERNEL);
++				list_size, &dma, GFP_KERNEL);
+ 
+ 		if (*dump_mem) {
+ 			dump_addr = *dump_mem;
+@@ -2101,7 +2101,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+ 	u32 value, d0, d1;
+ 	unsigned long reg, reg1, reg2;
+ 	struct vbif_debug_bus_entry *head;
+-	phys_addr_t phys = 0;
++	dma_addr_t dma = 0;
+ 	int i, list_size = 0;
+ 	void __iomem *mem_base = NULL;
+ 	struct vbif_debug_bus_entry *dbg_bus;
+@@ -2151,7 +2151,7 @@ static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+ 	if (in_mem) {
+ 		if (!(*dump_mem))
+ 			*dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+-				list_size, &phys, GFP_KERNEL);
++				list_size, &dma, GFP_KERNEL);
+ 
+ 		if (*dump_mem) {
+ 			dump_addr = *dump_mem;
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index faf7009c0a3c..2abcd7bf104f 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -197,6 +197,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
+ /******************************************************************************
+  * EVO channel helpers
+  *****************************************************************************/
++static void
++evo_flush(struct nv50_dmac *dmac)
++{
++	/* Push buffer fetches are not coherent with BAR1, we need to ensure
++	 * writes have been flushed right through to VRAM before writing PUT.
++	 */
++	if (dmac->push.type & NVIF_MEM_VRAM) {
++		struct nvif_device *device = dmac->base.device;
++		nvif_wr32(&device->object, 0x070000, 0x00000001);
++		nvif_msec(device, 2000,
++			if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
++				break;
++		);
++	}
++}
++
+ u32 *
+ evo_wait(struct nv50_dmac *evoc, int nr)
+ {
+@@ -207,6 +223,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
+ 	mutex_lock(&dmac->lock);
+ 	if (put + nr >= (PAGE_SIZE / 4) - 8) {
+ 		dmac->ptr[put] = 0x20000000;
++		evo_flush(dmac);
+ 
+ 		nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
+ 		if (nvif_msec(device, 2000,
+@@ -229,17 +246,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
+ {
+ 	struct nv50_dmac *dmac = evoc;
+ 
+-	/* Push buffer fetches are not coherent with BAR1, we need to ensure
+-	 * writes have been flushed right through to VRAM before writing PUT.
+-	 */
+-	if (dmac->push.type & NVIF_MEM_VRAM) {
+-		struct nvif_device *device = dmac->base.device;
+-		nvif_wr32(&device->object, 0x070000, 0x00000001);
+-		nvif_msec(device, 2000,
+-			if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+-				break;
+-		);
+-	}
++	evo_flush(dmac);
+ 
+ 	nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+ 	mutex_unlock(&dmac->lock);
+@@ -1226,6 +1233,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
+ {
+ 	struct nv50_mstm *mstm = *pmstm;
+ 	if (mstm) {
++		drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
+ 		kfree(*pmstm);
+ 		*pmstm = NULL;
+ 	}
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+index 05368fa4f956..f814d37b1db2 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+@@ -442,11 +442,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
+-{
+-	rockchip_drm_platform_remove(pdev);
+-}
+-
+ static const struct of_device_id rockchip_drm_dt_ids[] = {
+ 	{ .compatible = "rockchip,display-subsystem", },
+ 	{ /* sentinel */ },
+@@ -456,7 +451,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
+ static struct platform_driver rockchip_drm_platform_driver = {
+ 	.probe = rockchip_drm_platform_probe,
+ 	.remove = rockchip_drm_platform_remove,
+-	.shutdown = rockchip_drm_platform_shutdown,
+ 	.driver = {
+ 		.name = "rockchip-drm",
+ 		.of_match_table = rockchip_drm_dt_ids,
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
+index a4f956c6d567..a19fbff16861 100644
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -555,7 +555,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
+ 	spin_lock(&bus->lock);
+ 
+ #if IS_ENABLED(CONFIG_I2C_SLAVE)
+-	if (aspeed_i2c_slave_irq(bus)) {
++	if (IS_ENABLED(CONFIG_I2C_SLAVE) && aspeed_i2c_slave_irq(bus)) {
+ 		dev_dbg(bus->dev, "irq handled by slave.\n");
+ 		ret = true;
+ 		goto out;
+@@ -564,7 +564,9 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
+ 
+ 	ret = aspeed_i2c_master_irq(bus);
+ 
++#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ out:
++#endif
+ 	spin_unlock(&bus->lock);
+ 	return ret ? IRQ_HANDLED : IRQ_NONE;
+ }
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 5936de71883f..6fc93834da44 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -930,6 +930,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
+ 	bool dirty_flag;
+ 	*result = true;
+ 
++	if (from_cblock(cmd->cache_blocks) == 0)
++		/* Nothing to do */
++		return 0;
++
+ 	r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
+ 				   from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
+ 	if (r) {
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index aaf1ad481ee8..1f225a1e08dd 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t)
+ struct dm_thin_new_mapping;
+ 
+ /*
+- * The pool runs in 4 modes.  Ordered in degraded order for comparisons.
++ * The pool runs in various modes.  Ordered in degraded order for comparisons.
+  */
+ enum pool_mode {
+ 	PM_WRITE,		/* metadata may be changed */
+@@ -282,9 +282,38 @@ struct pool {
+ 	mempool_t mapping_pool;
+ };
+ 
+-static enum pool_mode get_pool_mode(struct pool *pool);
+ static void metadata_operation_failed(struct pool *pool, const char *op, int r);
+ 
++static enum pool_mode get_pool_mode(struct pool *pool)
++{
++	return pool->pf.mode;
++}
++
++static void notify_of_pool_mode_change(struct pool *pool)
++{
++	const char *descs[] = {
++		"write",
++		"out-of-data-space",
++		"read-only",
++		"read-only",
++		"fail"
++	};
++	const char *extra_desc = NULL;
++	enum pool_mode mode = get_pool_mode(pool);
++
++	if (mode == PM_OUT_OF_DATA_SPACE) {
++		if (!pool->pf.error_if_no_space)
++			extra_desc = " (queue IO)";
++		else
++			extra_desc = " (error IO)";
++	}
++
++	dm_table_event(pool->ti->table);
++	DMINFO("%s: switching pool to %s%s mode",
++	       dm_device_name(pool->pool_md),
++	       descs[(int)mode], extra_desc ? : "");
++}
++
+ /*
+  * Target context for a pool.
+  */
+@@ -2351,8 +2380,6 @@ static void do_waker(struct work_struct *ws)
+ 	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
+ }
+ 
+-static void notify_of_pool_mode_change_to_oods(struct pool *pool);
+-
+ /*
+  * We're holding onto IO to allow userland time to react.  After the
+  * timeout either the pool will have been resized (and thus back in
+@@ -2365,7 +2392,7 @@ static void do_no_space_timeout(struct work_struct *ws)
+ 
+ 	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
+ 		pool->pf.error_if_no_space = true;
+-		notify_of_pool_mode_change_to_oods(pool);
++		notify_of_pool_mode_change(pool);
+ 		error_retry_list_with_code(pool, BLK_STS_NOSPC);
+ 	}
+ }
+@@ -2433,26 +2460,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
+ 
+ /*----------------------------------------------------------------*/
+ 
+-static enum pool_mode get_pool_mode(struct pool *pool)
+-{
+-	return pool->pf.mode;
+-}
+-
+-static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
+-{
+-	dm_table_event(pool->ti->table);
+-	DMINFO("%s: switching pool to %s mode",
+-	       dm_device_name(pool->pool_md), new_mode);
+-}
+-
+-static void notify_of_pool_mode_change_to_oods(struct pool *pool)
+-{
+-	if (!pool->pf.error_if_no_space)
+-		notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
+-	else
+-		notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
+-}
+-
+ static bool passdown_enabled(struct pool_c *pt)
+ {
+ 	return pt->adjusted_pf.discard_passdown;
+@@ -2501,8 +2508,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 
+ 	switch (new_mode) {
+ 	case PM_FAIL:
+-		if (old_mode != new_mode)
+-			notify_of_pool_mode_change(pool, "failure");
+ 		dm_pool_metadata_read_only(pool->pmd);
+ 		pool->process_bio = process_bio_fail;
+ 		pool->process_discard = process_bio_fail;
+@@ -2516,8 +2521,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 
+ 	case PM_OUT_OF_METADATA_SPACE:
+ 	case PM_READ_ONLY:
+-		if (!is_read_only_pool_mode(old_mode))
+-			notify_of_pool_mode_change(pool, "read-only");
+ 		dm_pool_metadata_read_only(pool->pmd);
+ 		pool->process_bio = process_bio_read_only;
+ 		pool->process_discard = process_bio_success;
+@@ -2538,8 +2541,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 		 * alarming rate.  Adjust your low water mark if you're
+ 		 * frequently seeing this mode.
+ 		 */
+-		if (old_mode != new_mode)
+-			notify_of_pool_mode_change_to_oods(pool);
+ 		pool->out_of_data_space = true;
+ 		pool->process_bio = process_bio_read_only;
+ 		pool->process_discard = process_discard_bio;
+@@ -2552,8 +2553,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 		break;
+ 
+ 	case PM_WRITE:
+-		if (old_mode != new_mode)
+-			notify_of_pool_mode_change(pool, "write");
+ 		if (old_mode == PM_OUT_OF_DATA_SPACE)
+ 			cancel_delayed_work_sync(&pool->no_space_timeout);
+ 		pool->out_of_data_space = false;
+@@ -2573,6 +2572,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 	 * doesn't cause an unexpected mode transition on resume.
+ 	 */
+ 	pt->adjusted_pf.mode = new_mode;
++
++	if (old_mode != new_mode)
++		notify_of_pool_mode_change(pool);
+ }
+ 
+ static void abort_transaction(struct pool *pool)
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index a44183ff4be0..85fb2baa8a7f 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -20,7 +20,6 @@ struct dmz_bioctx {
+ 	struct dm_zone		*zone;
+ 	struct bio		*bio;
+ 	atomic_t		ref;
+-	blk_status_t		status;
+ };
+ 
+ /*
+@@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
+ {
+ 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ 
+-	if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
+-		bioctx->status = status;
+-	bio_endio(bio);
++	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
++		bio->bi_status = status;
++
++	if (atomic_dec_and_test(&bioctx->ref)) {
++		struct dm_zone *zone = bioctx->zone;
++
++		if (zone) {
++			if (bio->bi_status != BLK_STS_OK &&
++			    bio_op(bio) == REQ_OP_WRITE &&
++			    dmz_is_seq(zone))
++				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
++			dmz_deactivate_zone(zone);
++		}
++		bio_endio(bio);
++	}
+ }
+ 
+ /*
+- * Partial clone read BIO completion callback. This terminates the
++ * Completion callback for an internally cloned target BIO. This terminates the
+  * target BIO when there are no more references to its context.
+  */
+-static void dmz_read_bio_end_io(struct bio *bio)
++static void dmz_clone_endio(struct bio *clone)
+ {
+-	struct dmz_bioctx *bioctx = bio->bi_private;
+-	blk_status_t status = bio->bi_status;
++	struct dmz_bioctx *bioctx = clone->bi_private;
++	blk_status_t status = clone->bi_status;
+ 
+-	bio_put(bio);
++	bio_put(clone);
+ 	dmz_bio_endio(bioctx->bio, status);
+ }
+ 
+ /*
+- * Issue a BIO to a zone. The BIO may only partially process the
++ * Issue a clone of a target BIO. The clone may only partially process the
+  * original target BIO.
+  */
+-static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
+-			       struct bio *bio, sector_t chunk_block,
+-			       unsigned int nr_blocks)
++static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
++			  struct bio *bio, sector_t chunk_block,
++			  unsigned int nr_blocks)
+ {
+ 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+-	sector_t sector;
+ 	struct bio *clone;
+ 
+-	/* BIO remap sector */
+-	sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
+-
+-	/* If the read is not partial, there is no need to clone the BIO */
+-	if (nr_blocks == dmz_bio_blocks(bio)) {
+-		/* Setup and submit the BIO */
+-		bio->bi_iter.bi_sector = sector;
+-		atomic_inc(&bioctx->ref);
+-		generic_make_request(bio);
+-		return 0;
+-	}
+-
+-	/* Partial BIO: we need to clone the BIO */
+ 	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
+ 	if (!clone)
+ 		return -ENOMEM;
+ 
+-	/* Setup the clone */
+-	clone->bi_iter.bi_sector = sector;
++	bio_set_dev(clone, dmz->dev->bdev);
++	clone->bi_iter.bi_sector =
++		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
+ 	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
+-	clone->bi_end_io = dmz_read_bio_end_io;
++	clone->bi_end_io = dmz_clone_endio;
+ 	clone->bi_private = bioctx;
+ 
+ 	bio_advance(bio, clone->bi_iter.bi_size);
+ 
+-	/* Submit the clone */
+ 	atomic_inc(&bioctx->ref);
+ 	generic_make_request(clone);
+ 
++	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
++		zone->wp_block += nr_blocks;
++
+ 	return 0;
+ }
+ 
+@@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
+ 		if (nr_blocks) {
+ 			/* Valid blocks found: read them */
+ 			nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
+-			ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
++			ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+ 			if (ret)
+ 				return ret;
+ 			chunk_block += nr_blocks;
+@@ -228,25 +228,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
+ 	return 0;
+ }
+ 
+-/*
+- * Issue a write BIO to a zone.
+- */
+-static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
+-				 struct bio *bio, sector_t chunk_block,
+-				 unsigned int nr_blocks)
+-{
+-	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+-
+-	/* Setup and submit the BIO */
+-	bio_set_dev(bio, dmz->dev->bdev);
+-	bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
+-	atomic_inc(&bioctx->ref);
+-	generic_make_request(bio);
+-
+-	if (dmz_is_seq(zone))
+-		zone->wp_block += nr_blocks;
+-}
+-
+ /*
+  * Write blocks directly in a data zone, at the write pointer.
+  * If a buffer zone is assigned, invalidate the blocks written
+@@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz,
+ 		return -EROFS;
+ 
+ 	/* Submit write */
+-	dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
++	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
++	if (ret)
++		return ret;
+ 
+ 	/*
+ 	 * Validate the blocks in the data zone and invalidate
+@@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
+ 		return -EROFS;
+ 
+ 	/* Submit write */
+-	dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
++	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
++	if (ret)
++		return ret;
+ 
+ 	/*
+ 	 * Validate the blocks in the buffer zone
+@@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
+ 	bioctx->zone = NULL;
+ 	bioctx->bio = bio;
+ 	atomic_set(&bioctx->ref, 1);
+-	bioctx->status = BLK_STS_OK;
+ 
+ 	/* Set the BIO pending in the flush list */
+ 	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
+@@ -623,35 +607,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
+ 	return DM_MAPIO_SUBMITTED;
+ }
+ 
+-/*
+- * Completed target BIO processing.
+- */
+-static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
+-{
+-	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+-
+-	if (bioctx->status == BLK_STS_OK && *error)
+-		bioctx->status = *error;
+-
+-	if (!atomic_dec_and_test(&bioctx->ref))
+-		return DM_ENDIO_INCOMPLETE;
+-
+-	/* Done */
+-	bio->bi_status = bioctx->status;
+-
+-	if (bioctx->zone) {
+-		struct dm_zone *zone = bioctx->zone;
+-
+-		if (*error && bio_op(bio) == REQ_OP_WRITE) {
+-			if (dmz_is_seq(zone))
+-				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+-		}
+-		dmz_deactivate_zone(zone);
+-	}
+-
+-	return DM_ENDIO_DONE;
+-}
+-
+ /*
+  * Get zoned device information.
+  */
+@@ -947,7 +902,6 @@ static struct target_type dmz_type = {
+ 	.ctr		 = dmz_ctr,
+ 	.dtr		 = dmz_dtr,
+ 	.map		 = dmz_map,
+-	.end_io		 = dmz_end_io,
+ 	.io_hints	 = dmz_io_hints,
+ 	.prepare_ioctl	 = dmz_prepare_ioctl,
+ 	.postsuspend	 = dmz_suspend,
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 45abb54037fc..07d2949a8746 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1592,6 +1592,8 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
+ 		return ret;
+ 	}
+ 
++	blk_queue_split(md->queue, &bio);
++
+ 	init_clone_info(&ci, md, map, bio);
+ 
+ 	if (bio->bi_opf & REQ_PREFLUSH) {
+diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
+index 5653e8eebe2b..16c7b20cbf61 100644
+--- a/drivers/media/common/videobuf2/videobuf2-core.c
++++ b/drivers/media/common/videobuf2/videobuf2-core.c
+@@ -1755,10 +1755,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
+ 		if (ret)
+ 			return ret;
+ 		ret = vb2_start_streaming(q);
+-		if (ret) {
+-			__vb2_queue_cancel(q);
++		if (ret)
+ 			return ret;
+-		}
+ 	}
+ 
+ 	q->streaming = 1;
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index e201ccb3fda4..f6755b86eba2 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -472,7 +472,7 @@ out:
+ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 			       struct mmc_blk_ioc_data *idata)
+ {
+-	struct mmc_command cmd = {};
++	struct mmc_command cmd = {}, sbc = {};
+ 	struct mmc_data data = {};
+ 	struct mmc_request mrq = {};
+ 	struct scatterlist sg;
+@@ -550,10 +550,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	}
+ 
+ 	if (idata->rpmb) {
+-		err = mmc_set_blockcount(card, data.blocks,
+-			idata->ic.write_flag & (1 << 31));
+-		if (err)
+-			return err;
++		sbc.opcode = MMC_SET_BLOCK_COUNT;
++		/*
++		 * We don't do any blockcount validation because the max size
++		 * may be increased by a future standard. We just copy the
++		 * 'Reliable Write' bit here.
++		 */
++		sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
++		sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
++		mrq.sbc = &sbc;
+ 	}
+ 
+ 	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
+diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
+index adf32682f27a..c60a7625b1fa 100644
+--- a/drivers/mmc/host/omap.c
++++ b/drivers/mmc/host/omap.c
+@@ -104,6 +104,7 @@ struct mmc_omap_slot {
+ 	unsigned int		vdd;
+ 	u16			saved_con;
+ 	u16			bus_mode;
++	u16			power_mode;
+ 	unsigned int		fclk_freq;
+ 
+ 	struct tasklet_struct	cover_tasklet;
+@@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 	struct mmc_omap_slot *slot = mmc_priv(mmc);
+ 	struct mmc_omap_host *host = slot->host;
+ 	int i, dsor;
+-	int clk_enabled;
++	int clk_enabled, init_stream;
+ 
+ 	mmc_omap_select_slot(slot, 0);
+ 
+@@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 		slot->vdd = ios->vdd;
+ 
+ 	clk_enabled = 0;
++	init_stream = 0;
+ 	switch (ios->power_mode) {
+ 	case MMC_POWER_OFF:
+ 		mmc_omap_set_power(slot, 0, ios->vdd);
+@@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 	case MMC_POWER_UP:
+ 		/* Cannot touch dsor yet, just power up MMC */
+ 		mmc_omap_set_power(slot, 1, ios->vdd);
++		slot->power_mode = ios->power_mode;
+ 		goto exit;
+ 	case MMC_POWER_ON:
+ 		mmc_omap_fclk_enable(host, 1);
+ 		clk_enabled = 1;
+ 		dsor |= 1 << 11;
++		if (slot->power_mode != MMC_POWER_ON)
++			init_stream = 1;
+ 		break;
+ 	}
++	slot->power_mode = ios->power_mode;
+ 
+ 	if (slot->bus_mode != ios->bus_mode) {
+ 		if (slot->pdata->set_bus_mode != NULL)
+@@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 	for (i = 0; i < 2; i++)
+ 		OMAP_MMC_WRITE(host, CON, dsor);
+ 	slot->saved_con = dsor;
+-	if (ios->power_mode == MMC_POWER_ON) {
++	if (init_stream) {
+ 		/* worst case at 400kHz, 80 cycles makes 200 microsecs */
+ 		int usecs = 250;
+ 
+@@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+ 	slot->host = host;
+ 	slot->mmc = mmc;
+ 	slot->id = id;
++	slot->power_mode = MMC_POWER_UNDEFINED;
+ 	slot->pdata = &host->pdata->slots[id];
+ 
+ 	host->slots[id] = slot;
+diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
+index 88347ce78f23..d264391616f9 100644
+--- a/drivers/mmc/host/sdhci-omap.c
++++ b/drivers/mmc/host/sdhci-omap.c
+@@ -288,9 +288,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ 	struct device *dev = omap_host->dev;
+ 	struct mmc_ios *ios = &mmc->ios;
+ 	u32 start_window = 0, max_window = 0;
++	bool dcrc_was_enabled = false;
+ 	u8 cur_match, prev_match = 0;
+ 	u32 length = 0, max_len = 0;
+-	u32 ier = host->ier;
+ 	u32 phase_delay = 0;
+ 	int ret = 0;
+ 	u32 reg;
+@@ -317,9 +317,10 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ 	 * during the tuning procedure. So disable it during the
+ 	 * tuning procedure.
+ 	 */
+-	ier &= ~SDHCI_INT_DATA_CRC;
+-	sdhci_writel(host, ier, SDHCI_INT_ENABLE);
+-	sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
++	if (host->ier & SDHCI_INT_DATA_CRC) {
++		host->ier &= ~SDHCI_INT_DATA_CRC;
++		dcrc_was_enabled = true;
++	}
+ 
+ 	while (phase_delay <= MAX_PHASE_DELAY) {
+ 		sdhci_omap_set_dll(omap_host, phase_delay);
+@@ -366,6 +367,9 @@ tuning_error:
+ 
+ ret:
+ 	sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
++	/* Reenable forbidden interrupt */
++	if (dcrc_was_enabled)
++		host->ier |= SDHCI_INT_DATA_CRC;
+ 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ 	return ret;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 1b3fbd9bd5c5..654051e00117 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -193,8 +193,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
+ 	timeout = ktime_add_ms(ktime_get(), 100);
+ 
+ 	/* hw clears the bit when it's done */
+-	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
+-		if (ktime_after(ktime_get(), timeout)) {
++	while (1) {
++		bool timedout = ktime_after(ktime_get(), timeout);
++
++		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
++			break;
++		if (timedout) {
+ 			pr_err("%s: Reset 0x%x never completed.\n",
+ 				mmc_hostname(host->mmc), (int)mask);
+ 			sdhci_dumpregs(host);
+@@ -1495,9 +1499,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
+ 
+ 	/* Wait max 20 ms */
+ 	timeout = ktime_add_ms(ktime_get(), 20);
+-	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+-		& SDHCI_CLOCK_INT_STABLE)) {
+-		if (ktime_after(ktime_get(), timeout)) {
++	while (1) {
++		bool timedout = ktime_after(ktime_get(), timeout);
++
++		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
++		if (clk & SDHCI_CLOCK_INT_STABLE)
++			break;
++		if (timedout) {
+ 			pr_err("%s: Internal clock never stabilised.\n",
+ 			       mmc_hostname(host->mmc));
+ 			sdhci_dumpregs(host);
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+index 6624499eae72..4ada80317a3b 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
+ 	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
+ 		  SUNXI_FUNCTION(0x0, "gpio_in"),
+ 		  SUNXI_FUNCTION(0x1, "gpio_out"),
+-		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),	/* PH_EINT11 */
++		  SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)),	/* PH_EINT11 */
+ };
+ 
+ static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
+diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
+index ea88906d2cc5..5c3d6e1e0145 100644
+--- a/drivers/scsi/raid_class.c
++++ b/drivers/scsi/raid_class.c
+@@ -63,8 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
+ 	 * emulated RAID devices, so start with SCSI */
+ 	struct raid_internal *i = ac_to_raid_internal(cont);
+ 
+-#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
+-	if (scsi_is_sdev_device(dev)) {
++	if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) {
+ 		struct scsi_device *sdev = to_scsi_device(dev);
+ 
+ 		if (i->f->cookie != sdev->host->hostt)
+@@ -72,7 +71,6 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
+ 
+ 		return i->f->is_raid(dev);
+ 	}
+-#endif
+ 	/* FIXME: look at other subsystems too */
+ 	return 0;
+ }
+diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
+index 8be4d6786c61..14a9d18306cb 100644
+--- a/drivers/slimbus/qcom-ngd-ctrl.c
++++ b/drivers/slimbus/qcom-ngd-ctrl.c
+@@ -1467,7 +1467,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int qcom_slim_ngd_runtime_idle(struct device *dev)
++static int __maybe_unused qcom_slim_ngd_runtime_idle(struct device *dev)
+ {
+ 	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+ 
+@@ -1477,8 +1477,7 @@ static int qcom_slim_ngd_runtime_idle(struct device *dev)
+ 	return -EAGAIN;
+ }
+ 
+-#ifdef CONFIG_PM
+-static int qcom_slim_ngd_runtime_suspend(struct device *dev)
++static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
+ {
+ 	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
+ 	int ret = 0;
+@@ -1491,7 +1490,6 @@ static int qcom_slim_ngd_runtime_suspend(struct device *dev)
+ 
+ 	return ret;
+ }
+-#endif
+ 
+ static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = {
+ 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
+index c91a56f77bcb..192cc8d0853f 100644
+--- a/drivers/staging/olpc_dcon/Kconfig
++++ b/drivers/staging/olpc_dcon/Kconfig
+@@ -2,6 +2,7 @@ config FB_OLPC_DCON
+ 	tristate "One Laptop Per Child Display CONtroller support"
+ 	depends on OLPC && FB
+ 	depends on I2C
++	depends on BACKLIGHT_LCD_SUPPORT
+ 	depends on (GPIO_CS5535 || GPIO_CS5535=n)
+ 	select BACKLIGHT_CLASS_DEVICE
+ 	help
+diff --git a/fs/aio.c b/fs/aio.c
+index 04c4d6218978..44551d96eaa4 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -45,6 +45,7 @@
+ 
+ #include <asm/kmap_types.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+ 
+ #include "internal.h"
+ 
+@@ -1038,6 +1039,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
+ 	if (!table || id >= table->nr)
+ 		goto out;
+ 
++	id = array_index_nospec(id, table->nr);
+ 	ctx = rcu_dereference(table->table[id]);
+ 	if (ctx && ctx->user_id == ctx_id) {
+ 		if (percpu_ref_tryget_live(&ctx->users))
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 0979609d6eba..82a13221775e 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1439,7 +1439,7 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
+ 
+ static int fuse_dir_release(struct inode *inode, struct file *file)
+ {
+-	fuse_release_common(file, FUSE_RELEASEDIR);
++	fuse_release_common(file, true);
+ 
+ 	return 0;
+ }
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index a0ffed34b85d..fbd6978479cb 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -87,12 +87,12 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
+ 	iput(req->misc.release.inode);
+ }
+ 
+-static void fuse_file_put(struct fuse_file *ff, bool sync)
++static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
+ {
+ 	if (refcount_dec_and_test(&ff->count)) {
+ 		struct fuse_req *req = ff->reserved_req;
+ 
+-		if (ff->fc->no_open) {
++		if (ff->fc->no_open && !isdir) {
+ 			/*
+ 			 * Drop the release request when client does not
+ 			 * implement 'open'
+@@ -245,10 +245,11 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
+ 	req->in.args[0].value = inarg;
+ }
+ 
+-void fuse_release_common(struct file *file, int opcode)
++void fuse_release_common(struct file *file, bool isdir)
+ {
+ 	struct fuse_file *ff = file->private_data;
+ 	struct fuse_req *req = ff->reserved_req;
++	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
+ 
+ 	fuse_prepare_release(ff, file->f_flags, opcode);
+ 
+@@ -270,7 +271,7 @@ void fuse_release_common(struct file *file, int opcode)
+ 	 * synchronous RELEASE is allowed (and desirable) in this case
+ 	 * because the server can be trusted not to screw up.
+ 	 */
+-	fuse_file_put(ff, ff->fc->destroy_req != NULL);
++	fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir);
+ }
+ 
+ static int fuse_open(struct inode *inode, struct file *file)
+@@ -286,7 +287,7 @@ static int fuse_release(struct inode *inode, struct file *file)
+ 	if (fc->writeback_cache)
+ 		write_inode_now(inode, 1);
+ 
+-	fuse_release_common(file, FUSE_RELEASE);
++	fuse_release_common(file, false);
+ 
+ 	/* return value is ignored by VFS */
+ 	return 0;
+@@ -300,7 +301,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
+ 	 * iput(NULL) is a no-op and since the refcount is 1 and everything's
+ 	 * synchronous, we are fine with not doing igrab() here"
+ 	 */
+-	fuse_file_put(ff, true);
++	fuse_file_put(ff, true, false);
+ }
+ EXPORT_SYMBOL_GPL(fuse_sync_release);
+ 
+@@ -805,7 +806,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
+ 		put_page(page);
+ 	}
+ 	if (req->ff)
+-		fuse_file_put(req->ff, false);
++		fuse_file_put(req->ff, false, false);
+ }
+ 
+ static void fuse_send_readpages(struct fuse_req *req, struct file *file)
+@@ -1459,7 +1460,7 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
+ 		__free_page(req->pages[i]);
+ 
+ 	if (req->ff)
+-		fuse_file_put(req->ff, false);
++		fuse_file_put(req->ff, false, false);
+ }
+ 
+ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
+@@ -1616,7 +1617,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
+ 	ff = __fuse_write_file_get(fc, fi);
+ 	err = fuse_flush_times(inode, ff);
+ 	if (ff)
+-		fuse_file_put(ff, 0);
++		fuse_file_put(ff, false, false);
+ 
+ 	return err;
+ }
+@@ -1930,7 +1931,7 @@ static int fuse_writepages(struct address_space *mapping,
+ 		err = 0;
+ 	}
+ 	if (data.ff)
+-		fuse_file_put(data.ff, false);
++		fuse_file_put(data.ff, false, false);
+ 
+ 	kfree(data.orig_pages);
+ out:
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index f78e9614bb5f..cec8b8e74969 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -749,7 +749,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags);
+ /**
+  * Send RELEASE or RELEASEDIR request
+  */
+-void fuse_release_common(struct file *file, int opcode);
++void fuse_release_common(struct file *file, bool isdir);
+ 
+ /**
+  * Send FSYNC or FSYNCDIR request
+diff --git a/fs/iomap.c b/fs/iomap.c
+index ec15cf2ec696..37da7a61a6c5 100644
+--- a/fs/iomap.c
++++ b/fs/iomap.c
+@@ -117,6 +117,12 @@ iomap_page_create(struct inode *inode, struct page *page)
+ 	atomic_set(&iop->read_count, 0);
+ 	atomic_set(&iop->write_count, 0);
+ 	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
++
++	/*
++	 * migrate_page_move_mapping() assumes that pages with private data have
++	 * their count elevated by 1.
++	 */
++	get_page(page);
+ 	set_page_private(page, (unsigned long)iop);
+ 	SetPagePrivate(page);
+ 	return iop;
+@@ -133,6 +139,7 @@ iomap_page_release(struct page *page)
+ 	WARN_ON_ONCE(atomic_read(&iop->write_count));
+ 	ClearPagePrivate(page);
+ 	set_page_private(page, 0);
++	put_page(page);
+ 	kfree(iop);
+ }
+ 
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 3bbde0a9f48f..b2aadd3e1fec 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -652,6 +652,18 @@ static int ovl_symlink(struct inode *dir, struct dentry *dentry,
+ 	return ovl_create_object(dentry, S_IFLNK, 0, link);
+ }
+ 
++static int ovl_set_link_redirect(struct dentry *dentry)
++{
++	const struct cred *old_cred;
++	int err;
++
++	old_cred = ovl_override_creds(dentry->d_sb);
++	err = ovl_set_redirect(dentry, false);
++	revert_creds(old_cred);
++
++	return err;
++}
++
+ static int ovl_link(struct dentry *old, struct inode *newdir,
+ 		    struct dentry *new)
+ {
+@@ -672,7 +684,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
+ 		goto out_drop_write;
+ 
+ 	if (ovl_is_metacopy_dentry(old)) {
+-		err = ovl_set_redirect(old, false);
++		err = ovl_set_link_redirect(old);
+ 		if (err)
+ 			goto out_drop_write;
+ 	}
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index 8fa37cd7818a..54e5d17d7f3e 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -754,9 +754,8 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
+ 		goto out;
+ 	}
+ 
+-	/* Otherwise, get a connected non-upper dir or disconnected non-dir */
+-	if (d_is_dir(origin.dentry) &&
+-	    (origin.dentry->d_flags & DCACHE_DISCONNECTED)) {
++	/* Find origin.dentry again with ovl_acceptable() layer check */
++	if (d_is_dir(origin.dentry)) {
+ 		dput(origin.dentry);
+ 		origin.dentry = NULL;
+ 		err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack);
+@@ -769,6 +768,7 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
+ 			goto out_err;
+ 	}
+ 
++	/* Get a connected non-upper dir or disconnected non-dir */
+ 	dentry = ovl_get_dentry(sb, NULL, &origin, index);
+ 
+ out:
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index cd58939dc977..7a85e609fc27 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1566,7 +1566,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
+ 		cond_resched();
+ 
+ 		BUG_ON(!vma_can_userfault(vma));
+-		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+ 
+ 		/*
+ 		 * Nothing to do: this vma is already registered into this
+@@ -1575,6 +1574,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
+ 		if (!vma->vm_userfaultfd_ctx.ctx)
+ 			goto skip;
+ 
++		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
++
+ 		if (vma->vm_start > start)
+ 			start = vma->vm_start;
+ 		vma_end = min(end, vma->vm_end);
+diff --git a/init/Kconfig b/init/Kconfig
+index 1e234e2f1cba..317d5ccb5191 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -415,6 +415,11 @@ config IRQ_TIME_ACCOUNTING
+ 
+ 	  If in doubt, say N here.
+ 
++config HAVE_SCHED_AVG_IRQ
++	def_bool y
++	depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING
++	depends on SMP
++
+ config BSD_PROCESS_ACCT
+ 	bool "BSD Process Accounting"
+ 	depends on MULTIUSER
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 2beda4b726e2..13ddfa46d741 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -135,9 +135,8 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
+  * In theory, the compile should just see 0 here, and optimize out the call
+  * to sched_rt_avg_update. But I don't trust it...
+  */
+-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+-	s64 steal = 0, irq_delta = 0;
+-#endif
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
+ 
+@@ -177,7 +176,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
+ 
+ 	rq->clock_task += delta;
+ 
+-#ifdef HAVE_SCHED_AVG_IRQ
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+ 	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
+ 		update_irq_load_avg(rq, irq_delta + steal);
+ #endif
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 1162552dc3cc..eabbf6b10b44 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7361,7 +7361,7 @@ static inline bool others_have_blocked(struct rq *rq)
+ 	if (READ_ONCE(rq->avg_dl.util_avg))
+ 		return true;
+ 
+-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+ 	if (READ_ONCE(rq->avg_irq.util_avg))
+ 		return true;
+ #endif
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index 35475c0c5419..48a126486435 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -358,7 +358,7 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ 	return 0;
+ }
+ 
+-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+ /*
+  * irq:
+  *
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index d2894db28955..7e56b489ff32 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -6,7 +6,7 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+ 
+-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+ int update_irq_load_avg(struct rq *rq, u64 running);
+ #else
+ static inline int
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 6c25bbe87bd3..b63172288f7b 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -859,8 +859,7 @@ struct rq {
+ 
+ 	struct sched_avg	avg_rt;
+ 	struct sched_avg	avg_dl;
+-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+-#define HAVE_SCHED_AVG_IRQ
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+ 	struct sched_avg	avg_irq;
+ #endif
+ 	u64			idle_stamp;
+@@ -2215,7 +2214,7 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
+ }
+ #endif
+ 
+-#ifdef HAVE_SCHED_AVG_IRQ
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+ static inline unsigned long cpu_util_irq(struct rq *rq)
+ {
+ 	return rq->avg_irq.util_avg;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 77734451cb05..e23eb9fc77aa 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5460,6 +5460,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
+ 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
+ 		ftrace_shutdown(ops, 0);
+ 	ops->flags |= FTRACE_OPS_FL_DELETED;
++	ftrace_free_filter(ops);
+ 	mutex_unlock(&ftrace_lock);
+ }
+ 
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 84a65173b1e9..5574e862de8d 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -570,11 +570,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
+ 		}
+ 	}
+ 
++	kfree(op_stack);
++	kfree(inverts);
+ 	return prog;
+ out_free:
+ 	kfree(op_stack);
+-	kfree(prog_stack);
+ 	kfree(inverts);
++	kfree(prog_stack);
+ 	return ERR_PTR(ret);
+ }
+ 
+@@ -1718,6 +1720,7 @@ static int create_filter(struct trace_event_call *call,
+ 	err = process_preds(call, filter_string, *filterp, pe);
+ 	if (err && set_str)
+ 		append_filter_err(pe, *filterp);
++	create_filter_finish(pe);
+ 
+ 	return err;
+ }
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 2152d1e530cb..cd12ecb66eb9 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -732,8 +732,10 @@ int set_trigger_filter(char *filter_str,
+ 
+ 	/* The filter is for the 'trigger' event, not the triggered event */
+ 	ret = create_event_filter(file->event_call, filter_str, false, &filter);
+-	if (ret)
+-		goto out;
++	/*
++	 * If create_event_filter() fails, filter still needs to be freed.
++	 * Which the calling code will do with data->filter.
++	 */
+  assign:
+ 	tmp = rcu_access_pointer(data->filter);
+ 
+diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
+index 5056fb3b897d..e559c6294c39 100755
+--- a/scripts/spdxcheck.py
++++ b/scripts/spdxcheck.py
+@@ -168,6 +168,7 @@ class id_parser(object):
+         self.curline = 0
+         try:
+             for line in fd:
++                line = line.decode(locale.getpreferredencoding(False), errors='ignore')
+                 self.curline += 1
+                 if self.curline > maxlines:
+                     break
+@@ -249,12 +250,13 @@ if __name__ == '__main__':
+ 
+     try:
+         if len(args.path) and args.path[0] == '-':
+-            parser.parse_lines(sys.stdin, args.maxlines, '-')
++            stdin = os.fdopen(sys.stdin.fileno(), 'rb')
++            parser.parse_lines(stdin, args.maxlines, '-')
+         else:
+             if args.path:
+                 for p in args.path:
+                     if os.path.isfile(p):
+-                        parser.parse_lines(open(p), args.maxlines, p)
++                        parser.parse_lines(open(p, 'rb'), args.maxlines, p)
+                     elif os.path.isdir(p):
+                         scan_git_subtree(repo.head.reference.commit.tree, p)
+                     else:


             reply	other threads:[~2018-12-19 19:10 UTC|newest]

Thread overview: 332+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-12-19 19:09 Mike Pagano [this message]
  -- strict thread matches above, loose matches on Subject: below --
2024-04-18  3:06 [gentoo-commits] proj/linux-patches:4.19 commit in: / Alice Ferrazzi
2023-09-02  9:59 Mike Pagano
2023-08-30 15:00 Mike Pagano
2023-08-16 16:59 Mike Pagano
2023-08-11 11:58 Mike Pagano
2023-08-08 18:43 Mike Pagano
2023-07-24 20:30 Mike Pagano
2023-06-28 10:29 Mike Pagano
2023-06-21 14:55 Alice Ferrazzi
2023-06-14 10:21 Mike Pagano
2023-06-09 11:32 Mike Pagano
2023-05-30 12:57 Mike Pagano
2023-05-17 11:14 Mike Pagano
2023-05-17 11:01 Mike Pagano
2023-05-10 17:59 Mike Pagano
2023-04-26  9:35 Alice Ferrazzi
2023-04-20 11:17 Alice Ferrazzi
2023-04-05 11:41 Mike Pagano
2023-03-22 14:16 Alice Ferrazzi
2023-03-17 10:46 Mike Pagano
2023-03-13 11:35 Alice Ferrazzi
2023-03-11 16:01 Mike Pagano
2023-03-03 12:31 Mike Pagano
2023-02-25 11:41 Mike Pagano
2023-02-24  3:19 Alice Ferrazzi
2023-02-24  3:15 Alice Ferrazzi
2023-02-22 14:51 Alice Ferrazzi
2023-02-06 12:49 Mike Pagano
2023-01-24  7:16 Alice Ferrazzi
2023-01-18 11:11 Mike Pagano
2022-12-14 12:15 Mike Pagano
2022-12-08 12:14 Alice Ferrazzi
2022-11-25 17:04 Mike Pagano
2022-11-23  9:39 Alice Ferrazzi
2022-11-10 17:58 Mike Pagano
2022-11-03 15:11 Mike Pagano
2022-11-01 19:48 Mike Pagano
2022-10-26 11:41 Mike Pagano
2022-10-05 11:59 Mike Pagano
2022-09-28  9:18 Mike Pagano
2022-09-20 12:03 Mike Pagano
2022-09-15 11:09 Mike Pagano
2022-09-05 12:06 Mike Pagano
2022-08-25 10:35 Mike Pagano
2022-08-11 12:36 Mike Pagano
2022-07-29 15:28 Mike Pagano
2022-07-21 20:12 Mike Pagano
2022-07-12 16:01 Mike Pagano
2022-07-07 16:18 Mike Pagano
2022-07-02 16:07 Mike Pagano
2022-06-25 10:22 Mike Pagano
2022-06-16 11:40 Mike Pagano
2022-06-14 16:02 Mike Pagano
2022-06-06 11:05 Mike Pagano
2022-05-27 12:24 Mike Pagano
2022-05-25 11:55 Mike Pagano
2022-05-18  9:50 Mike Pagano
2022-05-15 22:12 Mike Pagano
2022-05-12 11:30 Mike Pagano
2022-05-01 17:04 Mike Pagano
2022-04-27 12:03 Mike Pagano
2022-04-20 12:09 Mike Pagano
2022-04-15 13:11 Mike Pagano
2022-04-12 19:24 Mike Pagano
2022-03-28 10:59 Mike Pagano
2022-03-23 11:57 Mike Pagano
2022-03-16 13:27 Mike Pagano
2022-03-11 10:56 Mike Pagano
2022-03-08 18:30 Mike Pagano
2022-03-02 13:08 Mike Pagano
2022-02-26 21:14 Mike Pagano
2022-02-23 12:39 Mike Pagano
2022-02-16 12:47 Mike Pagano
2022-02-11 12:53 Mike Pagano
2022-02-11 12:46 Mike Pagano
2022-02-11 12:45 Mike Pagano
2022-02-11 12:37 Mike Pagano
2022-02-08 17:56 Mike Pagano
2022-01-29 17:45 Mike Pagano
2022-01-27 11:39 Mike Pagano
2022-01-11 13:14 Mike Pagano
2022-01-05 12:55 Mike Pagano
2021-12-29 13:11 Mike Pagano
2021-12-22 14:07 Mike Pagano
2021-12-14 10:36 Mike Pagano
2021-12-08 12:55 Mike Pagano
2021-12-01 12:51 Mike Pagano
2021-11-26 11:59 Mike Pagano
2021-11-12 14:16 Mike Pagano
2021-11-06 13:26 Mike Pagano
2021-11-02 19:32 Mike Pagano
2021-10-27 11:59 Mike Pagano
2021-10-20 13:26 Mike Pagano
2021-10-17 13:12 Mike Pagano
2021-10-13 15:00 Alice Ferrazzi
2021-10-09 21:33 Mike Pagano
2021-10-06 14:06 Mike Pagano
2021-09-26 14:13 Mike Pagano
2021-09-22 11:40 Mike Pagano
2021-09-20 22:05 Mike Pagano
2021-09-03 11:22 Mike Pagano
2021-09-03 10:08 Alice Ferrazzi
2021-08-26 14:06 Mike Pagano
2021-08-25 22:45 Mike Pagano
2021-08-25 20:41 Mike Pagano
2021-08-15 20:07 Mike Pagano
2021-08-12 11:51 Mike Pagano
2021-08-08 13:39 Mike Pagano
2021-08-04 11:54 Mike Pagano
2021-08-03 12:26 Mike Pagano
2021-07-31 10:34 Alice Ferrazzi
2021-07-28 12:37 Mike Pagano
2021-07-20 15:35 Alice Ferrazzi
2021-07-13 12:38 Mike Pagano
2021-07-11 14:45 Mike Pagano
2021-06-30 14:25 Mike Pagano
2021-06-16 12:22 Mike Pagano
2021-06-10 11:46 Mike Pagano
2021-06-03 10:32 Alice Ferrazzi
2021-05-26 12:05 Mike Pagano
2021-05-22 10:03 Mike Pagano
2021-05-07 11:40 Alice Ferrazzi
2021-04-30 19:02 Mike Pagano
2021-04-28 18:31 Mike Pagano
2021-04-28 11:44 Alice Ferrazzi
2021-04-16 11:15 Alice Ferrazzi
2021-04-14 11:22 Alice Ferrazzi
2021-04-10 13:24 Mike Pagano
2021-04-07 12:21 Mike Pagano
2021-03-30 14:17 Mike Pagano
2021-03-24 12:08 Mike Pagano
2021-03-22 15:50 Mike Pagano
2021-03-20 14:26 Mike Pagano
2021-03-17 16:21 Mike Pagano
2021-03-11 14:05 Mike Pagano
2021-03-07 15:15 Mike Pagano
2021-03-04 12:08 Mike Pagano
2021-02-23 14:31 Alice Ferrazzi
2021-02-13 15:28 Alice Ferrazzi
2021-02-10 10:03 Alice Ferrazzi
2021-02-07 14:40 Alice Ferrazzi
2021-02-03 23:43 Mike Pagano
2021-01-30 13:34 Alice Ferrazzi
2021-01-27 11:15 Mike Pagano
2021-01-23 16:36 Mike Pagano
2021-01-19 20:34 Mike Pagano
2021-01-17 16:20 Mike Pagano
2021-01-12 20:06 Mike Pagano
2021-01-09 12:57 Mike Pagano
2021-01-06 14:15 Mike Pagano
2020-12-30 12:52 Mike Pagano
2020-12-11 12:56 Mike Pagano
2020-12-08 12:06 Mike Pagano
2020-12-02 12:49 Mike Pagano
2020-11-24 14:40 Mike Pagano
2020-11-22 19:26 Mike Pagano
2020-11-18 19:56 Mike Pagano
2020-11-11 15:43 Mike Pagano
2020-11-10 13:56 Mike Pagano
2020-11-05 12:35 Mike Pagano
2020-11-01 20:29 Mike Pagano
2020-10-29 11:18 Mike Pagano
2020-10-17 10:17 Mike Pagano
2020-10-14 20:36 Mike Pagano
2020-10-07 12:50 Mike Pagano
2020-10-01 12:45 Mike Pagano
2020-09-26 22:07 Mike Pagano
2020-09-26 22:00 Mike Pagano
2020-09-24 15:58 Mike Pagano
2020-09-23 12:07 Mike Pagano
2020-09-17 15:01 Mike Pagano
2020-09-17 14:55 Mike Pagano
2020-09-12 17:59 Mike Pagano
2020-09-09 17:59 Mike Pagano
2020-09-03 11:37 Mike Pagano
2020-08-26 11:15 Mike Pagano
2020-08-21 10:49 Alice Ferrazzi
2020-08-19  9:36 Alice Ferrazzi
2020-08-12 23:36 Alice Ferrazzi
2020-08-07 19:16 Mike Pagano
2020-08-05 14:51 Thomas Deutschmann
2020-07-31 18:00 Mike Pagano
2020-07-29 12:33 Mike Pagano
2020-07-22 12:42 Mike Pagano
2020-07-16 11:17 Mike Pagano
2020-07-09 12:12 Mike Pagano
2020-07-01 12:14 Mike Pagano
2020-06-29 17:41 Mike Pagano
2020-06-25 15:07 Mike Pagano
2020-06-22 14:47 Mike Pagano
2020-06-10 21:27 Mike Pagano
2020-06-07 21:52 Mike Pagano
2020-06-03 11:41 Mike Pagano
2020-05-27 16:25 Mike Pagano
2020-05-20 11:30 Mike Pagano
2020-05-20 11:27 Mike Pagano
2020-05-14 11:30 Mike Pagano
2020-05-13 12:33 Mike Pagano
2020-05-11 22:50 Mike Pagano
2020-05-09 22:20 Mike Pagano
2020-05-06 11:46 Mike Pagano
2020-05-02 19:24 Mike Pagano
2020-04-29 17:57 Mike Pagano
2020-04-23 11:44 Mike Pagano
2020-04-21 11:15 Mike Pagano
2020-04-17 11:45 Mike Pagano
2020-04-15 17:09 Mike Pagano
2020-04-13 11:34 Mike Pagano
2020-04-02 15:24 Mike Pagano
2020-03-25 14:58 Mike Pagano
2020-03-20 11:57 Mike Pagano
2020-03-18 14:21 Mike Pagano
2020-03-16 12:23 Mike Pagano
2020-03-11 17:20 Mike Pagano
2020-03-05 16:23 Mike Pagano
2020-02-28 16:38 Mike Pagano
2020-02-24 11:06 Mike Pagano
2020-02-19 23:45 Mike Pagano
2020-02-14 23:52 Mike Pagano
2020-02-11 16:20 Mike Pagano
2020-02-05 17:05 Mike Pagano
2020-02-01 10:37 Mike Pagano
2020-02-01 10:30 Mike Pagano
2020-01-29 16:16 Mike Pagano
2020-01-27 14:25 Mike Pagano
2020-01-23 11:07 Mike Pagano
2020-01-17 19:56 Mike Pagano
2020-01-14 22:30 Mike Pagano
2020-01-12 15:00 Mike Pagano
2020-01-09 11:15 Mike Pagano
2020-01-04 19:50 Mike Pagano
2019-12-31 17:46 Mike Pagano
2019-12-21 15:03 Mike Pagano
2019-12-17 21:56 Mike Pagano
2019-12-13 12:35 Mike Pagano
2019-12-05 12:03 Alice Ferrazzi
2019-12-01 14:06 Thomas Deutschmann
2019-11-24 15:44 Mike Pagano
2019-11-20 19:36 Mike Pagano
2019-11-12 21:00 Mike Pagano
2019-11-10 16:20 Mike Pagano
2019-11-06 14:26 Mike Pagano
2019-10-29 12:04 Mike Pagano
2019-10-17 22:27 Mike Pagano
2019-10-11 17:04 Mike Pagano
2019-10-07 17:42 Mike Pagano
2019-10-05 11:42 Mike Pagano
2019-10-01 10:10 Mike Pagano
2019-09-21 17:11 Mike Pagano
2019-09-19 12:34 Mike Pagano
2019-09-19 10:04 Mike Pagano
2019-09-16 12:26 Mike Pagano
2019-09-10 11:12 Mike Pagano
2019-09-06 17:25 Mike Pagano
2019-08-29 14:15 Mike Pagano
2019-08-25 17:37 Mike Pagano
2019-08-23 22:18 Mike Pagano
2019-08-16 12:26 Mike Pagano
2019-08-16 12:13 Mike Pagano
2019-08-09 17:45 Mike Pagano
2019-08-06 19:19 Mike Pagano
2019-08-04 16:15 Mike Pagano
2019-07-31 15:09 Mike Pagano
2019-07-31 10:22 Mike Pagano
2019-07-28 16:27 Mike Pagano
2019-07-26 11:35 Mike Pagano
2019-07-21 14:41 Mike Pagano
2019-07-14 15:44 Mike Pagano
2019-07-10 11:05 Mike Pagano
2019-07-03 11:34 Mike Pagano
2019-06-25 10:53 Mike Pagano
2019-06-22 19:06 Mike Pagano
2019-06-19 17:17 Thomas Deutschmann
2019-06-17 19:22 Mike Pagano
2019-06-15 15:07 Mike Pagano
2019-06-11 12:42 Mike Pagano
2019-06-10 19:43 Mike Pagano
2019-06-09 16:19 Mike Pagano
2019-06-04 11:11 Mike Pagano
2019-05-31 15:02 Mike Pagano
2019-05-26 17:10 Mike Pagano
2019-05-22 11:02 Mike Pagano
2019-05-16 23:03 Mike Pagano
2019-05-14 21:00 Mike Pagano
2019-05-10 19:40 Mike Pagano
2019-05-08 10:06 Mike Pagano
2019-05-05 13:42 Mike Pagano
2019-05-04 18:28 Mike Pagano
2019-05-02 10:13 Mike Pagano
2019-04-27 17:36 Mike Pagano
2019-04-20 11:09 Mike Pagano
2019-04-19 19:51 Mike Pagano
2019-04-05 21:46 Mike Pagano
2019-04-03 10:59 Mike Pagano
2019-03-27 10:22 Mike Pagano
2019-03-23 20:23 Mike Pagano
2019-03-19 16:58 Mike Pagano
2019-03-13 22:08 Mike Pagano
2019-03-10 14:15 Mike Pagano
2019-03-06 19:06 Mike Pagano
2019-03-05 18:04 Mike Pagano
2019-02-27 11:23 Mike Pagano
2019-02-23 11:35 Mike Pagano
2019-02-23  0:46 Mike Pagano
2019-02-20 11:19 Mike Pagano
2019-02-16  0:42 Mike Pagano
2019-02-15 12:39 Mike Pagano
2019-02-12 20:53 Mike Pagano
2019-02-06 17:08 Mike Pagano
2019-01-31 11:28 Mike Pagano
2019-01-26 15:09 Mike Pagano
2019-01-22 23:06 Mike Pagano
2019-01-16 23:32 Mike Pagano
2019-01-13 19:29 Mike Pagano
2019-01-09 17:54 Mike Pagano
2018-12-29 18:55 Mike Pagano
2018-12-29  1:08 Mike Pagano
2018-12-21 14:58 Mike Pagano
2018-12-17 11:42 Mike Pagano
2018-12-13 11:40 Mike Pagano
2018-12-08 13:17 Mike Pagano
2018-12-08 13:17 Mike Pagano
2018-12-05 20:16 Mike Pagano
2018-12-01 15:08 Mike Pagano
2018-11-27 16:16 Mike Pagano
2018-11-23 12:42 Mike Pagano
2018-11-21 12:30 Mike Pagano
2018-11-14  0:47 Mike Pagano
2018-11-14  0:47 Mike Pagano
2018-11-13 20:44 Mike Pagano
2018-11-04 16:22 Alice Ferrazzi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1545246549.ddb74622bdf265fef8705b51a9ada6be46b68fac.mpagano@gentoo \
    --to=mpagano@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox.