From: "Alice Ferrazzi" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 10 Feb 2021 10:15:54 +0000 (UTC)
Message-ID: <1612952117.7a70358f7d8e29ea67ced2af4015fde285ae84a7.alicef@gentoo>
commit: 7a70358f7d8e29ea67ced2af4015fde285ae84a7
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 10 10:15:07 2021 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Feb 10 10:15:17 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7a70358f7d8e29ea67ced2af4015fde285ae84a7
Linux patch 4.9.257
Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
0000_README | 4 +
1256_linux-4.9.257.patch | 1823 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1827 insertions(+)
diff --git a/0000_README b/0000_README
index d822171..e08f1e6 100644
--- a/0000_README
+++ b/0000_README
@@ -1067,6 +1067,10 @@ Patch: 1255_linux-4.9.256.patch
From: http://www.kernel.org
Desc: Linux 4.9.256
+Patch: 1256_linux-4.9.257.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.257
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1256_linux-4.9.257.patch b/1256_linux-4.9.257.patch
new file mode 100644
index 0000000..7d4e271
--- /dev/null
+++ b/1256_linux-4.9.257.patch
@@ -0,0 +1,1823 @@
+diff --git a/Makefile b/Makefile
+index 69af44d3dcd14..e53096154f816 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 256
++SUBLEVEL = 257
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+@@ -841,12 +841,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
+ # change __FILE__ to the relative path from the srctree
+ KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+
+-# ensure -fcf-protection is disabled when using retpoline as it is
+-# incompatible with -mindirect-branch=thunk-extern
+-ifdef CONFIG_RETPOLINE
+-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+-endif
+-
+ # use the deterministic mode of AR if available
+ KBUILD_ARFLAGS := $(call ar-option,D)
+
+@@ -1141,7 +1135,7 @@ endef
+
+ define filechk_version.h
+ (echo \#define LINUX_VERSION_CODE $(shell \
+- expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
++ expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \
+ echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
+ endef
+
+diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
+index 96a3d73ef4bf4..fd6c9169fa78e 100644
+--- a/arch/arm/mach-footbridge/dc21285.c
++++ b/arch/arm/mach-footbridge/dc21285.c
+@@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ if (addr)
+ switch (size) {
+ case 1:
+- asm("ldrb %0, [%1, %2]"
++ asm volatile("ldrb %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ case 2:
+- asm("ldrh %0, [%1, %2]"
++ asm volatile("ldrh %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ case 4:
+- asm("ldr %0, [%1, %2]"
++ asm volatile("ldr %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ }
+@@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ if (addr)
+ switch (size) {
+ case 1:
+- asm("strb %0, [%1, %2]"
++ asm volatile("strb %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+ case 2:
+- asm("strh %0, [%1, %2]"
++ asm volatile("strh %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+ case 4:
+- asm("str %0, [%1, %2]"
++ asm volatile("str %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 940ed27a62123..a95d414663b1e 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -137,6 +137,9 @@ else
+ KBUILD_CFLAGS += -mno-red-zone
+ KBUILD_CFLAGS += -mcmodel=kernel
+
++ # Intel CET isn't enabled in the kernel
++ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
++
+ # -funit-at-a-time shrinks the kernel .text considerably
+ # unfortunately it makes reading oopses harder.
+ KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index f39fd349cef65..a6d034257b7bb 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -176,16 +176,6 @@ static inline void lapic_update_tsc_freq(void) { }
+ #endif /* !CONFIG_X86_LOCAL_APIC */
+
+ #ifdef CONFIG_X86_X2APIC
+-/*
+- * Make previous memory operations globally visible before
+- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
+- * mfence for this.
+- */
+-static inline void x2apic_wrmsr_fence(void)
+-{
+- asm volatile("mfence" : : : "memory");
+-}
+-
+ static inline void native_apic_msr_write(u32 reg, u32 v)
+ {
+ if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index a0f450b21d676..89b75edf24af8 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -110,4 +110,22 @@ do { \
+
+ #include <asm-generic/barrier.h>
+
++/*
++ * Make previous memory operations globally visible before
++ * a WRMSR.
++ *
++ * MFENCE makes writes visible, but only affects load/store
++ * instructions. WRMSR is unfortunately not a load/store
++ * instruction and is unaffected by MFENCE. The LFENCE ensures
++ * that the WRMSR is not reordered.
++ *
++ * Most WRMSRs are full serializing instructions themselves and
++ * do not require this barrier. This is only required for the
++ * IA32_TSC_DEADLINE and X2APIC MSRs.
++ */
++static inline void weak_wrmsr_fence(void)
++{
++ asm volatile("mfence; lfence" : : : "memory");
++}
++
+ #endif /* _ASM_X86_BARRIER_H */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 722a76b88bcc0..107a9eff587bf 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -42,6 +42,7 @@
+ #include <asm/x86_init.h>
+ #include <asm/pgalloc.h>
+ #include <linux/atomic.h>
++#include <asm/barrier.h>
+ #include <asm/mpspec.h>
+ #include <asm/i8259.h>
+ #include <asm/proto.h>
+@@ -476,6 +477,9 @@ static int lapic_next_deadline(unsigned long delta,
+ {
+ u64 tsc;
+
++ /* This MSR is special and needs a special fence: */
++ weak_wrmsr_fence();
++
+ tsc = rdtsc();
+ wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
+ return 0;
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index 200af5ae96626..ca64c150d1c53 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -27,7 +27,8 @@ static void x2apic_send_IPI(int cpu, int vector)
+ {
+ u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
+ }
+
+@@ -40,7 +41,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
+ unsigned long flags;
+ u32 dest;
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+
+ local_irq_save(flags);
+
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index ff111f05a3145..8889420ea7c6f 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -40,7 +40,8 @@ static void x2apic_send_IPI(int cpu, int vector)
+ {
+ u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
+ }
+
+@@ -51,7 +52,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
+ unsigned long this_cpu;
+ unsigned long flags;
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+
+ local_irq_save(flags);
+
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index 35e8fbca10ad5..c53c88b531639 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -188,6 +188,8 @@ struct acpi_thermal {
+ int tz_enabled;
+ int kelvin_offset;
+ struct work_struct thermal_check_work;
++ struct mutex thermal_check_lock;
++ atomic_t thermal_check_count;
+ };
+
+ /* --------------------------------------------------------------------------
+@@ -513,17 +515,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
+ return 0;
+ }
+
+-static void acpi_thermal_check(void *data)
+-{
+- struct acpi_thermal *tz = data;
+-
+- if (!tz->tz_enabled)
+- return;
+-
+- thermal_zone_device_update(tz->thermal_zone,
+- THERMAL_EVENT_UNSPECIFIED);
+-}
+-
+ /* sys I/F for generic thermal sysfs support */
+
+ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
+@@ -557,6 +548,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal,
+ return 0;
+ }
+
++static void acpi_thermal_check_fn(struct work_struct *work);
++
+ static int thermal_set_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+ {
+@@ -582,7 +575,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "%s kernel ACPI thermal control\n",
+ tz->tz_enabled ? "Enable" : "Disable"));
+- acpi_thermal_check(tz);
++ acpi_thermal_check_fn(&tz->thermal_check_work);
+ }
+ return 0;
+ }
+@@ -951,6 +944,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
++static void acpi_queue_thermal_check(struct acpi_thermal *tz)
++{
++ if (!work_pending(&tz->thermal_check_work))
++ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
++}
++
+ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+ {
+ struct acpi_thermal *tz = acpi_driver_data(device);
+@@ -961,17 +960,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+
+ switch (event) {
+ case ACPI_THERMAL_NOTIFY_TEMPERATURE:
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ break;
+ case ACPI_THERMAL_NOTIFY_THRESHOLDS:
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+ case ACPI_THERMAL_NOTIFY_DEVICES:
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+@@ -1071,7 +1070,27 @@ static void acpi_thermal_check_fn(struct work_struct *work)
+ {
+ struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
+ thermal_check_work);
+- acpi_thermal_check(tz);
++
++ if (!tz->tz_enabled)
++ return;
++ /*
++ * In general, it is not sufficient to check the pending bit, because
++ * subsequent instances of this function may be queued after one of them
++ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
++ * one of them is running, though, because it may have done the actual
++ * check some time ago, so allow at least one of them to block on the
++ * mutex while another one is running the update.
++ */
++ if (!atomic_add_unless(&tz->thermal_check_count, -1, 1))
++ return;
++
++ mutex_lock(&tz->thermal_check_lock);
++
++ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
++
++ atomic_inc(&tz->thermal_check_count);
++
++ mutex_unlock(&tz->thermal_check_lock);
+ }
+
+ static int acpi_thermal_add(struct acpi_device *device)
+@@ -1103,6 +1122,8 @@ static int acpi_thermal_add(struct acpi_device *device)
+ if (result)
+ goto free_memory;
+
++ atomic_set(&tz->thermal_check_count, 3);
++ mutex_init(&tz->thermal_check_lock);
+ INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
+
+ pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+@@ -1168,7 +1189,7 @@ static int acpi_thermal_resume(struct device *dev)
+ tz->state.active |= tz->trips.active[i].flags.enabled;
+ }
+
+- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
++ acpi_queue_thermal_check(tz);
+
+ return AE_OK;
+ }
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 637f1347cd13d..815b69d35722c 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -232,9 +232,17 @@ static const struct xpad_device {
+ { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
+@@ -313,6 +321,9 @@ static const struct xpad_device {
+ { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+@@ -446,8 +457,12 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
++ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ { }
+ };
+
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index fa07be0b4500e..2317f8d3fef6f 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -223,6 +223,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ },
++ },
++ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 593a4bfcba428..b2061cfa05ab4 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3323,6 +3323,12 @@ static int __init init_dmars(void)
+
+ if (!ecap_pass_through(iommu->ecap))
+ hw_pass_through = 0;
++
++ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
++ pr_info("Disable batched IOTLB flush due to virtualization\n");
++ intel_iommu_strict = 1;
++ }
++
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_enabled(iommu))
+ intel_svm_alloc_pasid_tables(iommu);
+diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
+index 934c4816d78bf..14c56cf66ddc6 100644
+--- a/drivers/mmc/core/sdio_cis.c
++++ b/drivers/mmc/core/sdio_cis.c
+@@ -24,6 +24,8 @@
+ #include "sdio_cis.h"
+ #include "sdio_ops.h"
+
++#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
++
+ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+ {
+@@ -269,6 +271,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
+
+ do {
+ unsigned char tpl_code, tpl_link;
++ unsigned long timeout = jiffies +
++ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
+
+ ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
+ if (ret)
+@@ -321,6 +325,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
+ prev = &this->next;
+
+ if (ret == -ENOENT) {
++ if (time_after(jiffies, timeout))
++ break;
+ /* warn about unknown tuples */
+ pr_warn_ratelimited("%s: queuing unknown"
+ " CIS tuple 0x%02x (%u bytes)\n",
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 84def5819d2ec..a3742a3b413cd 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -515,15 +515,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ /* Find our integrated MDIO bus node */
+ dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
+ priv->master_mii_bus = of_mdio_find_bus(dn);
+- if (!priv->master_mii_bus)
++ if (!priv->master_mii_bus) {
++ of_node_put(dn);
+ return -EPROBE_DEFER;
++ }
+
+ get_device(&priv->master_mii_bus->dev);
+ priv->master_mii_dn = dn;
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+- if (!priv->slave_mii_bus)
++ if (!priv->slave_mii_bus) {
++ of_node_put(dn);
+ return -ENOMEM;
++ }
+
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "sf2 slave mii";
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index a8ff4f8a6b87d..f23559a2b2bd1 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -3496,6 +3496,12 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
+ while (!done) {
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
++ /* This barrier makes sure ibmvnic_next_crq()'s
++ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
++ * before ibmvnic_handle_crq()'s
++ * switch(gen_crq->first) and switch(gen_crq->cmd).
++ */
++ dma_rmb();
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
+ }
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 04b3ac17531db..7865feb8e5e83 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -2891,8 +2891,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+- if (sdev->type == TYPE_DISK)
++ if (sdev->type == TYPE_DISK) {
+ sdev->allow_restart = 1;
++ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
++ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+ }
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index d0a86ef806522..59fd6101f188b 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -1585,8 +1585,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ rc = fc_exch_done_locked(ep);
+ WARN_ON(fc_seq_exch(sp) != ep);
+ spin_unlock_bh(&ep->ex_lock);
+- if (!rc)
++ if (!rc) {
+ fc_exch_delete(ep);
++ } else {
++ FC_EXCH_DBG(ep, "ep is completed already, "
++ "hence skip calling the resp\n");
++ goto skip_resp;
++ }
+ }
+
+ /*
+@@ -1605,6 +1610,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
+
++skip_resp:
+ fc_exch_release(ep);
+ return;
+ rel:
+@@ -1848,10 +1854,16 @@ static void fc_exch_reset(struct fc_exch *ep)
+
+ fc_exch_hold(ep);
+
+- if (!rc)
++ if (!rc) {
+ fc_exch_delete(ep);
++ } else {
++ FC_EXCH_DBG(ep, "ep is completed already, "
++ "hence skip calling the resp\n");
++ goto skip_resp;
++ }
+
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
++skip_resp:
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ fc_exch_release(ep);
+ }
+diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
+index 76701d6ce92c3..582099f4f449f 100644
+--- a/drivers/usb/class/usblp.c
++++ b/drivers/usb/class/usblp.c
+@@ -1349,14 +1349,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
+ if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
+ return -EINVAL;
+
+- alts = usblp->protocol[protocol].alt_setting;
+- if (alts < 0)
+- return -EINVAL;
+- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+- if (r < 0) {
+- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+- alts, usblp->ifnum);
+- return r;
++ /* Don't unnecessarily set the interface if there's a single alt. */
++ if (usblp->intf->num_altsetting > 1) {
++ alts = usblp->protocol[protocol].alt_setting;
++ if (alts < 0)
++ return -EINVAL;
++ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
++ if (r < 0) {
++ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
++ alts, usblp->ifnum);
++ return r;
++ }
+ }
+
+ usblp->bidir = (usblp->protocol[protocol].epread != NULL);
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 9381a108a9851..6bde5db1490a1 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -942,7 +942,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
+ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
+ u32 windex)
+ {
+- struct dwc2_hsotg_ep *ep;
+ int dir = (windex & USB_DIR_IN) ? 1 : 0;
+ int idx = windex & 0x7F;
+
+@@ -952,12 +951,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
+ if (idx > hsotg->num_of_eps)
+ return NULL;
+
+- ep = index_to_ep(hsotg, idx, dir);
+-
+- if (idx && ep->dir_in != dir)
+- return NULL;
+-
+- return ep;
++ return index_to_ep(hsotg, idx, dir);
+ }
+
+ /**
+diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
+index 25a2c2e485920..3396e7193dba2 100644
+--- a/drivers/usb/gadget/legacy/ether.c
++++ b/drivers/usb/gadget/legacy/ether.c
+@@ -407,8 +407,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(gadget);
+- if (!usb_desc)
++ if (!usb_desc) {
++ status = -ENOMEM;
+ goto fail1;
++ }
+ usb_otg_descriptor_init(gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6e1d65fda97e0..69dbfffde4df0 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -692,11 +692,16 @@ void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+ DMA_FROM_DEVICE);
+ /* for in transfers we need to copy the data from bounce to sg */
+- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+- seg->bounce_len, seg->bounce_offs);
+- if (len != seg->bounce_len)
+- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+- len, seg->bounce_len);
++ if (urb->num_sgs) {
++ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
++ seg->bounce_len, seg->bounce_offs);
++ if (len != seg->bounce_len)
++ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
++ len, seg->bounce_len);
++ } else {
++ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
++ seg->bounce_len);
++ }
+ seg->bounce_len = 0;
+ seg->bounce_offs = 0;
+ }
+@@ -3196,12 +3201,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
+
+ /* create a max max_pkt sized bounce buffer pointed to by last trb */
+ if (usb_urb_dir_out(urb)) {
+- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+- seg->bounce_buf, new_buff_len, enqd_len);
+- if (len != new_buff_len)
+- xhci_warn(xhci,
+- "WARN Wrong bounce buffer write length: %zu != %d\n",
+- len, new_buff_len);
++ if (urb->num_sgs) {
++ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
++ seg->bounce_buf, new_buff_len, enqd_len);
++ if (len != new_buff_len)
++ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
++ len, new_buff_len);
++ } else {
++ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
++ }
++
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_TO_DEVICE);
+ } else {
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index abf8a3cac2651..1847074b4d819 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -58,6 +58,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
++ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
+ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
+@@ -198,6 +199,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
+ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
+ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
++ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
+ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 1998b314368e0..3c536eed07541 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
+ #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
+ #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
+ #define CINTERION_PRODUCT_CLS8 0x00b0
++#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
++#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
+
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+@@ -1896,6 +1898,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
++ .driver_info = RSVD(3)},
++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
++ .driver_info = RSVD(0)},
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 0262c8f7e7c76..09f49dab77393 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -830,6 +830,7 @@ static int
+ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
+ {
+ struct inode *inode;
++ int rc;
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+@@ -839,8 +840,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
+ if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+ CIFS_I(inode)->time = 0; /* force reval */
+
+- if (cifs_revalidate_dentry(direntry))
+- return 0;
++ rc = cifs_revalidate_dentry(direntry);
++ if (rc) {
++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
++ switch (rc) {
++ case -ENOENT:
++ case -ESTALE:
++ /*
++ * Those errors mean the dentry is invalid
++ * (file was deleted or recreated)
++ */
++ return 0;
++ default:
++ /*
++ * Otherwise some unexpected error happened;
++ * report it as-is to the VFS layer
++ */
++ return rc;
++ }
++ }
+ else {
+ /*
+ * If the inode wasn't known to be a dfs entry when
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 253b03451b727..ba1909b887efb 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -665,8 +665,9 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
+
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+
++ set_page_huge_active(page);
+ /*
+- * page_put due to reference from alloc_huge_page()
++ * put_page() due to reference from alloc_huge_page()
+ * unlock_page because locked by add_to_page_cache()
+ */
+ put_page(page);
+diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
+index 698d51a0eea3f..4adf7faeaeb59 100644
+--- a/include/linux/elfcore.h
++++ b/include/linux/elfcore.h
+@@ -55,6 +55,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
+ }
+ #endif
+
++#if defined(CONFIG_UM) || defined(CONFIG_IA64)
+ /*
+ * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
+ * extra segments containing the gate DSO contents. Dumping its
+@@ -69,5 +70,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
+ extern int
+ elf_core_write_extra_data(struct coredump_params *cprm);
+ extern size_t elf_core_extra_data_size(void);
++#else
++static inline Elf_Half elf_core_extra_phdrs(void)
++{
++ return 0;
++}
++
++static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
++{
++ return 1;
++}
++
++static inline int elf_core_write_extra_data(struct coredump_params *cprm)
++{
++ return 1;
++}
++
++static inline size_t elf_core_extra_data_size(void)
++{
++ return 0;
++}
++#endif
+
+ #endif /* _LINUX_ELFCORE_H */
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 6b8a7b654771a..4e4c35a6bfc5a 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -502,6 +502,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
+ {
+ atomic_long_sub(l, &mm->hugetlb_usage);
+ }
++
++void set_page_huge_active(struct page *page);
++
+ #else /* CONFIG_HUGETLB_PAGE */
+ struct hstate {};
+ #define alloc_huge_page(v, a, r) NULL
+diff --git a/kernel/Makefile b/kernel/Makefile
+index 92488cf6ad913..6c4a28cf680e4 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -90,7 +90,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
+ obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
+ obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
+ obj-$(CONFIG_LATENCYTOP) += latencytop.o
+-obj-$(CONFIG_ELFCORE) += elfcore.o
+ obj-$(CONFIG_FUNCTION_TRACER) += trace/
+ obj-$(CONFIG_TRACING) += trace/
+ obj-$(CONFIG_TRACE_CLOCK) += trace/
+diff --git a/kernel/elfcore.c b/kernel/elfcore.c
+deleted file mode 100644
+index a2b29b9bdfcb2..0000000000000
+--- a/kernel/elfcore.c
++++ /dev/null
+@@ -1,25 +0,0 @@
+-#include <linux/elf.h>
+-#include <linux/fs.h>
+-#include <linux/mm.h>
+-#include <linux/binfmts.h>
+-#include <linux/elfcore.h>
+-
+-Elf_Half __weak elf_core_extra_phdrs(void)
+-{
+- return 0;
+-}
+-
+-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+-{
+- return 1;
+-}
+-
+-int __weak elf_core_write_extra_data(struct coredump_params *cprm)
+-{
+- return 1;
+-}
+-
+-size_t __weak elf_core_extra_data_size(void)
+-{
+- return 0;
+-}
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 2ef8c5aef35d0..83db5787c67ef 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -837,6 +837,29 @@ static struct futex_pi_state * alloc_pi_state(void)
+ return pi_state;
+ }
+
++static void pi_state_update_owner(struct futex_pi_state *pi_state,
++ struct task_struct *new_owner)
++{
++ struct task_struct *old_owner = pi_state->owner;
++
++ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
++
++ if (old_owner) {
++ raw_spin_lock(&old_owner->pi_lock);
++ WARN_ON(list_empty(&pi_state->list));
++ list_del_init(&pi_state->list);
++ raw_spin_unlock(&old_owner->pi_lock);
++ }
++
++ if (new_owner) {
++ raw_spin_lock(&new_owner->pi_lock);
++ WARN_ON(!list_empty(&pi_state->list));
++ list_add(&pi_state->list, &new_owner->pi_state_list);
++ pi_state->owner = new_owner;
++ raw_spin_unlock(&new_owner->pi_lock);
++ }
++}
++
+ /*
+ * Drops a reference to the pi_state object and frees or caches it
+ * when the last reference is gone.
+@@ -856,11 +879,8 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+ * and has cleaned up the pi_state already
+ */
+ if (pi_state->owner) {
+- raw_spin_lock_irq(&pi_state->owner->pi_lock);
+- list_del_init(&pi_state->list);
+- raw_spin_unlock_irq(&pi_state->owner->pi_lock);
+-
+- rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
++ pi_state_update_owner(pi_state, NULL);
++ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
+ }
+
+ if (current->pi_state_cache)
+@@ -941,7 +961,7 @@ static void exit_pi_state_list(struct task_struct *curr)
+ pi_state->owner = NULL;
+ raw_spin_unlock_irq(&curr->pi_lock);
+
+- rt_mutex_unlock(&pi_state->pi_mutex);
++ rt_mutex_futex_unlock(&pi_state->pi_mutex);
+
+ spin_unlock(&hb->lock);
+
+@@ -997,7 +1017,8 @@ static void exit_pi_state_list(struct task_struct *curr)
+ * FUTEX_OWNER_DIED bit. See [4]
+ *
+ * [10] There is no transient state which leaves owner and user space
+- * TID out of sync.
++ * TID out of sync. Except one error case where the kernel is denied
++ * write access to the user address, see fixup_pi_state_owner().
+ */
+
+ /*
+@@ -1394,12 +1415,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
+
+ /*
+- * It is possible that the next waiter (the one that brought
+- * this owner to the kernel) timed out and is no longer
+- * waiting on the lock.
++ * When we interleave with futex_lock_pi() where it does
++ * rt_mutex_timed_futex_lock(), we might observe @this futex_q waiter,
++ * but the rt_mutex's wait_list can be empty (either still, or again,
++ * depending on which side we land).
++ *
++ * When this happens, give up our locks and try again, giving the
++ * futex_lock_pi() instance time to complete, either by waiting on the
++ * rtmutex or removing itself from the futex queue.
+ */
+- if (!new_owner)
+- new_owner = this->task;
++ if (!new_owner) {
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++ return -EAGAIN;
++ }
+
+ /*
+ * We pass it to the next owner. The WAITERS bit is always
+@@ -1425,36 +1453,24 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ else
+ ret = -EINVAL;
+ }
+- if (ret) {
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+- return ret;
+- }
+-
+- raw_spin_lock(&pi_state->owner->pi_lock);
+- WARN_ON(list_empty(&pi_state->list));
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&pi_state->owner->pi_lock);
+
+- raw_spin_lock(&new_owner->pi_lock);
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &new_owner->pi_state_list);
+- pi_state->owner = new_owner;
+- raw_spin_unlock(&new_owner->pi_lock);
++ if (!ret) {
++ /*
++ * This is a point of no return; once we modified the uval
++ * there is no going back and subsequent operations must
++ * not fail.
++ */
++ pi_state_update_owner(pi_state, new_owner);
++ deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ }
+
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+-
+- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+-
+- /*
+- * First unlock HB so the waiter does not spin on it once he got woken
+- * up. Second wake up the waiter before the priority is adjusted. If we
+- * deboost first (and lose our higher priority), then the task might get
+- * scheduled away before the wake up can take place.
+- */
+ spin_unlock(&hb->lock);
+- wake_up_q(&wake_q);
+- if (deboost)
++
++ if (deboost) {
++ wake_up_q(&wake_q);
+ rt_mutex_adjust_prio(current);
++ }
+
+ return 0;
+ }
+@@ -2257,30 +2273,32 @@ static void unqueue_me_pi(struct futex_q *q)
+ spin_unlock(q->lock_ptr);
+ }
+
+-/*
+- * Fixup the pi_state owner with the new owner.
+- *
+- * Must be called with hash bucket lock held and mm->sem held for non
+- * private futexes.
+- */
+-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+- struct task_struct *newowner)
++static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
++ struct task_struct *argowner)
+ {
+- u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
+ struct futex_pi_state *pi_state = q->pi_state;
+- struct task_struct *oldowner = pi_state->owner;
+- u32 uval, uninitialized_var(curval), newval;
+- int ret;
++ struct task_struct *oldowner, *newowner;
++ u32 uval, curval, newval, newtid;
++ int err = 0;
++
++ oldowner = pi_state->owner;
+
+ /* Owner died? */
+ if (!pi_state->owner)
+ newtid |= FUTEX_OWNER_DIED;
+
+ /*
+- * We are here either because we stole the rtmutex from the
+- * previous highest priority waiter or we are the highest priority
+- * waiter but failed to get the rtmutex the first time.
+- * We have to replace the newowner TID in the user space variable.
++ * We are here because either:
++ *
++ * - we stole the lock and pi_state->owner needs updating to reflect
++ * that (@argowner == current),
++ *
++ * or:
++ *
++ * - someone stole our lock and we need to fix things to point to the
++ * new owner (@argowner == NULL).
++ *
++ * Either way, we have to replace the TID in the user space variable.
+ * This must be atomic as we have to preserve the owner died bit here.
+ *
+ * Note: We write the user space value _before_ changing the pi_state
+@@ -2294,6 +2312,39 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ * in lookup_pi_state.
+ */
+ retry:
++ if (!argowner) {
++ if (oldowner != current) {
++ /*
++ * We raced against a concurrent self; things are
++ * already fixed up. Nothing to do.
++ */
++ return 0;
++ }
++
++ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
++ /* We got the lock after all, nothing to fix. */
++ return 1;
++ }
++
++ /*
++ * Since we just failed the trylock, there must be an owner.
++ */
++ newowner = rt_mutex_owner(&pi_state->pi_mutex);
++ BUG_ON(!newowner);
++ } else {
++ WARN_ON_ONCE(argowner != current);
++ if (oldowner == current) {
++ /*
++ * We raced against a concurrent self; things are
++ * already fixed up. Nothing to do.
++ */
++ return 1;
++ }
++ newowner = argowner;
++ }
++
++ newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
++
+ if (get_futex_value_locked(&uval, uaddr))
+ goto handle_fault;
+
+@@ -2311,19 +2362,8 @@ retry:
+ * We fixed up user space. Now we need to fix the pi_state
+ * itself.
+ */
+- if (pi_state->owner != NULL) {
+- raw_spin_lock_irq(&pi_state->owner->pi_lock);
+- WARN_ON(list_empty(&pi_state->list));
+- list_del_init(&pi_state->list);
+- raw_spin_unlock_irq(&pi_state->owner->pi_lock);
+- }
+-
+- pi_state->owner = newowner;
++ pi_state_update_owner(pi_state, newowner);
+
+- raw_spin_lock_irq(&newowner->pi_lock);
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &newowner->pi_state_list);
+- raw_spin_unlock_irq(&newowner->pi_lock);
+ return 0;
+
+ /*
+@@ -2339,7 +2379,7 @@ retry:
+ handle_fault:
+ spin_unlock(q->lock_ptr);
+
+- ret = fault_in_user_writeable(uaddr);
++ err = fault_in_user_writeable(uaddr);
+
+ spin_lock(q->lock_ptr);
+
+@@ -2347,12 +2387,45 @@ handle_fault:
+ * Check if someone else fixed it for us:
+ */
+ if (pi_state->owner != oldowner)
+- return 0;
++ return argowner == current;
+
+- if (ret)
+- return ret;
++ /* Retry if err was -EAGAIN or the fault in succeeded */
++ if (!err)
++ goto retry;
+
+- goto retry;
++ /*
++ * fault_in_user_writeable() failed so user state is immutable. At
++ * best we can make the kernel state consistent but user state will
++ * be most likely hosed and any subsequent unlock operation will be
++ * rejected due to PI futex rule [10].
++ *
++ * Ensure that the rtmutex owner is also the pi_state owner despite
++ * the user space value claiming something different. There is no
++ * point in unlocking the rtmutex if current is the owner as it
++ * would need to wait until the next waiter has taken the rtmutex
++ * to guarantee consistent state. Keep it simple. Userspace asked
++ * for this wrecked state.
++ *
++ * The rtmutex has an owner - either current or some other
++ * task. See the EAGAIN loop above.
++ */
++ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
++
++ return err;
++}
++
++static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
++ struct task_struct *argowner)
++{
++ struct futex_pi_state *pi_state = q->pi_state;
++ int ret;
++
++ lockdep_assert_held(q->lock_ptr);
++
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++ ret = __fixup_pi_state_owner(uaddr, q, argowner);
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++ return ret;
+ }
+
+ static long futex_wait_restart(struct restart_block *restart);
+@@ -2374,13 +2447,16 @@ static long futex_wait_restart(struct restart_block *restart);
+ */
+ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ {
+- struct task_struct *owner;
+ int ret = 0;
+
+ if (locked) {
+ /*
+ * Got the lock. We might not be the anticipated owner if we
+ * did a lock-steal - fix up the PI-state in that case:
++ *
++ * Speculative pi_state->owner read (we don't hold wait_lock);
++ * since we own the lock pi_state->owner == current is the
++ * stable state, anything else needs more attention.
+ */
+ if (q->pi_state->owner != current)
+ ret = fixup_pi_state_owner(uaddr, q, current);
+@@ -2388,43 +2464,24 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ }
+
+ /*
+- * Catch the rare case, where the lock was released when we were on the
+- * way back before we locked the hash bucket.
++ * If we didn't get the lock, check if anybody stole it from us. In
++ * that case, we need to fix up the uval to point to them instead of
++ * us, otherwise bad things happen. [10]
++ *
++ * Another speculative read; pi_state->owner == current is unstable
++ * but needs our attention.
+ */
+ if (q->pi_state->owner == current) {
+- /*
+- * Try to get the rt_mutex now. This might fail as some other
+- * task acquired the rt_mutex after we removed ourself from the
+- * rt_mutex waiters list.
+- */
+- if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
+- locked = 1;
+- goto out;
+- }
+-
+- /*
+- * pi_state is incorrect, some other task did a lock steal and
+- * we returned due to timeout or signal without taking the
+- * rt_mutex. Too late.
+- */
+- raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
+- owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+- if (!owner)
+- owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
+- raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
+- ret = fixup_pi_state_owner(uaddr, q, owner);
++ ret = fixup_pi_state_owner(uaddr, q, NULL);
+ goto out;
+ }
+
+ /*
+ * Paranoia check. If we did not take the lock, then we should not be
+- * the owner of the rt_mutex.
++ * the owner of the rt_mutex. Warn and establish consistent state.
+ */
+- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
+- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
+- "pi-state %p\n", ret,
+- q->pi_state->pi_mutex.owner,
+- q->pi_state->owner);
++ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
++ return fixup_pi_state_owner(uaddr, q, current);
+
+ out:
+ return ret ? ret : locked;
+@@ -2721,7 +2778,7 @@ retry_private:
+ if (!trylock) {
+ ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
+ } else {
+- ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
++ ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
+ /* Fixup the trylock return value: */
+ ret = ret ? 0 : -EWOULDBLOCK;
+ }
+@@ -2739,13 +2796,6 @@ retry_private:
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+- /*
+- * If fixup_owner() faulted and was unable to handle the fault, unlock
+- * it and return the fault to userspace.
+- */
+- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
+- rt_mutex_unlock(&q.pi_state->pi_mutex);
+-
+ /* Unqueue and drop the lock */
+ unqueue_me_pi(&q);
+
+@@ -3050,8 +3100,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (q.pi_state && (q.pi_state->owner != current)) {
+ spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+- rt_mutex_unlock(&q.pi_state->pi_mutex);
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+@@ -3088,14 +3136,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+- /*
+- * If fixup_pi_state_owner() faulted and was unable to handle
+- * the fault, unlock the rt_mutex and return the fault to
+- * userspace.
+- */
+- if (ret && rt_mutex_owner(pi_mutex) == current)
+- rt_mutex_unlock(pi_mutex);
+-
+ /* Unqueue and drop the lock. */
+ unqueue_me_pi(&q);
+ }
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 3938e4670b89b..51867a2e537fa 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1884,6 +1884,10 @@ int register_kretprobe(struct kretprobe *rp)
+ int i;
+ void *addr;
+
++ /* If only rp->kp.addr is specified, check reregistering kprobes */
++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
++ return -EINVAL;
++
+ if (kretprobe_blacklist_size) {
+ addr = kprobe_addr(&rp->kp);
+ if (IS_ERR(addr))
+diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
+index 62b6cee8ea7f9..0613c4b1d0596 100644
+--- a/kernel/locking/rtmutex-debug.c
++++ b/kernel/locking/rtmutex-debug.c
+@@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
+ lock->name = name;
+ }
+
+-void
+-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
+-{
+-}
+-
+-void rt_mutex_deadlock_account_unlock(struct task_struct *task)
+-{
+-}
+-
+diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
+index d0519c3432b67..b585af9a1b508 100644
+--- a/kernel/locking/rtmutex-debug.h
++++ b/kernel/locking/rtmutex-debug.h
+@@ -9,9 +9,6 @@
+ * This file contains macros used solely by rtmutex.c. Debug version.
+ */
+
+-extern void
+-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
+-extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
+ extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+ extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
+ extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 7615e7722258c..6ff4156b3929e 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -956,8 +956,6 @@ takeit:
+ */
+ rt_mutex_set_owner(lock, task);
+
+- rt_mutex_deadlock_account_lock(lock, task);
+-
+ return 1;
+ }
+
+@@ -1316,6 +1314,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ return ret;
+ }
+
++static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
++{
++ int ret = try_to_take_rt_mutex(lock, current, NULL);
++
++ /*
++ * try_to_take_rt_mutex() sets the lock waiters bit
++ * unconditionally. Clean this up.
++ */
++ fixup_rt_mutex_waiters(lock);
++
++ return ret;
++}
++
+ /*
+ * Slow path try-lock function:
+ */
+@@ -1338,13 +1349,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
+ */
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+- ret = try_to_take_rt_mutex(lock, current, NULL);
+-
+- /*
+- * try_to_take_rt_mutex() sets the lock waiters bit
+- * unconditionally. Clean this up.
+- */
+- fixup_rt_mutex_waiters(lock);
++ ret = __rt_mutex_slowtrylock(lock);
+
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+@@ -1365,8 +1370,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+
+ debug_rt_mutex_unlock(lock);
+
+- rt_mutex_deadlock_account_unlock(current);
+-
+ /*
+ * We must be careful here if the fast path is enabled. If we
+ * have no waiters queued we cannot set owner to NULL here
+@@ -1432,11 +1435,10 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ enum rtmutex_chainwalk chwalk))
+ {
+- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+- rt_mutex_deadlock_account_lock(lock, current);
++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ return 0;
+- } else
+- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
++
++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+ }
+
+ static inline int
+@@ -1448,21 +1450,19 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
+ enum rtmutex_chainwalk chwalk))
+ {
+ if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+- likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+- rt_mutex_deadlock_account_lock(lock, current);
++ likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ return 0;
+- } else
+- return slowfn(lock, state, timeout, chwalk);
++
++ return slowfn(lock, state, timeout, chwalk);
+ }
+
+ static inline int
+ rt_mutex_fasttrylock(struct rt_mutex *lock,
+ int (*slowfn)(struct rt_mutex *lock))
+ {
+- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+- rt_mutex_deadlock_account_lock(lock, current);
++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ return 1;
+- }
++
+ return slowfn(lock);
+ }
+
+@@ -1472,19 +1472,18 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
+ struct wake_q_head *wqh))
+ {
+ WAKE_Q(wake_q);
++ bool deboost;
+
+- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+- rt_mutex_deadlock_account_unlock(current);
++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
++ return;
+
+- } else {
+- bool deboost = slowfn(lock, &wake_q);
++ deboost = slowfn(lock, &wake_q);
+
+- wake_up_q(&wake_q);
++ wake_up_q(&wake_q);
+
+- /* Undo pi boosting if necessary: */
+- if (deboost)
+- rt_mutex_adjust_prio(current);
+- }
++ /* Undo pi boosting if necessary: */
++ if (deboost)
++ rt_mutex_adjust_prio(current);
+ }
+
+ /**
+@@ -1519,15 +1518,28 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+ /*
+ * Futex variant with full deadlock detection.
++ * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock().
+ */
+-int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
++int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout)
+ {
+ might_sleep();
+
+- return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+- RT_MUTEX_FULL_CHAINWALK,
+- rt_mutex_slowlock);
++ return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE,
++ timeout, RT_MUTEX_FULL_CHAINWALK);
++}
++
++/*
++ * Futex variant, must not use fastpath.
++ */
++int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
++{
++ return rt_mutex_slowtrylock(lock);
++}
++
++int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
++{
++ return __rt_mutex_slowtrylock(lock);
+ }
+
+ /**
+@@ -1586,20 +1598,38 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
+ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+
+ /**
+- * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
+- * @lock: the rt_mutex to be unlocked
+- *
+- * Returns: true/false indicating whether priority adjustment is
+- * required or not.
++ * Futex variant; since futex variants do not use the fast-path, this can be
++ * simple and will not need to retry.
+ */
+-bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
+- struct wake_q_head *wqh)
++bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
++ struct wake_q_head *wake_q)
+ {
+- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+- rt_mutex_deadlock_account_unlock(current);
+- return false;
++ lockdep_assert_held(&lock->wait_lock);
++
++ debug_rt_mutex_unlock(lock);
++
++ if (!rt_mutex_has_waiters(lock)) {
++ lock->owner = NULL;
++ return false; /* done */
++ }
++
++ mark_wakeup_next_waiter(wake_q, lock);
++ return true; /* deboost and wakeups */
++}
++
++void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
++{
++ WAKE_Q(wake_q);
++ bool deboost;
++
++ raw_spin_lock_irq(&lock->wait_lock);
++ deboost = __rt_mutex_futex_unlock(lock, &wake_q);
++ raw_spin_unlock_irq(&lock->wait_lock);
++
++ if (deboost) {
++ wake_up_q(&wake_q);
++ rt_mutex_adjust_prio(current);
+ }
+- return rt_mutex_slowunlock(lock, wqh);
+ }
+
+ /**
+@@ -1656,7 +1686,6 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ __rt_mutex_init(lock, NULL);
+ debug_rt_mutex_proxy_lock(lock, proxy_owner);
+ rt_mutex_set_owner(lock, proxy_owner);
+- rt_mutex_deadlock_account_lock(lock, proxy_owner);
+ }
+
+ /**
+@@ -1667,12 +1696,10 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ * No locking. Caller has to do serializing itself
+ * Special API call for PI-futex support
+ */
+-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+- struct task_struct *proxy_owner)
++void rt_mutex_proxy_unlock(struct rt_mutex *lock)
+ {
+ debug_rt_mutex_proxy_unlock(lock);
+ rt_mutex_set_owner(lock, NULL);
+- rt_mutex_deadlock_account_unlock(proxy_owner);
+ }
+
+ /**
+diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
+index c4060584c4076..6607802efa8bd 100644
+--- a/kernel/locking/rtmutex.h
++++ b/kernel/locking/rtmutex.h
+@@ -11,8 +11,6 @@
+ */
+
+ #define rt_mutex_deadlock_check(l) (0)
+-#define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
+-#define rt_mutex_deadlock_account_unlock(l) do { } while (0)
+ #define debug_rt_mutex_init_waiter(w) do { } while (0)
+ #define debug_rt_mutex_free_waiter(w) do { } while (0)
+ #define debug_rt_mutex_lock(l) do { } while (0)
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 14cbafed00142..bea5d677fe343 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -102,8 +102,7 @@ enum rtmutex_chainwalk {
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+- struct task_struct *proxy_owner);
++extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
+ extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ struct task_struct *task);
+@@ -113,8 +112,13 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+ extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter);
+ extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
+-extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
+- struct wake_q_head *wqh);
++extern int rt_mutex_futex_trylock(struct rt_mutex *l);
++extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
++
++extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
++extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
++ struct wake_q_head *wqh);
++
+ extern void rt_mutex_adjust_prio(struct task_struct *task);
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 0385c57a2b7af..05ca01ef97f7f 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1753,7 +1753,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+ spinlock_t *ptl;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long haddr = address & HPAGE_PMD_MASK;
+- bool was_locked = false;
++ bool do_unlock_page = false;
+ pmd_t _pmd;
+
+ mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
+@@ -1766,7 +1766,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+ VM_BUG_ON(freeze && !page);
+ if (page) {
+ VM_WARN_ON_ONCE(!PageLocked(page));
+- was_locked = true;
+ if (page != pmd_page(*pmd))
+ goto out;
+ }
+@@ -1775,19 +1774,29 @@ repeat:
+ if (pmd_trans_huge(*pmd)) {
+ if (!page) {
+ page = pmd_page(*pmd);
+- if (unlikely(!trylock_page(page))) {
+- get_page(page);
+- _pmd = *pmd;
+- spin_unlock(ptl);
+- lock_page(page);
+- spin_lock(ptl);
+- if (unlikely(!pmd_same(*pmd, _pmd))) {
+- unlock_page(page);
++ /*
++ * An anonymous page must be locked, to ensure that a
++ * concurrent reuse_swap_page() sees stable mapcount;
++ * but reuse_swap_page() is not used on shmem or file,
++ * and page lock must not be taken when zap_pmd_range()
++ * calls __split_huge_pmd() while i_mmap_lock is held.
++ */
++ if (PageAnon(page)) {
++ if (unlikely(!trylock_page(page))) {
++ get_page(page);
++ _pmd = *pmd;
++ spin_unlock(ptl);
++ lock_page(page);
++ spin_lock(ptl);
++ if (unlikely(!pmd_same(*pmd, _pmd))) {
++ unlock_page(page);
++ put_page(page);
++ page = NULL;
++ goto repeat;
++ }
+ put_page(page);
+- page = NULL;
+- goto repeat;
+ }
+- put_page(page);
++ do_unlock_page = true;
+ }
+ }
+ if (PageMlocked(page))
+@@ -1797,7 +1806,7 @@ repeat:
+ __split_huge_pmd_locked(vma, pmd, haddr, freeze);
+ out:
+ spin_unlock(ptl);
+- if (!was_locked && page)
++ if (do_unlock_page)
+ unlock_page(page);
+ mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 52b5e0e026d60..5a16d892c891c 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1210,12 +1210,11 @@ struct hstate *size_to_hstate(unsigned long size)
+ */
+ bool page_huge_active(struct page *page)
+ {
+- VM_BUG_ON_PAGE(!PageHuge(page), page);
+- return PageHead(page) && PagePrivate(&page[1]);
++ return PageHeadHuge(page) && PagePrivate(&page[1]);
+ }
+
+ /* never called for tail page */
+-static void set_page_huge_active(struct page *page)
++void set_page_huge_active(struct page *page)
+ {
+ VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
+ SetPagePrivate(&page[1]);
+@@ -4657,9 +4656,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
+ {
+ bool ret = true;
+
+- VM_BUG_ON_PAGE(!PageHead(page), page);
+ spin_lock(&hugetlb_lock);
+- if (!page_huge_active(page) || !get_page_unless_zero(page)) {
++ if (!PageHeadHuge(page) || !page_huge_active(page) ||
++ !get_page_unless_zero(page)) {
+ ret = false;
+ goto unlock;
+ }
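
Both hugetlb hunks close the same race: a page can stop being a huge head page between a caller's check and the acquisition of hugetlb_lock, so the old VM_BUG_ON assertions become checks performed under the lock, with failure reported as an error return rather than a crash. Sketched in userspace terms, where is_huge_head and is_active are illustrative stand-ins for the page state:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hugetlb_lock = PTHREAD_MUTEX_INITIALIZER;
static bool is_huge_head = true; /* may change until the lock is held */
static bool is_active = true;

static bool isolate(void)
{
	bool ret = true;

	/* no assertion before the lock: the precondition is not stable yet */
	pthread_mutex_lock(&hugetlb_lock);
	if (!is_huge_head || !is_active) { /* re-check under the lock */
		ret = false;
		goto unlock;
	}
	is_active = false;                 /* actually isolate the page */
unlock:
	pthread_mutex_unlock(&hugetlb_lock);
	return ret;
}

int main(void)
{
	printf("first isolate:  %s\n", isolate() ? "ok" : "failed");
	printf("second isolate: %s\n", isolate() ? "ok" : "failed");
	return 0;
}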
+diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
+index 482c94d9d958a..d1c7dcc234486 100644
+--- a/net/lapb/lapb_out.c
++++ b/net/lapb/lapb_out.c
+@@ -87,7 +87,8 @@ void lapb_kick(struct lapb_cb *lapb)
+ skb = skb_dequeue(&lapb->write_queue);
+
+ do {
+- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
++ skbn = skb_copy(skb, GFP_ATOMIC);
++ if (!skbn) {
+ skb_queue_head(&lapb->write_queue, skb);
+ break;
+ }
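
skb_clone() shares the data buffer between the clone and the original, so when the transmit path modifies the clone in place it also corrupts the frame lapb keeps on its write_queue for retransmission; skb_copy() gives the queue a private copy. A toy model of the difference, where struct buf is an illustrative stand-in for sk_buff:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char *data; };

static struct buf clone_buf(struct buf *b)  /* like skb_clone() */
{
	return (struct buf){ .data = b->data }; /* shares the data */
}

static struct buf copy_buf(struct buf *b)   /* like skb_copy() */
{
	return (struct buf){ .data = strdup(b->data) }; /* private data */
}

int main(void)
{
	char payload[] = "frame-to-retransmit";
	struct buf queued = { .data = payload };

	struct buf c = clone_buf(&queued);
	c.data[0] = 'X'; /* "write a header" into the clone, in place */
	printf("after clone+modify, queued = %s\n", queued.data); /* corrupted */

	queued.data[0] = 'f'; /* restore for the second run */
	struct buf d = copy_buf(&queued);
	d.data[0] = 'X';
	printf("after copy+modify,  queued = %s\n", queued.data); /* intact */
	free(d.data);
	return 0;
}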
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index f783d1377d9a8..9f0f437a09b95 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -128,8 +128,11 @@ int drv_sta_state(struct ieee80211_local *local,
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = drv_sta_add(local, sdata, &sta->sta);
+- if (ret == 0)
++ if (ret == 0) {
+ sta->uploaded = true;
++ if (rcu_access_pointer(sta->sta.rates))
++ drv_sta_rate_tbl_update(local, sdata, &sta->sta);
++ }
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ drv_sta_remove(local, sdata, &sta->sta);
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index e6096dfd02105..41421609a0f02 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -892,7 +892,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
+ if (old)
+ kfree_rcu(old, rcu_head);
+
+- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
++ if (sta->uploaded)
++ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+
+ return 0;
+ }
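
The driver-ops.c and rate.c hunks are two halves of one ordering fix: a rate table set before the station is uploaded to the driver is now cached and replayed at upload time, instead of being pushed at a driver that does not yet know the station. The pattern, with illustrative stand-ins for the driver callback:

#include <stdbool.h>
#include <stdio.h>

struct sta { bool uploaded; const char *rates; };

static void drv_rate_tbl_update(struct sta *s)
{
	printf("driver sees rate table: %s\n", s->rates);
}

static void set_rates(struct sta *s, const char *rates)
{
	s->rates = rates;    /* always cache ... */
	if (s->uploaded)     /* ... but only notify an uploaded station */
		drv_rate_tbl_update(s);
}

static void upload(struct sta *s)
{
	s->uploaded = true;
	if (s->rates)        /* replay anything set before upload */
		drv_rate_tbl_update(s);
}

int main(void)
{
	struct sta s = { 0 };
	set_rates(&s, "mcs0-7"); /* too early: cached, not pushed */
	upload(&s);              /* replayed here */
	set_rates(&s, "mcs0-9"); /* normal path: pushed immediately */
	return 0;
}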
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 9be82ed02e0e5..c38d68131d02e 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3802,6 +3802,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
+
+ rcu_read_lock();
+ key = rcu_dereference(sta->ptk[sta->ptk_idx]);
++ if (!key)
++ key = rcu_dereference(sdata->default_unicast_key);
+ if (key) {
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
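
With no pairwise key installed (static-key setups, for instance), the fast-rx path previously evaluated no key at all; the hunk falls back to the interface's default unicast key so the cipher checks run against the key that will actually protect unicast frames. The fallback reduced to a sketch (struct key here is illustrative, not the mac80211 type):

#include <stdio.h>

struct key { const char *cipher; };

static struct key *select_rx_key(struct key *pairwise,
				 struct key *default_unicast)
{
	struct key *key = pairwise;
	if (!key)
		key = default_unicast; /* the added fallback */
	return key;                    /* may still be NULL: open network */
}

int main(void)
{
	struct key wep = { "WEP" };
	struct key *k = select_rx_key(NULL, &wep);
	printf("rx key cipher: %s\n", k ? k->cipher : "none");
	return 0;
}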
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 912ed9b901ac9..45038c837eab4 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -393,7 +393,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+ {
+ struct qdisc_rate_table *rtab;
+
+- if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
++ if (tab == NULL || r->rate == 0 ||
++ r->cell_log == 0 || r->cell_log >= 32 ||
+ nla_len(tab) != TC_RTAB_SIZE)
+ return NULL;
+
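
cell_log arrives from userspace via a netlink attribute and is later used as a shift count when packet sizes are mapped to slots of the 256-entry rate table; shifting a 32-bit value by 32 or more is undefined behaviour in C, so the hunk rejects such tables up front. A sketch of the validation and the shift it protects (function names and the slot clamp are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RTAB_SLOTS 256

static bool rtab_params_valid(uint32_t rate, uint8_t cell_log)
{
	/* mirrors: r->rate == 0 || r->cell_log == 0 || r->cell_log >= 32 */
	return rate != 0 && cell_log != 0 && cell_log < 32;
}

static unsigned int rtab_slot(uint32_t size, uint8_t cell_log)
{
	/* only well-defined once cell_log < 32 has been checked */
	unsigned int slot = size >> cell_log;
	return slot < RTAB_SLOTS ? slot : RTAB_SLOTS - 1;
}

int main(void)
{
	printf("valid? %d\n", rtab_params_valid(1000, 40)); /* 0: rejected */
	printf("slot = %u\n", rtab_slot(1500, 3));          /* 187 */
	return 0;
}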
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 720de648510dc..b003cb07254a8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6284,7 +6284,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60130},
+- {0x13, 0xb8a60140},
++ {0x13, 0xb8a61140},
+ {0x17, 0x90170110}),
+ {}
+ };
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index d84c28eac262d..0ba5bb51bd93c 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -226,8 +226,11 @@ static int read_symbols(struct elf *elf)
+
+ symtab = find_section_by_name(elf, ".symtab");
+ if (!symtab) {
+- WARN("missing symbol table");
+- return -1;
++ /*
++ * A missing symbol table is actually possible if it's an empty
++ * .o file. This can happen for thunk_64.o.
++ */
++ return 0;
+ }
+
+ symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
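
The objtool change above follows from its own comment: a missing .symtab is legal for an object file that compiled away to nothing, so it is now treated as "zero symbols to read" rather than a fatal error. The shape of the fix in miniature (find_section() and the section table are illustrative stand-ins, not the objtool/libelf API):

#include <stdio.h>
#include <string.h>

struct section { const char *name; int nsyms; };

static const struct section sections[] = { { ".text", 0 } }; /* no .symtab */

static const struct section *find_section(const char *name)
{
	for (size_t i = 0; i < sizeof(sections) / sizeof(sections[0]); i++)
		if (!strcmp(sections[i].name, name))
			return &sections[i];
	return NULL;
}

static int read_symbols(void)
{
	const struct section *symtab = find_section(".symtab");
	if (!symtab)
		return 0; /* empty object file: nothing to read, not an error */
	printf("%d symbols\n", symtab->nsyms);
	return 0;
}

int main(void) { return read_symbols(); }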