From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Sat, 25 May 2024 15:14:59 +0000 (UTC)
Message-ID: <1716650084.c60e6e4dd9d01c3e54b863430b79ffe2f6a0432b.mpagano@gentoo>
commit: c60e6e4dd9d01c3e54b863430b79ffe2f6a0432b
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat May 25 15:14:44 2024 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat May 25 15:14:44 2024 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c60e6e4d
Linux patch 5.10.218
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1217_linux-5.10.218.patch | 805 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 809 insertions(+)
diff --git a/0000_README b/0000_README
index ce7db8ae..c4548db6 100644
--- a/0000_README
+++ b/0000_README
@@ -911,6 +911,10 @@ Patch: 1216_linux-5.10.217.patch
From: https://www.kernel.org
Desc: Linux 5.10.217
+Patch: 1217_linux-5.10.218.patch
+From: https://www.kernel.org
+Desc: Linux 5.10.218
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1217_linux-5.10.218.patch b/1217_linux-5.10.218.patch
new file mode 100644
index 00000000..83f6fb25
--- /dev/null
+++ b/1217_linux-5.10.218.patch
@@ -0,0 +1,805 @@
+diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
+index f523aa68a36b3..cf601bd058abe 100755
+--- a/Documentation/sphinx/kernel_include.py
++++ b/Documentation/sphinx/kernel_include.py
+@@ -94,7 +94,6 @@ class KernelInclude(Include):
+ # HINT: this is the only line I had to change / commented out:
+ #path = utils.relative_path(None, path)
+
+- path = nodes.reprunicode(path)
+ encoding = self.options.get(
+ 'encoding', self.state.document.settings.input_encoding)
+ e_handler=self.state.document.settings.input_encoding_error_handler
+diff --git a/Makefile b/Makefile
+index d9557382a0286..3bc56bf43c822 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 217
++SUBLEVEL = 218
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 1631a9a1566e3..bd785386d6291 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -46,14 +46,6 @@
+ .code64
+ .section .entry.text, "ax"
+
+-#ifdef CONFIG_PARAVIRT_XXL
+-SYM_CODE_START(native_usergs_sysret64)
+- UNWIND_HINT_EMPTY
+- swapgs
+- sysretq
+-SYM_CODE_END(native_usergs_sysret64)
+-#endif /* CONFIG_PARAVIRT_XXL */
+-
+ /*
+ * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
+ *
+@@ -128,7 +120,12 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
+ * Try to use SYSRET instead of IRET if we're returning to
+ * a completely clean 64-bit userspace context. If we're not,
+ * go to the slow exit path.
++ * In the Xen PV case we must use iret anyway.
+ */
++
++ ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \
++ X86_FEATURE_XENPV
++
+ movq RCX(%rsp), %rcx
+ movq RIP(%rsp), %r11
+
+@@ -220,7 +217,9 @@ syscall_return_via_sysret:
+
+ popq %rdi
+ popq %rsp
+- USERGS_SYSRET64
++ swapgs
++ CLEAR_CPU_BUFFERS
++ sysretq
+ SYM_CODE_END(entry_SYSCALL_64)
+
+ /*
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index f40dea50dfbf3..e585a4705b8dd 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -132,13 +132,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
+ #endif
+
+ #define INTERRUPT_RETURN jmp native_iret
+-#define USERGS_SYSRET64 \
+- swapgs; \
+- CLEAR_CPU_BUFFERS; \
+- sysretq;
+-#define USERGS_SYSRET32 \
+- swapgs; \
+- sysretl
+
+ #else
+ #define INTERRUPT_RETURN iret
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index 4a32b0d343762..3c89c1f648719 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -776,11 +776,6 @@ extern void default_banner(void);
+
+ #ifdef CONFIG_X86_64
+ #ifdef CONFIG_PARAVIRT_XXL
+-#define USERGS_SYSRET64 \
+- PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64), \
+- ANNOTATE_RETPOLINE_SAFE; \
+- jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
+-
+ #ifdef CONFIG_DEBUG_ENTRY
+ #define SAVE_FLAGS(clobbers) \
+ PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), \
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 903d71884fa25..55d8b7950e61f 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -157,14 +157,6 @@ struct pv_cpu_ops {
+
+ u64 (*read_pmc)(int counter);
+
+- /*
+- * Switch to usermode gs and return to 64-bit usermode using
+- * sysret. Only used in 64-bit kernels to return to 64-bit
+- * processes. Usermode register state, including %rsp, must
+- * already be restored.
+- */
+- void (*usergs_sysret64)(void);
+-
+ /* Normal iret. Jump to this with the standard iret stack
+ frame set up. */
+ void (*iret)(void);
+diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
+index 1354bc30614d7..b14533af76762 100644
+--- a/arch/x86/kernel/asm-offsets_64.c
++++ b/arch/x86/kernel/asm-offsets_64.c
+@@ -13,8 +13,6 @@ int main(void)
+ {
+ #ifdef CONFIG_PARAVIRT
+ #ifdef CONFIG_PARAVIRT_XXL
+- OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template,
+- cpu.usergs_sysret64);
+ #ifdef CONFIG_DEBUG_ENTRY
+ OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl);
+ #endif
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index f0e4ad8595ca7..9d91061b862c9 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -124,8 +124,7 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff,
+ else if (opfunc == _paravirt_ident_64)
+ ret = paravirt_patch_ident_64(insn_buff, len);
+
+- else if (type == PARAVIRT_PATCH(cpu.iret) ||
+- type == PARAVIRT_PATCH(cpu.usergs_sysret64))
++ else if (type == PARAVIRT_PATCH(cpu.iret))
+ /* If operation requires a jmp, then jmp */
+ ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
+ #endif
+@@ -159,7 +158,6 @@ static u64 native_steal_clock(int cpu)
+
+ /* These are in entry.S */
+ extern void native_iret(void);
+-extern void native_usergs_sysret64(void);
+
+ static struct resource reserve_ioports = {
+ .start = 0,
+@@ -299,7 +297,6 @@ struct paravirt_patch_template pv_ops = {
+
+ .cpu.load_sp0 = native_load_sp0,
+
+- .cpu.usergs_sysret64 = native_usergs_sysret64,
+ .cpu.iret = native_iret,
+
+ #ifdef CONFIG_X86_IOPL_IOPERM
+diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c
+index 7c518b08aa3c5..2fada2c347c98 100644
+--- a/arch/x86/kernel/paravirt_patch.c
++++ b/arch/x86/kernel/paravirt_patch.c
+@@ -27,7 +27,6 @@ struct patch_xxl {
+ const unsigned char mmu_write_cr3[3];
+ const unsigned char irq_restore_fl[2];
+ const unsigned char cpu_wbinvd[2];
+- const unsigned char cpu_usergs_sysret64[6];
+ const unsigned char mov64[3];
+ };
+
+@@ -40,8 +39,6 @@ static const struct patch_xxl patch_data_xxl = {
+ .mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3
+ .irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq
+ .cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd
+- .cpu_usergs_sysret64 = { 0x0f, 0x01, 0xf8,
+- 0x48, 0x0f, 0x07 }, // swapgs; sysretq
+ .mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax
+ };
+
+@@ -83,7 +80,6 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
+ PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
+ PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
+
+- PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
+ PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
+ #endif
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 8e0b957c62193..bc295439360e5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8501,13 +8501,20 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+
+ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+ {
++ /*
++ * Suppress the error code if the vCPU is in Real Mode, as Real Mode
++ * exceptions don't report error codes. The presence of an error code
++ * is carried with the exception and only stripped when the exception
++ * is injected as intercepted #PF VM-Exits for AMD's Paged Real Mode do
++ * report an error code despite the CPU being in Real Mode.
++ */
++ vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
++
+ trace_kvm_inj_exception(vcpu->arch.exception.nr,
+ vcpu->arch.exception.has_error_code,
+ vcpu->arch.exception.error_code,
+ vcpu->arch.exception.injected);
+
+- if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+- vcpu->arch.exception.error_code = false;
+ kvm_x86_ops.queue_exception(vcpu);
+ }
+
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 94804670caab8..b1efc4b4f42ad 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1059,7 +1059,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .read_pmc = xen_read_pmc,
+
+ .iret = xen_iret,
+- .usergs_sysret64 = xen_sysret64,
+
+ .load_tr_desc = paravirt_nop,
+ .set_ldt = xen_set_ldt,
+diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
+index e3031afcb1039..3a33713cf449f 100644
+--- a/arch/x86/xen/xen-asm.S
++++ b/arch/x86/xen/xen-asm.S
+@@ -220,27 +220,6 @@ SYM_CODE_START(xen_iret)
+ jmp hypercall_iret
+ SYM_CODE_END(xen_iret)
+
+-SYM_CODE_START(xen_sysret64)
+- UNWIND_HINT_EMPTY
+- /*
+- * We're already on the usermode stack at this point, but
+- * still with the kernel gs, so we can easily switch back.
+- *
+- * tss.sp2 is scratch space.
+- */
+- movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
+- movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+-
+- pushq $__USER_DS
+- pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
+- pushq %r11
+- pushq $__USER_CS
+- pushq %rcx
+-
+- pushq $VGCF_in_syscall
+- jmp hypercall_iret
+-SYM_CODE_END(xen_sysret64)
+-
+ /*
+ * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
+ * also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 8695809b88f08..98242430d07e7 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -138,8 +138,6 @@ __visible unsigned long xen_read_cr2_direct(void);
+
+ /* These are not functions, and cannot be called normally */
+ __visible void xen_iret(void);
+-__visible void xen_sysret32(void);
+-__visible void xen_sysret64(void);
+
+ extern int xen_panic_handler_init(void);
+
+diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
+index a981a22cfe891..b8388a3b9c064 100644
+--- a/drivers/firmware/arm_scmi/reset.c
++++ b/drivers/firmware/arm_scmi/reset.c
+@@ -149,8 +149,12 @@ static int scmi_domain_reset(const struct scmi_handle *handle, u32 domain,
+ struct scmi_xfer *t;
+ struct scmi_msg_reset_domain_reset *dom;
+ struct scmi_reset_info *pi = handle->reset_priv;
+- struct reset_dom_info *rdom = pi->dom_info + domain;
++ struct reset_dom_info *rdom;
+
++ if (domain >= pi->num_domains)
++ return -EINVAL;
++
++ rdom = pi->dom_info + domain;
+ if (rdom->async_reset)
+ flags |= ASYNCHRONOUS_RESET;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index a8f1c4969fac7..e971d2b9e3c00 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -765,6 +765,9 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
+ if (!obj)
+ return -EINVAL;
+
++ if (!info || info->head.block == AMDGPU_RAS_BLOCK_COUNT)
++ return -EINVAL;
++
+ switch (info->head.block) {
+ case AMDGPU_RAS_BLOCK__UMC:
+ if (adev->umc.funcs->query_ras_error_count)
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 89dc69d1807e1..2fc21aae1004e 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -2420,14 +2420,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
+ {
+ u32 reg;
+
++ spin_lock_bh(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+- if (reg & CMD_SW_RESET)
++ if (reg & CMD_SW_RESET) {
++ spin_unlock_bh(&priv->reg_lock);
+ return;
++ }
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock_bh(&priv->reg_lock);
+
+ /* UniMAC stops on a packet boundary, wait for a full-size packet
+ * to be processed
+@@ -2443,8 +2447,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
+ udelay(10);
+
+ /* issue soft reset and disable MAC while updating its registers */
++ spin_lock_bh(&priv->reg_lock);
+ bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+ udelay(2);
++ spin_unlock_bh(&priv->reg_lock);
+ }
+
+ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
+@@ -3572,16 +3578,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
+ * 3. The number of filters needed exceeds the number filters
+ * supported by the hardware.
+ */
++ spin_lock(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
+ (nfilter > MAX_MDF_FILTER)) {
+ reg |= CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock(&priv->reg_lock);
+ bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+ return;
+ } else {
+ reg &= ~CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock(&priv->reg_lock);
+ }
+
+ /* update MDF filter */
+@@ -3975,6 +3984,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
+ goto err;
+ }
+
++ spin_lock_init(&priv->reg_lock);
+ spin_lock_init(&priv->lock);
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index c7853d5304b09..82f9fdf591036 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -627,6 +627,8 @@ struct bcmgenet_rxnfc_rule {
+ /* device context */
+ struct bcmgenet_priv {
+ void __iomem *base;
++ /* reg_lock: lock to serialize access to shared registers */
++ spinlock_t reg_lock;
+ enum bcmgenet_version version;
+ struct net_device *dev;
+
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+index 2c2a56d5a0a1a..35c12938cb348 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+@@ -134,6 +134,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ }
+
+ /* Can't suspend with WoL if MAC is still in reset */
++ spin_lock_bh(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (reg & CMD_SW_RESET)
+ reg &= ~CMD_SW_RESET;
+@@ -141,6 +142,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ /* disable RX */
+ reg &= ~CMD_RX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock_bh(&priv->reg_lock);
+ mdelay(10);
+
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+@@ -186,6 +188,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ }
+
+ /* Enable CRC forward */
++ spin_lock_bh(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ priv->crc_fwd_en = 1;
+ reg |= CMD_CRC_FWD;
+@@ -193,6 +196,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+ /* Receiver must be enabled for WOL MP detection */
+ reg |= CMD_RX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock_bh(&priv->reg_lock);
+
+ reg = UMAC_IRQ_MPD_R;
+ if (hfb_enable)
+@@ -239,7 +243,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ }
+
+ /* Disable CRC Forward */
++ spin_lock_bh(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_CRC_FWD;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock_bh(&priv->reg_lock);
+ }
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index becc717aad131..1e07f57ff3edd 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -91,6 +91,7 @@ void bcmgenet_mii_setup(struct net_device *dev)
+ reg |= RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
++ spin_lock_bh(&priv->reg_lock);
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN |
+@@ -103,6 +104,7 @@ void bcmgenet_mii_setup(struct net_device *dev)
+ reg |= CMD_TX_EN | CMD_RX_EN;
+ }
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
++ spin_unlock_bh(&priv->reg_lock);
+
+ priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ bcmgenet_eee_enable_set(dev,
+@@ -264,6 +266,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
+ * block for the interface to work
+ */
+ if (priv->ext_phy) {
++ mutex_lock(&phydev->lock);
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~ID_MODE_DIS;
+ reg |= id_mode_dis;
+@@ -272,6 +275,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
+ else
+ reg |= RGMII_MODE_EN;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
++ mutex_unlock(&phydev->lock);
+ }
+
+ if (init)
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 4d46260e6bff3..ee99dc56c5448 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -205,6 +205,7 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
+ const struct pinctrl_pin_desc *pin)
+ {
+ struct pin_desc *pindesc;
++ int error;
+
+ pindesc = pin_desc_get(pctldev, pin->number);
+ if (pindesc) {
+@@ -226,18 +227,25 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
+ } else {
+ pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", pin->number);
+ if (!pindesc->name) {
+- kfree(pindesc);
+- return -ENOMEM;
++ error = -ENOMEM;
++ goto failed;
+ }
+ pindesc->dynamic_name = true;
+ }
+
+ pindesc->drv_data = pin->drv_data;
+
+- radix_tree_insert(&pctldev->pin_desc_tree, pin->number, pindesc);
++ error = radix_tree_insert(&pctldev->pin_desc_tree, pin->number, pindesc);
++ if (error)
++ goto failed;
++
+ pr_debug("registered pin %d (%s) on %s\n",
+ pin->number, pindesc->name, pctldev->desc->name);
+ return 0;
++
++failed:
++ kfree(pindesc);
++ return error;
+ }
+
+ static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
+diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
+index 79b7db8580e05..d988511f8b326 100644
+--- a/drivers/tty/serial/kgdboc.c
++++ b/drivers/tty/serial/kgdboc.c
+@@ -19,6 +19,7 @@
+ #include <linux/console.h>
+ #include <linux/vt_kern.h>
+ #include <linux/input.h>
++#include <linux/irq_work.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/serial_core.h>
+@@ -48,6 +49,25 @@ static struct kgdb_io kgdboc_earlycon_io_ops;
+ static int (*earlycon_orig_exit)(struct console *con);
+ #endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
+
++/*
++ * When we leave the debug trap handler we need to reset the keyboard status
++ * (since the original keyboard state gets partially clobbered by kdb use of
++ * the keyboard).
++ *
++ * The path to deliver the reset is somewhat circuitous.
++ *
++ * To deliver the reset we register an input handler, reset the keyboard and
++ * then deregister the input handler. However, to get this done right, we do
++ * have to carefully manage the calling context because we can only register
++ * input handlers from task context.
++ *
++ * In particular we need to trigger the action from the debug trap handler with
++ * all its NMI and/or NMI-like oddities. To solve this the kgdboc trap exit code
++ * (the "post_exception" callback) uses irq_work_queue(), which is NMI-safe, to
++ * schedule a callback from a hardirq context. From there we have to defer the
++ * work again, this time using schedule_work(), to get a callback using the
++ * system workqueue, which runs in task context.
++ */
+ #ifdef CONFIG_KDB_KEYBOARD
+ static int kgdboc_reset_connect(struct input_handler *handler,
+ struct input_dev *dev,
+@@ -99,10 +119,17 @@ static void kgdboc_restore_input_helper(struct work_struct *dummy)
+
+ static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
+
++static void kgdboc_queue_restore_input_helper(struct irq_work *unused)
++{
++ schedule_work(&kgdboc_restore_input_work);
++}
++
++static DEFINE_IRQ_WORK(kgdboc_restore_input_irq_work, kgdboc_queue_restore_input_helper);
++
+ static void kgdboc_restore_input(void)
+ {
+ if (likely(system_state == SYSTEM_RUNNING))
+- schedule_work(&kgdboc_restore_input_work);
++ irq_work_queue(&kgdboc_restore_input_irq_work);
+ }
+
+ static int kgdboc_register_kbd(char **cptr)
+@@ -133,6 +160,7 @@ static void kgdboc_unregister_kbd(void)
+ i--;
+ }
+ }
++ irq_work_sync(&kgdboc_restore_input_irq_work);
+ flush_work(&kgdboc_restore_input_work);
+ }
+ #else /* ! CONFIG_KDB_KEYBOARD */
+diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
+index 261131c9e37c6..4446c4066679d 100644
+--- a/drivers/usb/typec/ucsi/displayport.c
++++ b/drivers/usb/typec/ucsi/displayport.c
+@@ -249,8 +249,6 @@ static void ucsi_displayport_work(struct work_struct *work)
+ struct ucsi_dp *dp = container_of(work, struct ucsi_dp, work);
+ int ret;
+
+- mutex_lock(&dp->con->lock);
+-
+ ret = typec_altmode_vdm(dp->alt, dp->header,
+ dp->vdo_data, dp->vdo_size);
+ if (ret)
+@@ -259,8 +257,6 @@ static void ucsi_displayport_work(struct work_struct *work)
+ dp->vdo_data = NULL;
+ dp->vdo_size = 0;
+ dp->header = 0;
+-
+- mutex_unlock(&dp->con->lock);
+ }
+
+ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 51298d749824c..209eb85b6c270 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -3194,6 +3194,7 @@ static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
+ * alignment and size).
+ */
+ ret = -EUCLEAN;
++ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+ goto error;
+ }
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 6be7e75922918..36fa456f42ba9 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2645,6 +2645,8 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
+ mptcp_subflow_early_fallback(msk, subflow);
+
++ WRITE_ONCE(msk->write_seq, subflow->idsn);
++
+ do_connect:
+ err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
+ sock->state = ssock->state;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index a2b14434d7aa0..ac3678d2d6d52 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1927,7 +1927,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ struct sock *sk = sock->sk;
+ struct netlink_sock *nlk = nlk_sk(sk);
+ int noblock = flags & MSG_DONTWAIT;
+- size_t copied;
++ size_t copied, max_recvmsg_len;
+ struct sk_buff *skb, *data_skb;
+ int err, ret;
+
+@@ -1960,9 +1960,10 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ #endif
+
+ /* Record the max length of recvmsg() calls for future allocations */
+- nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
+- nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
+- SKB_WITH_OVERHEAD(32768));
++ max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
++ max_recvmsg_len = min_t(size_t, max_recvmsg_len,
++ SKB_WITH_OVERHEAD(32768));
++ WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
+
+ copied = data_skb->len;
+ if (len < copied) {
+@@ -2211,6 +2212,7 @@ static int netlink_dump(struct sock *sk)
+ struct netlink_ext_ack extack = {};
+ struct netlink_callback *cb;
+ struct sk_buff *skb = NULL;
++ size_t max_recvmsg_len;
+ struct module *module;
+ int err = -ENOBUFS;
+ int alloc_min_size;
+@@ -2233,8 +2235,9 @@ static int netlink_dump(struct sock *sk)
+ cb = &nlk->cb;
+ alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+
+- if (alloc_min_size < nlk->max_recvmsg_len) {
+- alloc_size = nlk->max_recvmsg_len;
++ max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len);
++ if (alloc_min_size < max_recvmsg_len) {
++ alloc_size = max_recvmsg_len;
+ skb = alloc_skb(alloc_size,
+ (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
+ __GFP_NOWARN | __GFP_NORETRY);
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index 1c403e8a8044c..4f5d44037081b 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -210,7 +210,7 @@ static struct ima_rule_entry *arch_policy_entry __ro_after_init;
+ static LIST_HEAD(ima_default_rules);
+ static LIST_HEAD(ima_policy_rules);
+ static LIST_HEAD(ima_temp_rules);
+-static struct list_head *ima_rules = &ima_default_rules;
++static struct list_head __rcu *ima_rules = (struct list_head __rcu *)(&ima_default_rules);
+
+ static int ima_policy __initdata;
+
+@@ -648,12 +648,14 @@ int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
+ {
+ struct ima_rule_entry *entry;
+ int action = 0, actmask = flags | (flags << 1);
++ struct list_head *ima_rules_tmp;
+
+ if (template_desc)
+ *template_desc = ima_template_desc_current();
+
+ rcu_read_lock();
+- list_for_each_entry_rcu(entry, ima_rules, list) {
++ ima_rules_tmp = rcu_dereference(ima_rules);
++ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+
+ if (!(entry->action & actmask))
+ continue;
+@@ -701,11 +703,15 @@ int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
+ void ima_update_policy_flag(void)
+ {
+ struct ima_rule_entry *entry;
++ struct list_head *ima_rules_tmp;
+
+- list_for_each_entry(entry, ima_rules, list) {
++ rcu_read_lock();
++ ima_rules_tmp = rcu_dereference(ima_rules);
++ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+ if (entry->action & IMA_DO_MASK)
+ ima_policy_flag |= entry->action;
+ }
++ rcu_read_unlock();
+
+ ima_appraise |= (build_ima_appraise | temp_ima_appraise);
+ if (!ima_appraise)
+@@ -898,10 +904,10 @@ void ima_update_policy(void)
+
+ list_splice_tail_init_rcu(&ima_temp_rules, policy, synchronize_rcu);
+
+- if (ima_rules != policy) {
++ if (ima_rules != (struct list_head __rcu *)policy) {
+ ima_policy_flag = 0;
+- ima_rules = policy;
+
++ rcu_assign_pointer(ima_rules, policy);
+ /*
+ * IMA architecture specific policy rules are specified
+ * as strings and converted to an array of ima_entry_rules
+@@ -989,7 +995,7 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ entry->lsm[lsm_rule].args_p);
+
+- if (ima_rules == &ima_default_rules) {
++ if (ima_rules == (struct list_head __rcu *)(&ima_default_rules)) {
+ kfree(entry->lsm[lsm_rule].args_p);
+ entry->lsm[lsm_rule].args_p = NULL;
+ result = -EINVAL;
+@@ -1598,9 +1604,11 @@ void *ima_policy_start(struct seq_file *m, loff_t *pos)
+ {
+ loff_t l = *pos;
+ struct ima_rule_entry *entry;
++ struct list_head *ima_rules_tmp;
+
+ rcu_read_lock();
+- list_for_each_entry_rcu(entry, ima_rules, list) {
++ ima_rules_tmp = rcu_dereference(ima_rules);
++ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+ if (!l--) {
+ rcu_read_unlock();
+ return entry;
+@@ -1619,7 +1627,8 @@ void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos)
+ rcu_read_unlock();
+ (*pos)++;
+
+- return (&entry->list == ima_rules) ? NULL : entry;
++ return (&entry->list == &ima_default_rules ||
++ &entry->list == &ima_policy_rules) ? NULL : entry;
+ }
+
+ void ima_policy_stop(struct seq_file *m, void *v)
+@@ -1823,6 +1832,7 @@ bool ima_appraise_signature(enum kernel_read_file_id id)
+ struct ima_rule_entry *entry;
+ bool found = false;
+ enum ima_hooks func;
++ struct list_head *ima_rules_tmp;
+
+ if (id >= READING_MAX_ID)
+ return false;
+@@ -1834,7 +1844,8 @@ bool ima_appraise_signature(enum kernel_read_file_id id)
+ func = read_idmap[id] ?: FILE_CHECK;
+
+ rcu_read_lock();
+- list_for_each_entry_rcu(entry, ima_rules, list) {
++ ima_rules_tmp = rcu_dereference(ima_rules);
++ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+ if (entry->action != APPRAISE)
+ continue;
+
+diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
+index c65c55b7a789f..312889edb84ab 100644
+--- a/tools/testing/selftests/vm/map_hugetlb.c
++++ b/tools/testing/selftests/vm/map_hugetlb.c
+@@ -15,7 +15,6 @@
+ #include <unistd.h>
+ #include <sys/mman.h>
+ #include <fcntl.h>
+-#include "vm_util.h"
+
+ #define LENGTH (256UL*1024*1024)
+ #define PROTECTION (PROT_READ | PROT_WRITE)
+@@ -71,16 +70,10 @@ int main(int argc, char **argv)
+ {
+ void *addr;
+ int ret;
+- size_t hugepage_size;
+ size_t length = LENGTH;
+ int flags = FLAGS;
+ int shift = 0;
+
+- hugepage_size = default_huge_page_size();
+- /* munmap with fail if the length is not page aligned */
+- if (hugepage_size > length)
+- length = hugepage_size;
+-
+ if (argc > 1)
+ length = atol(argv[1]) << 20;
+ if (argc > 2) {