From: "Arisu Tachibana" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:6.1 commit in: /
Date: Thu, 25 Sep 2025 12:03:23 +0000 (UTC)
Message-ID: <1758801790.fa6cd114aa6233820f14a1c35a32e2c37fbb476f.alicef@gentoo>
commit: fa6cd114aa6233820f14a1c35a32e2c37fbb476f
Author: Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 25 12:03:10 2025 +0000
Commit: Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Sep 25 12:03:10 2025 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fa6cd114
Linux patch 6.1.154
Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>
0000_README | 4 +
1153_linux-6.1.154.patch | 1959 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1963 insertions(+)
diff --git a/0000_README b/0000_README
index beeb2a18..916b250e 100644
--- a/0000_README
+++ b/0000_README
@@ -655,6 +655,10 @@ Patch: 1152_linux-6.1.153.patch
From: https://www.kernel.org
Desc: Linux 6.1.153
+Patch: 1153_linux-6.1.154.patch
+From: https://www.kernel.org
+Desc: Linux 6.1.154
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1153_linux-6.1.154.patch b/1153_linux-6.1.154.patch
new file mode 100644
index 00000000..6b46b7d2
--- /dev/null
+++ b/1153_linux-6.1.154.patch
@@ -0,0 +1,1959 @@
+diff --git a/Makefile b/Makefile
+index 77ebc6dea10089..380b99998dd83d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 153
++SUBLEVEL = 154
+ EXTRAVERSION =
+ NAME = Curry Ramen
+
+diff --git a/arch/loongarch/include/asm/acenv.h b/arch/loongarch/include/asm/acenv.h
+index 52f298f7293bab..483c955f2ae50d 100644
+--- a/arch/loongarch/include/asm/acenv.h
++++ b/arch/loongarch/include/asm/acenv.h
+@@ -10,9 +10,8 @@
+ #ifndef _ASM_LOONGARCH_ACENV_H
+ #define _ASM_LOONGARCH_ACENV_H
+
+-/*
+- * This header is required by ACPI core, but we have nothing to fill in
+- * right now. Will be updated later when needed.
+- */
++#ifdef CONFIG_ARCH_STRICT_ALIGN
++#define ACPI_MISALIGNMENT_NOT_SUPPORTED
++#endif /* CONFIG_ARCH_STRICT_ALIGN */
+
+ #endif /* _ASM_LOONGARCH_ACENV_H */
+diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
+index 6d56a463b091c0..5cc527a41a9715 100644
+--- a/arch/loongarch/kernel/env.c
++++ b/arch/loongarch/kernel/env.c
+@@ -70,6 +70,8 @@ static int __init boardinfo_init(void)
+ struct kobject *loongson_kobj;
+
+ loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
++ if (!loongson_kobj)
++ return -ENOMEM;
+
+ return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
+ }
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index ddd080f6dd82e2..d288dbed5f5bca 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -1229,10 +1229,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ device_set_wakeup_capable(&vu_dev->vdev.dev, true);
+
+ rc = register_virtio_device(&vu_dev->vdev);
+- if (rc)
++ if (rc) {
+ put_device(&vu_dev->vdev.dev);
++ return rc;
++ }
+ vu_dev->registered = 1;
+- return rc;
++ return 0;
+
+ error_init:
+ os_close_file(vu_dev->sock);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 5a6bd9d5cceb77..f56dcbbbf73413 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3834,8 +3834,7 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
+ struct vcpu_svm *svm = to_svm(vcpu);
+ u64 cr8;
+
+- if (nested_svm_virtualize_tpr(vcpu) ||
+- kvm_vcpu_apicv_active(vcpu))
++ if (nested_svm_virtualize_tpr(vcpu))
+ return;
+
+ cr8 = kvm_get_cr8(vcpu);
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index fef69d2a6b183d..3e333be303fe23 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -859,6 +859,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ }
+
+ lock_sock(sk);
++ if (ctx->write) {
++ release_sock(sk);
++ return -EBUSY;
++ }
++ ctx->write = true;
++
+ if (ctx->init && !ctx->more) {
+ if (ctx->used) {
+ err = -EINVAL;
+@@ -908,6 +914,8 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ continue;
+ }
+
++ ctx->merge = 0;
++
+ if (!af_alg_writable(sk)) {
+ err = af_alg_wait_for_wmem(sk, msg->msg_flags);
+ if (err)
+@@ -927,35 +935,38 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ if (sgl->cur)
+ sg_unmark_end(sg + sgl->cur - 1);
+
+- do {
+- struct page *pg;
+- unsigned int i = sgl->cur;
++		if (!(msg->msg_flags & MSG_SPLICE_PAGES)) {
++ do {
++ struct page *pg;
++ unsigned int i = sgl->cur;
+
+- plen = min_t(size_t, len, PAGE_SIZE);
++ plen = min_t(size_t, len, PAGE_SIZE);
+
+- pg = alloc_page(GFP_KERNEL);
+- if (!pg) {
+- err = -ENOMEM;
+- goto unlock;
+- }
++ pg = alloc_page(GFP_KERNEL);
++ if (!pg) {
++ err = -ENOMEM;
++ goto unlock;
++ }
+
+- sg_assign_page(sg + i, pg);
++ sg_assign_page(sg + i, pg);
+
+- err = memcpy_from_msg(page_address(sg_page(sg + i)),
+- msg, plen);
+- if (err) {
+- __free_page(sg_page(sg + i));
+- sg_assign_page(sg + i, NULL);
+- goto unlock;
+- }
++ err = memcpy_from_msg(
++ page_address(sg_page(sg + i)),
++ msg, plen);
++ if (err) {
++ __free_page(sg_page(sg + i));
++ sg_assign_page(sg + i, NULL);
++ goto unlock;
++ }
+
+- sg[i].length = plen;
+- len -= plen;
+- ctx->used += plen;
+- copied += plen;
+- size -= plen;
+- sgl->cur++;
+- } while (len && sgl->cur < MAX_SGL_ENTS);
++ sg[i].length = plen;
++ len -= plen;
++ ctx->used += plen;
++ copied += plen;
++ size -= plen;
++ sgl->cur++;
++ } while (len && sgl->cur < MAX_SGL_ENTS);
++ }
+
+ if (!size)
+ sg_mark_end(sg + sgl->cur - 1);
+@@ -969,6 +980,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+
+ unlock:
+ af_alg_data_wakeup(sk);
++ ctx->write = false;
+ release_sock(sk);
+
+ return copied ?: err;
+@@ -988,53 +1000,17 @@ EXPORT_SYMBOL_GPL(af_alg_sendmsg);
+ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+ {
+- struct sock *sk = sock->sk;
+- struct alg_sock *ask = alg_sk(sk);
+- struct af_alg_ctx *ctx = ask->private;
+- struct af_alg_tsgl *sgl;
+- int err = -EINVAL;
++ struct bio_vec bvec;
++ struct msghdr msg = {
++ .msg_flags = flags | MSG_SPLICE_PAGES,
++ };
+
+ if (flags & MSG_SENDPAGE_NOTLAST)
+- flags |= MSG_MORE;
+-
+- lock_sock(sk);
+- if (!ctx->more && ctx->used)
+- goto unlock;
+-
+- if (!size)
+- goto done;
+-
+- if (!af_alg_writable(sk)) {
+- err = af_alg_wait_for_wmem(sk, flags);
+- if (err)
+- goto unlock;
+- }
+-
+- err = af_alg_alloc_tsgl(sk);
+- if (err)
+- goto unlock;
+-
+- ctx->merge = 0;
+- sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
+-
+- if (sgl->cur)
+- sg_unmark_end(sgl->sg + sgl->cur - 1);
+-
+- sg_mark_end(sgl->sg + sgl->cur);
+-
+- get_page(page);
+- sg_set_page(sgl->sg + sgl->cur, page, size, offset);
+- sgl->cur++;
+- ctx->used += size;
+-
+-done:
+- ctx->more = flags & MSG_MORE;
+-
+-unlock:
+- af_alg_data_wakeup(sk);
+- release_sock(sk);
++ msg.msg_flags |= MSG_MORE;
+
+- return err ?: size;
++ bvec_set_page(&bvec, page, size, offset);
++ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
++ return sock_sendmsg(sock, &msg);
+ }
+ EXPORT_SYMBOL_GPL(af_alg_sendpage);
+
+diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
+index e14c9fdabe2bae..690a0e7f5f6e2b 100644
+--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
++++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
+@@ -2697,7 +2697,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+ ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
+ NULL, anx7625_intr_hpd_isr,
+ IRQF_TRIGGER_FALLING |
+- IRQF_ONESHOT,
++ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ "anx7625-intp", platform);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "fail to request irq\n");
+@@ -2767,8 +2767,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
+ }
+
+ /* Add work function */
+- if (platform->pdata.intp_irq)
++ if (platform->pdata.intp_irq) {
++ enable_irq(platform->pdata.intp_irq);
+ queue_work(platform->workqueue, &platform->work);
++ }
+
+ if (platform->pdata.audio_en)
+ anx7625_register_audio(dev, platform);
+diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+index 1b7c14d7c5ee36..aaa9f44d27be50 100644
+--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
++++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+@@ -2042,8 +2042,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
+ mhdp_state = to_cdns_mhdp_bridge_state(new_state);
+
+ mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
+- if (!mhdp_state->current_mode)
+- return;
++ if (!mhdp_state->current_mode) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ drm_mode_set_name(mhdp_state->current_mode);
+
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 5847ac29d976f0..5418f3d6eacfd9 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -2205,6 +2205,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
+ struct dma_pte *pte = NULL;
+ unsigned long i;
+
++ if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
++ !IS_ALIGNED(end_pfn + 1, lvl_pages)))
++ return;
++
+ while (start_pfn <= end_pfn) {
+ if (!pte)
+ pte = pfn_to_dma_pte(domain, start_pfn, &level);
+@@ -2272,7 +2276,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long pages_to_remove;
+
+ pteval |= DMA_PTE_LARGE_PAGE;
+- pages_to_remove = min_t(unsigned long, nr_pages,
++ pages_to_remove = min_t(unsigned long,
++ round_down(nr_pages, lvl_pages),
+ nr_pte_to_next_page(pte) * lvl_pages);
+ end_pfn = iov_pfn + pages_to_remove - 1;
+ switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
+diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
+index b4f6a0a2fcb51c..bc31921e2c4dfa 100644
+--- a/drivers/mmc/host/mvsdio.c
++++ b/drivers/mmc/host/mvsdio.c
+@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
+ host->pio_ptr = NULL;
+ host->pio_size = 0;
+ } else {
+- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
++ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ }
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 11c58b88f9ce7f..69ea7db784fd70 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3206,7 +3206,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+ /* Find out through which dev should the packet go */
+ memset(&fl6, 0, sizeof(struct flowi6));
+ fl6.daddr = targets[i];
+- fl6.flowi6_oif = bond->dev->ifindex;
+
+ dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ if (dst->error) {
+diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
+index 2198e35d9e1818..1e3eff91877d00 100644
+--- a/drivers/net/ethernet/broadcom/cnic.c
++++ b/drivers/net/ethernet/broadcom/cnic.c
+@@ -4222,8 +4222,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+
+ cnic_bnx2x_delete_wait(dev, 0);
+
+- cancel_delayed_work(&cp->delete_task);
+- flush_workqueue(cnic_wq);
++ cancel_delayed_work_sync(&cp->delete_task);
+
+ if (atomic_read(&cp->iscsi_conn) != 0)
+ netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index 8e59c2825533a3..2a066f193bca1d 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -135,7 +135,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
+ oct->io_qmask.iq |= BIT_ULL(iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
++ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 732fd2e389c417..e928fea16e8413 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -2680,7 +2680,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+ dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ goto err_get_attr;
+ }
+- ethsw->bpid = dpbp_attrs.id;
++ ethsw->bpid = dpbp_attrs.bpid;
+
+ return 0;
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 3d83fccf742b14..2ede35ba3919b6 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -949,9 +949,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ if (!eop_desc)
+ break;
+
+- /* prevent any other reads prior to eop_desc */
+- smp_rmb();
+-
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+index 896b2f9bac3441..d2584b450f272a 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+@@ -365,7 +365,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
+ if (!ptp)
+ return;
+
+- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
++ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index ae3a7b96f79780..7612070b66160c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -107,8 +107,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
+ if (up) {
+ netdev_info(priv->netdev, "Link up\n");
+ netif_carrier_on(priv->netdev);
+- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+- NULL, NULL, NULL);
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
+ netif_carrier_off(priv->netdev);
+diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
+index 998586872599b3..c692d2e878b2e3 100644
+--- a/drivers/net/ethernet/natsemi/ns83820.c
++++ b/drivers/net/ethernet/natsemi/ns83820.c
+@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
+ struct ns83820 *dev = PRIV(ndev);
+ struct rx_info *info = &dev->rx_info;
+ unsigned next_rx;
+- int rx_rc, len;
++ int len;
+ u32 cmdsts;
+ __le32 *desc;
+ unsigned long flags;
+@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
+ if (likely(CMDSTS_OK & cmdsts)) {
+ #endif
+ skb_put(skb, len);
+- if (unlikely(!skb))
++ if (unlikely(!skb)) {
++ ndev->stats.rx_dropped++;
+ goto netdev_mangle_me_harder_failed;
++ }
+ if (cmdsts & CMDSTS_DEST_MULTI)
+ ndev->stats.multicast++;
+ ndev->stats.rx_packets++;
+@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
+ }
+ #endif
+- rx_rc = netif_rx(skb);
+- if (NET_RX_DROP == rx_rc) {
+-netdev_mangle_me_harder_failed:
+- ndev->stats.rx_dropped++;
+- }
++ netif_rx(skb);
+ } else {
+ dev_kfree_skb_irq(skb);
+ }
+
++netdev_mangle_me_harder_failed:
+ nr++;
+ next_rx = info->next_rx;
+ desc = info->descs + (DESC_SIZE * next_rx);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index cdcead614e9fa8..ae421c2707785f 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -4461,10 +4461,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ goto out;
+ }
+
+- /* Add override window info to buffer */
++ /* Add override window info to buffer, preventing buffer overflow */
+ override_window_dwords =
+- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
++ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
++ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
++ PROTECTION_OVERRIDE_DEPTH_DWORDS);
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
+index e22a752052f2f9..b8260dd12b1a80 100644
+--- a/drivers/pcmcia/omap_cf.c
++++ b/drivers/pcmcia/omap_cf.c
+@@ -305,7 +305,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct platform_driver omap_cf_driver = {
++/*
++ * omap_cf_remove() lives in .exit.text. For drivers registered via
++ * platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver omap_cf_driver __refdata = {
+ .driver = {
+ .name = driver_name,
+ },
+diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+index bbfad209c890fc..2c8b1b7dda5bdc 100644
+--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
++++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+@@ -16,10 +16,11 @@
+ #include <linux/iopoll.h>
+ #include <linux/mdio.h>
+ #include <linux/module.h>
++#include <linux/of.h>
+ #include <linux/of_address.h>
+-#include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+ #include <linux/phy/phy.h>
++#include <linux/property.h>
+ #include <linux/slab.h>
+
+ #define BCM_NS_USB3_PHY_BASE_ADDR_REG 0x1f
+@@ -189,7 +190,6 @@ static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
+ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
+ {
+ struct device *dev = &mdiodev->dev;
+- const struct of_device_id *of_id;
+ struct phy_provider *phy_provider;
+ struct device_node *syscon_np;
+ struct bcm_ns_usb3 *usb3;
+@@ -203,10 +203,7 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
+ usb3->dev = dev;
+ usb3->mdiodev = mdiodev;
+
+- of_id = of_match_device(bcm_ns_usb3_id_table, dev);
+- if (!of_id)
+- return -EINVAL;
+- usb3->family = (enum bcm_ns_family)of_id->data;
++ usb3->family = (enum bcm_ns_family)device_get_match_data(dev);
+
+ syscon_np = of_parse_phandle(dev->of_node, "usb3-dmp-syscon", 0);
+ err = of_address_to_resource(syscon_np, 0, &res);
+diff --git a/drivers/phy/marvell/phy-berlin-usb.c b/drivers/phy/marvell/phy-berlin-usb.c
+index 78ef6ae72a9a74..f26bf630da2c9f 100644
+--- a/drivers/phy/marvell/phy-berlin-usb.c
++++ b/drivers/phy/marvell/phy-berlin-usb.c
+@@ -8,9 +8,10 @@
+
+ #include <linux/io.h>
+ #include <linux/module.h>
+-#include <linux/of_device.h>
++#include <linux/of.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/reset.h>
+
+ #define USB_PHY_PLL 0x04
+@@ -162,8 +163,6 @@ MODULE_DEVICE_TABLE(of, phy_berlin_usb_of_match);
+
+ static int phy_berlin_usb_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *match =
+- of_match_device(phy_berlin_usb_of_match, &pdev->dev);
+ struct phy_berlin_usb_priv *priv;
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+@@ -180,7 +179,7 @@ static int phy_berlin_usb_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->rst_ctrl))
+ return PTR_ERR(priv->rst_ctrl);
+
+- priv->pll_divider = *((u32 *)match->data);
++ priv->pll_divider = *((u32 *)device_get_match_data(&pdev->dev));
+
+ phy = devm_phy_create(&pdev->dev, NULL, &phy_berlin_usb_ops);
+ if (IS_ERR(phy)) {
+diff --git a/drivers/phy/ralink/phy-ralink-usb.c b/drivers/phy/ralink/phy-ralink-usb.c
+index 2bd8ad2e76eda0..41bce5290e9220 100644
+--- a/drivers/phy/ralink/phy-ralink-usb.c
++++ b/drivers/phy/ralink/phy-ralink-usb.c
+@@ -13,9 +13,10 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+-#include <linux/of_platform.h>
++#include <linux/of.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+ #include <linux/reset.h>
+
+@@ -171,18 +172,13 @@ static int ralink_usb_phy_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+- const struct of_device_id *match;
+ struct ralink_usb_phy *phy;
+
+- match = of_match_device(ralink_usb_phy_of_match, &pdev->dev);
+- if (!match)
+- return -ENODEV;
+-
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+- phy->clk = (uintptr_t)match->data;
++ phy->clk = (uintptr_t)device_get_match_data(&pdev->dev);
+ phy->base = NULL;
+
+ phy->sysctl = syscon_regmap_lookup_by_phandle(dev->of_node, "ralink,sysctl");
+diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
+index 75216091d90121..c6b4c0b5a6beac 100644
+--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
++++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
+@@ -12,10 +12,9 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/of_address.h>
+-#include <linux/of_platform.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+ #include <linux/reset.h>
+
+@@ -63,7 +62,7 @@ struct rockchip_pcie_data {
+ };
+
+ struct rockchip_pcie_phy {
+- struct rockchip_pcie_data *phy_data;
++ const struct rockchip_pcie_data *phy_data;
+ struct regmap *reg_base;
+ struct phy_pcie_instance {
+ struct phy *phy;
+@@ -365,7 +364,6 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
+ struct rockchip_pcie_phy *rk_phy;
+ struct phy_provider *phy_provider;
+ struct regmap *grf;
+- const struct of_device_id *of_id;
+ int i;
+ u32 phy_num;
+
+@@ -379,11 +377,10 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
+ if (!rk_phy)
+ return -ENOMEM;
+
+- of_id = of_match_device(rockchip_pcie_phy_dt_ids, &pdev->dev);
+- if (!of_id)
++ rk_phy->phy_data = device_get_match_data(&pdev->dev);
++ if (!rk_phy->phy_data)
+ return -EINVAL;
+
+- rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data;
+ rk_phy->reg_base = grf;
+
+ mutex_init(&rk_phy->pcie_mutex);
+diff --git a/drivers/phy/rockchip/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c
+index 8454285977ebc1..666a896c8f0a08 100644
+--- a/drivers/phy/rockchip/phy-rockchip-usb.c
++++ b/drivers/phy/rockchip/phy-rockchip-usb.c
+@@ -13,10 +13,9 @@
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/of.h>
+-#include <linux/of_address.h>
+-#include <linux/of_platform.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/reset.h>
+ #include <linux/regmap.h>
+@@ -458,7 +457,6 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct rockchip_usb_phy_base *phy_base;
+ struct phy_provider *phy_provider;
+- const struct of_device_id *match;
+ struct device_node *child;
+ int err;
+
+@@ -466,14 +464,12 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
+ if (!phy_base)
+ return -ENOMEM;
+
+- match = of_match_device(dev->driver->of_match_table, dev);
+- if (!match || !match->data) {
++ phy_base->pdata = device_get_match_data(dev);
++ if (!phy_base->pdata) {
+ dev_err(dev, "missing phy data\n");
+ return -EINVAL;
+ }
+
+- phy_base->pdata = match->data;
+-
+ phy_base->dev = dev;
+ phy_base->reg_base = ERR_PTR(-ENODEV);
+ if (dev->parent && dev->parent->of_node)
+diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c
+index 76c5595f0859cb..2fdb8f4241c742 100644
+--- a/drivers/phy/ti/phy-omap-control.c
++++ b/drivers/phy/ti/phy-omap-control.c
+@@ -8,9 +8,9 @@
+
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/slab.h>
+ #include <linux/of.h>
+-#include <linux/of_device.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+ #include <linux/clk.h>
+@@ -268,20 +268,15 @@ MODULE_DEVICE_TABLE(of, omap_control_phy_id_table);
+
+ static int omap_control_phy_probe(struct platform_device *pdev)
+ {
+- const struct of_device_id *of_id;
+ struct omap_control_phy *control_phy;
+
+- of_id = of_match_device(omap_control_phy_id_table, &pdev->dev);
+- if (!of_id)
+- return -EINVAL;
+-
+ control_phy = devm_kzalloc(&pdev->dev, sizeof(*control_phy),
+ GFP_KERNEL);
+ if (!control_phy)
+ return -ENOMEM;
+
+ control_phy->dev = &pdev->dev;
+- control_phy->type = *(enum omap_control_phy_type *)of_id->data;
++ control_phy->type = *(enum omap_control_phy_type *)device_get_match_data(&pdev->dev);
+
+ if (control_phy->type == OMAP_CTRL_TYPE_OTGHS) {
+ control_phy->otghs_control =
+diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
+index 63c45809943ff2..913a6673f459f4 100644
+--- a/drivers/phy/ti/phy-omap-usb2.c
++++ b/drivers/phy/ti/phy-omap-usb2.c
+@@ -19,6 +19,7 @@
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
++#include <linux/property.h>
+ #include <linux/regmap.h>
+ #include <linux/slab.h>
+ #include <linux/sys_soc.h>
+@@ -362,6 +363,13 @@ static void omap_usb2_init_errata(struct omap_usb *phy)
+ phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
+ }
+
++static void omap_usb2_put_device(void *_dev)
++{
++ struct device *dev = _dev;
++
++ put_device(dev);
++}
++
+ static int omap_usb2_probe(struct platform_device *pdev)
+ {
+ struct omap_usb *phy;
+@@ -371,16 +379,13 @@ static int omap_usb2_probe(struct platform_device *pdev)
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *control_node;
+ struct platform_device *control_pdev;
+- const struct of_device_id *of_id;
+- struct usb_phy_data *phy_data;
+-
+- of_id = of_match_device(omap_usb2_id_table, &pdev->dev);
++ const struct usb_phy_data *phy_data;
++ int ret;
+
+- if (!of_id)
++ phy_data = device_get_match_data(&pdev->dev);
++ if (!phy_data)
+ return -EINVAL;
+
+- phy_data = (struct usb_phy_data *)of_id->data;
+-
+ phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+@@ -426,6 +431,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+ phy->control_dev = &control_pdev->dev;
++
++ ret = devm_add_action_or_reset(&pdev->dev, omap_usb2_put_device,
++ phy->control_dev);
++ if (ret)
++ return ret;
+ } else {
+ if (of_property_read_u32_index(node,
+ "syscon-phy-power", 1,
+diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
+index df482485159fcc..57e5bcfb6910fb 100644
+--- a/drivers/phy/ti/phy-ti-pipe3.c
++++ b/drivers/phy/ti/phy-ti-pipe3.c
+@@ -8,6 +8,7 @@
+
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
++#include <linux/property.h>
+ #include <linux/slab.h>
+ #include <linux/phy/phy.h>
+ #include <linux/of.h>
+@@ -791,23 +792,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ int ret;
+- const struct of_device_id *match;
+- struct pipe3_data *data;
++ const struct pipe3_data *data;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+- match = of_match_device(ti_pipe3_id_table, dev);
+- if (!match)
++ data = device_get_match_data(dev);
++ if (!data)
+ return -EINVAL;
+
+- data = (struct pipe3_data *)match->data;
+- if (!data) {
+- dev_err(dev, "no driver data\n");
+- return -EINVAL;
+- }
+-
+ phy->dev = dev;
+ phy->mode = data->mode;
+ phy->dpll_map = data->dpll_map;
+diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
+index b3dd64ab8d32cc..5a11424ae774ef 100644
+--- a/drivers/power/supply/bq27xxx_battery.c
++++ b/drivers/power/supply/bq27xxx_battery.c
+@@ -1872,8 +1872,8 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
+ bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
+
+ cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
+- if ((cache.flags & 0xff) == 0xff)
+- cache.flags = -1; /* read error */
++ if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff)
++ cache.flags = -ENODEV; /* bq27000 hdq read error */
+ if (cache.flags >= 0) {
+ cache.temperature = bq27xxx_battery_read_temperature(di);
+ if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
+diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
+index 4346a316c77ab0..1963d9a45b6731 100644
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -86,13 +86,34 @@ static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
+ return string_length;
+ }
+
++static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
++{
++ struct xhci_ep_ctx *ep_ctx;
++ unsigned int max_burst;
++ dma_addr_t deq;
++
++ max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
++
++ /* Populate bulk out endpoint context: */
++ ep_ctx = dbc_bulkout_ctx(dbc);
++ deq = dbc_bulkout_enq(dbc);
++ ep_ctx->ep_info = 0;
++ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
++ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
++
++ /* Populate bulk in endpoint context: */
++ ep_ctx = dbc_bulkin_ctx(dbc);
++ deq = dbc_bulkin_enq(dbc);
++ ep_ctx->ep_info = 0;
++ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
++ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
++}
++
+ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
+ {
+ struct dbc_info_context *info;
+- struct xhci_ep_ctx *ep_ctx;
+ u32 dev_info;
+- dma_addr_t deq, dma;
+- unsigned int max_burst;
++ dma_addr_t dma;
+
+ if (!dbc)
+ return;
+@@ -106,20 +127,8 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
+ info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
+ info->length = cpu_to_le32(string_length);
+
+- /* Populate bulk out endpoint context: */
+- ep_ctx = dbc_bulkout_ctx(dbc);
+- max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
+- deq = dbc_bulkout_enq(dbc);
+- ep_ctx->ep_info = 0;
+- ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
+- ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
+-
+- /* Populate bulk in endpoint context: */
+- ep_ctx = dbc_bulkin_ctx(dbc);
+- deq = dbc_bulkin_enq(dbc);
+- ep_ctx->ep_info = 0;
+- ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
+- ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
++ /* Populate bulk in and out endpoint contexts: */
++ xhci_dbc_init_ep_contexts(dbc);
+
+ /* Set DbC context and info registers: */
+ lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
+@@ -421,6 +430,42 @@ dbc_alloc_ctx(struct device *dev, gfp_t flags)
+ return ctx;
+ }
+
++static void xhci_dbc_ring_init(struct xhci_ring *ring)
++{
++ struct xhci_segment *seg = ring->first_seg;
++
++ /* clear all trbs on ring in case of old ring */
++ memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
++
++ /* Only event ring does not use link TRB */
++ if (ring->type != TYPE_EVENT) {
++ union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
++
++ trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
++ trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
++ }
++ xhci_initialize_ring_info(ring, 1);
++}
++
++static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
++{
++ struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
++ struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;
++
++ if (!in_ring || !out_ring || !dbc->ctx) {
++ dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
++ return -ENODEV;
++ }
++
++ xhci_dbc_ring_init(in_ring);
++ xhci_dbc_ring_init(out_ring);
++
++ /* set ep context enqueue, dequeue, and cycle to initial values */
++ xhci_dbc_init_ep_contexts(dbc);
++
++ return 0;
++}
++
+ static struct xhci_ring *
+ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
+ {
+@@ -449,15 +494,10 @@ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
+
+ seg->dma = dma;
+
+- /* Only event ring does not use link TRB */
+- if (type != TYPE_EVENT) {
+- union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+-
+- trb->link.segment_ptr = cpu_to_le64(dma);
+- trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+- }
+ INIT_LIST_HEAD(&ring->td_list);
+- xhci_initialize_ring_info(ring, 1);
++
++ xhci_dbc_ring_init(ring);
++
+ return ring;
+ dma_fail:
+ kfree(seg);
+@@ -850,7 +890,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
+ dev_info(dbc->dev, "DbC cable unplugged\n");
+ dbc->state = DS_ENABLED;
+ xhci_dbc_flush_requests(dbc);
+-
++ xhci_dbc_reinit_ep_rings(dbc);
+ return EVT_DISC;
+ }
+
+@@ -860,7 +900,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
+ writel(portsc, &dbc->regs->portsc);
+ dbc->state = DS_ENABLED;
+ xhci_dbc_flush_requests(dbc);
+-
++ xhci_dbc_reinit_ep_rings(dbc);
+ return EVT_DISC;
+ }
+
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index bf3822b25c58f6..d3e5429ee03d2e 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -1612,10 +1612,10 @@ static int check_inode_ref(struct extent_buffer *leaf,
+ while (ptr < end) {
+ u16 namelen;
+
+- if (unlikely(ptr + sizeof(iref) > end)) {
++ if (unlikely(ptr + sizeof(*iref) > end)) {
+ inode_ref_err(leaf, slot,
+ "inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
+- ptr, end, sizeof(iref));
++ ptr, end, sizeof(*iref));
+ return -EUCLEAN;
+ }
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 6e8e90bce04678..e4cc287eee993b 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1934,7 +1934,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+
+ search_key.objectid = log_key.objectid;
+ search_key.type = BTRFS_INODE_EXTREF_KEY;
+- search_key.offset = key->objectid;
++ search_key.offset = btrfs_extref_hash(key->objectid, name.name, name.len);
+ ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
+ if (ret < 0) {
+ goto out;
+diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
+index 905c7eadf9676d..59fda8ce0790bc 100644
+--- a/fs/nilfs2/sysfs.c
++++ b/fs/nilfs2/sysfs.c
+@@ -1075,7 +1075,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
+ ************************************************************************/
+
+ static ssize_t nilfs_feature_revision_show(struct kobject *kobj,
+- struct attribute *attr, char *buf)
++ struct kobj_attribute *attr, char *buf)
+ {
+ return sysfs_emit(buf, "%d.%d\n",
+ NILFS_CURRENT_REV, NILFS_MINOR_REV);
+@@ -1087,7 +1087,7 @@ static const char features_readme_str[] =
+ "(1) revision\n\tshow current revision of NILFS file system driver.\n";
+
+ static ssize_t nilfs_feature_README_show(struct kobject *kobj,
+- struct attribute *attr,
++ struct kobj_attribute *attr,
+ char *buf)
+ {
+ return sysfs_emit(buf, features_readme_str);
+diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h
+index 78a87a016928b7..d370cd5cce3f5d 100644
+--- a/fs/nilfs2/sysfs.h
++++ b/fs/nilfs2/sysfs.h
+@@ -50,16 +50,16 @@ struct nilfs_sysfs_dev_subgroups {
+ struct completion sg_segments_kobj_unregister;
+ };
+
+-#define NILFS_COMMON_ATTR_STRUCT(name) \
++#define NILFS_KOBJ_ATTR_STRUCT(name) \
+ struct nilfs_##name##_attr { \
+ struct attribute attr; \
+- ssize_t (*show)(struct kobject *, struct attribute *, \
++ ssize_t (*show)(struct kobject *, struct kobj_attribute *, \
+ char *); \
+- ssize_t (*store)(struct kobject *, struct attribute *, \
++ ssize_t (*store)(struct kobject *, struct kobj_attribute *, \
+ const char *, size_t); \
+ }
+
+-NILFS_COMMON_ATTR_STRUCT(feature);
++NILFS_KOBJ_ATTR_STRUCT(feature);
+
+ #define NILFS_DEV_ATTR_STRUCT(name) \
+ struct nilfs_##name##_attr { \
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index d47eae133a202e..b648bb30401d57 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -1084,8 +1084,10 @@ static int smbd_negotiate(struct smbd_connection *info)
+ log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n",
+ rc, response->sge.addr,
+ response->sge.length, response->sge.lkey);
+- if (rc)
++ if (rc) {
++ put_receive_buffer(info, response);
+ return rc;
++ }
+
+ init_completion(&info->negotiate_completion);
+ info->negotiate_done = false;
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 3006d76d805907..323b8a401a8c00 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -548,7 +548,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ case SMB_DIRECT_MSG_DATA_TRANSFER: {
+ struct smb_direct_data_transfer *data_transfer =
+ (struct smb_direct_data_transfer *)recvmsg->packet;
+- unsigned int data_length;
++ u32 remaining_data_length, data_offset, data_length;
+ int avail_recvmsg_count, receive_credits;
+
+ if (wc->byte_len <
+@@ -558,15 +558,25 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ return;
+ }
+
++ remaining_data_length = le32_to_cpu(data_transfer->remaining_data_length);
+ data_length = le32_to_cpu(data_transfer->data_length);
+- if (data_length) {
+- if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+- (u64)data_length) {
+- put_recvmsg(t, recvmsg);
+- smb_direct_disconnect_rdma_connection(t);
+- return;
+- }
++ data_offset = le32_to_cpu(data_transfer->data_offset);
++ if (wc->byte_len < data_offset ||
++ wc->byte_len < (u64)data_offset + data_length) {
++ put_recvmsg(t, recvmsg);
++ smb_direct_disconnect_rdma_connection(t);
++ return;
++ }
++ if (remaining_data_length > t->max_fragmented_recv_size ||
++ data_length > t->max_fragmented_recv_size ||
++ (u64)remaining_data_length + (u64)data_length >
++ (u64)t->max_fragmented_recv_size) {
++ put_recvmsg(t, recvmsg);
++ smb_direct_disconnect_rdma_connection(t);
++ return;
++ }
+
++ if (data_length) {
+ if (t->full_packet_received)
+ recvmsg->first_segment = true;
+
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index a406e281ae571e..1424200fe88cfb 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -136,6 +136,7 @@ struct af_alg_async_req {
+ * SG?
+ * @enc: Cryptographic operation to be performed when
+ * recvmsg is invoked.
++ * @write: True if we are in the middle of a write.
+ * @init: True if metadata has been sent.
+ * @len: Length of memory allocated for this data structure.
+ * @inflight: Non-zero when AIO requests are in flight.
+@@ -151,10 +152,11 @@ struct af_alg_ctx {
+ size_t used;
+ atomic_t rcvused;
+
+- bool more;
+- bool merge;
+- bool enc;
+- bool init;
++ u32 more:1,
++ merge:1,
++ enc:1,
++ write:1,
++ init:1;
+
+ unsigned int len;
+
+diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
+index dfe19bf13f4c8c..dacc7af2ea1e10 100644
+--- a/include/uapi/linux/mptcp.h
++++ b/include/uapi/linux/mptcp.h
+@@ -81,6 +81,8 @@ enum {
+
+ #define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
+
++#define MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 _BITUL(0)
++
+ #define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
+ #define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
+ #define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
+@@ -127,13 +129,13 @@ struct mptcp_info {
+
+ /*
+ * MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+- * sport, dport
++ * sport, dport, server-side, [flags]
+ * A new MPTCP connection has been created. It is the good time to allocate
+ * memory and send ADD_ADDR if needed. Depending on the traffic-patterns
+ * it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent.
+ *
+ * MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+- * sport, dport
++ * sport, dport, server-side, [flags]
+ * A MPTCP connection is established (can start new subflows).
+ *
+ * MPTCP_EVENT_CLOSED: token
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 29adfc6d6ec24a..2aae0de6169ce2 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1248,9 +1248,10 @@ static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
+
+ void io_req_task_submit(struct io_kiocb *req, bool *locked)
+ {
+- io_tw_lock(req->ctx, locked);
+- /* req->task == current here, checking PF_EXITING is safe */
+- if (likely(!(req->task->flags & PF_EXITING)))
++ struct io_ring_ctx *ctx = req->ctx;
++
++ io_tw_lock(ctx, locked);
++ if (likely(!io_should_terminate_tw(ctx)))
+ io_queue_sqe(req);
+ else
+ io_req_complete_failed(req, -EFAULT);
+@@ -1772,8 +1773,10 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+
+ int io_poll_issue(struct io_kiocb *req, bool *locked)
+ {
+- io_tw_lock(req->ctx, locked);
+- if (unlikely(req->task->flags & PF_EXITING))
++ struct io_ring_ctx *ctx = req->ctx;
++
++ io_tw_lock(ctx, locked);
++ if (unlikely(io_should_terminate_tw(ctx)))
+ return -EFAULT;
+ return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
+ }
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index 886921d2d58deb..194e3230f853d5 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -395,6 +395,19 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
+ ctx->submitter_task == current);
+ }
+
++/*
++ * Terminate the request if either of these conditions are true:
++ *
++ * 1) It's being executed by the original task, but that task is marked
++ * with PF_EXITING as it's exiting.
++ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
++ * our fallback task_work.
++ */
++static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
++{
++ return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
++}
++
+ static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
+ {
+ io_req_set_res(req, res, 0);
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index ab27a627fd4c62..e9f83d3fc835f1 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -241,8 +241,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
+ struct io_ring_ctx *ctx = req->ctx;
+ int v;
+
+- /* req->task == current here, checking PF_EXITING is safe */
+- if (unlikely(req->task->flags & PF_EXITING))
++ if (unlikely(io_should_terminate_tw(ctx)))
+ return -ECANCELED;
+
+ do {
+diff --git a/io_uring/timeout.c b/io_uring/timeout.c
+index 7cdc234c5f53fb..0bfd111e9164c0 100644
+--- a/io_uring/timeout.c
++++ b/io_uring/timeout.c
+@@ -275,7 +275,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
+ int ret = -ENOENT;
+
+ if (prev) {
+- if (!(req->task->flags & PF_EXITING)) {
++ if (!io_should_terminate_tw(req->ctx)) {
+ struct io_cancel_data cd = {
+ .ctx = req->ctx,
+ .data = prev->cqe.user_data,
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 7997c8021b62ff..9742574ec62fdc 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -123,8 +123,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
+ * of concurrent destructions. Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
++ *
++ * A cgroup destruction should enqueue work sequentially to:
++ * cgroup_offline_wq: use for css offline work
++ * cgroup_release_wq: use for css release work
++ * cgroup_free_wq: use for free work
++ *
++ * Rationale for using separate workqueues:
++ * The cgroup root free work may depend on completion of other css offline
++ * operations. If all tasks were enqueued to a single workqueue, this could
++ * create a deadlock scenario where:
++ * - Free work waits for other css offline work to complete.
++ * - But other css offline work is queued after free work in the same queue.
++ *
++ * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
++ * 1. umount net_prio
++ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
++ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
++ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
++ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
++ * which can never complete as it's behind in the same queue and
++ * workqueue's max_active is 1.
+ */
+-static struct workqueue_struct *cgroup_destroy_wq;
++static struct workqueue_struct *cgroup_offline_wq;
++static struct workqueue_struct *cgroup_release_wq;
++static struct workqueue_struct *cgroup_free_wq;
+
+ /* generate an array of cgroup subsystem pointers */
+ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+@@ -5444,7 +5467,7 @@ static void css_release_work_fn(struct work_struct *work)
+ cgroup_unlock();
+
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ }
+
+ static void css_release(struct percpu_ref *ref)
+@@ -5453,7 +5476,7 @@ static void css_release(struct percpu_ref *ref)
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+ INIT_WORK(&css->destroy_work, css_release_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_release_wq, &css->destroy_work);
+ }
+
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -5575,7 +5598,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ err_free_css:
+ list_del_rcu(&css->rstat_css_node);
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ return ERR_PTR(err);
+ }
+
+@@ -5811,7 +5834,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_offline_wq, &css->destroy_work);
+ }
+ }
+
+@@ -6183,8 +6206,14 @@ static int __init cgroup_wq_init(void)
+ * We would prefer to do this in cgroup_init() above, but that
+ * is called before init_workqueues(): so leave this until after.
+ */
+- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+- BUG_ON(!cgroup_destroy_wq);
++ cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
++ BUG_ON(!cgroup_offline_wq);
++
++ cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
++ BUG_ON(!cgroup_release_wq);
++
++ cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
++ BUG_ON(!cgroup_free_wq);
+ return 0;
+ }
+ core_initcall(cgroup_wq_init);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index b64d53590f25ca..d94daa296d59d1 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3130,6 +3130,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int old_state = sk->sk_state;
++ struct request_sock *req;
+ u32 seq;
+
+ if (old_state != TCP_CLOSE)
+@@ -3239,6 +3240,10 @@ int tcp_disconnect(struct sock *sk, int flags)
+
+
+ /* Clean up fastopen related fields */
++ req = rcu_dereference_protected(tp->fastopen_rsk,
++ lockdep_sock_is_held(sk));
++ if (req)
++ reqsk_fastopen_remove(sk, req, false);
+ tcp_free_fastopen_req(tp);
+ inet->defer_connect = 0;
+ tp->fastopen_client_fail = 0;
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index e685c12757f4bf..1f961944ecc98b 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1216,7 +1216,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+ {
+- u32 ret = -EOPNOTSUPP;
++ int ret = -EOPNOTSUPP;
+
+ if (local->ops->get_ftm_responder_stats)
+ ret = local->ops->get_ftm_responder_stats(&local->hw,
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 683301d9f50846..7831e412c7b9de 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -945,7 +945,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ int result, i;
+ enum nl80211_band band;
+ int channels, max_bitrates;
+- bool supp_ht, supp_vht, supp_he, supp_eht;
++ bool supp_ht, supp_vht, supp_he, supp_eht, supp_s1g;
+ struct cfg80211_chan_def dflt_chandef = {};
+
+ if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
+@@ -1061,6 +1061,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ supp_vht = false;
+ supp_he = false;
+ supp_eht = false;
++ supp_s1g = false;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband;
+
+@@ -1097,6 +1098,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ max_bitrates = sband->n_bitrates;
+ supp_ht = supp_ht || sband->ht_cap.ht_supported;
+ supp_vht = supp_vht || sband->vht_cap.vht_supported;
++ supp_s1g = supp_s1g || sband->s1g_cap.s1g;
+
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ const struct ieee80211_sband_iftype_data *iftd;
+@@ -1219,6 +1221,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ local->scan_ies_len +=
+ 2 + sizeof(struct ieee80211_vht_cap);
+
++ if (supp_s1g)
++ local->scan_ies_len += 2 + sizeof(struct ieee80211_s1g_cap);
++
+ /*
+ * HE cap element is variable in size - set len to allow max size */
+ if (supp_he) {
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index cf9244a3644f10..7e72862a6b54dc 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -2242,6 +2242,7 @@ static int mptcp_event_created(struct sk_buff *skb,
+ const struct sock *ssk)
+ {
+ int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token);
++ u16 flags = 0;
+
+ if (err)
+ return err;
+@@ -2249,6 +2250,12 @@ static int mptcp_event_created(struct sk_buff *skb,
+ if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
+ return -EMSGSIZE;
+
++ if (READ_ONCE(msk->pm.remote_deny_join_id0))
++ flags |= MPTCP_PM_EV_FLAG_DENY_JOIN_ID0;
++
++ if (flags && nla_put_u16(skb, MPTCP_ATTR_FLAGS, flags))
++ return -EMSGSIZE;
++
+ return mptcp_event_add_subflow(skb, ssk);
+ }
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 883efcbb8dfc3c..ea715a12824258 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -425,6 +425,19 @@ static void mptcp_close_wake_up(struct sock *sk)
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ }
+
++static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
++{
++ struct mptcp_subflow_context *subflow;
++
++ mptcp_for_each_subflow(msk, subflow) {
++ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
++ bool slow;
++
++ slow = lock_sock_fast(ssk);
++ tcp_shutdown(ssk, SEND_SHUTDOWN);
++ unlock_sock_fast(ssk, slow);
++ }
++}
+ static bool mptcp_pending_data_fin_ack(struct sock *sk)
+ {
+ struct mptcp_sock *msk = mptcp_sk(sk);
+@@ -448,6 +461,7 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
+ break;
+ case TCP_CLOSING:
+ case TCP_LAST_ACK:
++ mptcp_shutdown_subflows(msk);
+ inet_sk_state_store(sk, TCP_CLOSE);
+ break;
+ }
+@@ -615,6 +629,7 @@ static bool mptcp_check_data_fin(struct sock *sk)
+ inet_sk_state_store(sk, TCP_CLOSING);
+ break;
+ case TCP_FIN_WAIT2:
++ mptcp_shutdown_subflows(msk);
+ inet_sk_state_store(sk, TCP_CLOSE);
+ break;
+ default:
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index cff23281069282..2ff72b7940fe95 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -773,6 +773,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ goto fallback;
+
+ owner = mptcp_sk(ctx->conn);
++
++ if (mp_opt.deny_join_id0)
++ WRITE_ONCE(owner->pm.remote_deny_join_id0, true);
++
+ mptcp_pm_new_connection(owner, child, 1);
+
+ /* with OoO packets we can reach here without ingress
+diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
+index 28c1b00221780f..bd861191157b54 100644
+--- a/net/rds/ib_frmr.c
++++ b/net/rds/ib_frmr.c
+@@ -133,12 +133,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
+
+ ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
+ &off, PAGE_SIZE);
+- if (unlikely(ret != ibmr->sg_dma_len))
+- return ret < 0 ? ret : -EINVAL;
++ if (unlikely(ret != ibmr->sg_dma_len)) {
++ ret = ret < 0 ? ret : -EINVAL;
++ goto out_inc;
++ }
+
+- if (cmpxchg(&frmr->fr_state,
+- FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
+- return -EBUSY;
++ if (cmpxchg(&frmr->fr_state, FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE) {
++ ret = -EBUSY;
++ goto out_inc;
++ }
+
+ atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
+
+@@ -166,11 +169,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
+ /* Failure here can be because of -ENOMEM as well */
+ rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
+
+- atomic_inc(&ibmr->ic->i_fastreg_wrs);
+ if (printk_ratelimit())
+ pr_warn("RDS/IB: %s returned error(%d)\n",
+ __func__, ret);
+- goto out;
++ goto out_inc;
+ }
+
+ /* Wait for the registration to complete in order to prevent an invalid
+@@ -179,8 +181,10 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
+ */
+ wait_event(frmr->fr_reg_done, !frmr->fr_reg);
+
+-out:
++ return ret;
+
++out_inc:
++ atomic_inc(&ibmr->ic->i_fastreg_wrs);
+ return ret;
+ }
+
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index 2df5bf240b64a9..1a3560cdba3e92 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -78,16 +78,25 @@ static int rfkill_gpio_acpi_probe(struct device *dev,
+ static int rfkill_gpio_probe(struct platform_device *pdev)
+ {
+ struct rfkill_gpio_data *rfkill;
++ const char *type_name = NULL;
++ const char *name_property;
++ const char *type_property;
+ struct gpio_desc *gpio;
+- const char *type_name;
+ int ret;
+
+ rfkill = devm_kzalloc(&pdev->dev, sizeof(*rfkill), GFP_KERNEL);
+ if (!rfkill)
+ return -ENOMEM;
+
+- device_property_read_string(&pdev->dev, "name", &rfkill->name);
+- device_property_read_string(&pdev->dev, "type", &type_name);
++ if (dev_of_node(&pdev->dev)) {
++ name_property = "label";
++ type_property = "radio-type";
++ } else {
++ name_property = "name";
++ type_property = "type";
++ }
++ device_property_read_string(&pdev->dev, name_property, &rfkill->name);
++ device_property_read_string(&pdev->dev, type_property, &type_name);
+
+ if (!rfkill->name)
+ rfkill->name = dev_name(&pdev->dev);
+@@ -169,12 +178,19 @@ static const struct acpi_device_id rfkill_acpi_match[] = {
+ MODULE_DEVICE_TABLE(acpi, rfkill_acpi_match);
+ #endif
+
++static const struct of_device_id rfkill_of_match[] __maybe_unused = {
++ { .compatible = "rfkill-gpio", },
++ { },
++};
++MODULE_DEVICE_TABLE(of, rfkill_of_match);
++
+ static struct platform_driver rfkill_gpio_driver = {
+ .probe = rfkill_gpio_probe,
+ .remove = rfkill_gpio_remove,
+ .driver = {
+ .name = "rfkill_gpio",
+ .acpi_match_table = ACPI_PTR(rfkill_acpi_match),
++ .of_match_table = of_match_ptr(rfkill_of_match),
+ },
+ };
+
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index 4922668fefaa87..f25699517bdf8f 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -91,6 +91,7 @@ int tls_sk_query(struct sock *sk, int optname, char __user *optval,
+ int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
+ unsigned int optlen);
+ void tls_err_abort(struct sock *sk, int err);
++void tls_strp_abort_strp(struct tls_strparser *strp, int err);
+
+ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
+ void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index b7ed76c0e576e9..532230bed13b00 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -12,7 +12,7 @@
+
+ static struct workqueue_struct *tls_strp_wq;
+
+-static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
++void tls_strp_abort_strp(struct tls_strparser *strp, int err)
+ {
+ if (strp->stopped)
+ return;
+@@ -210,11 +210,17 @@ static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
+ struct sk_buff *in_skb, unsigned int offset,
+ size_t in_len)
+ {
++ unsigned int nfrag = skb->len / PAGE_SIZE;
+ size_t len, chunk;
+ skb_frag_t *frag;
+ int sz;
+
+- frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
++ if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) {
++ DEBUG_NET_WARN_ON_ONCE(1);
++ return -EMSGSIZE;
++ }
++
++ frag = &skb_shinfo(skb)->frags[nfrag];
+
+ len = in_len;
+ /* First make sure we got the header */
+@@ -515,10 +521,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
+ tls_strp_load_anchor_with_queue(strp, inq);
+ if (!strp->stm.full_len) {
+ sz = tls_rx_msg_size(strp, strp->anchor);
+- if (sz < 0) {
+- tls_strp_abort_strp(strp, sz);
++ if (sz < 0)
+ return sz;
+- }
+
+ strp->stm.full_len = sz;
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 96e62e8f1dad23..fe6514e964ba3a 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2435,8 +2435,7 @@ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
+ return data_len + TLS_HEADER_SIZE;
+
+ read_failure:
+- tls_err_abort(strp->sk, ret);
+-
++ tls_strp_abort_strp(strp, ret);
+ return ret;
+ }
+
+diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
+index 88d1f4b56e4be4..a220ac0c8eb831 100644
+--- a/sound/firewire/motu/motu-hwdep.c
++++ b/sound/firewire/motu/motu-hwdep.c
+@@ -111,7 +111,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ events = 0;
+ spin_unlock_irq(&motu->lock);
+
+- return events | EPOLLOUT;
++ return events;
+ }
+
+ static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 503e7376558a06..fcc22aa9917483 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10005,6 +10005,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8992, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
++ SND_PCI_QUIRK(0x103c, 0x89a0, "HP Laptop 15-dw4xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
+index 8dac9fd8854705..2e70045d5920d7 100644
+--- a/sound/soc/codecs/wm8940.c
++++ b/sound/soc/codecs/wm8940.c
+@@ -218,7 +218,7 @@ static const struct snd_kcontrol_new wm8940_snd_controls[] = {
+ SOC_SINGLE_TLV("Digital Capture Volume", WM8940_ADCVOL,
+ 0, 255, 0, wm8940_adc_tlv),
+ SOC_ENUM("Mic Bias Level", wm8940_mic_bias_level_enum),
+- SOC_SINGLE_TLV("Capture Boost Volue", WM8940_ADCBOOST,
++ SOC_SINGLE_TLV("Capture Boost Volume", WM8940_ADCBOOST,
+ 8, 1, 0, wm8940_capture_boost_vol_tlv),
+ SOC_SINGLE_TLV("Speaker Playback Volume", WM8940_SPKVOL,
+ 0, 63, 0, wm8940_spk_vol_tlv),
+diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
+index 1becbf2c6ffad2..587e2009137673 100644
+--- a/sound/soc/codecs/wm8974.c
++++ b/sound/soc/codecs/wm8974.c
+@@ -419,10 +419,14 @@ static int wm8974_update_clocks(struct snd_soc_dai *dai)
+ fs256 = 256 * priv->fs;
+
+ f = wm8974_get_mclkdiv(priv->mclk, fs256, &mclkdiv);
+-
+ if (f != priv->mclk) {
+ /* The PLL performs best around 90MHz */
+- fpll = wm8974_get_mclkdiv(22500000, fs256, &mclkdiv);
++ if (fs256 % 8000)
++ f = 22579200;
++ else
++ f = 24576000;
++
++ fpll = wm8974_get_mclkdiv(f, fs256, &mclkdiv);
+ }
+
+ wm8974_set_dai_pll(dai, 0, 0, priv->mclk, fpll);
+diff --git a/sound/soc/qcom/qdsp6/audioreach.c b/sound/soc/qcom/qdsp6/audioreach.c
+index 01dac32c50fda6..918c9d3f49388b 100644
+--- a/sound/soc/qcom/qdsp6/audioreach.c
++++ b/sound/soc/qcom/qdsp6/audioreach.c
+@@ -728,6 +728,7 @@ static int audioreach_i2s_set_media_format(struct q6apm_graph *graph,
+ param_data->param_id = PARAM_ID_I2S_INTF_CFG;
+ param_data->param_size = ic_sz - APM_MODULE_PARAM_DATA_SIZE;
+
++ intf_cfg->cfg.lpaif_type = module->hw_interface_type;
+ intf_cfg->cfg.intf_idx = module->hw_interface_idx;
+ intf_cfg->cfg.sd_line_idx = module->sd_line_idx;
+
+diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+index 23d23bc6fbaa77..5e4fea534ed129 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
++++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+@@ -109,14 +109,17 @@ static void q6apm_lpass_dai_shutdown(struct snd_pcm_substream *substream, struct
+ struct q6apm_lpass_dai_data *dai_data = dev_get_drvdata(dai->dev);
+ int rc;
+
+- if (!dai_data->is_port_started[dai->id])
+- return;
+- rc = q6apm_graph_stop(dai_data->graph[dai->id]);
+- if (rc < 0)
+- dev_err(dai->dev, "fail to close APM port (%d)\n", rc);
+-
+- q6apm_graph_close(dai_data->graph[dai->id]);
+- dai_data->is_port_started[dai->id] = false;
++ if (dai_data->is_port_started[dai->id]) {
++ rc = q6apm_graph_stop(dai_data->graph[dai->id]);
++ dai_data->is_port_started[dai->id] = false;
++ if (rc < 0)
++ dev_err(dai->dev, "fail to close APM port (%d)\n", rc);
++ }
++
++ if (dai_data->graph[dai->id]) {
++ q6apm_graph_close(dai_data->graph[dai->id]);
++ dai_data->graph[dai->id] = NULL;
++ }
+ }
+
+ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+@@ -130,6 +133,11 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
+ if (dai_data->is_port_started[dai->id]) {
+ q6apm_graph_stop(dai_data->graph[dai->id]);
+ dai_data->is_port_started[dai->id] = false;
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ q6apm_graph_close(dai_data->graph[dai->id]);
++ dai_data->graph[dai->id] = NULL;
++ }
+ }
+
+ /**
+@@ -148,26 +156,31 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
+
+ cfg->direction = substream->stream;
+ rc = q6apm_graph_media_format_pcm(dai_data->graph[dai->id], cfg);
+-
+ if (rc) {
+ dev_err(dai->dev, "Failed to set media format %d\n", rc);
+- return rc;
++ goto err;
+ }
+
+ rc = q6apm_graph_prepare(dai_data->graph[dai->id]);
+ if (rc) {
+ dev_err(dai->dev, "Failed to prepare Graph %d\n", rc);
+- return rc;
++ goto err;
+ }
+
+ rc = q6apm_graph_start(dai_data->graph[dai->id]);
+ if (rc < 0) {
+ dev_err(dai->dev, "fail to start APM port %x\n", dai->id);
+- return rc;
++ goto err;
+ }
+ dai_data->is_port_started[dai->id] = true;
+
+ return 0;
++err:
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ q6apm_graph_close(dai_data->graph[dai->id]);
++ dai_data->graph[dai->id] = NULL;
++ }
++ return rc;
+ }
+
+ static int q6apm_lpass_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+@@ -204,6 +217,7 @@ static const struct snd_soc_dai_ops q6dma_ops = {
+ .shutdown = q6apm_lpass_dai_shutdown,
+ .set_channel_map = q6dma_set_channel_map,
+ .hw_params = q6dma_hw_params,
++ .set_fmt = q6i2s_set_fmt,
+ };
+
+ static const struct snd_soc_dai_ops q6i2s_ops = {
+diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
+index be60e7785da940..fafaf9a8e6c449 100644
+--- a/sound/soc/sof/intel/hda-stream.c
++++ b/sound/soc/sof/intel/hda-stream.c
+@@ -837,7 +837,7 @@ int hda_dsp_stream_init(struct snd_sof_dev *sdev)
+
+ if (num_capture >= SOF_HDA_CAPTURE_STREAMS) {
+ dev_err(sdev->dev, "error: too many capture streams %d\n",
+- num_playback);
++ num_capture);
+ return -EINVAL;
+ }
+
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+index a842f5ade6e370..0d49b675301178 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
+@@ -1005,6 +1005,7 @@ int main_loop_s(int listensock)
+ struct pollfd polls;
+ socklen_t salen;
+ int remotesock;
++ int err = 0;
+ int fd = 0;
+
+ again:
+@@ -1036,7 +1037,7 @@ int main_loop_s(int listensock)
+
+ SOCK_TEST_TCPULP(remotesock, 0);
+
+- copyfd_io(fd, remotesock, 1, true);
++ err = copyfd_io(fd, remotesock, 1, true, &winfo);
+ } else {
+ perror("accept");
+ return 1;
+@@ -1045,10 +1046,10 @@ int main_loop_s(int listensock)
+ if (cfg_input)
+ close(fd);
+
+- if (--cfg_repeat > 0)
++ if (!err && --cfg_repeat > 0)
+ goto again;
+
+- return 0;
++ return err;
+ }
+
+ static void init_rng(void)
+@@ -1153,7 +1154,7 @@ void xdisconnect(int fd)
+ else
+ xerror("bad family");
+
+- strcpy(cmd, "ss -M | grep -q ");
++ strcpy(cmd, "ss -Mnt | grep -q ");
+ cmdlen = strlen(cmd);
+ if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen],
+ sizeof(cmd) - cmdlen))
+@@ -1163,7 +1164,7 @@ void xdisconnect(int fd)
+
+ /*
+ * wait until the pending data is completely flushed and all
+- * the MPTCP sockets reached the closed status.
++ * the sockets reached the closed status.
+ * disconnect will bypass/ignore/drop any pending data.
+ */
+ for (i = 0; ; i += msec_sleep) {
+diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+index 1887bd61bd9a5f..17e5b7ec53b67c 100644
+--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
++++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+@@ -188,6 +188,13 @@ static int capture_events(int fd, int event_group)
+ fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE)
+ fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs));
++ else if (attrs->rta_type == MPTCP_ATTR_FLAGS) {
++ __u16 flags = *(__u16 *)RTA_DATA(attrs);
++
++ /* only print when present, easier */
++ if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0)
++ fprintf(stderr, ",deny_join_id0:1");
++ }
+
+ attrs = RTA_NEXT(attrs, msg_len);
+ }
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index cb6c28d4012936..a82ac12e816d14 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -139,6 +139,9 @@ make_connection()
+ is_v6="v4"
+ fi
+
++ # set this on the client side only: will not affect the rest
++ ip netns exec "$ns2" sysctl -q net.mptcp.allow_join_initial_addr_port=0
++
+ # Capture netlink events over the two network namespaces running
+ # the MPTCP client and server
+ local client_evts
+@@ -173,21 +176,28 @@ make_connection()
+ local client_token
+ local client_port
+ local client_serverside
++ local client_nojoin
+ local server_token
+ local server_serverside
++ local server_nojoin
+
+ client_token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+ client_port=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+ client_serverside=$(sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q'\
+ "$client_evts")
++ client_nojoin=$(sed --unbuffered -n 's/.*\(deny_join_id0:\)\([[:digit:]]*\).*$/\2/p;q'\
++ "$client_evts")
+ kill_wait $server_evts_pid
+ server_token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
+ server_serverside=$(sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q'\
+ "$server_evts")
++ server_nojoin=$(sed --unbuffered -n 's/.*\(deny_join_id0:\)\([[:digit:]]*\).*$/\2/p;q'\
++ "$server_evts")
+ rm -f "$client_evts" "$server_evts" "$file"
+
+- if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
+- [ "$server_serverside" = 1 ]
++ if [ "${client_token}" != "" ] && [ "${server_token}" != "" ] &&
++ [ "${client_serverside}" = 0 ] && [ "${server_serverside}" = 1 ] &&
++ [ "${client_nojoin:-0}" = 0 ] && [ "${server_nojoin:-0}" = 1 ]
+ then
+ stdbuf -o0 -e0 printf "Established IP%s MPTCP Connection ns2 => ns1 \t\t[OK]\n" $is_v6
+ else