From: "Arisu Tachibana" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:6.6 commit in: /
Date: Thu, 24 Jul 2025 09:18:34 +0000 (UTC)
Message-ID: <1753348701.4e660f86be6502e7b0d201bf56af1d3ac3fb1f9c.alicef@gentoo>
commit: 4e660f86be6502e7b0d201bf56af1d3ac3fb1f9c
Author: Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Jul 24 09:18:21 2025 +0000
Commit: Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Jul 24 09:18:21 2025 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4e660f86
Linux patch 6.6.100
Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>
0000_README | 4 +
1099_linux-6.6.100.patch | 3139 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3143 insertions(+)
diff --git a/0000_README b/0000_README
index 18d87339..ade2cd5b 100644
--- a/0000_README
+++ b/0000_README
@@ -439,6 +439,10 @@ Patch: 1098_linux-6.6.99.patch
From: https://www.kernel.org
Desc: Linux 6.6.99
+Patch: 1099_linux-6.6.100.patch
+From: https://www.kernel.org
+Desc: Linux 6.6.100
+
Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
Desc: Enable link security restrictions by default.
diff --git a/1099_linux-6.6.100.patch b/1099_linux-6.6.100.patch
new file mode 100644
index 00000000..c112ce74
--- /dev/null
+++ b/1099_linux-6.6.100.patch
@@ -0,0 +1,3139 @@
+diff --git a/Makefile b/Makefile
+index 2aede51d98ea36..8d6550abc9cb2c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 99
++SUBLEVEL = 100
+ EXTRAVERSION =
+ NAME = Pinguïn Aangedreven
+
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index 6457d2c377017a..218b5482190784 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -470,6 +470,7 @@ reg_vdd_phy: LDO4 {
+ };
+
+ reg_nvcc_sd: LDO5 {
++ regulator-always-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "On-module +V3.3_1.8_SD (LDO5)";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+index faa370a5885ff4..34619f085623b5 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+@@ -185,7 +185,7 @@ tpm@0 {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0x0>;
+- spi-max-frequency = <36000000>;
++ spi-max-frequency = <25000000>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+index 2963d634baba99..403a295bde6ab2 100644
+--- a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+@@ -344,6 +344,18 @@ pmic_int: pmic-int {
+ <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
++
++ spi1 {
++ spi1_csn0_gpio_pin: spi1-csn0-gpio-pin {
++ rockchip,pins =
++ <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
++ };
++
++ spi1_csn1_gpio_pin: spi1-csn1-gpio-pin {
++ rockchip,pins =
++ <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up_4ma>;
++ };
++ };
+ };
+
+ &saradc {
+@@ -355,6 +367,17 @@ &sdmmc {
+ vqmmc-supply = <&vccio_sd>;
+ };
+
++&spi1 {
++ /*
++ * Hardware CS has a very slow rise time of about 6us,
++ * causing transmission errors.
++ * With cs-gpios we have a rise time of about 20ns.
++ */
++ cs-gpios = <&gpio3 RK_PB1 GPIO_ACTIVE_LOW>, <&gpio3 RK_PB2 GPIO_ACTIVE_LOW>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&spi1_clk &spi1_csn0_gpio_pin &spi1_csn1_gpio_pin &spi1_miso &spi1_mosi>;
++};
++
+ &tsadc {
+ status = "okay";
+ };
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 82778258855d1a..b6d381f743f3ec 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2804,6 +2804,13 @@ static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope)
+ }
+ #endif
+
++#ifdef CONFIG_ARM64_SME
++static bool has_sme_feature(const struct arm64_cpu_capabilities *cap, int scope)
++{
++ return system_supports_sme() && has_user_cpuid_feature(cap, scope);
++}
++#endif
++
+ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
+@@ -2875,20 +2882,20 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS),
+ HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
+ #ifdef CONFIG_ARM64_SME
+- HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
+- HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
++ HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+ #endif /* CONFIG_ARM64_SME */
+ {},
+ };
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index a40c7ff91caf03..2d8facfd4e4252 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -539,7 +539,15 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
+ {
+ memcpy(plt, &bpf_plt, sizeof(*plt));
+ plt->ret = ret;
+- plt->target = target;
++ /*
++ * (target == NULL) implies that the branch to this PLT entry was
++ * patched and became a no-op. However, some CPU could have jumped
++ * to this PLT entry before patching and may be still executing it.
++ *
++ * Since the intention in this case is to make the PLT entry a no-op,
++ * make the target point to the return label instead of NULL.
++ */
++ plt->target = target ?: ret;
+ }
+
+ /*
+diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
+index c4a158758cb740..94c7fee7a2259f 100644
+--- a/arch/x86/kvm/xen.c
++++ b/arch/x86/kvm/xen.c
+@@ -1260,7 +1260,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
+ if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
+ sched_poll.nr_ports * sizeof(*ports), &e)) {
+ *r = -EFAULT;
+- return true;
++ goto out;
+ }
+
+ for (i = 0; i < sched_poll.nr_ports; i++) {
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 74839f6f2e0cb3..8d15c73a520bd7 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -909,4 +909,5 @@ void blk_unregister_queue(struct gendisk *disk)
+ mutex_unlock(&q->sysfs_dir_lock);
+
+ blk_debugfs_remove(disk);
++ kobject_put(&disk->queue_kobj);
+ }
+diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
+index cc2c3a5a6d35a4..4f09f9a5a0431b 100644
+--- a/drivers/base/power/domain_governor.c
++++ b/drivers/base/power/domain_governor.c
+@@ -8,6 +8,7 @@
+ #include <linux/pm_domain.h>
+ #include <linux/pm_qos.h>
+ #include <linux/hrtimer.h>
++#include <linux/cpu.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpumask.h>
+ #include <linux/ktime.h>
+@@ -345,6 +346,8 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+ struct cpuidle_device *dev;
+ ktime_t domain_wakeup, next_hrtimer;
+ ktime_t now = ktime_get();
++ struct device *cpu_dev;
++ s64 cpu_constraint, global_constraint;
+ s64 idle_duration_ns;
+ int cpu, i;
+
+@@ -355,6 +358,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+ if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+ return true;
+
++ global_constraint = cpu_latency_qos_limit();
+ /*
+ * Find the next wakeup for any of the online CPUs within the PM domain
+ * and its subdomains. Note, we only need the genpd->cpus, as it already
+@@ -368,8 +372,16 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+ if (ktime_before(next_hrtimer, domain_wakeup))
+ domain_wakeup = next_hrtimer;
+ }
++
++ cpu_dev = get_cpu_device(cpu);
++ if (cpu_dev) {
++ cpu_constraint = dev_pm_qos_raw_resume_latency(cpu_dev);
++ if (cpu_constraint < global_constraint)
++ global_constraint = cpu_constraint;
++ }
+ }
+
++ global_constraint *= NSEC_PER_USEC;
+ /* The minimum idle duration is from now - until the next wakeup. */
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
+ if (idle_duration_ns <= 0)
+@@ -385,8 +397,10 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+ */
+ i = genpd->state_idx;
+ do {
+- if (idle_duration_ns >= (genpd->states[i].residency_ns +
+- genpd->states[i].power_off_latency_ns)) {
++ if ((idle_duration_ns >= (genpd->states[i].residency_ns +
++ genpd->states[i].power_off_latency_ns)) &&
++ (global_constraint >= (genpd->states[i].power_on_latency_ns +
++ genpd->states[i].power_off_latency_ns))) {
+ genpd->state_idx = i;
+ return true;
+ }
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index e0dd6988960883..db507a66fa8acd 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -3736,6 +3736,32 @@ static const struct qca_device_info qca_devices_table[] = {
+ { 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */
+ };
+
++static u16 qca_extract_board_id(const struct qca_version *ver)
++{
++ u16 flag = le16_to_cpu(ver->flag);
++ u16 board_id = 0;
++
++ if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
++ /* The board_id should be split into two bytes
++ * The 1st byte is chip ID, and the 2nd byte is platform ID
++ * For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID
++ * we have several platforms, and platform IDs are continuously added
++ * Platform ID:
++ * 0x00 is for Mobile
++ * 0x01 is for X86
++ * 0x02 is for Automotive
++ * 0x03 is for Consumer electronic
++ */
++ board_id = (ver->chip_id << 8) + ver->platform_id;
++ }
++
++ /* Take 0xffff as invalid board ID */
++ if (board_id == 0xffff)
++ board_id = 0;
++
++ return board_id;
++}
++
+ static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
+ void *data, u16 size)
+ {
+@@ -3892,44 +3918,28 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
+ const struct qca_version *ver)
+ {
+ u32 rom_version = le32_to_cpu(ver->rom_version);
+- u16 flag = le16_to_cpu(ver->flag);
++ const char *variant;
++ int len;
++ u16 board_id;
+
+- if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
+- /* The board_id should be split into two bytes
+- * The 1st byte is chip ID, and the 2nd byte is platform ID
+- * For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID
+- * we have several platforms, and platform IDs are continuously added
+- * Platform ID:
+- * 0x00 is for Mobile
+- * 0x01 is for X86
+- * 0x02 is for Automotive
+- * 0x03 is for Consumer electronic
+- */
+- u16 board_id = (ver->chip_id << 8) + ver->platform_id;
+- const char *variant;
++ board_id = qca_extract_board_id(ver);
+
+- switch (le32_to_cpu(ver->ram_version)) {
+- case WCN6855_2_0_RAM_VERSION_GF:
+- case WCN6855_2_1_RAM_VERSION_GF:
+- variant = "_gf";
+- break;
+- default:
+- variant = "";
+- break;
+- }
+-
+- if (board_id == 0) {
+- snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin",
+- rom_version, variant);
+- } else {
+- snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin",
+- rom_version, variant, board_id);
+- }
+- } else {
+- snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin",
+- rom_version);
++ switch (le32_to_cpu(ver->ram_version)) {
++ case WCN6855_2_0_RAM_VERSION_GF:
++ case WCN6855_2_1_RAM_VERSION_GF:
++ variant = "_gf";
++ break;
++ default:
++ variant = NULL;
++ break;
+ }
+
++ len = snprintf(fwname, max_size, "qca/nvm_usb_%08x", rom_version);
++ if (variant)
++ len += snprintf(fwname + len, max_size - len, "%s", variant);
++ if (board_id)
++ len += snprintf(fwname + len, max_size - len, "_%04x", board_id);
++ len += snprintf(fwname + len, max_size - len, ".bin");
+ }
+
+ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
+diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
+index 81763e3f948467..e4d62cdaff462d 100644
+--- a/drivers/comedi/comedi_fops.c
++++ b/drivers/comedi/comedi_fops.c
+@@ -1556,21 +1556,27 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
+ }
+
+ for (i = 0; i < n_insns; ++i) {
++ unsigned int n = insns[i].n;
++
+ if (insns[i].insn & INSN_MASK_WRITE) {
+ if (copy_from_user(data, insns[i].data,
+- insns[i].n * sizeof(unsigned int))) {
++ n * sizeof(unsigned int))) {
+ dev_dbg(dev->class_dev,
+ "copy_from_user failed\n");
+ ret = -EFAULT;
+ goto error;
+ }
++ if (n < MIN_SAMPLES) {
++ memset(&data[n], 0, (MIN_SAMPLES - n) *
++ sizeof(unsigned int));
++ }
+ }
+ ret = parse_insn(dev, insns + i, data, file);
+ if (ret < 0)
+ goto error;
+ if (insns[i].insn & INSN_MASK_READ) {
+ if (copy_to_user(insns[i].data, data,
+- insns[i].n * sizeof(unsigned int))) {
++ n * sizeof(unsigned int))) {
+ dev_dbg(dev->class_dev,
+ "copy_to_user failed\n");
+ ret = -EFAULT;
+@@ -1589,6 +1595,16 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
+ return i;
+ }
+
++#define MAX_INSNS MAX_SAMPLES
++static int check_insnlist_len(struct comedi_device *dev, unsigned int n_insns)
++{
++ if (n_insns > MAX_INSNS) {
++ dev_dbg(dev->class_dev, "insnlist length too large\n");
++ return -EINVAL;
++ }
++ return 0;
++}
++
+ /*
+ * COMEDI_INSN ioctl
+ * synchronous instruction
+@@ -1633,6 +1649,10 @@ static int do_insn_ioctl(struct comedi_device *dev,
+ ret = -EFAULT;
+ goto error;
+ }
++ if (insn->n < MIN_SAMPLES) {
++ memset(&data[insn->n], 0,
++ (MIN_SAMPLES - insn->n) * sizeof(unsigned int));
++ }
+ }
+ ret = parse_insn(dev, insn, data, file);
+ if (ret < 0)
+@@ -2239,6 +2259,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+ rc = -EFAULT;
+ break;
+ }
++ rc = check_insnlist_len(dev, insnlist.n_insns);
++ if (rc)
++ break;
+ insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
+ if (!insns) {
+ rc = -ENOMEM;
+@@ -3090,6 +3113,9 @@ static int compat_insnlist(struct file *file, unsigned long arg)
+ if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32)))
+ return -EFAULT;
+
++ rc = check_insnlist_len(dev, insnlist32.n_insns);
++ if (rc)
++ return rc;
+ insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
+index d4e2ed709bfc7a..086213bcc49933 100644
+--- a/drivers/comedi/drivers.c
++++ b/drivers/comedi/drivers.c
+@@ -338,10 +338,10 @@ int comedi_dio_insn_config(struct comedi_device *dev,
+ unsigned int *data,
+ unsigned int mask)
+ {
+- unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
++ unsigned int chan = CR_CHAN(insn->chanspec);
+
+- if (!mask)
+- mask = chan_mask;
++ if (!mask && chan < 32)
++ mask = 1U << chan;
+
+ switch (data[0]) {
+ case INSN_CONFIG_DIO_INPUT:
+@@ -381,7 +381,7 @@ EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
+ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
+ unsigned int *data)
+ {
+- unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
++ unsigned int chanmask = (s->n_chan < 32) ? ((1U << s->n_chan) - 1)
+ : 0xffffffff;
+ unsigned int mask = data[0] & chanmask;
+ unsigned int bits = data[1];
+@@ -614,6 +614,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
+ unsigned int _data[2];
+ int ret;
+
++ if (insn->n == 0)
++ return 0;
++
+ memset(_data, 0, sizeof(_data));
+ memset(&_insn, 0, sizeof(_insn));
+ _insn.insn = INSN_BITS;
+@@ -624,8 +627,8 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
+ if (insn->insn == INSN_WRITE) {
+ if (!(s->subdev_flags & SDF_WRITABLE))
+ return -EINVAL;
+- _data[0] = 1 << (chan - base_chan); /* mask */
+- _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
++ _data[0] = 1U << (chan - base_chan); /* mask */
++ _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */
+ }
+
+ ret = s->insn_bits(dev, s, &_insn, _data);
+@@ -708,7 +711,7 @@ static int __comedi_device_postconfig(struct comedi_device *dev)
+
+ if (s->type == COMEDI_SUBD_DO) {
+ if (s->n_chan < 32)
+- s->io_bits = (1 << s->n_chan) - 1;
++ s->io_bits = (1U << s->n_chan) - 1;
+ else
+ s->io_bits = 0xffffffff;
+ }
+diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c
+index b00fab0b89d4c4..739cc4db52ac7e 100644
+--- a/drivers/comedi/drivers/aio_iiro_16.c
++++ b/drivers/comedi/drivers/aio_iiro_16.c
+@@ -177,7 +177,8 @@ static int aio_iiro_16_attach(struct comedi_device *dev,
+ * Digital input change of state interrupts are optionally supported
+ * using IRQ 2-7, 10-12, 14, or 15.
+ */
+- if ((1 << it->options[1]) & 0xdcfc) {
++ if (it->options[1] > 0 && it->options[1] < 16 &&
++ (1 << it->options[1]) & 0xdcfc) {
+ ret = request_irq(it->options[1], aio_iiro_16_cos, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c
+index 275effb77746b6..99e63d51c506c5 100644
+--- a/drivers/comedi/drivers/das16m1.c
++++ b/drivers/comedi/drivers/das16m1.c
+@@ -522,7 +522,8 @@ static int das16m1_attach(struct comedi_device *dev,
+ devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE;
+
+ /* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
+- if ((1 << it->options[1]) & 0xdcfc) {
++ if (it->options[1] >= 2 && it->options[1] <= 15 &&
++ (1 << it->options[1]) & 0xdcfc) {
+ ret = request_irq(it->options[1], das16m1_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c
+index 1af394591e7480..e730668236c268 100644
+--- a/drivers/comedi/drivers/das6402.c
++++ b/drivers/comedi/drivers/das6402.c
+@@ -567,7 +567,8 @@ static int das6402_attach(struct comedi_device *dev,
+ das6402_reset(dev);
+
+ /* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */
+- if ((1 << it->options[1]) & 0x8cec) {
++ if (it->options[1] > 0 && it->options[1] < 16 &&
++ (1 << it->options[1]) & 0x8cec) {
+ ret = request_irq(it->options[1], das6402_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0) {
+diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c
+index 70dbc129fcf558..e0459f286fa613 100644
+--- a/drivers/comedi/drivers/pcl812.c
++++ b/drivers/comedi/drivers/pcl812.c
+@@ -1149,7 +1149,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+ if (!dev->pacer)
+ return -ENOMEM;
+
+- if ((1 << it->options[1]) & board->irq_bits) {
++ if (it->options[1] > 0 && it->options[1] < 16 &&
++ (1 << it->options[1]) & board->irq_bits) {
+ ret = request_irq(it->options[1], pcl812_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
+index 0b2f96fd8bf0c6..a361f8c29cd38d 100644
+--- a/drivers/dma/nbpfaxi.c
++++ b/drivers/dma/nbpfaxi.c
+@@ -1351,7 +1351,7 @@ static int nbpf_probe(struct platform_device *pdev)
+ if (irqs == 1) {
+ eirq = irqbuf[0];
+
+- for (i = 0; i <= num_channels; i++)
++ for (i = 0; i < num_channels; i++)
+ nbpf->chan[i].irq = irqbuf[0];
+ } else {
+ eirq = platform_get_irq_byname(pdev, "error");
+@@ -1361,16 +1361,15 @@ static int nbpf_probe(struct platform_device *pdev)
+ if (irqs == num_channels + 1) {
+ struct nbpf_channel *chan;
+
+- for (i = 0, chan = nbpf->chan; i <= num_channels;
++ for (i = 0, chan = nbpf->chan; i < num_channels;
+ i++, chan++) {
+ /* Skip the error IRQ */
+ if (irqbuf[i] == eirq)
+ i++;
++ if (i >= ARRAY_SIZE(irqbuf))
++ return -EINVAL;
+ chan->irq = irqbuf[i];
+ }
+-
+- if (chan != nbpf->chan + num_channels)
+- return -EINVAL;
+ } else {
+ /* 2 IRQs and more than one channel */
+ if (irqbuf[0] == eirq)
+@@ -1378,7 +1377,7 @@ static int nbpf_probe(struct platform_device *pdev)
+ else
+ irq = irqbuf[0];
+
+- for (i = 0; i <= num_channels; i++)
++ for (i = 0; i < num_channels; i++)
+ nbpf->chan[i].irq = irq;
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 1943beb135c4c2..472cb0f9e8f6c0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -4656,6 +4656,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
+ /* reset ring buffer */
+ ring->wptr = 0;
++ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
+ amdgpu_ring_clear_ring(ring);
+ }
+ return 0;
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 8f156a69912c89..266cd56dec5056 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1873,9 +1873,12 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
+ /*
+ * 7 extra bytes are necessary to achieve proper functionality
+ * of implement() working on 8 byte chunks
++ * 1 extra byte for the report ID if it is null (not used) so
++ * we can reserve that extra byte in the first position of the buffer
++ * when sending it to .raw_request()
+ */
+
+- u32 len = hid_report_len(report) + 7;
++ u32 len = hid_report_len(report) + 7 + (report->id == 0);
+
+ return kzalloc(len, flags);
+ }
+@@ -1938,7 +1941,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
+ int __hid_request(struct hid_device *hid, struct hid_report *report,
+ enum hid_class_request reqtype)
+ {
+- char *buf;
++ char *buf, *data_buf;
+ int ret;
+ u32 len;
+
+@@ -1946,13 +1949,19 @@ int __hid_request(struct hid_device *hid, struct hid_report *report,
+ if (!buf)
+ return -ENOMEM;
+
++ data_buf = buf;
+ len = hid_report_len(report);
+
++ if (report->id == 0) {
++ /* reserve the first byte for the report ID */
++ data_buf++;
++ len++;
++ }
++
+ if (reqtype == HID_REQ_SET_REPORT)
+- hid_output_report(report, buf);
++ hid_output_report(report, data_buf);
+
+- ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
+- report->type, reqtype);
++ ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype);
+ if (ret < 0) {
+ dbg_hid("unable to complete request: %d\n", ret);
+ goto out;
+diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
+index 280b90646a8735..b37f36f55f88a5 100644
+--- a/drivers/hwmon/corsair-cpro.c
++++ b/drivers/hwmon/corsair-cpro.c
+@@ -84,6 +84,7 @@ struct ccp_device {
+ struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */
+ u8 *cmd_buffer;
+ u8 *buffer;
++ int buffer_recv_size; /* number of received bytes in buffer */
+ int target[6];
+ DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS);
+ DECLARE_BITMAP(fan_cnct, NUM_FANS);
+@@ -139,6 +140,9 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2,
+ if (!t)
+ return -ETIMEDOUT;
+
++ if (ccp->buffer_recv_size != IN_BUFFER_SIZE)
++ return -EPROTO;
++
+ return ccp_get_errno(ccp);
+ }
+
+@@ -150,6 +154,7 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8
+ spin_lock(&ccp->wait_input_report_lock);
+ if (!completion_done(&ccp->wait_input_report)) {
+ memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
++ ccp->buffer_recv_size = size;
+ complete_all(&ccp->wait_input_report);
+ }
+ spin_unlock(&ccp->wait_input_report_lock);
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 982007a112c2a0..8d4270664ebd11 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -899,6 +899,7 @@ config I2C_OMAP
+ tristate "OMAP I2C adapter"
+ depends on ARCH_OMAP || ARCH_K3 || COMPILE_TEST
+ default MACH_OMAP_OSK
++ select MULTIPLEXER
+ help
+ If you say yes to this option, support will be included for the
+ I2C interface on the Texas Instruments OMAP1/2 family of processors.
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index 22975bfd6b252b..fad88ab32716f0 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -24,6 +24,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
+ #include <linux/io.h>
++#include <linux/mux/consumer.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/slab.h>
+@@ -211,6 +212,7 @@ struct omap_i2c_dev {
+ u16 syscstate;
+ u16 westate;
+ u16 errata;
++ struct mux_state *mux_state;
+ };
+
+ static const u8 reg_map_ip_v1[] = {
+@@ -1455,8 +1457,27 @@ omap_i2c_probe(struct platform_device *pdev)
+ (1000 * omap->speed / 8);
+ }
+
++ if (of_property_present(node, "mux-states")) {
++ struct mux_state *mux_state;
++
++ mux_state = devm_mux_state_get(&pdev->dev, NULL);
++ if (IS_ERR(mux_state)) {
++ r = PTR_ERR(mux_state);
++ dev_dbg(&pdev->dev, "failed to get I2C mux: %d\n", r);
++ goto err_put_pm;
++ }
++ omap->mux_state = mux_state;
++ r = mux_state_select(omap->mux_state);
++ if (r) {
++ dev_err(&pdev->dev, "failed to select I2C mux: %d\n", r);
++ goto err_put_pm;
++ }
++ }
++
+ /* reset ASAP, clearing any IRQs */
+- omap_i2c_init(omap);
++ r = omap_i2c_init(omap);
++ if (r)
++ goto err_mux_state_deselect;
+
+ if (omap->rev < OMAP_I2C_OMAP1_REV_2)
+ r = devm_request_irq(&pdev->dev, omap->irq, omap_i2c_omap1_isr,
+@@ -1499,6 +1520,10 @@ omap_i2c_probe(struct platform_device *pdev)
+
+ err_unuse_clocks:
+ omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
++err_mux_state_deselect:
++ if (omap->mux_state)
++ mux_state_deselect(omap->mux_state);
++err_put_pm:
+ pm_runtime_dont_use_autosuspend(omap->dev);
+ pm_runtime_put_sync(omap->dev);
+ err_disable_pm:
+@@ -1514,6 +1539,9 @@ static void omap_i2c_remove(struct platform_device *pdev)
+
+ i2c_del_adapter(&omap->adapter);
+
++ if (omap->mux_state)
++ mux_state_deselect(omap->mux_state);
++
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ dev_err(omap->dev, "Failed to resume hardware, skip disable\n");
+diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
+index 157c64e27d0bd3..f84ec056e36dfe 100644
+--- a/drivers/i2c/busses/i2c-stm32.c
++++ b/drivers/i2c/busses/i2c-stm32.c
+@@ -102,7 +102,6 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
+ void *dma_async_param)
+ {
+ struct dma_async_tx_descriptor *txdesc;
+- struct device *chan_dev;
+ int ret;
+
+ if (rd_wr) {
+@@ -116,11 +115,10 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
+ }
+
+ dma->dma_len = len;
+- chan_dev = dma->chan_using->device->dev;
+
+- dma->dma_buf = dma_map_single(chan_dev, buf, dma->dma_len,
++ dma->dma_buf = dma_map_single(dev, buf, dma->dma_len,
+ dma->dma_data_dir);
+- if (dma_mapping_error(chan_dev, dma->dma_buf)) {
++ if (dma_mapping_error(dev, dma->dma_buf)) {
+ dev_err(dev, "DMA mapping failed\n");
+ return -EINVAL;
+ }
+@@ -150,7 +148,7 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
+ return 0;
+
+ err:
+- dma_unmap_single(chan_dev, dma->dma_buf, dma->dma_len,
++ dma_unmap_single(dev, dma->dma_buf, dma->dma_len,
+ dma->dma_data_dir);
+ return ret;
+ }
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
+index b4f10ff31102b6..85f8fd6a21ec4d 100644
+--- a/drivers/i2c/busses/i2c-stm32f7.c
++++ b/drivers/i2c/busses/i2c-stm32f7.c
+@@ -728,10 +728,10 @@ static void stm32f7_i2c_dma_callback(void *arg)
+ {
+ struct stm32f7_i2c_dev *i2c_dev = (struct stm32f7_i2c_dev *)arg;
+ struct stm32_i2c_dma *dma = i2c_dev->dma;
+- struct device *dev = dma->chan_using->device->dev;
+
+ stm32f7_i2c_disable_dma_req(i2c_dev);
+- dma_unmap_single(dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir);
++ dma_unmap_single(i2c_dev->dev, dma->dma_buf, dma->dma_len,
++ dma->dma_data_dir);
+ complete(&dma->dma_complete);
+ }
+
+diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
+index 17820b2c3a1dec..f4e253c6c7d70c 100644
+--- a/drivers/iio/accel/fxls8962af-core.c
++++ b/drivers/iio/accel/fxls8962af-core.c
+@@ -865,6 +865,8 @@ static int fxls8962af_buffer_predisable(struct iio_dev *indio_dev)
+ if (ret)
+ return ret;
+
++ synchronize_irq(data->irq);
++
+ ret = __fxls8962af_fifo_set_mode(data, false);
+
+ if (data->enable_event)
+diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
+index b31581616ce34e..5304235d733f18 100644
+--- a/drivers/iio/adc/max1363.c
++++ b/drivers/iio/adc/max1363.c
+@@ -510,10 +510,10 @@ static const struct iio_event_spec max1363_events[] = {
+ MAX1363_CHAN_U(1, _s1, 1, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_U(2, _s2, 2, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_U(3, _s3, 3, bits, ev_spec, num_ev_spec), \
+- MAX1363_CHAN_B(0, 1, d0m1, 4, bits, ev_spec, num_ev_spec), \
+- MAX1363_CHAN_B(2, 3, d2m3, 5, bits, ev_spec, num_ev_spec), \
+- MAX1363_CHAN_B(1, 0, d1m0, 6, bits, ev_spec, num_ev_spec), \
+- MAX1363_CHAN_B(3, 2, d3m2, 7, bits, ev_spec, num_ev_spec), \
++ MAX1363_CHAN_B(0, 1, d0m1, 12, bits, ev_spec, num_ev_spec), \
++ MAX1363_CHAN_B(2, 3, d2m3, 13, bits, ev_spec, num_ev_spec), \
++ MAX1363_CHAN_B(1, 0, d1m0, 18, bits, ev_spec, num_ev_spec), \
++ MAX1363_CHAN_B(3, 2, d3m2, 19, bits, ev_spec, num_ev_spec), \
+ IIO_CHAN_SOFT_TIMESTAMP(8) \
+ }
+
+@@ -531,23 +531,23 @@ static const struct iio_chan_spec max1363_channels[] =
+ /* Applies to max1236, max1237 */
+ static const enum max1363_modes max1236_mode_list[] = {
+ _s0, _s1, _s2, _s3,
+- s0to1, s0to2, s0to3,
++ s0to1, s0to2, s2to3, s0to3,
+ d0m1, d2m3, d1m0, d3m2,
+ d0m1to2m3, d1m0to3m2,
+- s2to3,
+ };
+
+ /* Applies to max1238, max1239 */
+ static const enum max1363_modes max1238_mode_list[] = {
+ _s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
+ s0to1, s0to2, s0to3, s0to4, s0to5, s0to6,
++ s6to7, s6to8, s6to9, s6to10, s6to11,
+ s0to7, s0to8, s0to9, s0to10, s0to11,
+ d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
+ d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
+- d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11,
+- d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10,
+- s6to7, s6to8, s6to9, s6to10, s6to11,
+- d6m7to8m9, d6m7to10m11, d7m6to9m8, d7m6to11m10,
++ d0m1to2m3, d0m1to4m5, d0m1to6m7, d6m7to8m9,
++ d0m1to8m9, d6m7to10m11, d0m1to10m11, d1m0to3m2,
++ d1m0to5m4, d1m0to7m6, d7m6to9m8, d1m0to9m8,
++ d7m6to11m10, d1m0to11m10,
+ };
+
+ #define MAX1363_12X_CHANS(bits) { \
+@@ -583,16 +583,15 @@ static const struct iio_chan_spec max1238_channels[] = MAX1363_12X_CHANS(12);
+
+ static const enum max1363_modes max11607_mode_list[] = {
+ _s0, _s1, _s2, _s3,
+- s0to1, s0to2, s0to3,
+- s2to3,
++ s0to1, s0to2, s2to3,
++ s0to3,
+ d0m1, d2m3, d1m0, d3m2,
+ d0m1to2m3, d1m0to3m2,
+ };
+
+ static const enum max1363_modes max11608_mode_list[] = {
+ _s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7,
+- s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s0to7,
+- s6to7,
++ s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s6to7, s0to7,
+ d0m1, d2m3, d4m5, d6m7,
+ d1m0, d3m2, d5m4, d7m6,
+ d0m1to2m3, d0m1to4m5, d0m1to6m7,
+@@ -608,14 +607,14 @@ static const enum max1363_modes max11608_mode_list[] = {
+ MAX1363_CHAN_U(5, _s5, 5, bits, NULL, 0), \
+ MAX1363_CHAN_U(6, _s6, 6, bits, NULL, 0), \
+ MAX1363_CHAN_U(7, _s7, 7, bits, NULL, 0), \
+- MAX1363_CHAN_B(0, 1, d0m1, 8, bits, NULL, 0), \
+- MAX1363_CHAN_B(2, 3, d2m3, 9, bits, NULL, 0), \
+- MAX1363_CHAN_B(4, 5, d4m5, 10, bits, NULL, 0), \
+- MAX1363_CHAN_B(6, 7, d6m7, 11, bits, NULL, 0), \
+- MAX1363_CHAN_B(1, 0, d1m0, 12, bits, NULL, 0), \
+- MAX1363_CHAN_B(3, 2, d3m2, 13, bits, NULL, 0), \
+- MAX1363_CHAN_B(5, 4, d5m4, 14, bits, NULL, 0), \
+- MAX1363_CHAN_B(7, 6, d7m6, 15, bits, NULL, 0), \
++ MAX1363_CHAN_B(0, 1, d0m1, 12, bits, NULL, 0), \
++ MAX1363_CHAN_B(2, 3, d2m3, 13, bits, NULL, 0), \
++ MAX1363_CHAN_B(4, 5, d4m5, 14, bits, NULL, 0), \
++ MAX1363_CHAN_B(6, 7, d6m7, 15, bits, NULL, 0), \
++ MAX1363_CHAN_B(1, 0, d1m0, 18, bits, NULL, 0), \
++ MAX1363_CHAN_B(3, 2, d3m2, 19, bits, NULL, 0), \
++ MAX1363_CHAN_B(5, 4, d5m4, 20, bits, NULL, 0), \
++ MAX1363_CHAN_B(7, 6, d7m6, 21, bits, NULL, 0), \
+ IIO_CHAN_SOFT_TIMESTAMP(16) \
+ }
+ static const struct iio_chan_spec max11602_channels[] = MAX1363_8X_CHANS(8);
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index bbd5bdd732f01b..9f1566a378de47 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -428,10 +428,9 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
+ return -ENOMEM;
+ }
+
+- for (i = 0; i < priv->cfg->num_irqs; i++) {
+- irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler);
+- irq_set_handler_data(priv->irq[i], priv);
+- }
++ for (i = 0; i < priv->cfg->num_irqs; i++)
++ irq_set_chained_handler_and_data(priv->irq[i],
++ stm32_adc_irq_handler, priv);
+
+ return 0;
+ }
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 05c00421ff2b7e..09fcc14051f285 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -169,12 +169,12 @@ static const struct xpad_device {
+ { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
+ { 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX },
+ { 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 },
++ { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX360 },
+ { 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 },
+ { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX },
+ { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
+ { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX },
+ { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX },
+- { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX },
+ { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX },
+ { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX },
+ { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 2d3afeaf886877..02838beee9d121 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -2708,7 +2708,11 @@ static unsigned long __evict_many(struct dm_bufio_client *c,
+ __make_buffer_clean(b);
+ __free_buffer_wake(b);
+
+- cond_resched();
++ if (need_resched()) {
++ dm_bufio_unlock(c);
++ cond_resched();
++ dm_bufio_lock(c);
++ }
+ }
+
+ return count;
+diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
+index bbfaf6536903d3..ac71abdce1b254 100644
+--- a/drivers/memstick/core/memstick.c
++++ b/drivers/memstick/core/memstick.c
+@@ -323,7 +323,7 @@ EXPORT_SYMBOL(memstick_init_req);
+ static int h_memstick_read_dev_id(struct memstick_dev *card,
+ struct memstick_request **mrq)
+ {
+- struct ms_id_register id_reg;
++ struct ms_id_register id_reg = {};
+
+ if (!(*mrq)) {
+ memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
+diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
+index 35d8fdea668b91..f923447ed2ce23 100644
+--- a/drivers/mmc/host/bcm2835.c
++++ b/drivers/mmc/host/bcm2835.c
+@@ -502,7 +502,8 @@ void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data)
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!desc) {
+- dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data);
++ dma_unmap_sg(dma_chan->device->dev, data->sg, data->sg_len,
++ dir_data);
+ return;
+ }
+
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index dbfe0a5324eaf0..2ea5357e3bf0ea 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -911,7 +911,8 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
+ {
+ return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
+ (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
+- dmi_match(DMI_SYS_VENDOR, "IRBIS"));
++ dmi_match(DMI_SYS_VENDOR, "IRBIS") ||
++ dmi_match(DMI_SYS_VENDOR, "Positivo Tecnologia SA"));
+ }
+
+ static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index 562034af653ebb..fb89b6062351ad 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -559,7 +559,8 @@ static struct sdhci_ops sdhci_am654_ops = {
+ static const struct sdhci_pltfm_data sdhci_am654_pdata = {
+ .ops = &sdhci_am654_ops,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
+ };
+
+ static const struct sdhci_am654_driver_data sdhci_am654_sr1_drvdata = {
+@@ -589,7 +590,8 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
+ static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
+ .ops = &sdhci_j721e_8bit_ops,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
+ };
+
+ static const struct sdhci_am654_driver_data sdhci_j721e_8bit_drvdata = {
+@@ -613,7 +615,8 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
+ static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
+ .ops = &sdhci_j721e_4bit_ops,
+ .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
+ };
+
+ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
+diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
+index 4db0b770420e65..8ed9918ea4e894 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lag.c
++++ b/drivers/net/ethernet/intel/ice/ice_lag.c
+@@ -2129,7 +2129,8 @@ bool ice_lag_is_switchdev_running(struct ice_pf *pf)
+ struct ice_lag *lag = pf->lag;
+ struct net_device *tmp_nd;
+
+- if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
++ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) ||
++ !lag || !lag->upper_netdev)
+ return false;
+
+ rcu_read_lock();
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 57b0e26696e306..d5731f7be04fd1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1159,8 +1159,9 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
+ }
+ }
+
+-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+- u32 cqe_bcnt)
++static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
++ struct mlx5_cqe64 *cqe,
++ u32 cqe_bcnt)
+ {
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct tcphdr *tcp;
+@@ -1211,6 +1212,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+ tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
+ IPPROTO_TCP, check);
+ }
++
++ return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
+ }
+
+ static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
+@@ -1567,8 +1570,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
+
+ if (lro_num_seg > 1) {
+- mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+- skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
++ unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
++
++ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
+ /* Subtract one since we already counted this as one
+ * "regular" packet in mlx5e_complete_rx_cqe()
+ */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 96136229b1b070..32fa789a696056 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -2206,6 +2206,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
+ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
+ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
+ { PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */
++ { PCI_VDEVICE(MELLANOX, 0x1027) }, /* ConnectX-10 */
+ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
+ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+index d6bc2309d2a388..663a8988d27a73 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+@@ -1348,7 +1348,6 @@ static void wx_configure_rx_ring(struct wx *wx,
+ struct wx_ring *ring)
+ {
+ u16 reg_idx = ring->reg_idx;
+- union wx_rx_desc *rx_desc;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+
+@@ -1378,9 +1377,9 @@ static void wx_configure_rx_ring(struct wx *wx,
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct wx_rx_buffer) * ring->count);
+
+- /* initialize Rx descriptor 0 */
+- rx_desc = WX_RX_DESC(ring, 0);
+- rx_desc->wb.upper.length = 0;
++ /* reset ntu and ntc to place SW in sync with hardware */
++ ring->next_to_clean = 0;
++ ring->next_to_use = 0;
+
+ /* enable receive descriptor ring */
+ wr32m(wx, WX_PX_RR_CFG(reg_idx),
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+index 97c6b4d2763433..23dbe4e4b36c04 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
++++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+@@ -171,10 +171,6 @@ static void wx_dma_sync_frag(struct wx_ring *rx_ring,
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ DMA_FROM_DEVICE);
+-
+- /* If the page was released, just unmap it. */
+- if (unlikely(WX_CB(skb)->page_released))
+- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+ }
+
+ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
+@@ -224,10 +220,6 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
+ {
+- if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
+- /* the page has been released from the ring */
+- WX_CB(skb)->page_released = true;
+-
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+ rx_buffer->skb = NULL;
+@@ -315,7 +307,7 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
+ return false;
+ dma = page_pool_get_dma_addr(page);
+
+- bi->page_dma = dma;
++ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+
+@@ -352,7 +344,7 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
+ DMA_FROM_DEVICE);
+
+ rx_desc->read.pkt_addr =
+- cpu_to_le64(bi->page_dma + bi->page_offset);
++ cpu_to_le64(bi->dma + bi->page_offset);
+
+ rx_desc++;
+ bi++;
+@@ -365,6 +357,8 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.upper.status_error = 0;
++ /* clear the length for the next_to_use descriptor */
++ rx_desc->wb.upper.length = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+@@ -2158,9 +2152,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
+ if (rx_buffer->skb) {
+ struct sk_buff *skb = rx_buffer->skb;
+
+- if (WX_CB(skb)->page_released)
+- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+-
+ dev_kfree_skb(skb);
+ }
+
+@@ -2184,6 +2175,9 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
+ }
+ }
+
++ /* Zero out the descriptor ring */
++ memset(rx_ring->desc, 0, rx_ring->size);
++
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+index c555af9ed51b29..0fef9dfdd9a6b7 100644
+--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
++++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
+@@ -668,7 +668,6 @@ enum wx_reset_type {
+ struct wx_cb {
+ dma_addr_t dma;
+ u16 append_cnt; /* number of skb's appended */
+- bool page_released;
+ bool dma_released;
+ };
+
+@@ -756,7 +755,6 @@ struct wx_tx_buffer {
+ struct wx_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+- dma_addr_t page_dma;
+ struct page *page;
+ unsigned int page_offset;
+ };
+diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+index b358ecc6722781..0eff5d4fe35dff 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
++++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+@@ -285,7 +285,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
+
+ /* Read the remaining data */
+ for (; length > 0; length--)
+- *to_u8_ptr = *from_u8_ptr;
++ *to_u8_ptr++ = *from_u8_ptr++;
+ }
+ }
+
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index ce6ac26131b347..f33f9167ba6b6e 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2313,8 +2313,11 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
+ if (!ndev)
+ return NOTIFY_DONE;
+
+- /* set slave flag before open to prevent IPv6 addrconf */
++ /* Set slave flag and no addrconf flag before open
++ * to prevent IPv6 addrconf.
++ */
+ vf_netdev->flags |= IFF_SLAVE;
++ vf_netdev->priv_flags |= IFF_NO_ADDRCONF;
+ return NOTIFY_DONE;
+ }
+
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index cde0e80474a1de..875788918bcb3d 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -3377,7 +3377,8 @@ static int phy_probe(struct device *dev)
+ /* Get the LEDs from the device tree, and instantiate standard
+ * LEDs for them.
+ */
+- if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
++ if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) &&
++ !phy_driver_is_genphy_10g(phydev))
+ err = of_phy_leds(phydev);
+
+ out:
+@@ -3394,7 +3395,8 @@ static int phy_remove(struct device *dev)
+
+ cancel_delayed_work_sync(&phydev->state_queue);
+
+- if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
++ if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) &&
++ !phy_driver_is_genphy_10g(phydev))
+ phy_leds_unregister(phydev);
+
+ phydev->state = PHY_DOWN;
+diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
+index 673d3aa8379267..42b66adb35f1bc 100644
+--- a/drivers/net/usb/sierra_net.c
++++ b/drivers/net/usb/sierra_net.c
+@@ -689,6 +689,10 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
+ status);
+ return -ENODEV;
+ }
++ if (!dev->status) {
++ dev_err(&dev->udev->dev, "No status endpoint found");
++ return -ENODEV;
++ }
+ /* Initialize sierra private data */
+ priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ if (!priv)
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 6e2d0fda3ba4aa..13221cc0d17d43 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -689,6 +689,10 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
+ !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
+ !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+ return BLK_STS_RESOURCE;
++
++ if (!(rq->rq_flags & RQF_DONTPREP))
++ nvme_clear_nvme_request(rq);
++
+ return nvme_host_path_error(rq);
+ }
+ EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
+@@ -3596,7 +3600,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
+ return;
+ }
+ }
+- list_add(&ns->list, &ns->ctrl->namespaces);
++ list_add_rcu(&ns->list, &ns->ctrl->namespaces);
+ }
+
+ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
+diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
+index 1356ec93bfd00c..66e468af0f2fe8 100644
+--- a/drivers/nvmem/imx-ocotp-ele.c
++++ b/drivers/nvmem/imx-ocotp-ele.c
+@@ -12,6 +12,7 @@
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
++#include <linux/if_ether.h> /* ETH_ALEN */
+
+ enum fuse_type {
+ FUSE_FSB = 1,
+@@ -114,9 +115,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index,
+ int i;
+
+ /* Deal with some post processing of nvmem cell data */
+- if (id && !strcmp(id, "mac-address"))
++ if (id && !strcmp(id, "mac-address")) {
++ bytes = min(bytes, ETH_ALEN);
+ for (i = 0; i < bytes / 2; i++)
+ swap(buf[i], buf[bytes - i - 1]);
++ }
+
+ return 0;
+ }
+diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
+index 79dd4fda03295a..7bf7656d4f9631 100644
+--- a/drivers/nvmem/imx-ocotp.c
++++ b/drivers/nvmem/imx-ocotp.c
+@@ -23,6 +23,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ #include <linux/delay.h>
++#include <linux/if_ether.h> /* ETH_ALEN */
+
+ #define IMX_OCOTP_OFFSET_B0W0 0x400 /* Offset from base address of the
+ * OTP Bank0 Word0
+@@ -227,9 +228,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index,
+ int i;
+
+ /* Deal with some post processing of nvmem cell data */
+- if (id && !strcmp(id, "mac-address"))
++ if (id && !strcmp(id, "mac-address")) {
++ bytes = min(bytes, ETH_ALEN);
+ for (i = 0; i < bytes / 2; i++)
+ swap(buf[i], buf[bytes - i - 1]);
++ }
+
+ return 0;
+ }
+diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
+index adabbfdad6fb6d..8712d3709a2683 100644
+--- a/drivers/nvmem/u-boot-env.c
++++ b/drivers/nvmem/u-boot-env.c
+@@ -132,7 +132,7 @@ static int u_boot_env_parse(struct u_boot_env *priv)
+ size_t crc32_data_offset;
+ size_t crc32_data_len;
+ size_t crc32_offset;
+- __le32 *crc32_addr;
++ uint32_t *crc32_addr;
+ size_t data_offset;
+ size_t data_len;
+ size_t dev_size;
+@@ -183,8 +183,8 @@ static int u_boot_env_parse(struct u_boot_env *priv)
+ goto err_kfree;
+ }
+
+- crc32_addr = (__le32 *)(buf + crc32_offset);
+- crc32 = le32_to_cpu(*crc32_addr);
++ crc32_addr = (uint32_t *)(buf + crc32_offset);
++ crc32 = *crc32_addr;
+ crc32_data_len = dev_size - crc32_data_offset;
+ data_len = dev_size - data_offset;
+
+diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
+index 23a23f2d64e586..e818f6c3980e6b 100644
+--- a/drivers/phy/tegra/xusb-tegra186.c
++++ b/drivers/phy/tegra/xusb-tegra186.c
+@@ -648,14 +648,15 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
+ udelay(100);
+ }
+
+- if (padctl->soc->trk_hw_mode) {
+- value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+- value |= USB2_TRK_HW_MODE;
++ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
++ if (padctl->soc->trk_update_on_idle)
+ value &= ~CYA_TRK_CODE_UPDATE_ON_IDLE;
+- padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+- } else {
++ if (padctl->soc->trk_hw_mode)
++ value |= USB2_TRK_HW_MODE;
++ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
++
++ if (!padctl->soc->trk_hw_mode)
+ clk_disable_unprepare(priv->usb2_trk_clk);
+- }
+ }
+
+ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
+@@ -782,13 +783,15 @@ static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ }
+
+ static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
+- bool status)
++ struct tegra_xusb_usb2_port *port, bool status)
+ {
+- u32 value;
++ u32 value, id_override;
++ int err = 0;
+
+ dev_dbg(padctl->dev, "%s id override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, USB2_VBUS_ID);
++ id_override = value & ID_OVERRIDE(~0);
+
+ if (status) {
+ if (value & VBUS_OVERRIDE) {
+@@ -799,15 +802,35 @@ static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
+ value = padctl_readl(padctl, USB2_VBUS_ID);
+ }
+
+- value &= ~ID_OVERRIDE(~0);
+- value |= ID_OVERRIDE_GROUNDED;
++ if (id_override != ID_OVERRIDE_GROUNDED) {
++ value &= ~ID_OVERRIDE(~0);
++ value |= ID_OVERRIDE_GROUNDED;
++ padctl_writel(padctl, value, USB2_VBUS_ID);
++
++ err = regulator_enable(port->supply);
++ if (err) {
++ dev_err(padctl->dev, "Failed to enable regulator: %d\n", err);
++ return err;
++ }
++ }
+ } else {
+- value &= ~ID_OVERRIDE(~0);
+- value |= ID_OVERRIDE_FLOATING;
++ if (id_override == ID_OVERRIDE_GROUNDED) {
++ /*
++ * The regulator is disabled only when the role transitions
++ * from USB_ROLE_HOST to USB_ROLE_NONE.
++ */
++ err = regulator_disable(port->supply);
++ if (err) {
++ dev_err(padctl->dev, "Failed to disable regulator: %d\n", err);
++ return err;
++ }
++
++ value &= ~ID_OVERRIDE(~0);
++ value |= ID_OVERRIDE_FLOATING;
++ padctl_writel(padctl, value, USB2_VBUS_ID);
++ }
+ }
+
+- padctl_writel(padctl, value, USB2_VBUS_ID);
+-
+ return 0;
+ }
+
+@@ -826,27 +849,20 @@ static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode,
+
+ if (mode == PHY_MODE_USB_OTG) {
+ if (submode == USB_ROLE_HOST) {
+- tegra186_xusb_padctl_id_override(padctl, true);
+-
+- err = regulator_enable(port->supply);
++ err = tegra186_xusb_padctl_id_override(padctl, port, true);
++ if (err)
++ goto out;
+ } else if (submode == USB_ROLE_DEVICE) {
+ tegra186_xusb_padctl_vbus_override(padctl, true);
+ } else if (submode == USB_ROLE_NONE) {
+- /*
+- * When port is peripheral only or role transitions to
+- * USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not
+- * enabled.
+- */
+- if (regulator_is_enabled(port->supply))
+- regulator_disable(port->supply);
+-
+- tegra186_xusb_padctl_id_override(padctl, false);
++ err = tegra186_xusb_padctl_id_override(padctl, port, false);
++ if (err)
++ goto out;
+ tegra186_xusb_padctl_vbus_override(padctl, false);
+ }
+ }
+-
++out:
+ mutex_unlock(&padctl->lock);
+-
+ return err;
+ }
+
+@@ -1710,7 +1726,8 @@ const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc = {
+ .num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
+ .supports_gen2 = true,
+ .poll_trk_completed = true,
+- .trk_hw_mode = true,
++ .trk_hw_mode = false,
++ .trk_update_on_idle = true,
+ .supports_lp_cfg_en = true,
+ };
+ EXPORT_SYMBOL_GPL(tegra234_xusb_padctl_soc);
+diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
+index 6e45d194c68947..d2b5f95651324a 100644
+--- a/drivers/phy/tegra/xusb.h
++++ b/drivers/phy/tegra/xusb.h
+@@ -434,6 +434,7 @@ struct tegra_xusb_padctl_soc {
+ bool need_fake_usb3_port;
+ bool poll_trk_completed;
+ bool trk_hw_mode;
++ bool trk_update_on_idle;
+ bool supports_lp_cfg_en;
+ };
+
+diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
+index 226ca4c62673ff..60cfcd741c2af3 100644
+--- a/drivers/regulator/pwm-regulator.c
++++ b/drivers/regulator/pwm-regulator.c
+@@ -157,6 +157,13 @@ static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
+
+ pwm_get_state(drvdata->pwm, &pstate);
+
++ if (!pstate.enabled) {
++ if (pstate.polarity == PWM_POLARITY_INVERSED)
++ pstate.duty_cycle = pstate.period;
++ else
++ pstate.duty_cycle = 0;
++ }
++
+ voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
+ if (voltage < min(max_uV_duty, min_uV_duty) ||
+ voltage > max(max_uV_duty, min_uV_duty))
+@@ -316,6 +323,32 @@ static int pwm_regulator_init_continuous(struct platform_device *pdev,
+ return 0;
+ }
+
++static int pwm_regulator_init_boot_on(struct platform_device *pdev,
++ struct pwm_regulator_data *drvdata,
++ const struct regulator_init_data *init_data)
++{
++ struct pwm_state pstate;
++
++ if (!init_data->constraints.boot_on || drvdata->enb_gpio)
++ return 0;
++
++ pwm_get_state(drvdata->pwm, &pstate);
++ if (pstate.enabled)
++ return 0;
++
++ /*
++ * Update the duty cycle so the output does not change
++ * when the regulator core enables the regulator (and
++ * thus the PWM channel).
++ */
++ if (pstate.polarity == PWM_POLARITY_INVERSED)
++ pstate.duty_cycle = pstate.period;
++ else
++ pstate.duty_cycle = 0;
++
++ return pwm_apply_might_sleep(drvdata->pwm, &pstate);
++}
++
+ static int pwm_regulator_probe(struct platform_device *pdev)
+ {
+ const struct regulator_init_data *init_data;
+@@ -375,6 +408,13 @@ static int pwm_regulator_probe(struct platform_device *pdev)
+ if (ret)
+ return ret;
+
++ ret = pwm_regulator_init_boot_on(pdev, drvdata, init_data);
++ if (ret) {
++ dev_err(&pdev->dev, "Failed to apply boot_on settings: %d\n",
++ ret);
++ return ret;
++ }
++
+ regulator = devm_regulator_register(&pdev->dev,
+ &drvdata->desc, &config);
+ if (IS_ERR(regulator)) {
+diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+index 0f2ffee321dd9c..3dd2ab8336aa3a 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+@@ -58,6 +58,7 @@ struct aspeed_lpc_snoop_model_data {
+ };
+
+ struct aspeed_lpc_snoop_channel {
++ bool enabled;
+ struct kfifo fifo;
+ wait_queue_head_t wq;
+ struct miscdevice miscdev;
+@@ -190,6 +191,9 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ const struct aspeed_lpc_snoop_model_data *model_data =
+ of_device_get_match_data(dev);
+
++ if (WARN_ON(lpc_snoop->chan[channel].enabled))
++ return -EBUSY;
++
+ init_waitqueue_head(&lpc_snoop->chan[channel].wq);
+ /* Create FIFO datastructure */
+ rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo,
+@@ -236,6 +240,8 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ regmap_update_bits(lpc_snoop->regmap, HICRB,
+ hicrb_en, hicrb_en);
+
++ lpc_snoop->chan[channel].enabled = true;
++
+ return 0;
+
+ err_misc_deregister:
+@@ -248,6 +254,9 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ int channel)
+ {
++ if (!lpc_snoop->chan[channel].enabled)
++ return;
++
+ switch (channel) {
+ case 0:
+ regmap_update_bits(lpc_snoop->regmap, HICR5,
+@@ -263,8 +272,10 @@ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ return;
+ }
+
+- kfifo_free(&lpc_snoop->chan[channel].fifo);
++ lpc_snoop->chan[channel].enabled = false;
++ /* Consider improving safety wrt concurrent reader(s) */
+ misc_deregister(&lpc_snoop->chan[channel].miscdev);
++ kfifo_free(&lpc_snoop->chan[channel].fifo);
+ }
+
+ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
+diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
+index 31b203ebbae0ca..b89f8067e6cdd7 100644
+--- a/drivers/soundwire/amd_manager.c
++++ b/drivers/soundwire/amd_manager.c
+@@ -205,7 +205,7 @@ static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lo
+
+ if (sts & AMD_SDW_IMM_RES_VALID) {
+ dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance);
+- writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
++ writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
+ }
+ writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD);
+ writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD);
+@@ -1135,9 +1135,11 @@ static int __maybe_unused amd_suspend(struct device *dev)
+ }
+
+ if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
++ cancel_work_sync(&amd_manager->amd_sdw_work);
+ amd_sdw_wake_enable(amd_manager, false);
+ return amd_sdw_clock_stop(amd_manager);
+ } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
++ cancel_work_sync(&amd_manager->amd_sdw_work);
+ amd_sdw_wake_enable(amd_manager, false);
+ /*
+ * As per hardware programming sequence on AMD platforms,
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 72e514cee056dd..cfb6755c0730f4 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -4011,10 +4011,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+ xfer->tx_nbits != SPI_NBITS_OCTAL)
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
+- !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
++ !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
+- !(spi->mode & SPI_TX_QUAD))
++ !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
++ return -EINVAL;
++ if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
++ !(spi->mode & SPI_TX_OCTAL))
+ return -EINVAL;
+ }
+ /* Check transfer rx_nbits */
+@@ -4027,10 +4030,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+ xfer->rx_nbits != SPI_NBITS_OCTAL)
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
+- !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
++ !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
+- !(spi->mode & SPI_RX_QUAD))
++ !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
++ return -EINVAL;
++ if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
++ !(spi->mode & SPI_RX_OCTAL))
+ return -EINVAL;
+ }
+
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index 52cb1a3bb8c786..df83383800a8ef 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -1465,7 +1465,7 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
+ return ret;
+
+ data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
+- data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
++ data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+
+ data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
+@@ -3439,7 +3439,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
+ }
+ }
+
+-static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
++static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
+ {
+ if (flags)
+ tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
+@@ -3447,7 +3447,7 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+ tb_sw_dbg(sw, "disabling wakeup\n");
+
+ if (tb_switch_is_usb4(sw))
+- return usb4_switch_set_wake(sw, flags);
++ return usb4_switch_set_wake(sw, flags, runtime);
+ return tb_lc_set_wake(sw, flags);
+ }
+
+@@ -3523,7 +3523,7 @@ int tb_switch_resume(struct tb_switch *sw, bool runtime)
+ tb_switch_check_wakes(sw);
+
+ /* Disable wakes */
+- tb_switch_set_wake(sw, 0);
++ tb_switch_set_wake(sw, 0, true);
+
+ err = tb_switch_tmu_init(sw);
+ if (err)
+@@ -3604,7 +3604,7 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
+ flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+ }
+
+- tb_switch_set_wake(sw, flags);
++ tb_switch_set_wake(sw, flags, runtime);
+
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_set_sleep(sw);
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index 920dac8a63e1df..d67a25f33fd1c5 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -1266,7 +1266,7 @@ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
+ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
+-int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
++int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime);
+ int usb4_switch_set_sleep(struct tb_switch *sw);
+ int usb4_switch_nvm_sector_size(struct tb_switch *sw);
+ int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index e445516290f912..d4b4f58e717c12 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -405,12 +405,12 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
+ * usb4_switch_set_wake() - Enabled/disable wake
+ * @sw: USB4 router
+ * @flags: Wakeup flags (%0 to disable)
++ * @runtime: Wake is being programmed during system runtime
+ *
+ * Enables/disables router to wake up from sleep.
+ */
+-int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
++int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
+ {
+- struct usb4_port *usb4;
+ struct tb_port *port;
+ u64 route = tb_route(sw);
+ u32 val;
+@@ -440,13 +440,11 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+ val |= PORT_CS_19_WOU4;
+ } else {
+ bool configured = val & PORT_CS_19_PC;
+- usb4 = port->usb4;
++ bool wakeup = runtime || device_may_wakeup(&port->usb4->dev);
+
+- if (((flags & TB_WAKE_ON_CONNECT) &&
+- device_may_wakeup(&usb4->dev)) && !configured)
++ if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured)
+ val |= PORT_CS_19_WOC;
+- if (((flags & TB_WAKE_ON_DISCONNECT) &&
+- device_may_wakeup(&usb4->dev)) && configured)
++ if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured)
+ val |= PORT_CS_19_WOD;
+ if ((flags & TB_WAKE_ON_USB4) && configured)
+ val |= PORT_CS_19_WOU4;
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index cc83b772b7ca9c..7b868ea48ad597 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -967,7 +967,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
+ __func__);
+ return 0;
+ }
+- dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE);
++ dma_sync_sg_for_device(port->dev, priv->sg_tx_p, num, DMA_TO_DEVICE);
+ priv->desc_tx = desc;
+ desc->callback = pch_dma_tx_complete;
+ desc->callback_param = priv;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 855356752380b6..bebc41308ca930 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -66,6 +66,12 @@
+ */
+ #define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT 500 /* ms */
+
++/*
++ * Give SS hubs 200ms time after wake to train downstream links before
++ * assuming no port activity and allowing hub to runtime suspend back.
++ */
++#define USB_SS_PORT_U0_WAKE_TIME 200 /* ms */
++
+ /* Protect struct usb_device->state and ->children members
+ * Note: Both are also protected by ->dev.sem, except that ->state can
+ * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
+@@ -1065,6 +1071,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ goto init2;
+ goto init3;
+ }
++
+ hub_get(hub);
+
+ /* The superspeed hub except for root hub has to use Hub Depth
+@@ -1313,6 +1320,17 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ device_unlock(&hdev->dev);
+ }
+
++ if (type == HUB_RESUME && hub_is_superspeed(hub->hdev)) {
++ /* give usb3 downstream links training time after hub resume */
++ usb_autopm_get_interface_no_resume(
++ to_usb_interface(hub->intfdev));
++
++ queue_delayed_work(system_power_efficient_wq,
++ &hub->post_resume_work,
++ msecs_to_jiffies(USB_SS_PORT_U0_WAKE_TIME));
++ return;
++ }
++
+ hub_put(hub);
+ }
+
+@@ -1331,6 +1349,14 @@ static void hub_init_func3(struct work_struct *ws)
+ hub_activate(hub, HUB_INIT3);
+ }
+
++static void hub_post_resume(struct work_struct *ws)
++{
++ struct usb_hub *hub = container_of(ws, struct usb_hub, post_resume_work.work);
++
++ usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
++ hub_put(hub);
++}
++
+ enum hub_quiescing_type {
+ HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND
+ };
+@@ -1356,6 +1382,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
+
+ /* Stop hub_wq and related activity */
+ del_timer_sync(&hub->irq_urb_retry);
++ flush_delayed_work(&hub->post_resume_work);
+ usb_kill_urb(hub->urb);
+ if (hub->has_indicators)
+ cancel_delayed_work_sync(&hub->leds);
+@@ -1914,6 +1941,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ hub->hdev = hdev;
+ INIT_DELAYED_WORK(&hub->leds, led_work);
+ INIT_DELAYED_WORK(&hub->init_work, NULL);
++ INIT_DELAYED_WORK(&hub->post_resume_work, hub_post_resume);
+ INIT_WORK(&hub->events, hub_event);
+ INIT_LIST_HEAD(&hub->onboard_hub_devs);
+ spin_lock_init(&hub->irq_urb_lock);
+@@ -5686,6 +5714,7 @@ static void port_event(struct usb_hub *hub, int port1)
+ struct usb_device *hdev = hub->hdev;
+ u16 portstatus, portchange;
+ int i = 0;
++ int err;
+
+ connect_change = test_bit(port1, hub->change_bits);
+ clear_bit(port1, hub->event_bits);
+@@ -5782,8 +5811,11 @@ static void port_event(struct usb_hub *hub, int port1)
+ } else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
+ || udev->state == USB_STATE_NOTATTACHED) {
+ dev_dbg(&port_dev->dev, "do warm reset, port only\n");
+- if (hub_port_reset(hub, port1, NULL,
+- HUB_BH_RESET_TIME, true) < 0)
++ err = hub_port_reset(hub, port1, NULL,
++ HUB_BH_RESET_TIME, true);
++ if (!udev && err == -ENOTCONN)
++ connect_change = 0;
++ else if (err < 0)
+ hub_port_disable(hub, port1, 1);
+ } else {
+ dev_dbg(&port_dev->dev, "do warm reset, full device\n");
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index 6610cf6131c673..59c426e0e0b9d4 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -69,6 +69,7 @@ struct usb_hub {
+ u8 indicator[USB_MAXCHILDREN];
+ struct delayed_work leds;
+ struct delayed_work init_work;
++ struct delayed_work post_resume_work;
+ struct work_struct events;
+ spinlock_t irq_urb_lock;
+ struct timer_list irq_urb_retry;
+diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
+index 82544374110b03..add808efb87165 100644
+--- a/drivers/usb/dwc3/dwc3-qcom.c
++++ b/drivers/usb/dwc3/dwc3-qcom.c
+@@ -854,13 +854,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ ret = reset_control_deassert(qcom->resets);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret);
+- goto reset_assert;
++ return ret;
+ }
+
+ ret = dwc3_qcom_clk_init(qcom, of_clk_get_parent_count(np));
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get clocks\n");
+- goto reset_assert;
++ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+@@ -964,8 +964,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
+ clk_disable_unprepare(qcom->clks[i]);
+ clk_put(qcom->clks[i]);
+ }
+-reset_assert:
+- reset_control_assert(qcom->resets);
+
+ return ret;
+ }
+@@ -995,8 +993,6 @@ static void dwc3_qcom_remove(struct platform_device *pdev)
+ qcom->num_clocks = 0;
+
+ dwc3_qcom_interconnect_exit(qcom);
+- reset_control_assert(qcom->resets);
+-
+ pm_runtime_allow(dev);
+ pm_runtime_disable(dev);
+ }
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 3a80600d68068f..4c89f7629d5308 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -1062,6 +1062,8 @@ static ssize_t webusb_landingPage_store(struct config_item *item, const char *pa
+ unsigned int bytes_to_strip = 0;
+ int l = len;
+
++ if (!len)
++ return len;
+ if (page[l - 1] == '\n') {
+ --l;
+ ++bytes_to_strip;
+@@ -1185,6 +1187,8 @@ static ssize_t os_desc_qw_sign_store(struct config_item *item, const char *page,
+ struct gadget_info *gi = os_desc_item_to_gadget_info(item);
+ int res, l;
+
++ if (!len)
++ return len;
+ l = min((int)len, OS_STRING_QW_SIGN_LEN >> 1);
+ if (page[l - 1] == '\n')
+ --l;
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index f175cb2c3e7bd2..cfac9792786549 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -1925,6 +1925,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
+ * gadget driver here and have everything work;
+ * that currently misbehaves.
+ */
++ usb_gadget_set_state(g, USB_STATE_NOTATTACHED);
+
+ /* Force check of devctl register for PM runtime */
+ pm_runtime_mark_last_busy(musb->controller);
+@@ -2031,6 +2032,7 @@ void musb_g_disconnect(struct musb *musb)
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb_set_state(musb, OTG_STATE_B_IDLE);
++ usb_gadget_set_state(&musb->g, USB_STATE_NOTATTACHED);
+ break;
+ case OTG_STATE_B_SRP_INIT:
+ break;
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index b583b31ea5e72e..8906c9dc348edd 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -803,6 +803,8 @@ static const struct usb_device_id id_table_combined[] = {
+ .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
++ { USB_DEVICE(FTDI_NDI_VID, FTDI_NDI_EMGUIDE_GEMINI_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+ { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
+ { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 9acb6f83732763..4cc1fae8acb970 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -204,6 +204,9 @@
+ #define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */
+ #define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
+
++#define FTDI_NDI_VID 0x23F2
++#define FTDI_NDI_EMGUIDE_GEMINI_PID 0x0003 /* NDI Emguide Gemini */
++
+ /*
+ * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
+ */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 5d669511609892..6c6387d39db82c 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1415,6 +1415,9 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = NCTRL(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) },
++ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x30), /* Telit FE910C04 (ECM) */
++ .driver_info = NCTRL(4) },
++ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) },
+@@ -2343,6 +2346,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff), /* Foxconn T99W651 RNDIS */
+ .driver_info = RSVD(5) | RSVD(6) },
++ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe167, 0xff), /* Foxconn T99W640 MBIM */
++ .driver_info = RSVD(3) },
+ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
+ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
+diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
+index 009d23cd435b54..239a6002083d70 100644
+--- a/fs/cachefiles/io.c
++++ b/fs/cachefiles/io.c
+@@ -346,8 +346,6 @@ int __cachefiles_write(struct cachefiles_object *object,
+ default:
+ ki->was_async = false;
+ cachefiles_write_complete(&ki->iocb, ret);
+- if (ret > 0)
+- ret = 0;
+ break;
+ }
+
+diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
+index 3389a373faf680..cfa8f23fdfb655 100644
+--- a/fs/cachefiles/ondemand.c
++++ b/fs/cachefiles/ondemand.c
+@@ -84,10 +84,8 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
+
+ trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
+ ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
+- if (!ret) {
+- ret = len;
++ if (ret > 0)
+ kiocb->ki_pos += ret;
+- }
+
+ out:
+ fput(file);
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index 54075fe3de9b1f..d989bdcfaa26c7 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -1486,9 +1486,16 @@ static int isofs_read_inode(struct inode *inode, int relocated)
+ inode->i_op = &page_symlink_inode_operations;
+ inode_nohighmem(inode);
+ inode->i_data.a_ops = &isofs_symlink_aops;
+- } else
++ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
++ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+ /* XXX - parse_rock_ridge_inode() had already set i_rdev. */
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
++ } else {
++ printk(KERN_DEBUG "ISOFS: Invalid file type 0%04o for inode %lu.\n",
++ inode->i_mode, inode->i_ino);
++ ret = -EIO;
++ goto fail;
++ }
+
+ ret = 0;
+ out:
+diff --git a/fs/namespace.c b/fs/namespace.c
+index cebcb9fa2acc07..6a9c53c800c4e4 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2110,6 +2110,11 @@ struct vfsmount *clone_private_mount(const struct path *path)
+ if (!check_mnt(old_mnt))
+ goto invalid;
+
++ if (!ns_capable(old_mnt->mnt_ns->user_ns, CAP_SYS_ADMIN)) {
++ up_read(&namespace_sem);
++ return ERR_PTR(-EPERM);
++ }
++
+ if (has_locked_children(old_mnt, path->dentry))
+ goto invalid;
+
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index d883ed75022c4a..99a8c6fbd41a65 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -5042,7 +5042,8 @@ void cifs_oplock_break(struct work_struct *work)
+ struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+ oplock_break);
+ struct inode *inode = d_inode(cfile->dentry);
+- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++ struct super_block *sb = inode->i_sb;
++ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+ struct cifs_tcon *tcon;
+ struct TCP_Server_Info *server;
+@@ -5052,6 +5053,12 @@ void cifs_oplock_break(struct work_struct *work)
+ __u64 persistent_fid, volatile_fid;
+ __u16 net_fid;
+
++ /*
++ * Hold a reference to the superblock to prevent it and its inodes from
++ * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
++ * may release the last reference to the sb and trigger inode eviction.
++ */
++ cifs_sb_active(sb);
+ wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ TASK_UNINTERRUPTIBLE);
+
+@@ -5124,6 +5131,7 @@ void cifs_oplock_break(struct work_struct *work)
+ cifs_put_tlink(tlink);
+ out:
+ cifs_done_oplock_break(cinode);
++ cifs_sb_deactive(sb);
+ }
+
+ /*
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 4e3eacbec96d14..2385e570e3311a 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4271,6 +4271,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ u8 key[SMB3_ENC_DEC_KEY_SIZE];
+ struct aead_request *req;
+ u8 *iv;
++ DECLARE_CRYPTO_WAIT(wait);
+ unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
+ void *creq;
+ size_t sensitive_size;
+@@ -4321,7 +4322,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
+ aead_request_set_crypt(req, sg, sg, crypt_len, iv);
+ aead_request_set_ad(req, assoc_data_len);
+
+- rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
++ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++ crypto_req_done, &wait);
++
++ rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
++ : crypto_aead_decrypt(req), &wait);
+
+ if (!rc && enc)
+ memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 802ea3080d0b36..2fb3151ea7c9e9 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -2543,7 +2543,7 @@ struct cfg80211_scan_request {
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params;
+
+ /* keep last */
+- struct ieee80211_channel *channels[] __counted_by(n_channels);
++ struct ieee80211_channel *channels[];
+ };
+
+ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
+diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
+index 4085765c337054..a2c987289401ee 100644
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -302,8 +302,19 @@ static inline bool nf_ct_is_expired(const struct nf_conn *ct)
+ /* use after obtaining a reference count */
+ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
+ {
+- return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
+- !nf_ct_is_dying(ct);
++ if (!nf_ct_is_confirmed(ct))
++ return false;
++
++ /* load ct->timeout after is_confirmed() test.
++ * Pairs with __nf_conntrack_confirm() which:
++ * 1. Increases ct->timeout value
++ * 2. Inserts ct into rcu hlist
++ * 3. Sets the confirmed bit
++ * 4. Unlocks the hlist lock
++ */
++ smp_acquire__after_ctrl_dep();
++
++ return nf_ct_is_expired(ct) && !nf_ct_is_dying(ct);
+ }
+
+ #define NF_CT_DAY (86400 * HZ)
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index e7c7b638943629..743f8f1f42a742 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -278,12 +278,15 @@
+ EM(rxrpc_call_put_userid, "PUT user-id ") \
+ EM(rxrpc_call_see_accept, "SEE accept ") \
+ EM(rxrpc_call_see_activate_client, "SEE act-clnt") \
++ EM(rxrpc_call_see_already_released, "SEE alrdy-rl") \
+ EM(rxrpc_call_see_connect_failed, "SEE con-fail") \
+ EM(rxrpc_call_see_connected, "SEE connect ") \
+ EM(rxrpc_call_see_conn_abort, "SEE conn-abt") \
++ EM(rxrpc_call_see_discard, "SEE discard ") \
+ EM(rxrpc_call_see_disconnected, "SEE disconn ") \
+ EM(rxrpc_call_see_distribute_error, "SEE dist-err") \
+ EM(rxrpc_call_see_input, "SEE input ") \
++ EM(rxrpc_call_see_recvmsg, "SEE recvmsg ") \
+ EM(rxrpc_call_see_release, "SEE release ") \
+ EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
+ EM(rxrpc_call_see_waiting_call, "SEE q-conn ") \
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 4948a67bbac480..e455f051e62ef7 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -1537,9 +1537,11 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ io = &__io;
+ }
+
+- if (unlikely(req->flags & REQ_F_FAIL)) {
+- ret = -ECONNRESET;
+- goto out;
++ if (connect->in_progress) {
++ struct poll_table_struct pt = { ._key = EPOLLERR };
++
++ if (vfs_poll(req->file, &pt) & EPOLLERR)
++ goto get_sock_err;
+ }
+
+ file_flags = force_nonblock ? O_NONBLOCK : 0;
+@@ -1571,8 +1573,10 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ * which means the previous result is good. For both of these,
+ * grab the sock_error() and use that for the completion.
+ */
+- if (ret == -EBADFD || ret == -EISCONN)
++ if (ret == -EBADFD || ret == -EISCONN) {
++get_sock_err:
+ ret = sock_error(sock_from_file(req->file)->sk);
++ }
+ }
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index 2390bf5f1710b4..65935ec8de89c4 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -308,8 +308,6 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
+ return IOU_POLL_REISSUE;
+ }
+ }
+- if (unlikely(req->cqe.res & EPOLLERR))
+- req_set_fail(req);
+ if (req->apoll_events & EPOLLONESHOT)
+ return IOU_POLL_DONE;
+
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 8f0b62b04deebe..4b20a72ab8cffe 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -885,6 +885,13 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+ if (fmt[i] == 'p') {
+ sizeof_cur_arg = sizeof(long);
+
++ if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
++ ispunct(fmt[i + 1])) {
++ if (tmp_buf)
++ cur_arg = raw_args[num_spec];
++ goto nocopy_fmt;
++ }
++
+ if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
+ fmt[i + 2] == 's') {
+ fmt_ptype = fmt[i + 1];
+@@ -892,11 +899,9 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+ goto fmt_str;
+ }
+
+- if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
+- ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
++ if (fmt[i + 1] == 'K' ||
+ fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
+ fmt[i + 1] == 'S') {
+- /* just kernel pointers */
+ if (tmp_buf)
+ cur_arg = raw_args[num_spec];
+ i++;
+diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
+index a3e13e6d5ee40d..bee2f9ea5e4aec 100644
+--- a/kernel/cgroup/legacy_freezer.c
++++ b/kernel/cgroup/legacy_freezer.c
+@@ -66,15 +66,9 @@ static struct freezer *parent_freezer(struct freezer *freezer)
+ bool cgroup_freezing(struct task_struct *task)
+ {
+ bool ret;
+- unsigned int state;
+
+ rcu_read_lock();
+- /* Check if the cgroup is still FREEZING, but not FROZEN. The extra
+- * !FROZEN check is required, because the FREEZING bit is not cleared
+- * when the state FROZEN is reached.
+- */
+- state = task_freezer(task)->state;
+- ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
++ ret = task_freezer(task)->state & CGROUP_FREEZING;
+ rcu_read_unlock();
+
+ return ret;
+diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
+index 52c8f8226b0d35..35537ea03fff0d 100644
+--- a/kernel/sched/loadavg.c
++++ b/kernel/sched/loadavg.c
+@@ -80,7 +80,7 @@ long calc_load_fold_active(struct rq *this_rq, long adjust)
+ long nr_active, delta = 0;
+
+ nr_active = this_rq->nr_running - adjust;
+- nr_active += (int)this_rq->nr_uninterruptible;
++ nr_active += (long)this_rq->nr_uninterruptible;
+
+ if (nr_active != this_rq->calc_load_active) {
+ delta = nr_active - this_rq->calc_load_active;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 60dc51f43dd91f..f7cb505ab337a5 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1011,7 +1011,7 @@ struct rq {
+ * one CPU and if it got migrated afterwards it may decrease
+ * it on another CPU. Always updated under the runqueue lock:
+ */
+- unsigned int nr_uninterruptible;
++ unsigned long nr_uninterruptible;
+
+ struct task_struct __rcu *curr;
+ struct task_struct *idle;
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 5f74e9f9c8a734..7b3c55bb0235a7 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -2845,7 +2845,10 @@ __register_event(struct trace_event_call *call, struct module *mod)
+ if (ret < 0)
+ return ret;
+
++ down_write(&trace_event_sem);
+ list_add(&call->list, &ftrace_events);
++ up_write(&trace_event_sem);
++
+ if (call->flags & TRACE_EVENT_FL_DYNAMIC)
+ atomic_set(&call->refcnt, 0);
+ else
+@@ -3437,6 +3440,8 @@ __trace_add_event_dirs(struct trace_array *tr)
+ struct trace_event_call *call;
+ int ret;
+
++ lockdep_assert_held(&trace_event_sem);
++
+ list_for_each_entry(call, &ftrace_events, list) {
+ ret = __trace_add_new_event(call, tr);
+ if (ret < 0)
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index 5bd781359d38b0..4d12d02965a4b7 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -665,8 +665,8 @@ __timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, u
+
+ entry = ring_buffer_event_data(event);
+
+- memcpy(&entry->caller, fstack->calls, size);
+ entry->size = fstack->nr_entries;
++ memcpy(&entry->caller, fstack->calls, size);
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit_nostack(buffer, event);
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index 694f32d843d90c..187b1fc403c13d 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -656,7 +656,7 @@ static int parse_btf_arg(char *varname,
+ ret = query_btf_context(ctx);
+ if (ret < 0 || ctx->nr_params == 0) {
+ trace_probe_log_err(ctx->offset, NO_BTF_ENTRY);
+- return PTR_ERR(params);
++ return -ENOENT;
+ }
+ }
+ params = ctx->params;
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index b477ba37a6991f..422f726346ea51 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -358,6 +358,35 @@ static int __vlan_device_event(struct net_device *dev, unsigned long event)
+ return err;
+ }
+
++static void vlan_vid0_add(struct net_device *dev)
++{
++ struct vlan_info *vlan_info;
++ int err;
++
++ if (!(dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++ return;
++
++ pr_info("adding VLAN 0 to HW filter on device %s\n", dev->name);
++
++ err = vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
++ if (err)
++ return;
++
++ vlan_info = rtnl_dereference(dev->vlan_info);
++ vlan_info->auto_vid0 = true;
++}
++
++static void vlan_vid0_del(struct net_device *dev)
++{
++ struct vlan_info *vlan_info = rtnl_dereference(dev->vlan_info);
++
++ if (!vlan_info || !vlan_info->auto_vid0)
++ return;
++
++ vlan_info->auto_vid0 = false;
++ vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
++}
++
+ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ void *ptr)
+ {
+@@ -379,15 +408,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ return notifier_from_errno(err);
+ }
+
+- if ((event == NETDEV_UP) &&
+- (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+- pr_info("adding VLAN 0 to HW filter on device %s\n",
+- dev->name);
+- vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
+- }
+- if (event == NETDEV_DOWN &&
+- (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+- vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
++ if (event == NETDEV_UP)
++ vlan_vid0_add(dev);
++ else if (event == NETDEV_DOWN)
++ vlan_vid0_del(dev);
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info)
+diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
+index 5eaf38875554b0..c7ffe591d59366 100644
+--- a/net/8021q/vlan.h
++++ b/net/8021q/vlan.h
+@@ -33,6 +33,7 @@ struct vlan_info {
+ struct vlan_group grp;
+ struct list_head vid_list;
+ unsigned int nr_vids;
++ bool auto_vid0;
+ struct rcu_head rcu;
+ };
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index e1df1c62017d91..01aca077071174 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -6796,8 +6796,8 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+ return 0;
+ }
+
+- /* No privacy so use a public address. */
+- *own_addr_type = ADDR_LE_DEV_PUBLIC;
++ /* No privacy, use the current address */
++ hci_copy_identity_address(hdev, rand_addr, own_addr_type);
+
+ return 0;
+ }
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index f9995a405e35c3..dabc07700197c5 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -3485,12 +3485,28 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
+ /* Configure output options and let the other side know
+ * which ones we don't like. */
+
+- /* If MTU is not provided in configure request, use the most recently
+- * explicitly or implicitly accepted value for the other direction,
+- * or the default value.
++ /* If MTU is not provided in configure request, try adjusting it
++ * to the current output MTU if it has been set
++ *
++ * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
++ *
++ * Each configuration parameter value (if any is present) in an
++ * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
++ * configuration parameter value that has been sent (or, in case
++ * of default values, implied) in the corresponding
++ * L2CAP_CONFIGURATION_REQ packet.
+ */
+- if (mtu == 0)
+- mtu = chan->imtu ? chan->imtu : L2CAP_DEFAULT_MTU;
++ if (!mtu) {
++ /* Only adjust for ERTM channels as for older modes the
++ * remote stack may not be able to detect that the
++ * adjustment causing it to silently drop packets.
++ */
++ if (chan->mode == L2CAP_MODE_ERTM &&
++ chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
++ mtu = chan->omtu;
++ else
++ mtu = L2CAP_DEFAULT_MTU;
++ }
+
+ if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ result = L2CAP_CONF_UNACCEPT;
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index aaaaf9733b5894..9a906977c8723c 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1687,6 +1687,9 @@ static void l2cap_sock_resume_cb(struct l2cap_chan *chan)
+ {
+ struct sock *sk = chan->data;
+
++ if (!sk)
++ return;
++
+ if (test_and_clear_bit(FLAG_PENDING_SECURITY, &chan->flags)) {
+ sk->sk_state = BT_CONNECTED;
+ chan->state = BT_CONNECTED;
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 56f7f041c9a604..4c00bc50de811e 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -1380,7 +1380,7 @@ static void smp_timeout(struct work_struct *work)
+
+ bt_dev_dbg(conn->hcon->hdev, "conn %p", conn);
+
+- hci_disconnect(conn->hcon, HCI_ERROR_REMOTE_USER_TERM);
++ hci_disconnect(conn->hcon, HCI_ERROR_AUTH_FAILURE);
+ }
+
+ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
+@@ -2978,8 +2978,25 @@ static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
+ if (code > SMP_CMD_MAX)
+ goto drop;
+
+- if (smp && !test_and_clear_bit(code, &smp->allow_cmd))
++ if (smp && !test_and_clear_bit(code, &smp->allow_cmd)) {
++ /* If there is a context and the command is not allowed consider
++ * it a failure so the session is cleanup properly.
++ */
++ switch (code) {
++ case SMP_CMD_IDENT_INFO:
++ case SMP_CMD_IDENT_ADDR_INFO:
++ case SMP_CMD_SIGN_INFO:
++ /* 3.6.1. Key distribution and generation
++ *
++ * A device may reject a distributed key by sending the
++ * Pairing Failed command with the reason set to
++ * "Key Rejected".
++ */
++ smp_failure(conn, SMP_KEY_REJECTED);
++ break;
++ }
+ goto drop;
++ }
+
+ /* If we don't have a context the only allowed commands are
+ * pairing request and security request.
+diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
+index 87a59ec2c9f02b..c5da53dfab04f2 100644
+--- a/net/bluetooth/smp.h
++++ b/net/bluetooth/smp.h
+@@ -138,6 +138,7 @@ struct smp_cmd_keypress_notify {
+ #define SMP_NUMERIC_COMP_FAILED 0x0c
+ #define SMP_BREDR_PAIRING_IN_PROGRESS 0x0d
+ #define SMP_CROSS_TRANSP_NOT_ALLOWED 0x0e
++#define SMP_KEY_REJECTED 0x0f
+
+ #define SMP_MIN_ENC_KEY_SIZE 7
+ #define SMP_MAX_ENC_KEY_SIZE 16
+diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
+index 7b41ee8740cbba..f10bd6a233dcf9 100644
+--- a/net/bridge/br_switchdev.c
++++ b/net/bridge/br_switchdev.c
+@@ -17,6 +17,9 @@ static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
+ if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
+ return false;
+
++ if (br_multicast_igmp_type(skb))
++ return false;
++
+ return (p->flags & BR_TX_FWD_OFFLOAD) &&
+ (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
+ }
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 74f7f3e8d96083..f6188bd9f55ba6 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -7383,7 +7383,8 @@ int __init addrconf_init(void)
+ if (err < 0)
+ goto out_addrlabel;
+
+- addrconf_wq = create_workqueue("ipv6_addrconf");
++ /* All works using addrconf_wq need to lock rtnl. */
++ addrconf_wq = create_singlethread_workqueue("ipv6_addrconf");
+ if (!addrconf_wq) {
+ err = -ENOMEM;
+ goto out_nowq;
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 9bb246c09fcee8..e153dac47a530d 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -803,8 +803,8 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
+ } else {
+ im->mca_crcount = idev->mc_qrv;
+ }
+- in6_dev_put(pmc->idev);
+ ip6_mc_clear_src(pmc);
++ in6_dev_put(pmc->idev);
+ kfree_rcu(pmc, rcu);
+ }
+ }
+diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
+index 28fc7fae579723..523aa8c9b382fe 100644
+--- a/net/ipv6/rpl_iptunnel.c
++++ b/net/ipv6/rpl_iptunnel.c
+@@ -129,13 +129,13 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ struct dst_entry *cache_dst)
+ {
+ struct ipv6_rpl_sr_hdr *isrh, *csrh;
+- const struct ipv6hdr *oldhdr;
++ struct ipv6hdr oldhdr;
+ struct ipv6hdr *hdr;
+ unsigned char *buf;
+ size_t hdrlen;
+ int err;
+
+- oldhdr = ipv6_hdr(skb);
++ memcpy(&oldhdr, ipv6_hdr(skb), sizeof(oldhdr));
+
+ buf = kcalloc(struct_size(srh, segments.addr, srh->segments_left), 2, GFP_ATOMIC);
+ if (!buf)
+@@ -147,7 +147,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ memcpy(isrh, srh, sizeof(*isrh));
+ memcpy(isrh->rpl_segaddr, &srh->rpl_segaddr[1],
+ (srh->segments_left - 1) * 16);
+- isrh->rpl_segaddr[srh->segments_left - 1] = oldhdr->daddr;
++ isrh->rpl_segaddr[srh->segments_left - 1] = oldhdr.daddr;
+
+ ipv6_rpl_srh_compress(csrh, isrh, &srh->rpl_segaddr[0],
+ isrh->segments_left - 1);
+@@ -169,7 +169,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
+ skb_mac_header_rebuild(skb);
+
+ hdr = ipv6_hdr(skb);
+- memmove(hdr, oldhdr, sizeof(*hdr));
++ memmove(hdr, &oldhdr, sizeof(*hdr));
+ isrh = (void *)hdr + sizeof(*hdr);
+ memcpy(isrh, csrh, hdrlen);
+
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 34ad5975fbf3ba..0081c1a0d5e563 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1075,6 +1075,12 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
+
+ hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
+ &nf_conntrack_hash[repl_idx]);
++ /* confirmed bit must be set after hlist add, not before:
++ * loser_ct can still be visible to other cpu due to
++ * SLAB_TYPESAFE_BY_RCU.
++ */
++ smp_mb__before_atomic();
++ set_bit(IPS_CONFIRMED_BIT, &loser_ct->status);
+
+ NF_CT_STAT_INC(net, clash_resolve);
+ return NF_ACCEPT;
+@@ -1211,8 +1217,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
+ * user context, else we insert an already 'dead' hash, blocking
+ * further use of that particular connection -JM.
+ */
+- ct->status |= IPS_CONFIRMED;
+-
+ if (unlikely(nf_ct_is_dying(ct))) {
+ NF_CT_STAT_INC(net, insert_failed);
+ goto dying;
+@@ -1244,7 +1248,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
+ }
+ }
+
+- /* Timer relative to confirmation time, not original
++ /* Timeout is relative to confirmation time, not original
+ setting time, otherwise we'd get timer wrap in
+ weird delay cases. */
+ ct->timeout += nfct_time_stamp;
+@@ -1252,11 +1256,21 @@ __nf_conntrack_confirm(struct sk_buff *skb)
+ __nf_conntrack_insert_prepare(ct);
+
+ /* Since the lookup is lockless, hash insertion must be done after
+- * starting the timer and setting the CONFIRMED bit. The RCU barriers
+- * guarantee that no other CPU can find the conntrack before the above
+- * stores are visible.
++ * setting ct->timeout. The RCU barriers guarantee that no other CPU
++ * can find the conntrack before the above stores are visible.
+ */
+ __nf_conntrack_hash_insert(ct, hash, reply_hash);
++
++ /* IPS_CONFIRMED unset means 'ct not (yet) in hash', conntrack lookups
++ * skip entries that lack this bit. This happens when a CPU is looking
++ * at a stale entry that is being recycled due to SLAB_TYPESAFE_BY_RCU
++ * or when another CPU encounters this entry right after the insertion
++ * but before the set-confirm-bit below. This bit must not be set until
++ * after __nf_conntrack_hash_insert().
++ */
++ smp_mb__before_atomic();
++ set_bit(IPS_CONFIRMED_BIT, &ct->status);
++
+ nf_conntrack_double_unlock(hash, reply_hash);
+ local_bh_enable();
+
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 4abf7e9ac4f2f7..9cac7cb78c0f54 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2791,7 +2791,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ int len_sum = 0;
+ int status = TP_STATUS_AVAILABLE;
+ int hlen, tlen, copylen = 0;
+- long timeo = 0;
++ long timeo;
+
+ mutex_lock(&po->pg_vec_lock);
+
+@@ -2845,22 +2845,28 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
+ size_max = dev->mtu + reserve + VLAN_HLEN;
+
++ timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
+ reinit_completion(&po->skb_completion);
+
+ do {
+ ph = packet_current_frame(po, &po->tx_ring,
+ TP_STATUS_SEND_REQUEST);
+ if (unlikely(ph == NULL)) {
+- if (need_wait && skb) {
+- timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
++ /* Note: packet_read_pending() might be slow if we
++ * have to call it as it's per_cpu variable, but in
++ * fast-path we don't have to call it, only when ph
++ * is NULL, we need to check the pending_refcnt.
++ */
++ if (need_wait && packet_read_pending(&po->tx_ring)) {
+ timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
+ if (timeo <= 0) {
+ err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
+ goto out_put;
+ }
+- }
+- /* check for additional frames */
+- continue;
++ /* check for additional frames */
++ continue;
++ } else
++ break;
+ }
+
+ skb = NULL;
+@@ -2949,14 +2955,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ }
+ packet_increment_head(&po->tx_ring);
+ len_sum += tp_len;
+- } while (likely((ph != NULL) ||
+- /* Note: packet_read_pending() might be slow if we have
+- * to call it as it's per_cpu variable, but in fast-path
+- * we already short-circuit the loop with the first
+- * condition, and luckily don't have to go that path
+- * anyway.
+- */
+- (need_wait && packet_read_pending(&po->tx_ring))));
++ } while (1);
+
+ err = len_sum;
+ goto out_put;
+diff --git a/net/phonet/pep.c b/net/phonet/pep.c
+index 3dd5f52bc1b58e..e14b2b2d3b5cd7 100644
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -826,6 +826,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
+ }
+
+ /* Check for duplicate pipe handle */
++ pn_skb_get_dst_sockaddr(skb, &dst);
+ newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
+ if (unlikely(newsk)) {
+ __sock_put(newsk);
+@@ -850,7 +851,6 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
+ newsk->sk_destruct = pipe_destruct;
+
+ newpn = pep_sk(newsk);
+- pn_skb_get_dst_sockaddr(skb, &dst);
+ pn_skb_get_src_sockaddr(skb, &src);
+ newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
+ newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index 773bdb2e37dafd..37ac8a66567866 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -219,6 +219,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
+ tail = b->call_backlog_tail;
+ while (CIRC_CNT(head, tail, size) > 0) {
+ struct rxrpc_call *call = b->call_backlog[tail];
++ rxrpc_see_call(call, rxrpc_call_see_discard);
+ rcu_assign_pointer(call->socket, rx);
+ if (rx->discard_new_call) {
+ _debug("discard %lx", call->user_call_ID);
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index cad6a7d18e0405..4bbb27a48bd8ae 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -589,6 +589,9 @@ void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
+ __be32 code;
+ int ret, ioc;
+
++ if (sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
++ return; /* Never abort an abort. */
++
+ rxrpc_see_skb(skb, rxrpc_skb_see_reject);
+
+ iov[0].iov_base = &whdr;
+diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
+index a482f88c5fc5b6..e24a44bae9a32c 100644
+--- a/net/rxrpc/recvmsg.c
++++ b/net/rxrpc/recvmsg.c
+@@ -351,6 +351,16 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ goto try_again;
+ }
+
++ rxrpc_see_call(call, rxrpc_call_see_recvmsg);
++ if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
++ rxrpc_see_call(call, rxrpc_call_see_already_released);
++ list_del_init(&call->recvmsg_link);
++ spin_unlock_irq(&rx->recvmsg_lock);
++ release_sock(&rx->sk);
++ trace_rxrpc_recvmsg(call->debug_id, rxrpc_recvmsg_unqueue, 0);
++ rxrpc_put_call(call, rxrpc_call_put_recvmsg);
++ goto try_again;
++ }
+ if (!(flags & MSG_PEEK))
+ list_del_init(&call->recvmsg_link);
+ else
+@@ -374,8 +384,13 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+
+ release_sock(&rx->sk);
+
+- if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
+- BUG();
++ if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
++ rxrpc_see_call(call, rxrpc_call_see_already_released);
++ mutex_unlock(&call->user_mutex);
++ if (!(flags & MSG_PEEK))
++ rxrpc_put_call(call, rxrpc_call_put_recvmsg);
++ goto try_again;
++ }
+
+ if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
+ if (flags & MSG_CMSG_COMPAT) {
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 716da8c6b3def3..113b305b0d154c 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -821,7 +821,9 @@ static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
+ u32 *pid;
+ } stk[TC_HTB_MAXDEPTH], *sp = stk;
+
+- BUG_ON(!hprio->row.rb_node);
++ if (unlikely(!hprio->row.rb_node))
++ return NULL;
++
+ sp->root = hprio->row.rb_node;
+ sp->pptr = &hprio->ptr;
+ sp->pid = &hprio->last_ptr_id;
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 5e557b960bde33..a2b321fec13c1d 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -412,7 +412,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ bool existing = false;
+ struct nlattr *tb[TCA_QFQ_MAX + 1];
+ struct qfq_aggregate *new_agg = NULL;
+- u32 weight, lmax, inv_w;
++ u32 weight, lmax, inv_w, old_weight, old_lmax;
+ int err;
+ int delta_w;
+
+@@ -446,12 +446,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ inv_w = ONE_FP / weight;
+ weight = ONE_FP / inv_w;
+
+- if (cl != NULL &&
+- lmax == cl->agg->lmax &&
+- weight == cl->agg->class_weight)
+- return 0; /* nothing to change */
++ if (cl != NULL) {
++ sch_tree_lock(sch);
++ old_weight = cl->agg->class_weight;
++ old_lmax = cl->agg->lmax;
++ sch_tree_unlock(sch);
++ if (lmax == old_lmax && weight == old_weight)
++ return 0; /* nothing to change */
++ }
+
+- delta_w = weight - (cl ? cl->agg->class_weight : 0);
++ delta_w = weight - (cl ? old_weight : 0);
+
+ if (q->wsum + delta_w > QFQ_MAX_WSUM) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+@@ -558,10 +562,10 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
+
+ qdisc_purge_queue(cl->qdisc);
+ qdisc_class_hash_remove(&q->clhash, &cl->common);
++ qfq_destroy_class(sch, cl);
+
+ sch_tree_unlock(sch);
+
+- qfq_destroy_class(sch, cl);
+ return 0;
+ }
+
+@@ -628,6 +632,7 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
+ {
+ struct qfq_class *cl = (struct qfq_class *)arg;
+ struct nlattr *nest;
++ u32 class_weight, lmax;
+
+ tcm->tcm_parent = TC_H_ROOT;
+ tcm->tcm_handle = cl->common.classid;
+@@ -636,8 +641,13 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
+ nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+ if (nest == NULL)
+ goto nla_put_failure;
+- if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
+- nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
++
++ sch_tree_lock(sch);
++ class_weight = cl->agg->class_weight;
++ lmax = cl->agg->lmax;
++ sch_tree_unlock(sch);
++ if (nla_put_u32(skb, TCA_QFQ_WEIGHT, class_weight) ||
++ nla_put_u32(skb, TCA_QFQ_LMAX, lmax))
+ goto nla_put_failure;
+ return nla_nest_end(skb, nest);
+
+@@ -654,8 +664,10 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
+
+ memset(&xstats, 0, sizeof(xstats));
+
++ sch_tree_lock(sch);
+ xstats.weight = cl->agg->class_weight;
+ xstats.lmax = cl->agg->lmax;
++ sch_tree_unlock(sch);
+
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
+ gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index 1852fac3e72b76..bea60b0160d1fc 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -511,9 +511,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
+ if (inq < strp->stm.full_len)
+ return tls_strp_read_copy(strp, true);
+
++ tls_strp_load_anchor_with_queue(strp, inq);
+ if (!strp->stm.full_len) {
+- tls_strp_load_anchor_with_queue(strp, inq);
+-
+ sz = tls_rx_msg_size(strp, strp->anchor);
+ if (sz < 0) {
+ tls_strp_abort_strp(strp, sz);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1c2059e37fdab6..614784c0ba3188 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10303,6 +10303,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
++ SND_PCI_QUIRK(0x1043, 0x1a8e, "ASUS G712LWS", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
+ SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index aa15f56ca139d2..886f5c29939b8e 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -762,13 +762,15 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
+ * anymore. Add software reset to fix this issue.
+ * This is a hardware bug, and will be fix in the
+ * next sai version.
++ *
++ * In consumer mode, this can happen even after a
++ * single open/close, especially if both tx and rx
++ * are running concurrently.
+ */
+- if (!sai->is_consumer_mode) {
+- /* Software Reset */
+- regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR);
+- /* Clear SR bit to finish the reset */
+- regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0);
+- }
++ /* Software Reset */
++ regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), FSL_SAI_CSR_SR);
++ /* Clear SR bit to finish the reset */
++ regmap_write(sai->regmap, FSL_SAI_xCSR(tx, ofs), 0);
+ }
+
+ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
+diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
+index d3d94596ab79cf..dd926c00f41469 100644
+--- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
++++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
+@@ -147,31 +147,6 @@ static void test_dummy_sleepable(void)
+ dummy_st_ops_success__destroy(skel);
+ }
+
+-/* dummy_st_ops.test_sleepable() parameter is not marked as nullable,
+- * thus bpf_prog_test_run_opts() below should be rejected as it tries
+- * to pass NULL for this parameter.
+- */
+-static void test_dummy_sleepable_reject_null(void)
+-{
+- __u64 args[1] = {0};
+- LIBBPF_OPTS(bpf_test_run_opts, attr,
+- .ctx_in = args,
+- .ctx_size_in = sizeof(args),
+- );
+- struct dummy_st_ops_success *skel;
+- int fd, err;
+-
+- skel = dummy_st_ops_success__open_and_load();
+- if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+- return;
+-
+- fd = bpf_program__fd(skel->progs.test_sleepable);
+- err = bpf_prog_test_run_opts(fd, &attr);
+- ASSERT_EQ(err, -EINVAL, "test_run");
+-
+- dummy_st_ops_success__destroy(skel);
+-}
+-
+ void test_dummy_st_ops(void)
+ {
+ if (test__start_subtest("dummy_st_ops_attach"))
+@@ -184,8 +159,6 @@ void test_dummy_st_ops(void)
+ test_dummy_multiple_args();
+ if (test__start_subtest("dummy_sleepable"))
+ test_dummy_sleepable();
+- if (test__start_subtest("dummy_sleepable_reject_null"))
+- test_dummy_sleepable_reject_null();
+
+ RUN_TESTS(dummy_st_ops_fail);
+ }
+diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
+index ec0c595d47af84..151e3a3ea27fb8 100644
+--- a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
++++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c
+@@ -11,17 +11,8 @@ int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
+ {
+ int ret;
+
+- /* Check that 'state' nullable status is detected correctly.
+- * If 'state' argument would be assumed non-null by verifier
+- * the code below would be deleted as dead (which it shouldn't).
+- * Hide it from the compiler behind 'asm' block to avoid
+- * unnecessary optimizations.
+- */
+- asm volatile (
+- "if %[state] != 0 goto +2;"
+- "r0 = 0xf2f3f4f5;"
+- "exit;"
+- ::[state]"p"(state));
++ if (!state)
++ return 0xf2f3f4f5;
+
+ ret = state->val;
+ state->val = 0x5a;
+diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
+index 53341c8135e889..b65cf09f9914e9 100755
+--- a/tools/testing/selftests/net/udpgro.sh
++++ b/tools/testing/selftests/net/udpgro.sh
+@@ -50,7 +50,7 @@ run_one() {
+
+ cfg_veth
+
+- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} &
++ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${rx_args} &
+ local PID1=$!
+
+ wait_local_port_listen ${PEER_NS} 8000 udp
+@@ -97,7 +97,7 @@ run_one_nat() {
+ # will land on the 'plain' one
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
+ local PID1=$!
+- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} &
++ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${family} -b ${addr2%/*} ${rx_args} &
+ local PID2=$!
+
+ wait_local_port_listen "${PEER_NS}" 8000 udp
+@@ -119,9 +119,9 @@ run_one_2sock() {
+
+ cfg_veth
+
+- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
++ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${rx_args} -p 12345 &
+ local PID1=$!
+- ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} &
++ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 100 ${rx_args} &
+ local PID2=$!
+
+ wait_local_port_listen "${PEER_NS}" 12345 udp