From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sat, 21 Oct 2017 20:15:13 +0000 (UTC)
Message-ID: <1508616901.8f54decd9aeae66b06a1e63ab9fd02605b94f345.mpagano@gentoo>
commit: 8f54decd9aeae66b06a1e63ab9fd02605b94f345
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 21 20:15:01 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Oct 21 20:15:01 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8f54decd
Linux patch 4.9.58
0000_README | 4 +
1057_linux-4.9.58.patch | 1491 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1495 insertions(+)
diff --git a/0000_README b/0000_README
index 2263df0..76bcc9e 100644
--- a/0000_README
+++ b/0000_README
@@ -271,6 +271,10 @@ Patch: 1056_linux-4.9.57.patch
From: http://www.kernel.org
Desc: Linux 4.9.57
+Patch: 1057_linux-4.9.58.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.58
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1057_linux-4.9.58.patch b/1057_linux-4.9.58.patch
new file mode 100644
index 0000000..6bda02c
--- /dev/null
+++ b/1057_linux-4.9.58.patch
@@ -0,0 +1,1491 @@
+diff --git a/Makefile b/Makefile
+index d5a2ab9b3291..32686667bb7e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 57
++SUBLEVEL = 58
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
+index ddd1c918103b..c5d351786416 100644
+--- a/arch/mips/include/asm/irq.h
++++ b/arch/mips/include/asm/irq.h
+@@ -18,7 +18,7 @@
+ #include <irq.h>
+
+ #define IRQ_STACK_SIZE THREAD_SIZE
+-#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
++#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
+
+ extern void *irq_stack[NR_CPUS];
+
+diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
+index 4d0a4e5017c2..8e6dd17fe603 100644
+--- a/arch/powerpc/perf/isa207-common.h
++++ b/arch/powerpc/perf/isa207-common.h
+@@ -201,6 +201,10 @@
+ CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
+ CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
+
++/*
++ * Lets restrict use of PMC5 for instruction counting.
++ */
++#define P9_DD1_TEST_ADDER (ISA207_TEST_ADDER | CNST_PMC_VAL(5))
+
+ /* Bits in MMCR1 for PowerISA v2.07 */
+ #define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
+diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
+index 8e9a81967ff8..9abcd8f65504 100644
+--- a/arch/powerpc/perf/power9-pmu.c
++++ b/arch/powerpc/perf/power9-pmu.c
+@@ -295,7 +295,7 @@ static struct power_pmu power9_pmu = {
+ .name = "POWER9",
+ .n_counter = MAX_PMU_COUNTERS,
+ .add_fields = ISA207_ADD_FIELDS,
+- .test_adder = ISA207_TEST_ADDER,
++ .test_adder = P9_DD1_TEST_ADDER,
+ .compute_mmcr = isa207_compute_mmcr,
+ .config_bhrb = power9_config_bhrb,
+ .bhrb_filter_map = power9_bhrb_filter_map,
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index be0cc1beed41..3fae200dd251 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -59,8 +59,11 @@ extern atomic_t dcpage_flushes;
+ extern atomic_t dcpage_flushes_xcall;
+
+ extern int sysctl_tsb_ratio;
+-#endif
+
++#ifdef CONFIG_SERIAL_SUNHV
++void sunhv_migrate_hvcons_irq(int cpu);
++#endif
++#endif
+ void sun_do_break(void);
+ extern int stop_a_enabled;
+ extern int scons_pwroff;
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 2deb89ef1d5f..ca7cb8e57ab0 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1465,8 +1465,12 @@ void smp_send_stop(void)
+ int cpu;
+
+ if (tlb_type == hypervisor) {
++ int this_cpu = smp_processor_id();
++#ifdef CONFIG_SERIAL_SUNHV
++ sunhv_migrate_hvcons_irq(this_cpu);
++#endif
+ for_each_online_cpu(cpu) {
+- if (cpu == smp_processor_id())
++ if (cpu == this_cpu)
+ continue;
+ #ifdef CONFIG_SUN_LDOMS
+ if (ldom_domaining_enabled) {
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 9a324fc8bed8..3e27ded6ac65 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -689,7 +689,7 @@ static void __meminit free_pagetable(struct page *page, int order)
+ if (PageReserved(page)) {
+ __ClearPageReserved(page);
+
+- magic = (unsigned long)page->lru.next;
++ magic = (unsigned long)page->freelist;
+ if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
+ while (nr_pages--)
+ put_page_bootmem(page++);
+diff --git a/block/bsg-lib.c b/block/bsg-lib.c
+index 341b8d858e67..650f427d915b 100644
+--- a/block/bsg-lib.c
++++ b/block/bsg-lib.c
+@@ -147,6 +147,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
+ failjob_rls_rqst_payload:
+ kfree(job->request_payload.sg_list);
+ failjob_rls_job:
++ kfree(job);
+ return -ENOMEM;
+ }
+
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 84d71482bf08..fa98ad7edb60 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -360,6 +360,7 @@ config CRYPTO_XTS
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_MANAGER
+ select CRYPTO_GF128MUL
++ select CRYPTO_ECB
+ help
+ XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
+ key size 256, 384 or 512 bits. This implementation currently
+diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
+index d02f2c14df32..c738baeb6d45 100644
+--- a/drivers/bluetooth/btmrvl_sdio.c
++++ b/drivers/bluetooth/btmrvl_sdio.c
+@@ -1682,8 +1682,12 @@ static int btmrvl_sdio_resume(struct device *dev)
+ /* Disable platform specific wakeup interrupt */
+ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+ disable_irq_wake(card->plt_wake_cfg->irq_bt);
+- if (!card->plt_wake_cfg->wake_by_bt)
+- disable_irq(card->plt_wake_cfg->irq_bt);
++ disable_irq(card->plt_wake_cfg->irq_bt);
++ if (card->plt_wake_cfg->wake_by_bt)
++ /* Undo our disable, since interrupt handler already
++ * did this.
++ */
++ enable_irq(card->plt_wake_cfg->irq_bt);
+ }
+
+ return 0;
+diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
+index d89b8afe23b6..bc3917d6015a 100644
+--- a/drivers/cpufreq/Kconfig.arm
++++ b/drivers/cpufreq/Kconfig.arm
+@@ -244,7 +244,7 @@ config ARM_PXA2xx_CPUFREQ
+
+ config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+- depends on ACPI
++ depends on ACPI_PROCESSOR
+ select ACPI_CPPC_LIB
+ default n
+ help
+diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
+index daaac2c79ca7..7db692ed3dea 100644
+--- a/drivers/edac/mce_amd.c
++++ b/drivers/edac/mce_amd.c
+@@ -981,20 +981,19 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
+ pr_cont("]: 0x%016llx\n", m->status);
+
+ if (m->status & MCI_STATUS_ADDRV)
+- pr_emerg(HW_ERR "Error Addr: 0x%016llx", m->addr);
++ pr_emerg(HW_ERR "Error Addr: 0x%016llx\n", m->addr);
+
+ if (boot_cpu_has(X86_FEATURE_SMCA)) {
++ pr_emerg(HW_ERR "IPID: 0x%016llx", m->ipid);
++
+ if (m->status & MCI_STATUS_SYNDV)
+ pr_cont(", Syndrome: 0x%016llx", m->synd);
+
+- pr_cont(", IPID: 0x%016llx", m->ipid);
+-
+ pr_cont("\n");
+
+ decode_smca_errors(m);
+ goto err_code;
+- } else
+- pr_cont("\n");
++ }
+
+ if (!fam_ops)
+ goto err_code;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index 264899df9bfc..05ff98b43c50 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -491,6 +491,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
+ case TTM_PL_TT:
+ break;
+ case TTM_PL_VRAM:
++ if (mem->start == AMDGPU_BO_INVALID_OFFSET)
++ return -EINVAL;
++
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ /* check if it's visible */
+ if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+index 6584d505460c..133f89600279 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+@@ -1129,7 +1129,7 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
+ if (trap & 0x00000008) {
+ u32 stat = nvkm_rd32(device, 0x408030);
+
+- nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error,
++ nvkm_snprintbf(error, sizeof(error), gf100_ccache_error,
+ stat & 0x3fffffff);
+ nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error);
+ nvkm_wr32(device, 0x408030, 0xc0000000);
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index 0b86c6173e07..c925a690cb32 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -1180,6 +1180,7 @@ static int at91_twi_suspend_noirq(struct device *dev)
+
+ static int at91_twi_resume_noirq(struct device *dev)
+ {
++ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pm_runtime_status_suspended(dev)) {
+@@ -1191,6 +1192,8 @@ static int at91_twi_resume_noirq(struct device *dev)
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+
++ at91_init_twi_bus(twi_dev);
++
+ return 0;
+ }
+
+diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
+index 0a6beb3d99cb..56cf5907a5f0 100644
+--- a/drivers/iio/adc/xilinx-xadc-core.c
++++ b/drivers/iio/adc/xilinx-xadc-core.c
+@@ -1208,7 +1208,7 @@ static int xadc_probe(struct platform_device *pdev)
+
+ ret = xadc->ops->setup(pdev, indio_dev, irq);
+ if (ret)
+- goto err_free_samplerate_trigger;
++ goto err_clk_disable_unprepare;
+
+ ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
+ dev_name(&pdev->dev), indio_dev);
+@@ -1268,6 +1268,8 @@ static int xadc_probe(struct platform_device *pdev)
+
+ err_free_irq:
+ free_irq(irq, indio_dev);
++err_clk_disable_unprepare:
++ clk_disable_unprepare(xadc->clk);
+ err_free_samplerate_trigger:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_trigger_free(xadc->samplerate_trigger);
+@@ -1277,8 +1279,6 @@ static int xadc_probe(struct platform_device *pdev)
+ err_triggered_buffer_cleanup:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_triggered_buffer_cleanup(indio_dev);
+-err_clk_disable_unprepare:
+- clk_disable_unprepare(xadc->clk);
+ err_device_free:
+ kfree(indio_dev->channels);
+
+diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
+index 34cfd341b6d6..a3dd27b1305d 100644
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -297,14 +297,15 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
+ * The resulting value will be rounded down to the closest
+ * multiple of dd->rcv_entries.group_size.
+ */
+- rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
+- sizeof(*rcd->egrbufs.buffers),
+- GFP_KERNEL);
++ rcd->egrbufs.buffers = kzalloc_node(
++ rcd->egrbufs.count * sizeof(*rcd->egrbufs.buffers),
++ GFP_KERNEL, numa);
+ if (!rcd->egrbufs.buffers)
+ goto bail;
+- rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
+- sizeof(*rcd->egrbufs.rcvtids),
+- GFP_KERNEL);
++ rcd->egrbufs.rcvtids = kzalloc_node(
++ rcd->egrbufs.count *
++ sizeof(*rcd->egrbufs.rcvtids),
++ GFP_KERNEL, numa);
+ if (!rcd->egrbufs.rcvtids)
+ goto bail;
+ rcd->egrbufs.size = eager_buffer_size;
+@@ -322,8 +323,8 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
+ rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
+
+ if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
+- rcd->opstats = kzalloc(sizeof(*rcd->opstats),
+- GFP_KERNEL);
++ rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
++ GFP_KERNEL, numa);
+ if (!rcd->opstats)
+ goto bail;
+ }
+diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
+index 4ac8f330c5cb..335613a1a46a 100644
+--- a/drivers/infiniband/hw/hfi1/pcie.c
++++ b/drivers/infiniband/hw/hfi1/pcie.c
+@@ -673,12 +673,12 @@ MODULE_PARM_DESC(pcie_retry, "Driver will try this many times to reach requested
+
+ #define UNSET_PSET 255
+ #define DEFAULT_DISCRETE_PSET 2 /* discrete HFI */
+-#define DEFAULT_MCP_PSET 4 /* MCP HFI */
++#define DEFAULT_MCP_PSET 6 /* MCP HFI */
+ static uint pcie_pset = UNSET_PSET;
+ module_param(pcie_pset, uint, S_IRUGO);
+ MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");
+
+-static uint pcie_ctle = 1; /* discrete on, integrated off */
++static uint pcie_ctle = 3; /* discrete on, integrated on */
+ module_param(pcie_ctle, uint, S_IRUGO);
+ MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off");
+
+diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
+index 1eef56a89b1f..05bbf171df37 100644
+--- a/drivers/irqchip/irq-crossbar.c
++++ b/drivers/irqchip/irq-crossbar.c
+@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
+
+ static int __init crossbar_of_init(struct device_node *node)
+ {
+- int i, size, max = 0, reserved = 0, entry;
++ int i, size, reserved = 0;
++ u32 max = 0, entry;
+ const __be32 *irqsr;
+ int ret = -ENOMEM;
+
+diff --git a/drivers/md/linear.c b/drivers/md/linear.c
+index b0c0aef92a37..12abf69d568a 100644
+--- a/drivers/md/linear.c
++++ b/drivers/md/linear.c
+@@ -223,7 +223,8 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
+ * oldconf until no one uses it anymore.
+ */
+ mddev_suspend(mddev);
+- oldconf = rcu_dereference(mddev->private);
++ oldconf = rcu_dereference_protected(mddev->private,
++ lockdep_is_held(&mddev->reconfig_mutex));
+ mddev->raid_disks++;
+ WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
+ "copied raid_disks doesn't match mddev->raid_disks");
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 0a4e81a253fb..ed6fae964ec5 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -4413,13 +4413,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu_buf *tx_buf =
+ txq_pcpu->buffs + txq_pcpu->txq_get_index;
+
+- mvpp2_txq_inc_get(txq_pcpu);
+-
+ dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+ tx_buf->size, DMA_TO_DEVICE);
+- if (!tx_buf->skb)
+- continue;
+- dev_kfree_skb_any(tx_buf->skb);
++ if (tx_buf->skb)
++ dev_kfree_skb_any(tx_buf->skb);
++
++ mvpp2_txq_inc_get(txq_pcpu);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index a5fc46bbcbe2..d4d97ca12e83 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -88,10 +88,17 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
+ }
+ }
+
++#define MLX4_EN_WRAP_AROUND_SEC 10UL
++/* By scheduling the overflow check every 5 seconds, we have a reasonably
++ * good chance we wont miss a wrap around.
++ * TOTO: Use a timer instead of a work queue to increase the guarantee.
++ */
++#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
++
+ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
+ {
+ bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
+- mdev->overflow_period);
++ MLX4_EN_OVERFLOW_PERIOD);
+ unsigned long flags;
+
+ if (timeout) {
+@@ -236,7 +243,6 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ .enable = mlx4_en_phc_enable,
+ };
+
+-#define MLX4_EN_WRAP_AROUND_SEC 10ULL
+
+ /* This function calculates the max shift that enables the user range
+ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
+@@ -261,7 +267,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ {
+ struct mlx4_dev *dev = mdev->dev;
+ unsigned long flags;
+- u64 ns, zero = 0;
+
+ /* mlx4_en_init_timestamp is called for each netdev.
+ * mdev->ptp_clock is common for all ports, skip initialization if
+@@ -285,13 +290,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ ktime_to_ns(ktime_get_real()));
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+- /* Calculate period in seconds to call the overflow watchdog - to make
+- * sure counter is checked at least once every wrap around.
+- */
+- ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
+- do_div(ns, NSEC_PER_SEC / 2 / HZ);
+- mdev->overflow_period = ns;
+-
+ /* Configure the PHC */
+ mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
+ snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index ba652d8a2b93..727122de7df0 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -841,8 +841,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
+ return -ENOSYS;
+ }
+
+- mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+-
+ dev->caps.hca_core_clock = hca_param.hca_core_clock;
+
+ memset(&dev_cap, 0, sizeof(dev_cap));
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index a3528dd1e72e..df0f39611c5e 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -419,7 +419,6 @@ struct mlx4_en_dev {
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ unsigned long last_overflow_check;
+- unsigned long overflow_period;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct notifier_block nb;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
+index 653bb5735f0c..433f8be57847 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed.h
++++ b/drivers/net/ethernet/qlogic/qed/qed.h
+@@ -642,7 +642,9 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
+ #define OOO_LB_TC 9
+
+ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
+-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
++void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
++ struct qed_ptt *p_ptt,
++ u32 min_pf_rate);
+
+ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+ #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index edae5fc5fccd..afe5e57d9acb 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -877,7 +877,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+ /* Either EDPM is mandatory, or we are attempting to allocate a
+ * WID per CPU.
+ */
+- n_cpus = num_active_cpus();
++ n_cpus = num_present_cpus();
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+ }
+
+@@ -2732,7 +2732,8 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
+ }
+
+ /* API to configure WFQ from mcp link change */
+-void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
++void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
++ struct qed_ptt *p_ptt, u32 min_pf_rate)
+ {
+ int i;
+
+@@ -2746,8 +2747,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+- __qed_configure_vp_wfq_on_link_change(p_hwfn,
+- p_hwfn->p_dpc_ptt,
++ __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
+ min_pf_rate);
+ }
+ }
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+index bdc9ba92f6d4..8b7d2f963ee1 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+@@ -628,7 +628,8 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
+
+ /* Min bandwidth configuration */
+ __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
+- qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
++ qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
++ p_link->min_pf_rate);
+
+ p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+ p_link->an_complete = !!(status &
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+index f3a825a8f8d5..d9dcb0d1714c 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
+@@ -1766,13 +1766,13 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ if (rc)
+ goto err_resp;
+
+- dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+- p_resp_ramrod_res, resp_ramrod_res_phys);
+-
+ out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
+ rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
+ ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
+
++ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
++ p_resp_ramrod_res, resp_ramrod_res_phys);
++
+ if (!(qp->req_offloaded)) {
+ /* Don't send query qp for the requester */
+ out_params->sq_psn = qp->sq_psn;
+@@ -1813,9 +1813,6 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ if (rc)
+ goto err_req;
+
+- dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+- p_req_ramrod_res, req_ramrod_res_phys);
+-
+ out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
+ sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
+@@ -1823,6 +1820,9 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
+
++ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
++ p_req_ramrod_res, req_ramrod_res_phys);
++
+ out_params->draining = false;
+
+ if (rq_err_state)
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+index 7567cc464b88..634e4149af22 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+@@ -1221,7 +1221,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
+ struct qede_rx_queue *rxq = NULL;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+- int i, rc = 0;
++ int i, iter, rc = 0;
+ u8 *data_ptr;
+
+ for_each_queue(i) {
+@@ -1240,7 +1240,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
+ * enabled. This is because the queue 0 is configured as the default
+ * queue and that the loopback traffic is not IP.
+ */
+- for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
++ for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
+ if (!qede_has_rx_work(rxq)) {
+ usleep_range(100, 200);
+ continue;
+@@ -1287,7 +1287,7 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+ }
+
+- if (i == QEDE_SELFTEST_POLL_COUNT) {
++ if (iter == QEDE_SELFTEST_POLL_COUNT) {
+ DP_NOTICE(edev, "Failed to receive the traffic\n");
+ return -1;
+ }
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index c06932c5ecdb..d2a28a9d3209 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3046,6 +3046,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
+ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ {
+ struct hwsim_new_radio_params param = { 0 };
++ const char *hwname = NULL;
+
+ param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+ param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+@@ -3059,8 +3060,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ if (info->attrs[HWSIM_ATTR_NO_VIF])
+ param.no_vif = true;
+
+- if (info->attrs[HWSIM_ATTR_RADIO_NAME])
+- param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
++ if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
++ hwname = kasprintf(GFP_KERNEL, "%.*s",
++ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
++ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
++ if (!hwname)
++ return -ENOMEM;
++ param.hwname = hwname;
++ }
+
+ if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
+ param.use_chanctx = true;
+@@ -3088,11 +3095,15 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ s64 idx = -1;
+ const char *hwname = NULL;
+
+- if (info->attrs[HWSIM_ATTR_RADIO_ID])
++ if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
+ idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+- else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
+- hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
+- else
++ } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
++ hwname = kasprintf(GFP_KERNEL, "%.*s",
++ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
++ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
++ if (!hwname)
++ return -ENOMEM;
++ } else
+ return -EINVAL;
+
+ spin_lock_bh(&hwsim_radio_lock);
+@@ -3101,7 +3112,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ if (data->idx != idx)
+ continue;
+ } else {
+- if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
++ if (!hwname ||
++ strcmp(hwname, wiphy_name(data->hw->wiphy)))
+ continue;
+ }
+
+@@ -3112,10 +3124,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ spin_unlock_bh(&hwsim_radio_lock);
+ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+ info);
++ kfree(hwname);
+ return 0;
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+
++ kfree(hwname);
+ return -ENODEV;
+ }
+
+diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
+index e8c5dddc54ba..3c4c58b9fe76 100644
+--- a/drivers/net/xen-netback/hash.c
++++ b/drivers/net/xen-netback/hash.c
+@@ -39,7 +39,7 @@ static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
+ unsigned long flags;
+ bool found;
+
+- new = kmalloc(sizeof(*entry), GFP_KERNEL);
++ new = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!new)
+ return;
+
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 9faccfceb53c..9403245503de 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -4507,6 +4507,16 @@ static int __init regulator_init_complete(void)
+ if (of_have_populated_dt())
+ has_full_constraints = true;
+
++ /*
++ * Regulators may had failed to resolve their input supplies
++ * when were registered, either because the input supply was
++ * not registered yet or because its parent device was not
++ * bound yet. So attempt to resolve the input supplies for
++ * pending regulators before trying to disable unused ones.
++ */
++ class_for_each_device(®ulator_class, NULL, NULL,
++ regulator_register_resolve_supply);
++
+ /* If we have a full configuration then disable any regulators
+ * we have permission to change the status for and which are
+ * not in use or always_on. This is effectively the default
+diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
+index 375d81850f15..d5f6fbfa17bf 100644
+--- a/drivers/scsi/device_handler/scsi_dh_emc.c
++++ b/drivers/scsi/device_handler/scsi_dh_emc.c
+@@ -461,7 +461,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
+ static int clariion_std_inquiry(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+ {
+- int err;
++ int err = SCSI_DH_OK;
+ char *sp_model;
+
+ err = send_inquiry_cmd(sdev, 0, csdev);
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+index 6d459ef8c121..f72eebc71dd8 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+@@ -106,8 +106,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
+
+ g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);
+
+- (void)of_property_read_u32(dev->of_node, "cache-line-size",
++ err = of_property_read_u32(dev->of_node, "cache-line-size",
+ &g_cache_line_size);
++
++ if (err) {
++ dev_err(dev, "Missing cache-line-size property\n");
++ return -ENODEV;
++ }
++
+ g_fragments_size = 2 * g_cache_line_size;
+
+ /* Allocate space for the channels in coherent memory */
+diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
+index efc453ef6831..ab92a1bc9666 100644
+--- a/drivers/target/iscsi/iscsi_target_erl0.c
++++ b/drivers/target/iscsi/iscsi_target_erl0.c
+@@ -44,10 +44,8 @@ void iscsit_set_dataout_sequence_values(
+ */
+ if (cmd->unsolicited_data) {
+ cmd->seq_start_offset = cmd->write_data_done;
+- cmd->seq_end_offset = (cmd->write_data_done +
+- ((cmd->se_cmd.data_length >
+- conn->sess->sess_ops->FirstBurstLength) ?
+- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
++ cmd->seq_end_offset = min(cmd->se_cmd.data_length,
++ conn->sess->sess_ops->FirstBurstLength);
+ return;
+ }
+
+diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
+index 4e603d060e80..59828d819145 100644
+--- a/drivers/tty/serial/sunhv.c
++++ b/drivers/tty/serial/sunhv.c
+@@ -398,6 +398,12 @@ static struct uart_driver sunhv_reg = {
+
+ static struct uart_port *sunhv_port;
+
++void sunhv_migrate_hvcons_irq(int cpu)
++{
++ /* Migrate hvcons irq to param cpu */
++ irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
++}
++
+ /* Copy 's' into the con_write_page, decoding "\n" into
+ * "\r\n" along the way. We have to return two lengths
+ * because the caller needs to know how much to advance
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index f92c680e3937..c61ddbf94bc7 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -817,9 +817,42 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+ if (!node) {
+ trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+
++ /*
++ * USB Specification 2.0 Section 5.9.2 states that: "If
++ * there is only a single transaction in the microframe,
++ * only a DATA0 data packet PID is used. If there are
++ * two transactions per microframe, DATA1 is used for
++ * the first transaction data packet and DATA0 is used
++ * for the second transaction data packet. If there are
++ * three transactions per microframe, DATA2 is used for
++ * the first transaction data packet, DATA1 is used for
++ * the second, and DATA0 is used for the third."
++ *
++ * IOW, we should satisfy the following cases:
++ *
++ * 1) length <= maxpacket
++ * - DATA0
++ *
++ * 2) maxpacket < length <= (2 * maxpacket)
++ * - DATA1, DATA0
++ *
++ * 3) (2 * maxpacket) < length <= (3 * maxpacket)
++ * - DATA2, DATA1, DATA0
++ */
+ if (speed == USB_SPEED_HIGH) {
+ struct usb_ep *ep = &dep->endpoint;
+- trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
++ unsigned int mult = ep->mult - 1;
++ unsigned int maxp;
++
++ maxp = usb_endpoint_maxp(ep->desc) & 0x07ff;
++
++ if (length <= (2 * maxp))
++ mult--;
++
++ if (length <= maxp)
++ mult--;
++
++ trb->size |= DWC3_TRB_SIZE_PCM1(mult);
+ }
+ } else {
+ trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
+diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
+index 8e302d0e346c..3efa295ac627 100644
+--- a/drivers/watchdog/kempld_wdt.c
++++ b/drivers/watchdog/kempld_wdt.c
+@@ -140,12 +140,19 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
+ unsigned int timeout)
+ {
+ struct kempld_device_data *pld = wdt_data->pld;
+- u32 prescaler = kempld_prescaler[PRESCALER_21];
++ u32 prescaler;
+ u64 stage_timeout64;
+ u32 stage_timeout;
+ u32 remainder;
+ u8 stage_cfg;
+
++#if GCC_VERSION < 40400
++ /* work around a bug compiling do_div() */
++ prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]);
++#else
++ prescaler = kempld_prescaler[PRESCALER_21];
++#endif
++
+ if (!stage)
+ return -EINVAL;
+
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 71261b459863..77f9efc1f7aa 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1680,6 +1680,9 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
+ {
+ int ret;
+
++ if (ino == BTRFS_FIRST_FREE_OBJECTID)
++ return 1;
++
+ ret = get_cur_inode_state(sctx, ino, gen);
+ if (ret < 0)
+ goto out;
+@@ -1865,7 +1868,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
+ * not deleted and then re-created, if it was then we have no overwrite
+ * and we can just unlink this entry.
+ */
+- if (sctx->parent_root) {
++ if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
+ ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
+ NULL, NULL, NULL);
+ if (ret < 0 && ret != -ENOENT)
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 953275b651bc..4a6df2ce0f76 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -1323,8 +1323,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
+ ceph_dir_clear_ordered(dir);
+ dout("d_delete %p\n", dn);
+ d_delete(dn);
+- } else {
+- if (have_lease && d_unhashed(dn))
++ } else if (have_lease) {
++ if (d_unhashed(dn))
+ d_add(dn, NULL);
+ update_dentry_lease(dn, rinfo->dlease,
+ session,
+diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
+index 7d752d53353a..4c9c72f26eb9 100644
+--- a/fs/ceph/ioctl.c
++++ b/fs/ceph/ioctl.c
+@@ -25,7 +25,7 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
+ l.stripe_count = ci->i_layout.stripe_count;
+ l.object_size = ci->i_layout.object_size;
+ l.data_pool = ci->i_layout.pool_id;
+- l.preferred_osd = (s32)-1;
++ l.preferred_osd = -1;
+ if (copy_to_user(arg, &l, sizeof(l)))
+ return -EFAULT;
+ }
+@@ -97,7 +97,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
+ nl.data_pool = ci->i_layout.pool_id;
+
+ /* this is obsolete, and always -1 */
+- nl.preferred_osd = le64_to_cpu(-1);
++ nl.preferred_osd = -1;
+
+ err = __validate_layout(mdsc, &nl);
+ if (err)
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index e3e1a80b351e..c0f52c443c34 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1782,13 +1782,18 @@ static int build_dentry_path(struct dentry *dentry,
+ int *pfreepath)
+ {
+ char *path;
++ struct inode *dir;
+
+- if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
+- *pino = ceph_ino(d_inode(dentry->d_parent));
++ rcu_read_lock();
++ dir = d_inode_rcu(dentry->d_parent);
++ if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
++ *pino = ceph_ino(dir);
++ rcu_read_unlock();
+ *ppath = dentry->d_name.name;
+ *ppathlen = dentry->d_name.len;
+ return 0;
+ }
++ rcu_read_unlock();
+ path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 14db4b712021..99432b59c5cb 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1619,7 +1619,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
+ goto fail;
+ }
+ repeat:
+- page = grab_cache_page_write_begin(mapping, index, flags);
++ /*
++ * Do not use grab_cache_page_write_begin() to avoid deadlock due to
++ * wait_for_stable_page. Will wait that below with our IO control.
++ */
++ page = pagecache_get_page(mapping, index,
++ FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
+ if (!page) {
+ err = -ENOMEM;
+ goto fail;
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 74a2b444406d..e10f61684ea4 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -1263,7 +1263,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
+
+- if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
++ if (IS_NODESEG(type))
+ return v_ops->get_victim(sbi,
+ &(curseg)->next_segno, BG_GC, type, SSR);
+
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 211dc2aed8e1..3069cd46ea66 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -753,6 +753,14 @@ int set_callback_cred(void)
+ return 0;
+ }
+
++void cleanup_callback_cred(void)
++{
++ if (callback_cred) {
++ put_rpccred(callback_cred);
++ callback_cred = NULL;
++ }
++}
++
+ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
+ {
+ if (clp->cl_minorversion == 0) {
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index a0dee8ae9f97..d35eb077330f 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -7012,23 +7012,24 @@ nfs4_state_start(void)
+
+ ret = set_callback_cred();
+ if (ret)
+- return -ENOMEM;
++ return ret;
++
+ laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
+ if (laundry_wq == NULL) {
+ ret = -ENOMEM;
+- goto out_recovery;
++ goto out_cleanup_cred;
+ }
+ ret = nfsd4_create_callback_queue();
+ if (ret)
+ goto out_free_laundry;
+
+ set_max_delegations();
+-
+ return 0;
+
+ out_free_laundry:
+ destroy_workqueue(laundry_wq);
+-out_recovery:
++out_cleanup_cred:
++ cleanup_callback_cred();
+ return ret;
+ }
+
+@@ -7086,6 +7087,7 @@ nfs4_state_shutdown(void)
+ {
+ destroy_workqueue(laundry_wq);
+ nfsd4_destroy_callback_queue();
++ cleanup_callback_cred();
+ }
+
+ static void
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index 4516e8b7d776..005c911b34ac 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -615,6 +615,7 @@ extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir,
+ extern __be32 nfs4_check_open_reclaim(clientid_t *clid,
+ struct nfsd4_compound_state *cstate, struct nfsd_net *nn);
+ extern int set_callback_cred(void);
++extern void cleanup_callback_cred(void);
+ extern void nfsd4_probe_callback(struct nfs4_client *clp);
+ extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
+ extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 77d1632e905d..8dce4099a6ca 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -532,6 +532,7 @@ void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
+ init_waitqueue_head(&res->l_event);
+ INIT_LIST_HEAD(&res->l_blocked_list);
+ INIT_LIST_HEAD(&res->l_mask_waiters);
++ INIT_LIST_HEAD(&res->l_holders);
+ }
+
+ void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
+@@ -749,6 +750,50 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
+ res->l_flags = 0UL;
+ }
+
++/*
++ * Keep a list of processes who have interest in a lockres.
++ * Note: this is now only uesed for check recursive cluster locking.
++ */
++static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
++ struct ocfs2_lock_holder *oh)
++{
++ INIT_LIST_HEAD(&oh->oh_list);
++ oh->oh_owner_pid = get_pid(task_pid(current));
++
++ spin_lock(&lockres->l_lock);
++ list_add_tail(&oh->oh_list, &lockres->l_holders);
++ spin_unlock(&lockres->l_lock);
++}
++
++static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
++ struct ocfs2_lock_holder *oh)
++{
++ spin_lock(&lockres->l_lock);
++ list_del(&oh->oh_list);
++ spin_unlock(&lockres->l_lock);
++
++ put_pid(oh->oh_owner_pid);
++}
++
++static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
++{
++ struct ocfs2_lock_holder *oh;
++ struct pid *pid;
++
++ /* look in the list of holders for one with the current task as owner */
++ spin_lock(&lockres->l_lock);
++ pid = task_pid(current);
++ list_for_each_entry(oh, &lockres->l_holders, oh_list) {
++ if (oh->oh_owner_pid == pid) {
++ spin_unlock(&lockres->l_lock);
++ return 1;
++ }
++ }
++ spin_unlock(&lockres->l_lock);
++
++ return 0;
++}
++
+ static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
+ int level)
+ {
+@@ -2333,8 +2378,9 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
+ goto getbh;
+ }
+
+- if (ocfs2_mount_local(osb))
+- goto local;
++ if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
++ ocfs2_mount_local(osb))
++ goto update;
+
+ if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
+ ocfs2_wait_for_recovery(osb);
+@@ -2363,7 +2409,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
+ if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
+ ocfs2_wait_for_recovery(osb);
+
+-local:
++update:
+ /*
+ * We only see this flag if we're being called from
+ * ocfs2_read_locked_inode(). It means we're locking an inode
+@@ -2497,6 +2543,59 @@ void ocfs2_inode_unlock(struct inode *inode,
+ ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+ }
+
++/*
++ * This _tracker variantes are introduced to deal with the recursive cluster
++ * locking issue. The idea is to keep track of a lock holder on the stack of
++ * the current process. If there's a lock holder on the stack, we know the
++ * task context is already protected by cluster locking. Currently, they're
++ * used in some VFS entry routines.
++ *
++ * return < 0 on error, return == 0 if there's no lock holder on the stack
++ * before this call, return == 1 if this call would be a recursive locking.
++ */
++int ocfs2_inode_lock_tracker(struct inode *inode,
++ struct buffer_head **ret_bh,
++ int ex,
++ struct ocfs2_lock_holder *oh)
++{
++ int status;
++ int arg_flags = 0, has_locked;
++ struct ocfs2_lock_res *lockres;
++
++ lockres = &OCFS2_I(inode)->ip_inode_lockres;
++ has_locked = ocfs2_is_locked_by_me(lockres);
++ /* Just get buffer head if the cluster lock has been taken */
++ if (has_locked)
++ arg_flags = OCFS2_META_LOCK_GETBH;
++
++ if (likely(!has_locked || ret_bh)) {
++ status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
++ if (status < 0) {
++ if (status != -ENOENT)
++ mlog_errno(status);
++ return status;
++ }
++ }
++ if (!has_locked)
++ ocfs2_add_holder(lockres, oh);
++
++ return has_locked;
++}
++
++void ocfs2_inode_unlock_tracker(struct inode *inode,
++ int ex,
++ struct ocfs2_lock_holder *oh,
++ int had_lock)
++{
++ struct ocfs2_lock_res *lockres;
++
++ lockres = &OCFS2_I(inode)->ip_inode_lockres;
++ if (!had_lock) {
++ ocfs2_remove_holder(lockres, oh);
++ ocfs2_inode_unlock(inode, ex);
++ }
++}
++
+ int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
+ {
+ struct ocfs2_lock_res *lockres;
+diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
+index d293a22c32c5..a7fc18ba0dc1 100644
+--- a/fs/ocfs2/dlmglue.h
++++ b/fs/ocfs2/dlmglue.h
+@@ -70,6 +70,11 @@ struct ocfs2_orphan_scan_lvb {
+ __be32 lvb_os_seqno;
+ };
+
++struct ocfs2_lock_holder {
++ struct list_head oh_list;
++ struct pid *oh_owner_pid;
++};
++
+ /* ocfs2_inode_lock_full() 'arg_flags' flags */
+ /* don't wait on recovery. */
+ #define OCFS2_META_LOCK_RECOVERY (0x01)
+@@ -77,6 +82,8 @@ struct ocfs2_orphan_scan_lvb {
+ #define OCFS2_META_LOCK_NOQUEUE (0x02)
+ /* don't block waiting for the downconvert thread, instead return -EAGAIN */
+ #define OCFS2_LOCK_NONBLOCK (0x04)
++/* just get back disk inode bh if we've got cluster lock. */
++#define OCFS2_META_LOCK_GETBH (0x08)
+
+ /* Locking subclasses of inode cluster lock */
+ enum {
+@@ -170,4 +177,15 @@ void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
+
+ /* To set the locking protocol on module initialization */
+ void ocfs2_set_locking_protocol(void);
++
++/* The _tracker pair is used to avoid cluster recursive locking */
++int ocfs2_inode_lock_tracker(struct inode *inode,
++ struct buffer_head **ret_bh,
++ int ex,
++ struct ocfs2_lock_holder *oh);
++void ocfs2_inode_unlock_tracker(struct inode *inode,
++ int ex,
++ struct ocfs2_lock_holder *oh,
++ int had_lock);
++
+ #endif /* DLMGLUE_H */
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index e63af7ddfe68..594575e380e8 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -172,6 +172,7 @@ struct ocfs2_lock_res {
+
+ struct list_head l_blocked_list;
+ struct list_head l_mask_waiters;
++ struct list_head l_holders;
+
+ unsigned long l_flags;
+ char l_name[OCFS2_LOCK_ID_MAX_LEN];
+diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
+index 5062fb5751e1..ed5721148768 100644
+--- a/include/uapi/linux/mroute6.h
++++ b/include/uapi/linux/mroute6.h
+@@ -4,6 +4,7 @@
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include <linux/sockios.h>
++#include <linux/in6.h> /* For struct sockaddr_in6. */
+
+ /*
+ * Based on the MROUTING 3.5 defines primarily to keep
+diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
+index 0f9265cb2a96..7af20a136429 100644
+--- a/include/uapi/linux/rds.h
++++ b/include/uapi/linux/rds.h
+@@ -35,6 +35,7 @@
+ #define _LINUX_RDS_H
+
+ #include <linux/types.h>
++#include <linux/socket.h> /* For __kernel_sockaddr_storage. */
+
+ #define RDS_IB_ABI_VERSION 0x301
+
+@@ -223,7 +224,7 @@ struct rds_get_mr_args {
+ };
+
+ struct rds_get_mr_for_dest_args {
+- struct sockaddr_storage dest_addr;
++ struct __kernel_sockaddr_storage dest_addr;
+ struct rds_iovec vec;
+ uint64_t cookie_addr;
+ uint64_t flags;
+diff --git a/init/initramfs.c b/init/initramfs.c
+index b32ad7d97ac9..981f286c1d16 100644
+--- a/init/initramfs.c
++++ b/init/initramfs.c
+@@ -18,6 +18,7 @@
+ #include <linux/dirent.h>
+ #include <linux/syscalls.h>
+ #include <linux/utime.h>
++#include <linux/file.h>
+
+ static ssize_t __init xwrite(int fd, const char *p, size_t count)
+ {
+@@ -647,6 +648,7 @@ static int __init populate_rootfs(void)
+ printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
+ free_initrd();
+ #endif
++ flush_delayed_fput();
+ /*
+ * Try loading default modules from initramfs. This gives
+ * us a chance to load before device_initcalls.
+diff --git a/init/main.c b/init/main.c
+index ae3996ae9bac..25bac88bc66e 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -70,7 +70,6 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
+-#include <linux/file.h>
+ #include <linux/ptrace.h>
+ #include <linux/blkdev.h>
+ #include <linux/elevator.h>
+@@ -947,8 +946,6 @@ static int __ref kernel_init(void *unused)
+ system_state = SYSTEM_RUNNING;
+ numa_default_policy();
+
+- flush_delayed_fput();
+-
+ rcu_end_inkernel_boot();
+
+ if (ramdisk_execute_command) {
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 4d7ffc0a0d00..6599c7f3071d 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3260,10 +3260,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ if (depth) {
+ hlock = curr->held_locks + depth - 1;
+ if (hlock->class_idx == class_idx && nest_lock) {
+- if (hlock->references)
++ if (hlock->references) {
++ /*
++ * Check: unsigned int references:12, overflow.
++ */
++ if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
++ return 0;
++
+ hlock->references++;
+- else
++ } else {
+ hlock->references = 2;
++ }
+
+ return 1;
+ }
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index d7dda36fbc7b..02e7ad860b52 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1141,6 +1141,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+ int ret = 0;
+
+ rq = task_rq_lock(p, &rf);
++ update_rq_clock(rq);
+
+ if (p->flags & PF_KTHREAD) {
+ /*
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index bb5ec425dfe0..eeb7f2f5698d 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -94,17 +94,15 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
+ };
+
+ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
++ /* Make sure we catch unsupported clockids */
++ [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
++
+ [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
+ [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
+ [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
+ [CLOCK_TAI] = HRTIMER_BASE_TAI,
+ };
+
+-static inline int hrtimer_clockid_to_base(clockid_t clock_id)
+-{
+- return hrtimer_clock_to_base_table[clock_id];
+-}
+-
+ /*
+ * Functions and macros which are different for UP/SMP systems are kept in a
+ * single place
+@@ -1112,6 +1110,18 @@ u64 hrtimer_get_next_event(void)
+ }
+ #endif
+
++static inline int hrtimer_clockid_to_base(clockid_t clock_id)
++{
++ if (likely(clock_id < MAX_CLOCKS)) {
++ int base = hrtimer_clock_to_base_table[clock_id];
++
++ if (likely(base != HRTIMER_MAX_CLOCK_BASES))
++ return base;
++ }
++ WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
++ return HRTIMER_BASE_MONOTONIC;
++}
++
+ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
+ {
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index ede137345a99..c9f715b2917f 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -179,7 +179,7 @@ static void release_memory_resource(struct resource *res)
+ void get_page_bootmem(unsigned long info, struct page *page,
+ unsigned long type)
+ {
+- page->lru.next = (struct list_head *) type;
++ page->freelist = (void *)type;
+ SetPagePrivate(page);
+ set_page_private(page, info);
+ page_ref_inc(page);
+@@ -189,11 +189,12 @@ void put_page_bootmem(struct page *page)
+ {
+ unsigned long type;
+
+- type = (unsigned long) page->lru.next;
++ type = (unsigned long) page->freelist;
+ BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
+ type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
+
+ if (page_ref_dec_return(page) == 1) {
++ page->freelist = NULL;
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ INIT_LIST_HEAD(&page->lru);
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 5d2f24fbafc5..622f6b6ae844 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -255,7 +255,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
+ {
+ struct kmem_cache *s;
+
+- if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
++ if (slab_nomerge)
+ return NULL;
+
+ if (ctor)
+@@ -266,6 +266,9 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
+ size = ALIGN(size, align);
+ flags = kmem_cache_flags(size, flags, name, NULL);
+
++ if (flags & SLAB_NEVER_MERGE)
++ return NULL;
++
+ list_for_each_entry_reverse(s, &slab_caches, list) {
+ if (slab_unmergeable(s))
+ continue;
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 1e168bf2779a..8c4c82e358e6 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -662,7 +662,7 @@ static void free_map_bootmem(struct page *memmap)
+ >> PAGE_SHIFT;
+
+ for (i = 0; i < nr_pages; i++, page++) {
+- magic = (unsigned long) page->lru.next;
++ magic = (unsigned long) page->freelist;
+
+ BUG_ON(magic == NODE_INFO);
+
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index b2c823ffad74..348700b424ea 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
+ }
+
+ /* No need to do anything if the driver does all */
+- if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
++ if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
+ return;
+
+ if (sta->dead)
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index f8dbacf66795..0d6c72d6b9ba 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -411,7 +411,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ struct net *net = nf_ct_exp_net(expect);
+ struct hlist_node *next;
+ unsigned int h;
+- int ret = 1;
++ int ret = 0;
+
+ if (!master_help) {
+ ret = -ESHUTDOWN;
+@@ -461,7 +461,7 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
+
+ spin_lock_bh(&nf_conntrack_expect_lock);
+ ret = __nf_ct_expect_check(expect);
+- if (ret <= 0)
++ if (ret < 0)
+ goto out;
+
+ ret = nf_ct_expect_insert(expect);
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 775c67818bf1..bd650222e711 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -3685,6 +3685,7 @@ HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_i915_hsw_hdmi),
+ HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_i915_hsw_hdmi),
+ HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_i915_hsw_hdmi),
+ HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi),
++HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_hsw_hdmi),
+ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
+ HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
+diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
+index 05cf809cf9e1..d7013bde6f45 100644
+--- a/sound/soc/mediatek/Kconfig
++++ b/sound/soc/mediatek/Kconfig
+@@ -13,7 +13,7 @@ config SND_SOC_MT2701
+
+ config SND_SOC_MT2701_CS42448
+ tristate "ASoc Audio driver for MT2701 with CS42448 codec"
+- depends on SND_SOC_MT2701
++ depends on SND_SOC_MT2701 && I2C
+ select SND_SOC_CS42XX8_I2C
+ select SND_SOC_BT_SCO
+ help
2018-08-16 11:51 Mike Pagano
2018-08-15 16:46 Mike Pagano
2018-08-09 10:52 Mike Pagano
2018-08-07 18:12 Mike Pagano
2018-08-03 12:25 Mike Pagano
2018-07-28 10:38 Mike Pagano
2018-07-25 10:26 Mike Pagano
2018-07-22 15:14 Mike Pagano
2018-07-17 10:25 Mike Pagano
2018-07-12 15:42 Alice Ferrazzi
2018-07-03 13:16 Mike Pagano
2018-06-26 16:34 Alice Ferrazzi
2018-06-16 15:42 Mike Pagano
2018-06-13 15:03 Mike Pagano
2018-06-06 18:04 Mike Pagano
2018-06-05 11:21 Mike Pagano
2018-05-30 22:34 Mike Pagano
2018-05-30 11:39 Mike Pagano
2018-05-25 14:54 Mike Pagano
2018-05-22 17:28 Mike Pagano
2018-05-20 22:20 Mike Pagano
2018-05-16 10:23 Mike Pagano
2018-05-09 10:54 Mike Pagano
2018-05-02 16:13 Mike Pagano
2018-04-30 10:29 Mike Pagano
2018-04-24 11:30 Mike Pagano
2018-04-20 11:12 Mike Pagano
2018-04-13 22:21 Mike Pagano
2018-04-08 14:26 Mike Pagano
2018-03-31 22:17 Mike Pagano
2018-03-28 17:42 Mike Pagano
2018-03-25 14:31 Mike Pagano
2018-03-25 13:39 Mike Pagano
2018-03-22 12:58 Mike Pagano
2018-03-18 22:15 Mike Pagano
2018-03-11 18:26 Mike Pagano
2018-03-05 2:38 Alice Ferrazzi
2018-02-28 18:46 Alice Ferrazzi
2018-02-28 15:02 Alice Ferrazzi
2018-02-25 15:47 Mike Pagano
2018-02-22 23:22 Mike Pagano
2018-02-17 15:02 Alice Ferrazzi
2018-02-13 13:25 Alice Ferrazzi
2018-02-03 21:22 Mike Pagano
2018-01-31 13:31 Alice Ferrazzi
2018-01-23 21:17 Mike Pagano
2018-01-17 10:18 Alice Ferrazzi
2018-01-17 10:18 Alice Ferrazzi
2018-01-17 9:16 Alice Ferrazzi
2018-01-15 14:57 Alice Ferrazzi
2018-01-10 12:21 Alice Ferrazzi
2018-01-10 11:47 Mike Pagano
2018-01-05 15:54 Alice Ferrazzi
2018-01-05 15:04 Alice Ferrazzi
2018-01-02 20:13 Mike Pagano
2017-12-29 17:20 Alice Ferrazzi
2017-12-25 14:36 Alice Ferrazzi
2017-12-20 12:44 Mike Pagano
2017-12-16 17:42 Alice Ferrazzi
2017-12-14 8:58 Alice Ferrazzi
2017-12-09 23:29 Mike Pagano
2017-12-05 11:38 Mike Pagano
2017-11-30 12:19 Alice Ferrazzi
2017-11-24 9:44 Alice Ferrazzi
2017-11-21 9:18 Alice Ferrazzi
2017-11-18 18:24 Mike Pagano
2017-11-15 15:44 Mike Pagano
2017-11-08 13:49 Mike Pagano
2017-11-02 10:03 Mike Pagano
2017-10-27 10:29 Mike Pagano
2017-10-18 13:46 Mike Pagano
2017-10-12 22:26 Mike Pagano
2017-10-12 12:37 Mike Pagano
2017-10-08 14:23 Mike Pagano
2017-10-08 14:21 Mike Pagano
2017-10-08 14:13 Mike Pagano
2017-10-05 11:38 Mike Pagano
2017-09-27 16:38 Mike Pagano
2017-09-20 10:11 Mike Pagano
2017-09-14 11:39 Mike Pagano
2017-09-13 22:28 Mike Pagano
2017-09-13 16:25 Mike Pagano
2017-09-10 14:38 Mike Pagano
2017-09-07 22:43 Mike Pagano
2017-09-02 17:45 Mike Pagano
2017-08-30 10:06 Mike Pagano
2017-08-25 10:59 Mike Pagano
2017-08-16 22:29 Mike Pagano
2017-08-13 16:51 Mike Pagano
2017-08-11 17:41 Mike Pagano
2017-08-07 10:26 Mike Pagano
2017-05-14 13:31 Mike Pagano
2017-05-08 10:43 Mike Pagano
2017-05-03 17:45 Mike Pagano
2017-04-27 9:05 Alice Ferrazzi
2017-04-22 17:01 Mike Pagano
2017-04-18 10:23 Mike Pagano
2017-04-12 18:01 Mike Pagano
2017-04-08 13:53 Mike Pagano
2017-03-31 10:44 Mike Pagano
2017-03-30 18:15 Mike Pagano
2017-03-26 11:54 Mike Pagano
2017-03-23 18:38 Mike Pagano
2017-03-22 12:42 Mike Pagano
2017-03-18 14:34 Mike Pagano
2017-03-15 19:21 Mike Pagano
2017-03-12 12:22 Mike Pagano
2017-03-02 16:23 Mike Pagano
2017-02-26 20:38 Mike Pagano
2017-02-26 20:36 Mike Pagano
2017-02-23 20:34 Mike Pagano
2017-02-23 20:11 Mike Pagano
2017-02-18 20:37 Mike Pagano
2017-02-18 16:13 Alice Ferrazzi
2017-02-15 16:02 Alice Ferrazzi
2017-02-14 23:08 Mike Pagano
2017-02-09 11:11 Alice Ferrazzi
2017-02-04 11:34 Alice Ferrazzi
2017-02-01 13:07 Alice Ferrazzi
2017-01-29 23:08 Alice Ferrazzi
2017-01-26 8:51 Alice Ferrazzi
2017-01-20 11:33 Alice Ferrazzi
2017-01-15 22:59 Mike Pagano
2017-01-12 22:53 Mike Pagano
2017-01-09 12:41 Mike Pagano
2017-01-07 0:55 Mike Pagano
2017-01-06 23:09 Mike Pagano
2016-12-31 19:39 Mike Pagano
2016-12-11 23:20 Mike Pagano
Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1508616901.8f54decd9aeae66b06a1e63ab9fd02605b94f345.mpagano@gentoo \
    --to=mpagano@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.
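The switches in the git send-email command above correspond to ordinary mail
headers (In-Reply-To, To, Cc), and the note about a Subject: header and a blank
line before the body describes the shape of the reply file itself. As a minimal
sketch (not part of the original message; the sender address, subject text,
body, and reply.eml file name are placeholders), such a reply file could be
assembled with Python's standard email library:

    # Minimal sketch: write a plain-text reply file carrying the headers the
    # instructions above call for. Sender, subject, body, and file name are
    # placeholders; the message-id and list addresses come from the text above.
    from email.message import EmailMessage

    msg = EmailMessage()
    msg["Subject"] = "Re: <subject of the message being replied to>"  # placeholder
    msg["In-Reply-To"] = "<1508616901.8f54decd9aeae66b06a1e63ab9fd02605b94f345.mpagano@gentoo>"
    msg["To"] = "mpagano@gentoo.org"
    msg["Cc"] = "gentoo-commits@lists.gentoo.org, gentoo-dev@lists.gentoo.org"
    msg["From"] = "you@example.org"  # placeholder sender
    msg.set_content("Interleaved reply text goes here.")
    # The blank line separating headers from body is added when the message
    # is serialized.

    with open("reply.eml", "wb") as f:
        f.write(msg.as_bytes())

A file written this way can then be handed to the git send-email invocation
above in place of /path/to/YOUR_REPLY.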
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox.