public inbox for gentoo-commits@lists.gentoo.org
From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sat, 21 Dec 2019 15:03:17 +0000 (UTC)
Message-ID: <1576940582.96ffa6fb8e2e9fb0a95fec61d517ba8eb1ba9d2a.mpagano@gentoo>

commit:     96ffa6fb8e2e9fb0a95fec61d517ba8eb1ba9d2a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Dec 21 15:03:02 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Dec 21 15:03:02 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=96ffa6fb

Linux patch 4.19.91

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1090_linux-4.19.91.patch | 1527 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1531 insertions(+)

diff --git a/0000_README b/0000_README
index aecec9f..304b54d 100644
--- a/0000_README
+++ b/0000_README
@@ -399,6 +399,10 @@ Patch:  1089_linux-4.19.90.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.90
 
+Patch:  1090_linux-4.19.91.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.91
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1090_linux-4.19.91.patch b/1090_linux-4.19.91.patch
new file mode 100644
index 0000000..4880cbe
--- /dev/null
+++ b/1090_linux-4.19.91.patch
@@ -0,0 +1,1527 @@
+diff --git a/Makefile b/Makefile
+index 39bdbafae517..4e07d786bec2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 90
++SUBLEVEL = 91
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts
+index 0e159c884f97..1aeac33b0d34 100644
+--- a/arch/arm/boot/dts/s3c6410-mini6410.dts
++++ b/arch/arm/boot/dts/s3c6410-mini6410.dts
+@@ -165,6 +165,10 @@
+ 	};
+ };
+ 
++&clocks {
++	clocks = <&fin_pll>;
++};
++
+ &sdhci0 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
+diff --git a/arch/arm/boot/dts/s3c6410-smdk6410.dts b/arch/arm/boot/dts/s3c6410-smdk6410.dts
+index a9a5689dc462..3bf6c450a26e 100644
+--- a/arch/arm/boot/dts/s3c6410-smdk6410.dts
++++ b/arch/arm/boot/dts/s3c6410-smdk6410.dts
+@@ -69,6 +69,10 @@
+ 	};
+ };
+ 
++&clocks {
++	clocks = <&fin_pll>;
++};
++
+ &sdhci0 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
+diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
+index 805f306fa6f7..e31f167a8199 100644
+--- a/arch/arm/mach-tegra/reset-handler.S
++++ b/arch/arm/mach-tegra/reset-handler.S
+@@ -56,16 +56,16 @@ ENTRY(tegra_resume)
+ 	cmp	r6, #TEGRA20
+ 	beq	1f				@ Yes
+ 	/* Clear the flow controller flags for this CPU. */
+-	cpu_to_csr_reg r1, r0
++	cpu_to_csr_reg r3, r0
+ 	mov32	r2, TEGRA_FLOW_CTRL_BASE
+-	ldr	r1, [r2, r1]
++	ldr	r1, [r2, r3]
+ 	/* Clear event & intr flag */
+ 	orr	r1, r1, \
+ 		#FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
+ 	movw	r0, #0x3FFD	@ enable, cluster_switch, immed, bitmaps
+ 				@ & ext flags for CPU power mgnt
+ 	bic	r1, r1, r0
+-	str	r1, [r2]
++	str	r1, [r2, r3]
+ 1:
+ 
+ 	mov32	r9, 0xc09
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index 0c100506a29a..5a97ac853168 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -683,9 +683,11 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
+ 	.macro		if_will_cond_yield_neon
+ #ifdef CONFIG_PREEMPT
+ 	get_thread_info	x0
+-	ldr		x0, [x0, #TSK_TI_PREEMPT]
+-	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
+-	cbz		x0, .Lyield_\@
++	ldr		w1, [x0, #TSK_TI_PREEMPT]
++	ldr		x0, [x0, #TSK_TI_FLAGS]
++	cmp		w1, #PREEMPT_DISABLE_OFFSET
++	csel		x0, x0, xzr, eq
++	tbnz		x0, #TIF_NEED_RESCHED, .Lyield_\@	// needs rescheduling?
+ 	/* fall through to endif_yield_neon */
+ 	.subsection	1
+ .Lyield_\@ :
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index bb6832353045..5f800384cb9a 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -622,8 +622,10 @@ el1_irq:
+ 	irq_handler
+ 
+ #ifdef CONFIG_PREEMPT
+-	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
+-	cbnz	x24, 1f				// preempt count != 0
++	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
++	cbnz	w24, 1f				// preempt count != 0
++	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
++	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
+ 	bl	el1_preempt
+ 1:
+ #endif
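+
Both arm64 hunks above encode the same condition: enter the preemption
path only when the preempt count is zero and TIF_NEED_RESCHED is set,
and load the 32-bit preempt count through a w-register so neighbouring
thread_info bytes cannot leak into the comparison. A rough C restatement
of the patched check (the function name and the direct thread_info
access are illustrative, not kernel API):

    /* Illustrative C equivalent of the patched el1_irq logic. */
    static bool should_preempt(struct thread_info *ti)
    {
            /* preempt_count is 32 bits wide; the fix switches the
             * assembly from a 64-bit x-register load to a 32-bit
             * w-register load of this field */
            if (READ_ONCE(ti->preempt_count) != 0)
                    return false;           /* preemption disabled */

            /* reschedule only when the flag is actually set */
            return test_bit(TIF_NEED_RESCHED, &ti->flags);
    }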
+diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
+index 59153d0aa890..b43f03620843 100644
+--- a/arch/xtensa/mm/tlb.c
++++ b/arch/xtensa/mm/tlb.c
+@@ -216,6 +216,8 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
+ 	unsigned tlbidx = w | (e << PAGE_SHIFT);
+ 	unsigned r0 = dtlb ?
+ 		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
++	unsigned r1 = dtlb ?
++		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
+ 	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+ 	unsigned pte = get_pte_for_vaddr(vpn);
+ 	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+@@ -231,8 +233,6 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
+ 	}
+ 
+ 	if (tlb_asid == mm_asid) {
+-		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
+-			read_itlb_translation(tlbidx);
+ 		if ((pte ^ r1) & PAGE_MASK) {
+ 			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+ 					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
+diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
+index 35dd06479867..91d620994123 100644
+--- a/drivers/dma-buf/sync_file.c
++++ b/drivers/dma-buf/sync_file.c
+@@ -230,7 +230,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
+ 	a_fences = get_fences(a, &a_num_fences);
+ 	b_fences = get_fences(b, &b_num_fences);
+ 	if (a_num_fences > INT_MAX - b_num_fences)
+-		return NULL;
++		goto err;
+ 
+ 	num_fences = a_num_fences + b_num_fences;
+ 
+diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
+index f7945bae3b4a..e1760140e3c2 100644
+--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
++++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
+@@ -75,6 +75,25 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
+ 	},
+ };
+ 
++static const struct meson_cvbs_mode *
++meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
++{
++	int i;
++
++	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
++		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
++
++		if (drm_mode_match(req_mode, &meson_mode->mode,
++				   DRM_MODE_MATCH_TIMINGS |
++				   DRM_MODE_MATCH_CLOCK |
++				   DRM_MODE_MATCH_FLAGS |
++				   DRM_MODE_MATCH_3D_FLAGS))
++			return meson_mode;
++	}
++
++	return NULL;
++}
++
+ /* Connector */
+ 
+ static void meson_cvbs_connector_destroy(struct drm_connector *connector)
+@@ -147,14 +166,8 @@ static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder,
+ 					struct drm_crtc_state *crtc_state,
+ 					struct drm_connector_state *conn_state)
+ {
+-	int i;
+-
+-	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+-		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+-
+-		if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode))
+-			return 0;
+-	}
++	if (meson_cvbs_get_mode(&crtc_state->mode))
++		return 0;
+ 
+ 	return -EINVAL;
+ }
+@@ -192,24 +205,17 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
+ 				   struct drm_display_mode *mode,
+ 				   struct drm_display_mode *adjusted_mode)
+ {
++	const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode);
+ 	struct meson_venc_cvbs *meson_venc_cvbs =
+ 					encoder_to_meson_venc_cvbs(encoder);
+ 	struct meson_drm *priv = meson_venc_cvbs->priv;
+-	int i;
+ 
+-	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+-		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
++	if (meson_mode) {
++		meson_venci_cvbs_mode_set(priv, meson_mode->enci);
+ 
+-		if (drm_mode_equal(mode, &meson_mode->mode)) {
+-			meson_venci_cvbs_mode_set(priv,
+-						  meson_mode->enci);
+-
+-			/* Setup 27MHz vclk2 for ENCI and VDAC */
+-			meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
+-					 MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+-					 MESON_VCLK_CVBS, true);
+-			break;
+-		}
++		/* Setup 27MHz vclk2 for ENCI and VDAC */
++		meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
++				 MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index 7d39ed63e5be..b24401f21e93 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -1820,8 +1820,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
+ 			track->textures[i].use_pitch = 1;
+ 		} else {
+ 			track->textures[i].use_pitch = 0;
+-			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+-			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
++			track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
++			track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
+ 		}
+ 		if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+ 			track->textures[i].tex_coord_type = 2;
+diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
+index c22321cc5a41..c2b506c707a2 100644
+--- a/drivers/gpu/drm/radeon/r200.c
++++ b/drivers/gpu/drm/radeon/r200.c
+@@ -476,8 +476,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
+ 			track->textures[i].use_pitch = 1;
+ 		} else {
+ 			track->textures[i].use_pitch = 0;
+-			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+-			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
++			track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
++			track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
+ 		}
+ 		if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
+ 			track->textures[i].lookup_disable = true;
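+
Both Radeon hunks fix the same operator-ordering bug: the TXFORMAT
width/height masks are defined with the field bits already in register
position, so the value has to be masked first and shifted second. A
standalone sketch with a made-up field layout showing how the two
orderings diverge:

    #include <stdio.h>

    /* Hypothetical 4-bit field at bits 11:8, with the mask defined
     * in-place, the way the Radeon register headers define theirs. */
    #define FIELD_SHIFT 8
    #define FIELD_MASK  (0xf << FIELD_SHIFT)

    int main(void)
    {
            unsigned int reg = 0x0500;      /* field value is 5 */

            /* buggy: shift first, then apply the in-place mask */
            unsigned int wrong = (reg >> FIELD_SHIFT) & FIELD_MASK;
            /* fixed: isolate the field, then shift it down */
            unsigned int right = (reg & FIELD_MASK) >> FIELD_SHIFT;

            printf("wrong=%u right=%u\n", wrong, right);  /* 0 vs 5 */
            return 0;
    }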
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 481e54ded9dc..def4f6ec290b 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -609,45 +609,10 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
+ 	return pgpath;
+ }
+ 
+-static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
+-{
+-	struct pgpath *pgpath;
+-	unsigned long flags;
+-
+-	/* Do we need to select a new pgpath? */
+-	/*
+-	 * FIXME: currently only switching path if no path (due to failure, etc)
+-	 * - which negates the point of using a path selector
+-	 */
+-	pgpath = READ_ONCE(m->current_pgpath);
+-	if (!pgpath)
+-		pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
+-
+-	if (!pgpath) {
+-		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+-			/* Queue for the daemon to resubmit */
+-			spin_lock_irqsave(&m->lock, flags);
+-			bio_list_add(&m->queued_bios, bio);
+-			spin_unlock_irqrestore(&m->lock, flags);
+-			queue_work(kmultipathd, &m->process_queued_bios);
+-
+-			return ERR_PTR(-EAGAIN);
+-		}
+-		return NULL;
+-	}
+-
+-	return pgpath;
+-}
+-
+ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
+ 			       struct dm_mpath_io *mpio)
+ {
+-	struct pgpath *pgpath;
+-
+-	if (!m->hw_handler_name)
+-		pgpath = __map_bio_fast(m, bio);
+-	else
+-		pgpath = __map_bio(m, bio);
++	struct pgpath *pgpath = __map_bio(m, bio);
+ 
+ 	if (IS_ERR(pgpath))
+ 		return DM_MAPIO_SUBMITTED;
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index 21ea537bd55e..eff04fa23dfa 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -203,7 +203,13 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ 	struct btree_node *right = r->n;
+ 	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ 	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+-	unsigned threshold = 2 * merge_threshold(left) + 1;
++	/*
++	 * Ensure the number of entries in each child will be greater
++	 * than or equal to (max_entries / 3 + 1), so no matter which
++	 * child is used for removal, the number will still be not
++	 * less than (max_entries / 3).
++	 */
++	unsigned int threshold = 2 * (merge_threshold(left) + 1);
+ 
+ 	if (nr_left + nr_right < threshold) {
+ 		/*
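+
The new comment becomes concrete with numbers. merge_threshold() in this
file is max_entries / 3, so with an assumed node capacity of 126 entries
the one-entry change in the threshold is exactly what keeps a
redistributed child above the merge limit:

    #include <stdio.h>

    int main(void)
    {
            unsigned max_entries = 126;     /* assumed, for illustration */
            unsigned merge_threshold = max_entries / 3;            /* 42 */
            unsigned old_threshold = 2 * merge_threshold + 1;      /* 85 */
            unsigned new_threshold = 2 * (merge_threshold + 1);    /* 86 */

            /* At nr_left + nr_right == 85 the old code redistributed,
             * leaving one child with exactly max_entries / 3 entries;
             * the new threshold merges instead, so a redistribution
             * always leaves each child with max_entries / 3 + 1 or
             * more. */
            printf("old=%u new=%u\n", old_threshold, new_threshold);
            return 0;
    }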
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 527ab15c421f..60eac66dc9f0 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -409,38 +409,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
+ 	return 0;
+ }
+ 
+-static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
+-				       u32 retries_max)
+-{
+-	int err;
+-	u32 retry_count = 0;
+-
+-	if (!status || !retries_max)
+-		return -EINVAL;
+-
+-	do {
+-		err = __mmc_send_status(card, status, 5);
+-		if (err)
+-			break;
+-
+-		if (!R1_STATUS(*status) &&
+-				(R1_CURRENT_STATE(*status) != R1_STATE_PRG))
+-			break; /* RPMB programming operation complete */
+-
+-		/*
+-		 * Rechedule to give the MMC device a chance to continue
+-		 * processing the previous command without being polled too
+-		 * frequently.
+-		 */
+-		usleep_range(1000, 5000);
+-	} while (++retry_count < retries_max);
+-
+-	if (retry_count == retries_max)
+-		err = -EPERM;
+-
+-	return err;
+-}
+-
+ static int ioctl_do_sanitize(struct mmc_card *card)
+ {
+ 	int err;
+@@ -469,6 +437,58 @@ out:
+ 	return err;
+ }
+ 
++static inline bool mmc_blk_in_tran_state(u32 status)
++{
++	/*
++	 * Some cards mishandle the status bits, so make sure to check both the
++	 * busy indication and the card state.
++	 */
++	return status & R1_READY_FOR_DATA &&
++	       (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
++}
++
++static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
++			    u32 *resp_errs)
++{
++	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
++	int err = 0;
++	u32 status;
++
++	do {
++		bool done = time_after(jiffies, timeout);
++
++		err = __mmc_send_status(card, &status, 5);
++		if (err) {
++			dev_err(mmc_dev(card->host),
++				"error %d requesting status\n", err);
++			return err;
++		}
++
++		/* Accumulate any response error bits seen */
++		if (resp_errs)
++			*resp_errs |= status;
++
++		/*
++		 * Timeout if the device never becomes ready for data and never
++		 * leaves the program state.
++		 */
++		if (done) {
++			dev_err(mmc_dev(card->host),
++				"Card stuck in wrong state! %s status: %#x\n",
++				 __func__, status);
++			return -ETIMEDOUT;
++		}
++
++		/*
++		 * Some cards mishandle the status bits,
++		 * so make sure to check both the busy
++		 * indication and the card state.
++		 */
++	} while (!mmc_blk_in_tran_state(status));
++
++	return err;
++}
++
+ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 			       struct mmc_blk_ioc_data *idata)
+ {
+@@ -478,7 +498,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 	struct scatterlist sg;
+ 	int err;
+ 	unsigned int target_part;
+-	u32 status = 0;
+ 
+ 	if (!card || !md || !idata)
+ 		return -EINVAL;
+@@ -612,16 +631,12 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+ 
+ 	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
+ 
+-	if (idata->rpmb) {
++	if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
+ 		/*
+-		 * Ensure RPMB command has completed by polling CMD13
++		 * Ensure RPMB/R1B command has completed by polling CMD13
+ 		 * "Send Status".
+ 		 */
+-		err = ioctl_rpmb_card_status_poll(card, &status, 5);
+-		if (err)
+-			dev_err(mmc_dev(card->host),
+-					"%s: Card Status=0x%08X, error %d\n",
+-					__func__, status, err);
++		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
+ 	}
+ 
+ 	return err;
+@@ -971,58 +986,6 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
+ 	return ms;
+ }
+ 
+-static inline bool mmc_blk_in_tran_state(u32 status)
+-{
+-	/*
+-	 * Some cards mishandle the status bits, so make sure to check both the
+-	 * busy indication and the card state.
+-	 */
+-	return status & R1_READY_FOR_DATA &&
+-	       (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+-}
+-
+-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+-			    struct request *req, u32 *resp_errs)
+-{
+-	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+-	int err = 0;
+-	u32 status;
+-
+-	do {
+-		bool done = time_after(jiffies, timeout);
+-
+-		err = __mmc_send_status(card, &status, 5);
+-		if (err) {
+-			pr_err("%s: error %d requesting status\n",
+-			       req->rq_disk->disk_name, err);
+-			return err;
+-		}
+-
+-		/* Accumulate any response error bits seen */
+-		if (resp_errs)
+-			*resp_errs |= status;
+-
+-		/*
+-		 * Timeout if the device never becomes ready for data and never
+-		 * leaves the program state.
+-		 */
+-		if (done) {
+-			pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
+-				mmc_hostname(card->host),
+-				req->rq_disk->disk_name, __func__, status);
+-			return -ETIMEDOUT;
+-		}
+-
+-		/*
+-		 * Some cards mishandle the status bits,
+-		 * so make sure to check both the busy
+-		 * indication and the card state.
+-		 */
+-	} while (!mmc_blk_in_tran_state(status));
+-
+-	return err;
+-}
+-
+ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+ 			 int type)
+ {
+@@ -1678,7 +1641,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
+ 
+ 	mmc_blk_send_stop(card, timeout);
+ 
+-	err = card_busy_detect(card, timeout, req, NULL);
++	err = card_busy_detect(card, timeout, NULL);
+ 
+ 	mmc_retune_release(card->host);
+ 
+@@ -1902,7 +1865,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
+ 	if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+ 		return 0;
+ 
+-	err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
++	err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
+ 
+ 	/*
+ 	 * Do not assume data transferred correctly if there are any error bits
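+
The card_busy_detect() helper that this hunk consolidates follows a
common poll-with-deadline shape: sample the deadline before the
(possibly slow) status read so the read's own latency cannot stretch the
window, and report a timeout only while still busy. A minimal userspace
sketch of the same pattern, where poll_status() and device_ready() are
hypothetical stand-ins for __mmc_send_status() and
mmc_blk_in_tran_state():

    #include <stdbool.h>
    #include <time.h>

    bool poll_status(unsigned *status);     /* stand-in, not a real API */
    bool device_ready(unsigned status);     /* stand-in, not a real API */

    static long now_ms(void)
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    int busy_wait(unsigned timeout_ms)
    {
            long deadline = now_ms() + timeout_ms;
            unsigned status;

            for (;;) {
                    /* sample the deadline before the poll, as
                     * card_busy_detect() samples time_after() before
                     * __mmc_send_status() */
                    bool late = now_ms() > deadline;

                    if (!poll_status(&status))
                            return -1;      /* I/O error on the read */
                    if (late)
                            return -2;      /* stuck busy past deadline */
                    if (device_ready(status))
                            return 0;
            }
    }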
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index e337da6ba2a4..8ae28f82aafd 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -1118,7 +1118,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
+ 				       phy_interface_mode(lmac->lmac_type)))
+ 			return -ENODEV;
+ 
+-		phy_start_aneg(lmac->phydev);
++		phy_start(lmac->phydev);
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+index 4ab0d030b544..28d56e44ed9d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+@@ -155,8 +155,11 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
+ 		}
+ 
+ 		if (port_buffer->buffer[i].size <
+-		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
++		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
++			pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
++			       i, port_buffer->buffer[i].size);
+ 			return -ENOMEM;
++		}
+ 
+ 		port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
+ 		port_buffer->buffer[i].xon  =
+@@ -232,6 +235,26 @@ static int update_buffer_lossy(unsigned int max_mtu,
+ 	return 0;
+ }
+ 
++static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en)
++{
++	u32 g_rx_pause, g_tx_pause;
++	int err;
++
++	err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause);
++	if (err)
++		return err;
++
++	/* If global pause enabled, set all active buffers to lossless.
++	 * Otherwise, check PFC setting.
++	 */
++	if (g_rx_pause || g_tx_pause)
++		*pfc_en = 0xff;
++	else
++		err = mlx5_query_port_pfc(mdev, pfc_en, NULL);
++
++	return err;
++}
++
+ #define MINIMUM_MAX_MTU 9216
+ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 				    u32 change, unsigned int mtu,
+@@ -277,7 +300,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ 
+ 	if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
+ 		update_prio2buffer = true;
+-		err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL);
++		err = fill_pfc_en(priv->mdev, &curr_pfc_en);
+ 		if (err)
+ 			return err;
+ 
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 8417d4c17844..0b7a3eb06a65 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -954,8 +954,8 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
+ {
+ 	struct cpsw_common *cpsw = dev_id;
+ 
+-	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
+ 	writel(0, &cpsw->wr_regs->rx_en);
++	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
+ 
+ 	if (cpsw->quirk_irq) {
+ 		disable_irq_nosync(cpsw->irqs_table[0]);
+diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
+index 811cf83f956d..ef6071807072 100644
+--- a/drivers/pci/hotplug/pciehp.h
++++ b/drivers/pci/hotplug/pciehp.h
+@@ -106,6 +106,7 @@ struct slot {
+  *	that has not yet been cleared by the user
+  * @pending_events: used by the IRQ handler to save events retrieved from the
+  *	Slot Status register for later consumption by the IRQ thread
++ * @ist_running: flag to keep user request waiting while IRQ thread is running
+  * @request_result: result of last user request submitted to the IRQ thread
+  * @requester: wait queue to wake up on completion of user request,
+  *	used for synchronous slot enable/disable request via sysfs
+@@ -125,6 +126,7 @@ struct controller {
+ 	unsigned int notification_enabled:1;
+ 	unsigned int power_fault_detected;
+ 	atomic_t pending_events;
++	unsigned int ist_running;
+ 	int request_result;
+ 	wait_queue_head_t requester;
+ };
+diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
+index 9c397fa8704c..c71964e29b01 100644
+--- a/drivers/pci/hotplug/pciehp_ctrl.c
++++ b/drivers/pci/hotplug/pciehp_ctrl.c
+@@ -383,7 +383,8 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
+ 		ctrl->request_result = -ENODEV;
+ 		pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
+ 		wait_event(ctrl->requester,
+-			   !atomic_read(&ctrl->pending_events));
++			   !atomic_read(&ctrl->pending_events) &&
++			   !ctrl->ist_running);
+ 		return ctrl->request_result;
+ 	case POWERON_STATE:
+ 		ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
+@@ -416,7 +417,8 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
+ 		mutex_unlock(&p_slot->lock);
+ 		pciehp_request(ctrl, DISABLE_SLOT);
+ 		wait_event(ctrl->requester,
+-			   !atomic_read(&ctrl->pending_events));
++			   !atomic_read(&ctrl->pending_events) &&
++			   !ctrl->ist_running);
+ 		return ctrl->request_result;
+ 	case POWEROFF_STATE:
+ 		ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index a938abdb41ce..c3e3f5358b3b 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -620,6 +620,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
+ 	irqreturn_t ret;
+ 	u32 events;
+ 
++	ctrl->ist_running = true;
+ 	pci_config_pm_runtime_get(pdev);
+ 
+ 	/* rerun pciehp_isr() if the port was inaccessible on interrupt */
+@@ -666,6 +667,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
+ 	up_read(&ctrl->reset_lock);
+ 
+ 	pci_config_pm_runtime_put(pdev);
++	ctrl->ist_running = false;
+ 	wake_up(&ctrl->requester);
+ 	return IRQ_HANDLED;
+ }
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index 971dddf62374..23a363fd4c59 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -211,7 +211,7 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
+ 		return 0;
+ 
+ 	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+-	if (flag)
++	if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
+ 		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ 	writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ 
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index ec317bcb1bca..bc1ff41ce3d3 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -1042,17 +1042,22 @@ static int pci_pm_thaw_noirq(struct device *dev)
+ 			return error;
+ 	}
+ 
+-	if (pci_has_legacy_pm_support(pci_dev))
+-		return pci_legacy_resume_early(dev);
+-
+ 	/*
+-	 * pci_restore_state() requires the device to be in D0 (because of MSI
+-	 * restoration among other things), so force it into D0 in case the
+-	 * driver's "freeze" callbacks put it into a low-power state directly.
++	 * Both the legacy ->resume_early() and the new pm->thaw_noirq()
++	 * callbacks assume the device has been returned to D0 and its
++	 * config state has been restored.
++	 *
++	 * In addition, pci_restore_state() restores MSI-X state in MMIO
++	 * space, which requires the device to be in D0, so return it to D0
++	 * in case the driver's "freeze" callbacks put it into a low-power
++	 * state.
+ 	 */
+ 	pci_set_power_state(pci_dev, PCI_D0);
+ 	pci_restore_state(pci_dev);
+ 
++	if (pci_has_legacy_pm_support(pci_dev))
++		return pci_legacy_resume_early(dev);
++
+ 	if (drv && drv->pm && drv->pm->thaw_noirq)
+ 		error = drv->pm->thaw_noirq(dev);
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 64933994f772..20a57a48ae1e 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4219,15 +4219,21 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
+ 
+ static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
+ {
++	if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
++		return false;
++
++	switch (dev->device) {
+ 	/*
+-	 * Effectively selects all downstream ports for whole ThunderX 1
+-	 * family by 0xf800 mask (which represents 8 SoCs), while the lower
+-	 * bits of device ID are used to indicate which subdevice is used
+-	 * within the SoC.
++	 * Effectively selects all downstream ports for the whole ThunderX1
++	 * (which represents 8 SoCs).
+ 	 */
+-	return (pci_is_pcie(dev) &&
+-		(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
+-		((dev->device & 0xf800) == 0xa000));
++	case 0xa000 ... 0xa7ff: /* ThunderX1 */
++	case 0xaf84:  /* ThunderX2 */
++	case 0xb884:  /* ThunderX3 */
++		return true;
++	default:
++		return false;
++	}
+ }
+ 
+ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
+@@ -4576,7 +4582,7 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
+ #define INTEL_BSPR_REG_BPPD  (1 << 9)
+ 
+ /* Upstream Peer Decode Configuration Register */
+-#define INTEL_UPDCR_REG 0x1114
++#define INTEL_UPDCR_REG 0x1014
+ /* 5:0 Peer Decode Enable bits */
+ #define INTEL_UPDCR_REG_MASK 0x3f
+ 
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index e2ce4e638258..25c394a7077b 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -241,10 +241,31 @@ static void qcom_glink_channel_release(struct kref *ref)
+ {
+ 	struct glink_channel *channel = container_of(ref, struct glink_channel,
+ 						     refcount);
++	struct glink_core_rx_intent *intent;
++	struct glink_core_rx_intent *tmp;
+ 	unsigned long flags;
++	int iid;
++
++	/* cancel pending rx_done work */
++	cancel_work_sync(&channel->intent_work);
+ 
+ 	spin_lock_irqsave(&channel->intent_lock, flags);
++	/* Free all non-reuse intents pending rx_done work */
++	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
++		if (!intent->reuse) {
++			kfree(intent->data);
++			kfree(intent);
++		}
++	}
++
++	idr_for_each_entry(&channel->liids, tmp, iid) {
++		kfree(tmp->data);
++		kfree(tmp);
++	}
+ 	idr_destroy(&channel->liids);
++
++	idr_for_each_entry(&channel->riids, tmp, iid)
++		kfree(tmp);
+ 	idr_destroy(&channel->riids);
+ 	spin_unlock_irqrestore(&channel->intent_lock, flags);
+ 
+@@ -1097,13 +1118,12 @@ static int qcom_glink_create_remote(struct qcom_glink *glink,
+ close_link:
+ 	/*
+ 	 * Send a close request to "undo" our open-ack. The close-ack will
+-	 * release the last reference.
++	 * release the qcom_glink_send_open_req() reference; the last
++	 * reference will be released after receiving remote_close or at
++	 * transport unregister via qcom_glink_native_remove().
+ 	 */
+ 	qcom_glink_send_close_req(glink, channel);
+ 
+-	/* Release qcom_glink_send_open_req() reference */
+-	kref_put(&channel->refcount, qcom_glink_channel_release);
+-
+ 	return ret;
+ }
+ 
+@@ -1418,15 +1438,13 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
+ 
+ 		ret = rpmsg_register_device(rpdev);
+ 		if (ret)
+-			goto free_rpdev;
++			goto rcid_remove;
+ 
+ 		channel->rpdev = rpdev;
+ 	}
+ 
+ 	return 0;
+ 
+-free_rpdev:
+-	kfree(rpdev);
+ rcid_remove:
+ 	spin_lock_irqsave(&glink->idr_lock, flags);
+ 	idr_remove(&glink->rcids, channel->rcid);
+@@ -1547,6 +1565,18 @@ static void qcom_glink_work(struct work_struct *work)
+ 	}
+ }
+ 
++static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
++{
++	struct glink_defer_cmd *dcmd;
++	struct glink_defer_cmd *tmp;
++
++	/* cancel any pending deferred rx_work */
++	cancel_work_sync(&glink->rx_work);
++
++	list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
++		kfree(dcmd);
++}
++
+ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
+ 					   unsigned long features,
+ 					   struct qcom_glink_pipe *rx,
+@@ -1622,23 +1652,24 @@ void qcom_glink_native_remove(struct qcom_glink *glink)
+ 	struct glink_channel *channel;
+ 	int cid;
+ 	int ret;
+-	unsigned long flags;
+ 
+ 	disable_irq(glink->irq);
+-	cancel_work_sync(&glink->rx_work);
++	qcom_glink_cancel_rx_work(glink);
+ 
+ 	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
+ 	if (ret)
+ 		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
+ 
+-	spin_lock_irqsave(&glink->idr_lock, flags);
+ 	/* Release any defunct local channels, waiting for close-ack */
+ 	idr_for_each_entry(&glink->lcids, channel, cid)
+ 		kref_put(&channel->refcount, qcom_glink_channel_release);
+ 
++	/* Release any defunct local channels, waiting for close-req */
++	idr_for_each_entry(&glink->rcids, channel, cid)
++		kref_put(&channel->refcount, qcom_glink_channel_release);
++
+ 	idr_destroy(&glink->lcids);
+ 	idr_destroy(&glink->rcids);
+-	spin_unlock_irqrestore(&glink->idr_lock, flags);
+ 	mbox_free_channel(glink->mbox_chan);
+ }
+ EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
+diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c
+index 7b6544348a3e..bbd5759cc38e 100644
+--- a/drivers/rpmsg/qcom_glink_smem.c
++++ b/drivers/rpmsg/qcom_glink_smem.c
+@@ -105,7 +105,7 @@ static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
+ 	tail = le32_to_cpu(*pipe->tail);
+ 
+ 	tail += count;
+-	if (tail > pipe->native.length)
++	if (tail >= pipe->native.length)
+ 		tail -= pipe->native.length;
+ 
+ 	*pipe->tail = cpu_to_le32(tail);
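+
The one-character change above ('>' becomes '>=') fixes an off-by-one in
circular-index arithmetic: for a FIFO of length N the valid indices are
0..N-1, so a tail that lands exactly on N must wrap back to 0. A tiny
model of the fixed arithmetic:

    #include <assert.h>

    /* Minimal model of glink_smem_rx_advance()'s index math. */
    static unsigned advance(unsigned tail, unsigned count, unsigned length)
    {
            tail += count;
            if (tail >= length)     /* '>' left tail == length unwrapped */
                    tail -= length;
            return tail;
    }

    int main(void)
    {
            /* with the old '>' test, advancing 4 from 12 in a 16-byte
             * pipe produced tail == 16, one past the last valid index */
            assert(advance(12, 4, 16) == 0);
            assert(advance(12, 3, 16) == 15);
            return 0;
    }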
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 4ad61cfa69c0..7a05c7271766 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1983,7 +1983,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+ 
+ 	ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
+ 
+-	spin_lock(&session->frwd_lock);
++	spin_lock_bh(&session->frwd_lock);
+ 	task = (struct iscsi_task *)sc->SCp.ptr;
+ 	if (!task) {
+ 		/*
+@@ -2110,7 +2110,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+ done:
+ 	if (task)
+ 		task->last_timeout = jiffies;
+-	spin_unlock(&session->frwd_lock);
++	spin_unlock_bh(&session->frwd_lock);
+ 	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
+ 		     "timer reset" : "shutdown or nh");
+ 	return rc;
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index d734dcf517b9..4512aaa16f78 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -966,6 +966,7 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 
+ 	e->u.fcport.fcport = fcport;
+ 	fcport->flags |= FCF_ASYNC_ACTIVE;
++	fcport->disc_state = DSC_LOGIN_PEND;
+ 	return qla2x00_post_work(vha, e);
+ }
+ 
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 02843c16f9c7..8f180bf7561a 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -868,6 +868,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 			status |= USB_PORT_STAT_C_BH_RESET << 16;
+ 		if ((raw_port_status & PORT_CEC))
+ 			status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
++
++		/* USB3 remote wake resume signaling completed */
++		if (bus_state->port_remote_wakeup & (1 << wIndex) &&
++		    (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME &&
++		    (raw_port_status & PORT_PLS_MASK) != XDEV_RECOVERY) {
++			bus_state->port_remote_wakeup &= ~(1 << wIndex);
++			usb_hcd_end_port_resume(&hcd->self, wIndex);
++		}
+ 	}
+ 
+ 	if (hcd->speed < HCD_USB3) {
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index b03974958a28..98b67605d3cf 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1609,7 +1609,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
+ 		if (slot_id && xhci->devs[slot_id])
+ 			xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
+-		bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
+ 	}
+ 
+ 	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
+@@ -1630,6 +1629,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 			bus_state->port_remote_wakeup |= 1 << hcd_portnum;
+ 			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+ 			xhci_set_link_state(xhci, port, XDEV_U0);
++			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
+ 			/* Need to wait until the next link state change
+ 			 * indicates the device is actually in U0.
+ 			 */
+@@ -1669,7 +1669,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 		if (slot_id && xhci->devs[slot_id])
+ 			xhci_ring_device(xhci, slot_id);
+ 		if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
+-			bus_state->port_remote_wakeup &= ~(1 << hcd_portnum);
+ 			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+ 			usb_wakeup_notification(hcd->self.root_hub,
+ 					hcd_portnum + 1);
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index 1c46045b0e7f..94594dc63c41 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -297,8 +297,8 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
+ 	irq = pci_irq_vector(pdev, vector);
+ 
+ 	if (vdev->ctx[vector].trigger) {
+-		free_irq(irq, vdev->ctx[vector].trigger);
+ 		irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
++		free_irq(irq, vdev->ctx[vector].trigger);
+ 		kfree(vdev->ctx[vector].name);
+ 		eventfd_ctx_put(vdev->ctx[vector].trigger);
+ 		vdev->ctx[vector].trigger = NULL;
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 06576797cf31..a85aeff90df8 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -210,6 +210,11 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ 		if (!server->rdma)
+ 			goto skip_rdma;
+ 
++		if (!server->smbd_conn) {
++			seq_printf(m, "\nSMBDirect transport not available");
++			goto skip_rdma;
++		}
++
+ 		seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
+ 			"transport status: %x",
+ 			server->smbd_conn->protocol,
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 455d0282363f..ad93b063f866 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -726,6 +726,13 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ 	if (backup_cred(cifs_sb))
+ 		create_options |= CREATE_OPEN_BACKUP_INTENT;
+ 
++	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
++	if (cfile->f_flags & O_SYNC)
++		create_options |= CREATE_WRITE_THROUGH;
++
++	if (cfile->f_flags & O_DIRECT)
++		create_options |= CREATE_NO_BUFFER;
++
+ 	if (server->ops->get_lease_key)
+ 		server->ops->get_lease_key(inode, &cfile->fid);
+ 
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 449d1584ff72..766974fe637a 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -743,36 +743,67 @@ smb2_cancelled_close_fid(struct work_struct *work)
+ 	kfree(cancelled);
+ }
+ 
++/* Caller should already have an extra reference to @tcon */
++static int
++__smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
++			      __u64 volatile_fid)
++{
++	struct close_cancelled_open *cancelled;
++
++	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
++	if (!cancelled)
++		return -ENOMEM;
++
++	cancelled->fid.persistent_fid = persistent_fid;
++	cancelled->fid.volatile_fid = volatile_fid;
++	cancelled->tcon = tcon;
++	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
++	WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);
++
++	return 0;
++}
++
++int
++smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
++			    __u64 volatile_fid)
++{
++	int rc;
++
++	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
++	spin_lock(&cifs_tcp_ses_lock);
++	tcon->tc_count++;
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	rc = __smb2_handle_cancelled_close(tcon, persistent_fid, volatile_fid);
++	if (rc)
++		cifs_put_tcon(tcon);
++
++	return rc;
++}
++
+ int
+ smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+ {
+ 	struct smb2_sync_hdr *sync_hdr = (struct smb2_sync_hdr *)buffer;
+ 	struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+ 	struct cifs_tcon *tcon;
+-	struct close_cancelled_open *cancelled;
++	int rc;
+ 
+ 	if (sync_hdr->Command != SMB2_CREATE ||
+ 	    sync_hdr->Status != STATUS_SUCCESS)
+ 		return 0;
+ 
+-	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+-	if (!cancelled)
+-		return -ENOMEM;
+-
+ 	tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
+ 				  sync_hdr->TreeId);
+-	if (!tcon) {
+-		kfree(cancelled);
++	if (!tcon)
+ 		return -ENOENT;
+-	}
+ 
+-	cancelled->fid.persistent_fid = rsp->PersistentFileId;
+-	cancelled->fid.volatile_fid = rsp->VolatileFileId;
+-	cancelled->tcon = tcon;
+-	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+-	queue_work(cifsiod_wq, &cancelled->work);
++	rc = __smb2_handle_cancelled_close(tcon, rsp->PersistentFileId,
++					   rsp->VolatileFileId);
++	if (rc)
++		cifs_put_tcon(tcon);
+ 
+-	return 0;
++	return rc;
+ }
+ 
+ /**
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 4563699bbe6c..43f29621e51f 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2629,7 +2629,21 @@ int
+ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+ 	   u64 persistent_fid, u64 volatile_fid)
+ {
+-	return SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
++	int rc;
++	int tmp_rc;
++
++	rc = SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
++
++	/* retry close in a worker thread if this one is interrupted */
++	if (rc == -EINTR) {
++		tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
++						     volatile_fid);
++		if (tmp_rc)
++			cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
++				 persistent_fid, tmp_rc);
++	}
++
++	return rc;
+ }
+ 
+ int
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index b4076577eeb7..c66d7da80330 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -204,6 +204,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+ extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
+ 			     const u64 persistent_fid, const u64 volatile_fid,
+ 			     const __u8 oplock_level);
++extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon,
++				       __u64 persistent_fid,
++				       __u64 volatile_fid);
+ extern int smb2_handle_cancelled_mid(char *buffer,
+ 					struct TCP_Server_Info *server);
+ void smb2_cancelled_close_fid(struct work_struct *work);
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 1959931e14c1..784628ec4bc4 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -1164,7 +1164,7 @@ static int smbd_post_send_data(
+ 
+ 	if (n_vec > SMBDIRECT_MAX_SGE) {
+ 		cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
+-		return -ENOMEM;
++		return -EINVAL;
+ 	}
+ 
+ 	sg_init_table(sgl, n_vec);
+@@ -1491,6 +1491,7 @@ void smbd_destroy(struct smbd_connection *info)
+ 		info->transport_status == SMBD_DESTROYED);
+ 
+ 	destroy_workqueue(info->workqueue);
++	log_rdma_event(INFO,  "rdma session destroyed\n");
+ 	kfree(info);
+ }
+ 
+@@ -1528,8 +1529,9 @@ create_conn:
+ 	log_rdma_event(INFO, "creating rdma session\n");
+ 	server->smbd_conn = smbd_get_connection(
+ 		server, (struct sockaddr *) &server->dstaddr);
+-	log_rdma_event(INFO, "created rdma session info=%p\n",
+-		server->smbd_conn);
++
++	if (server->smbd_conn)
++		cifs_dbg(VFS, "RDMA transport re-established\n");
+ 
+ 	return server->smbd_conn ? 0 : -ENOENT;
+ }
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index fe77f41bff9f..0c4df56c825a 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -286,8 +286,11 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 	int val = 1;
+ 	__be32 rfc1002_marker;
+ 
+-	if (cifs_rdma_enabled(server) && server->smbd_conn) {
+-		rc = smbd_send(server, num_rqst, rqst);
++	if (cifs_rdma_enabled(server)) {
++		/* return -EAGAIN when connecting or reconnecting */
++		rc = -EAGAIN;
++		if (server->smbd_conn)
++			rc = smbd_send(server, num_rqst, rqst);
+ 		goto smbd_done;
+ 	}
+ 	if (ssocket == NULL)
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 5ada5fd9652d..9dfa0ae173ac 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1834,6 +1834,11 @@ struct net_device {
+ 	unsigned char		if_port;
+ 	unsigned char		dma;
+ 
++	/* Note : dev->mtu is often read without holding a lock.
++	 * Writers usually hold RTNL.
++	 * It is recommended to use READ_ONCE() to annotate the reads,
++	 * and to use WRITE_ONCE() to annotate the writes.
++	 */
+ 	unsigned int		mtu;
+ 	unsigned int		min_mtu;
+ 	unsigned int		max_mtu;
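+
The comment added to struct net_device is paired with the
__dev_set_mtu() change later in this patch, which converts the store to
WRITE_ONCE(). A minimal sketch of the annotation pattern the comment
recommends; the struct and function names here are illustrative:

    struct example_dev {
            unsigned int mtu;   /* written under RTNL, read locklessly */
    };

    /* writer: holds the lock, but still annotates the store so the
     * compiler can neither tear it nor reorder it against readers */
    static void example_set_mtu(struct example_dev *dev, unsigned int mtu)
    {
            WRITE_ONCE(dev->mtu, mtu);
    }

    /* reader: no lock held; the annotation guarantees one untorn load */
    static unsigned int example_get_mtu(const struct example_dev *dev)
    {
            return READ_ONCE(dev->mtu);
    }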
+diff --git a/include/linux/time.h b/include/linux/time.h
+index 27d83fd2ae61..5f3e49978837 100644
+--- a/include/linux/time.h
++++ b/include/linux/time.h
+@@ -96,4 +96,17 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its)
+  */
+ #define time_after32(a, b)	((s32)((u32)(b) - (u32)(a)) < 0)
+ #define time_before32(b, a)	time_after32(a, b)
++
++/**
++ * time_between32 - check if a 32-bit timestamp is within a given time range
++ * @t:	the time which may be within [l,h]
++ * @l:	the lower bound of the range
++ * @h:	the higher bound of the range
++ *
++ * time_between32(t, l, h) returns true if @l <= @t <= @h. All operands are
++ * treated as 32-bit integers.
++ *
++ * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)).
++ */
++#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))
+ #endif
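+
Because every operand is reduced to u32, time_between32() keeps working
when the window wraps around zero, which is the jiffies case the tcp.h
hunk below relies on. A small userspace stand-in for the macro,
exercising both the plain and the wrapped case:

    #include <assert.h>
    #include <stdint.h>

    #define time_between32(t, l, h) \
            ((uint32_t)(h) - (uint32_t)(l) >= (uint32_t)(t) - (uint32_t)(l))

    int main(void)
    {
            /* plain case: 5 lies in [3, 9], 10 does not */
            assert(time_between32(5, 3, 9));
            assert(!time_between32(10, 3, 9));

            /* wrapped case: [0xfffffff0, 0x10] crosses zero and still
             * classifies 0x5 as inside and 0x20 as outside */
            assert(time_between32(0x5, 0xfffffff0u, 0x10));
            assert(!time_between32(0x20, 0xfffffff0u, 0x10));
            return 0;
    }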
+diff --git a/include/net/ip.h b/include/net/ip.h
+index cfc3dd5ff085..5b29f357862d 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -692,4 +692,9 @@ int ip_misc_proc_init(void);
+ int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
+ 				struct netlink_ext_ack *extack);
+ 
++static inline bool inetdev_valid_mtu(unsigned int mtu)
++{
++	return likely(mtu >= IPV4_MIN_MTU);
++}
++
+ #endif	/* _IP_H */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 3f4223a550d9..ac4ffe8013d8 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -485,15 +485,16 @@ static inline void tcp_synq_overflow(const struct sock *sk)
+ 		reuse = rcu_dereference(sk->sk_reuseport_cb);
+ 		if (likely(reuse)) {
+ 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+-			if (time_after32(now, last_overflow + HZ))
++			if (!time_between32(now, last_overflow,
++					    last_overflow + HZ))
+ 				WRITE_ONCE(reuse->synq_overflow_ts, now);
+ 			return;
+ 		}
+ 	}
+ 
+-	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+-	if (time_after32(now, last_overflow + HZ))
+-		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
++	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
++	if (!time_between32(now, last_overflow, last_overflow + HZ))
++		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
+ }
+ 
+ /* syncookies: no recent synqueue overflow on this listening socket? */
+@@ -508,13 +509,23 @@ static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
+ 		reuse = rcu_dereference(sk->sk_reuseport_cb);
+ 		if (likely(reuse)) {
+ 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+-			return time_after32(now, last_overflow +
+-					    TCP_SYNCOOKIE_VALID);
++			return !time_between32(now, last_overflow - HZ,
++					       last_overflow +
++					       TCP_SYNCOOKIE_VALID);
+ 		}
+ 	}
+ 
+-	last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+-	return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
++	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
++
++	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
++	 * then we're under synflood. However, we have to use
++	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
++	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
++	 * jiffies but before we store .ts_recent_stamp into last_overflow,
++	 * which could lead to rejecting a valid syncookie.
++	 */
++	return !time_between32(now, last_overflow - HZ,
++			       last_overflow + TCP_SYNCOOKIE_VALID);
+ }
+ 
+ static inline u32 tcp_cookie_time(void)
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index e682a668ce57..9ce661e2590d 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -246,6 +246,12 @@ static int br_set_mac_address(struct net_device *dev, void *p)
+ 	if (!is_valid_ether_addr(addr->sa_data))
+ 		return -EADDRNOTAVAIL;
+ 
++	/* dev_set_mac_address() can be called by a master device on bridge's
++	 * NETDEV_UNREGISTER, but since it's being destroyed do nothing
++	 */
++	if (dev->reg_state != NETREG_REGISTERED)
++		return -EBUSY;
++
+ 	spin_lock_bh(&br->lock);
+ 	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
+ 		/* Mac address will be changed in br_stp_change_bridge_id(). */
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 91179febdeee..8ff21d461f08 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -7595,7 +7595,8 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
+ 	if (ops->ndo_change_mtu)
+ 		return ops->ndo_change_mtu(dev, new_mtu);
+ 
+-	dev->mtu = new_mtu;
++	/* Pairs with all the lockless reads of dev->mtu in the stack */
++	WRITE_ONCE(dev->mtu, new_mtu);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(__dev_set_mtu);
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 4362ffe9b5ef..994dd1520f07 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -630,9 +630,10 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
+ 		nhoff = skb_network_offset(skb);
+ 		hlen = skb_headlen(skb);
+ #if IS_ENABLED(CONFIG_NET_DSA)
+-		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
++		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
++			     proto == htons(ETH_P_XDSA))) {
+ 			const struct dsa_device_ops *ops;
+-			int offset;
++			int offset = 0;
+ 
+ 			ops = skb->dev->dsa_ptr->tag_ops;
+ 			if (ops->flow_dissect &&
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index d23746143cd2..fabece4b8997 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1441,11 +1441,6 @@ skip:
+ 	}
+ }
+ 
+-static bool inetdev_valid_mtu(unsigned int mtu)
+-{
+-	return mtu >= IPV4_MIN_MTU;
+-}
+-
+ static void inetdev_send_gratuitous_arp(struct net_device *dev,
+ 					struct in_device *in_dev)
+ 
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 73894ed12a70..d63091812342 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1142,15 +1142,18 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
+ 		cork->addr = ipc->addr;
+ 	}
+ 
+-	/*
+-	 * We steal reference to this route, caller should not release it
+-	 */
+-	*rtp = NULL;
+ 	cork->fragsize = ip_sk_use_pmtu(sk) ?
+-			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;
++			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
++
++	if (!inetdev_valid_mtu(cork->fragsize))
++		return -ENETUNREACH;
+ 
+ 	cork->gso_size = ipc->gso_size;
++
+ 	cork->dst = &rt->dst;
++	/* We stole this route, caller should not release it. */
++	*rtp = NULL;
++
+ 	cork->length = 0;
+ 	cork->ttl = ipc->ttl;
+ 	cork->tos = ipc->tos;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 8971cc15d278..ad787e7882f7 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -740,8 +740,9 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
+ 			min_t(unsigned int, eff_sacks,
+ 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
+ 			      TCPOLEN_SACK_PERBLOCK);
+-		size += TCPOLEN_SACK_BASE_ALIGNED +
+-			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
++		if (likely(opts->num_sack_blocks))
++			size += TCPOLEN_SACK_BASE_ALIGNED +
++				opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
+ 	}
+ 
+ 	return size;
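+
The guard matters when the remaining option space can hold the SACK base
but zero SACK blocks: the min_t() clamp then yields num_sack_blocks == 0,
and the old code still charged TCPOLEN_SACK_BASE_ALIGNED bytes for an
option that is never emitted. Worked through with an assumed remaining
space (the TCPOLEN_* values match include/net/tcp.h):

    #include <stdio.h>

    #define TCPOLEN_SACK_BASE_ALIGNED 4
    #define TCPOLEN_SACK_PERBLOCK     8

    int main(void)
    {
            unsigned remaining = 10;        /* assumed leftover space */
            unsigned eff_sacks = 3;
            unsigned fit = (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
                           TCPOLEN_SACK_PERBLOCK;       /* 0 blocks fit */
            unsigned blocks = eff_sacks < fit ? eff_sacks : fit;

            unsigned old_size = TCPOLEN_SACK_BASE_ALIGNED +
                                blocks * TCPOLEN_SACK_PERBLOCK;  /* 4 */
            unsigned new_size = blocks ? old_size : 0;           /* 0 */

            printf("blocks=%u old=%u new=%u\n", blocks, old_size, new_size);
            return 0;
    }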
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 46aa1aa51db4..6dcb59f272e1 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -897,6 +897,17 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
+ 	}
+ 	err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);
+ 
++	if (err == NF_ACCEPT &&
++	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
++		if (maniptype == NF_NAT_MANIP_SRC)
++			maniptype = NF_NAT_MANIP_DST;
++		else
++			maniptype = NF_NAT_MANIP_SRC;
++
++		err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
++					 maniptype);
++	}
++
+ 	/* Mark NAT done if successful and update the flow key. */
+ 	if (err == NF_ACCEPT)
+ 		ovs_nat_update_key(key, skb, maniptype);
+diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
+index 15c6eb547684..c008a316e943 100644
+--- a/net/sched/sch_mq.c
++++ b/net/sched/sch_mq.c
+@@ -158,6 +158,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
+ 			__gnet_stats_copy_queue(&sch->qstats,
+ 						qdisc->cpu_qstats,
+ 						&qdisc->qstats, qlen);
++			sch->q.qlen		+= qlen;
+ 		} else {
+ 			sch->q.qlen		+= qdisc->q.qlen;
+ 			sch->bstats.bytes	+= qdisc->bstats.bytes;
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index 68ce86cab63c..008db8d59ace 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -413,6 +413,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+ 			__gnet_stats_copy_queue(&sch->qstats,
+ 						qdisc->cpu_qstats,
+ 						&qdisc->qstats, qlen);
++			sch->q.qlen		+= qlen;
+ 		} else {
+ 			sch->q.qlen		+= qdisc->q.qlen;
+ 			sch->bstats.bytes	+= qdisc->bstats.bytes;
+@@ -435,7 +436,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+ 		opt.offset[tc] = dev->tc_to_txq[tc].offset;
+ 	}
+ 
+-	if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
++	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
+ 		goto nla_put_failure;
+ 
+ 	if ((priv->flags & TC_MQPRIO_F_MODE) &&
+diff --git a/net/tipc/core.c b/net/tipc/core.c
+index eb0f701f9bf1..49bb9fc0aa06 100644
+--- a/net/tipc/core.c
++++ b/net/tipc/core.c
+@@ -120,14 +120,6 @@ static int __init tipc_init(void)
+ 	sysctl_tipc_rmem[1] = RCVBUF_DEF;
+ 	sysctl_tipc_rmem[2] = RCVBUF_MAX;
+ 
+-	err = tipc_netlink_start();
+-	if (err)
+-		goto out_netlink;
+-
+-	err = tipc_netlink_compat_start();
+-	if (err)
+-		goto out_netlink_compat;
+-
+ 	err = tipc_register_sysctl();
+ 	if (err)
+ 		goto out_sysctl;
+@@ -148,8 +140,21 @@ static int __init tipc_init(void)
+ 	if (err)
+ 		goto out_bearer;
+ 
++	err = tipc_netlink_start();
++	if (err)
++		goto out_netlink;
++
++	err = tipc_netlink_compat_start();
++	if (err)
++		goto out_netlink_compat;
++
+ 	pr_info("Started in single node mode\n");
+ 	return 0;
++
++out_netlink_compat:
++	tipc_netlink_stop();
++out_netlink:
++	tipc_bearer_cleanup();
+ out_bearer:
+ 	unregister_pernet_device(&tipc_topsrv_net_ops);
+ out_pernet_topsrv:
+@@ -159,22 +164,18 @@ out_socket:
+ out_pernet:
+ 	tipc_unregister_sysctl();
+ out_sysctl:
+-	tipc_netlink_compat_stop();
+-out_netlink_compat:
+-	tipc_netlink_stop();
+-out_netlink:
+ 	pr_err("Unable to start in single node mode\n");
+ 	return err;
+ }
+ 
+ static void __exit tipc_exit(void)
+ {
++	tipc_netlink_compat_stop();
++	tipc_netlink_stop();
+ 	tipc_bearer_cleanup();
+ 	unregister_pernet_device(&tipc_topsrv_net_ops);
+ 	tipc_socket_stop();
+ 	unregister_pernet_device(&tipc_net_ops);
+-	tipc_netlink_stop();
+-	tipc_netlink_compat_stop();
+ 	tipc_unregister_sysctl();
+ 
+ 	pr_info("Deactivated\n");


2020-01-17 19:56 Mike Pagano
2020-01-14 22:30 Mike Pagano
2020-01-12 15:00 Mike Pagano
2020-01-09 11:15 Mike Pagano
2020-01-04 19:50 Mike Pagano
2019-12-31 17:46 Mike Pagano
2019-12-17 21:56 Mike Pagano
2019-12-13 12:35 Mike Pagano
2019-12-05 12:03 Alice Ferrazzi
2019-12-01 14:06 Thomas Deutschmann
2019-11-24 15:44 Mike Pagano
2019-11-20 19:36 Mike Pagano
2019-11-12 21:00 Mike Pagano
2019-11-10 16:20 Mike Pagano
2019-11-06 14:26 Mike Pagano
2019-10-29 12:04 Mike Pagano
2019-10-17 22:27 Mike Pagano
2019-10-11 17:04 Mike Pagano
2019-10-07 17:42 Mike Pagano
2019-10-05 11:42 Mike Pagano
2019-10-01 10:10 Mike Pagano
2019-09-21 17:11 Mike Pagano
2019-09-19 12:34 Mike Pagano
2019-09-19 10:04 Mike Pagano
2019-09-16 12:26 Mike Pagano
2019-09-10 11:12 Mike Pagano
2019-09-06 17:25 Mike Pagano
2019-08-29 14:15 Mike Pagano
2019-08-25 17:37 Mike Pagano
2019-08-23 22:18 Mike Pagano
2019-08-16 12:26 Mike Pagano
2019-08-16 12:13 Mike Pagano
2019-08-09 17:45 Mike Pagano
2019-08-06 19:19 Mike Pagano
2019-08-04 16:15 Mike Pagano
2019-07-31 15:09 Mike Pagano
2019-07-31 10:22 Mike Pagano
2019-07-28 16:27 Mike Pagano
2019-07-26 11:35 Mike Pagano
2019-07-21 14:41 Mike Pagano
2019-07-14 15:44 Mike Pagano
2019-07-10 11:05 Mike Pagano
2019-07-03 11:34 Mike Pagano
2019-06-25 10:53 Mike Pagano
2019-06-22 19:06 Mike Pagano
2019-06-19 17:17 Thomas Deutschmann
2019-06-17 19:22 Mike Pagano
2019-06-15 15:07 Mike Pagano
2019-06-11 12:42 Mike Pagano
2019-06-10 19:43 Mike Pagano
2019-06-09 16:19 Mike Pagano
2019-06-04 11:11 Mike Pagano
2019-05-31 15:02 Mike Pagano
2019-05-26 17:10 Mike Pagano
2019-05-22 11:02 Mike Pagano
2019-05-16 23:03 Mike Pagano
2019-05-14 21:00 Mike Pagano
2019-05-10 19:40 Mike Pagano
2019-05-08 10:06 Mike Pagano
2019-05-05 13:42 Mike Pagano
2019-05-04 18:28 Mike Pagano
2019-05-02 10:13 Mike Pagano
2019-04-27 17:36 Mike Pagano
2019-04-20 11:09 Mike Pagano
2019-04-19 19:51 Mike Pagano
2019-04-05 21:46 Mike Pagano
2019-04-03 10:59 Mike Pagano
2019-03-27 10:22 Mike Pagano
2019-03-23 20:23 Mike Pagano
2019-03-19 16:58 Mike Pagano
2019-03-13 22:08 Mike Pagano
2019-03-10 14:15 Mike Pagano
2019-03-06 19:06 Mike Pagano
2019-03-05 18:04 Mike Pagano
2019-02-27 11:23 Mike Pagano
2019-02-23 11:35 Mike Pagano
2019-02-23  0:46 Mike Pagano
2019-02-20 11:19 Mike Pagano
2019-02-16  0:42 Mike Pagano
2019-02-15 12:39 Mike Pagano
2019-02-12 20:53 Mike Pagano
2019-02-06 17:08 Mike Pagano
2019-01-31 11:28 Mike Pagano
2019-01-26 15:09 Mike Pagano
2019-01-22 23:06 Mike Pagano
2019-01-16 23:32 Mike Pagano
2019-01-13 19:29 Mike Pagano
2019-01-09 17:54 Mike Pagano
2018-12-29 18:55 Mike Pagano
2018-12-29  1:08 Mike Pagano
2018-12-21 14:58 Mike Pagano
2018-12-19 19:09 Mike Pagano
2018-12-17 11:42 Mike Pagano
2018-12-13 11:40 Mike Pagano
2018-12-08 13:17 Mike Pagano
2018-12-08 13:17 Mike Pagano
2018-12-05 20:16 Mike Pagano
2018-12-01 15:08 Mike Pagano
2018-11-27 16:16 Mike Pagano
2018-11-23 12:42 Mike Pagano
2018-11-21 12:30 Mike Pagano
2018-11-14  0:47 Mike Pagano
2018-11-14  0:47 Mike Pagano
2018-11-13 20:44 Mike Pagano
2018-11-04 16:22 Alice Ferrazzi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save this message's raw mbox file (linked from the archive), import it
  into your mail client, and reply-to-all from there.

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1576940582.96ffa6fb8e2e9fb0a95fec61d517ba8eb1ba9d2a.mpagano@gentoo \
    --to=mpagano@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link for this message.

Be sure your reply has a Subject: header at the top and a blank line
before the message body.