public inbox for gentoo-commits@lists.gentoo.org
From: "Alice Ferrazzi" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Sat, 31 Jul 2021 10:30:32 +0000 (UTC)
Message-ID: <1627727415.fffb3b021d5afffa2a7eba74d2e5fc5d114650ac.alicef@gentoo>

commit:     fffb3b021d5afffa2a7eba74d2e5fc5d114650ac
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Jul 31 10:29:25 2021 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Jul 31 10:30:15 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fffb3b02

Linux patch 5.10.55

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README              |   4 +
 1054_linux-5.10.55.patch | 730 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 734 insertions(+)

diff --git a/0000_README b/0000_README
index e70652a..f1cdfa7 100644
--- a/0000_README
+++ b/0000_README
@@ -259,6 +259,10 @@ Patch:  1053_linux-5.10.54.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.10.54
 
+Patch:  1054_linux-5.10.55.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.10.55
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1054_linux-5.10.55.patch b/1054_linux-5.10.55.patch
new file mode 100644
index 0000000..25c76f1
--- /dev/null
+++ b/1054_linux-5.10.55.patch
@@ -0,0 +1,730 @@
+diff --git a/Makefile b/Makefile
+index eb01d3028b020..7fb6405f3b60f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 54
++SUBLEVEL = 55
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
+index 37bd41ff8dffa..151c0220047dd 100644
+--- a/arch/arm/boot/dts/versatile-ab.dts
++++ b/arch/arm/boot/dts/versatile-ab.dts
+@@ -195,16 +195,15 @@
+ 		#size-cells = <1>;
+ 		ranges;
+ 
+-		vic: intc@10140000 {
++		vic: interrupt-controller@10140000 {
+ 			compatible = "arm,versatile-vic";
+ 			interrupt-controller;
+ 			#interrupt-cells = <1>;
+ 			reg = <0x10140000 0x1000>;
+-			clear-mask = <0xffffffff>;
+ 			valid-mask = <0xffffffff>;
+ 		};
+ 
+-		sic: intc@10003000 {
++		sic: interrupt-controller@10003000 {
+ 			compatible = "arm,versatile-sic";
+ 			interrupt-controller;
+ 			#interrupt-cells = <1>;
+diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
+index 06a0fdf24026c..e7e751a858d81 100644
+--- a/arch/arm/boot/dts/versatile-pb.dts
++++ b/arch/arm/boot/dts/versatile-pb.dts
+@@ -7,7 +7,7 @@
+ 
+ 	amba {
+ 		/* The Versatile PB is using more SIC IRQ lines than the AB */
+-		sic: intc@10003000 {
++		sic: interrupt-controller@10003000 {
+ 			clear-mask = <0xffffffff>;
+ 			/*
+ 			 * Valid interrupt lines mask according to
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 800914e9e12b9..3ad6f77ea1c45 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -541,8 +541,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
+ 
+ 	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
+ 	queue:
+-		if (has_error && !is_protmode(vcpu))
+-			has_error = false;
+ 		if (reinject) {
+ 			/*
+ 			 * On vmentry, vcpu->arch.exception.pending is only
+@@ -8265,6 +8263,13 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+ 	kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr);
+ }
+ 
++static void kvm_inject_exception(struct kvm_vcpu *vcpu)
++{
++	if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
++		vcpu->arch.exception.error_code = false;
++	kvm_x86_ops.queue_exception(vcpu);
++}
++
+ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
+ {
+ 	int r;
+@@ -8273,7 +8278,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
+ 	/* try to reinject previous events if any */
+ 
+ 	if (vcpu->arch.exception.injected) {
+-		kvm_x86_ops.queue_exception(vcpu);
++		kvm_inject_exception(vcpu);
+ 		can_inject = false;
+ 	}
+ 	/*
+@@ -8336,7 +8341,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
+ 			}
+ 		}
+ 
+-		kvm_x86_ops.queue_exception(vcpu);
++		kvm_inject_exception(vcpu);
+ 		can_inject = false;
+ 	}
+ 
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index af4560dab6b40..8c9663258d5d4 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -43,7 +43,6 @@ enum scmi_error_codes {
+ 	SCMI_ERR_GENERIC = -8,	/* Generic Error */
+ 	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
+ 	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
+-	SCMI_ERR_MAX
+ };
+ 
+ /* List of all SCMI devices active in system */
+@@ -118,8 +117,10 @@ static const int scmi_linux_errmap[] = {
+ 
+ static inline int scmi_to_linux_errno(int errno)
+ {
+-	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
+-		return scmi_linux_errmap[-errno];
++	int err_idx = -errno;
++
++	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
++		return scmi_linux_errmap[err_idx];
+ 	return -EIO;
+ }
+ 
+@@ -614,8 +615,9 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+ 	const struct scmi_desc *desc = sinfo->desc;
+ 
+ 	/* Pre-allocated messages, no more than what hdr.seq can support */
+-	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
+-		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
++	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
++		dev_err(dev,
++			"Invalid maximum messages %d, not in range [1 - %lu]\n",
+ 			desc->max_msg, MSG_TOKEN_MAX);
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
+index 1da0e277c5111..ce9d127edbb5d 100644
+--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
++++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
+@@ -147,6 +147,9 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev,
+ 	struct drm_mm *mm = &rman->mm;
+ 	int ret;
+ 
++	if (!man)
++		return 0;
++
+ 	ttm_resource_manager_set_used(man, false);
+ 
+ 	ret = ttm_resource_manager_force_list_clean(bdev, man);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index b0b06eb86edfb..81e0877237770 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -497,8 +497,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 	p = buf;
+ 	while (bytes_left >= sizeof(*p)) {
+ 		info->speed = le64_to_cpu(p->LinkSpeed);
+-		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
+-		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
++		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
++		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
+ 
+ 		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+ 		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
+index 4af318fbda774..ef9498a6e88ac 100644
+--- a/fs/hfs/bfind.c
++++ b/fs/hfs/bfind.c
+@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
+ 	fd->key = ptr + tree->max_key_len + 2;
+ 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
+ 		tree->cnid, __builtin_return_address(0));
+-	mutex_lock(&tree->tree_lock);
++	switch (tree->cnid) {
++	case HFS_CAT_CNID:
++		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
++		break;
++	case HFS_EXT_CNID:
++		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
++		break;
++	case HFS_ATTR_CNID:
++		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
++		break;
++	default:
++		return -EINVAL;
++	}
+ 	return 0;
+ }
+ 
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index b63a4df7327b6..c0a73a6ffb28b 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -15,16 +15,31 @@
+ 
+ #include "btree.h"
+ 
+-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
+-		int off, int len)
++void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
+ {
+ 	struct page *page;
++	int pagenum;
++	int bytes_read;
++	int bytes_to_read;
++	void *vaddr;
+ 
+ 	off += node->page_offset;
+-	page = node->page[0];
++	pagenum = off >> PAGE_SHIFT;
++	off &= ~PAGE_MASK; /* compute page offset for the first page */
+ 
+-	memcpy(buf, kmap(page) + off, len);
+-	kunmap(page);
++	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
++		if (pagenum >= node->tree->pages_per_bnode)
++			break;
++		page = node->page[pagenum];
++		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
++
++		vaddr = kmap_atomic(page);
++		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
++		kunmap_atomic(vaddr);
++
++		pagenum++;
++		off = 0; /* page offset only applies to the first page */
++	}
+ }
+ 
+ u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
+diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
+index 4ba45caf59392..0e6baee932453 100644
+--- a/fs/hfs/btree.h
++++ b/fs/hfs/btree.h
+@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
+ 
+ #define NODE_HASH_SIZE  256
+ 
++/* B-tree mutex nested subclasses */
++enum hfs_btree_mutex_classes {
++	CATALOG_BTREE_MUTEX,
++	EXTENTS_BTREE_MUTEX,
++	ATTR_BTREE_MUTEX,
++};
++
+ /* A HFS BTree held in memory */
+ struct hfs_btree {
+ 	struct super_block *sb;
+diff --git a/fs/hfs/super.c b/fs/hfs/super.c
+index 44d07c9e3a7f0..12d9bae393631 100644
+--- a/fs/hfs/super.c
++++ b/fs/hfs/super.c
+@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ 	if (!res) {
+ 		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ 			res =  -EIO;
+-			goto bail;
++			goto bail_hfs_find;
+ 		}
+ 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ 	}
+-	if (res) {
+-		hfs_find_exit(&fd);
+-		goto bail_no_root;
+-	}
++	if (res)
++		goto bail_hfs_find;
+ 	res = -EINVAL;
+ 	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
+ 	hfs_find_exit(&fd);
+@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
+ 	/* everything's okay */
+ 	return 0;
+ 
++bail_hfs_find:
++	hfs_find_exit(&fd);
+ bail_no_root:
+ 	pr_err("get root inode failed\n");
+ bail:
+diff --git a/fs/internal.h b/fs/internal.h
+index a7cd0f64faa4a..5155f6ce95c79 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -64,7 +64,6 @@ extern void __init chrdev_init(void);
+  */
+ extern const struct fs_context_operations legacy_fs_context_ops;
+ extern int parse_monolithic_mount_data(struct fs_context *, void *);
+-extern void fc_drop_locked(struct fs_context *);
+ extern void vfs_clean_context(struct fs_context *fc);
+ extern int finish_clean_context(struct fs_context *fc);
+ 
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 07f08c424d17b..525b44140d7a3 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -6266,7 +6266,6 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
+ 	if (prev) {
+ 		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
+ 		io_put_req_deferred(prev, 1);
+-		io_put_req_deferred(req, 1);
+ 	} else {
+ 		io_cqring_add_event(req, -ETIME, 0);
+ 		io_put_req_deferred(req, 1);
+diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
+index 107ee80c35683..220c306167f79 100644
+--- a/fs/iomap/seek.c
++++ b/fs/iomap/seek.c
+@@ -140,23 +140,20 @@ loff_t
+ iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+ {
+ 	loff_t size = i_size_read(inode);
+-	loff_t length = size - offset;
+ 	loff_t ret;
+ 
+ 	/* Nothing to be found before or beyond the end of the file. */
+ 	if (offset < 0 || offset >= size)
+ 		return -ENXIO;
+ 
+-	while (length > 0) {
+-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+-				  &offset, iomap_seek_hole_actor);
++	while (offset < size) {
++		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
++				  ops, &offset, iomap_seek_hole_actor);
+ 		if (ret < 0)
+ 			return ret;
+ 		if (ret == 0)
+ 			break;
+-
+ 		offset += ret;
+-		length -= ret;
+ 	}
+ 
+ 	return offset;
+@@ -186,27 +183,23 @@ loff_t
+ iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
+ {
+ 	loff_t size = i_size_read(inode);
+-	loff_t length = size - offset;
+ 	loff_t ret;
+ 
+ 	/* Nothing to be found before or beyond the end of the file. */
+ 	if (offset < 0 || offset >= size)
+ 		return -ENXIO;
+ 
+-	while (length > 0) {
+-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
+-				  &offset, iomap_seek_data_actor);
++	while (offset < size) {
++		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
++				  ops, &offset, iomap_seek_data_actor);
+ 		if (ret < 0)
+ 			return ret;
+ 		if (ret == 0)
+-			break;
+-
++			return offset;
+ 		offset += ret;
+-		length -= ret;
+ 	}
+ 
+-	if (length <= 0)
+-		return -ENXIO;
+-	return offset;
++	/* We've reached the end of the file without finding data */
++	return -ENXIO;
+ }
+ EXPORT_SYMBOL_GPL(iomap_seek_data);
+diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
+index 37e1e8f7f08da..5b44b0195a28a 100644
+--- a/include/linux/fs_context.h
++++ b/include/linux/fs_context.h
+@@ -139,6 +139,7 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ extern int generic_parse_monolithic(struct fs_context *fc, void *data);
+ extern int vfs_get_tree(struct fs_context *fc);
+ extern void put_fs_context(struct fs_context *fc);
++extern void fc_drop_locked(struct fs_context *fc);
+ 
+ /*
+  * sget() wrappers to be called from the ->get_tree() op.
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index b001fa91c14ea..716b7c5f6fdd9 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -36,7 +36,7 @@ static inline bool net_busy_loop_on(void)
+ 
+ static inline bool sk_can_busy_loop(const struct sock *sk)
+ {
+-	return sk->sk_ll_usec && !signal_pending(current);
++	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
+ }
+ 
+ bool sk_busy_loop_end(void *p, unsigned long start_time);
+diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
+index 122d9e2d8dfde..1ad049ac2add4 100644
+--- a/include/net/sctp/constants.h
++++ b/include/net/sctp/constants.h
+@@ -340,8 +340,7 @@ enum {
+ #define SCTP_SCOPE_POLICY_MAX	SCTP_SCOPE_POLICY_LINK
+ 
+ /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
+- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
+- * 192.88.99.0/24.
++ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
+  * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
+  * addresses.
+  */
+@@ -349,7 +348,6 @@ enum {
+ 	((htonl(INADDR_BROADCAST) == a) ||  \
+ 	 ipv4_is_multicast(a) ||	    \
+ 	 ipv4_is_zeronet(a) ||		    \
+-	 ipv4_is_test_198(a) ||		    \
+ 	 ipv4_is_anycast_6to4(a))
+ 
+ /* Flags used for the bind address copy functions.  */
+diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
+index 04eb28f7735fb..7f71b54c06c5f 100644
+--- a/kernel/cgroup/cgroup-v1.c
++++ b/kernel/cgroup/cgroup-v1.c
+@@ -1225,9 +1225,7 @@ int cgroup1_get_tree(struct fs_context *fc)
+ 		ret = cgroup_do_get_tree(fc);
+ 
+ 	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
+-		struct super_block *sb = fc->root->d_sb;
+-		dput(fc->root);
+-		deactivate_locked_super(sb);
++		fc_drop_locked(fc);
+ 		ret = 1;
+ 	}
+ 
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index 73bbe792fe1e8..b338f514ee5aa 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -879,10 +879,9 @@ static bool trc_inspect_reader(struct task_struct *t, void *arg)
+ 		in_qs = likely(!t->trc_reader_nesting);
+ 	}
+ 
+-	// Mark as checked.  Because this is called from the grace-period
+-	// kthread, also remove the task from the holdout list.
++	// Mark as checked so that the grace-period kthread will
++	// remove it from the holdout list.
+ 	t->trc_reader_checked = true;
+-	trc_del_holdout(t);
+ 
+ 	if (in_qs)
+ 		return true;  // Already in quiescent state, done!!!
+@@ -909,7 +908,6 @@ static void trc_wait_for_one_reader(struct task_struct *t,
+ 	// The current task had better be in a quiescent state.
+ 	if (t == current) {
+ 		t->trc_reader_checked = true;
+-		trc_del_holdout(t);
+ 		WARN_ON_ONCE(t->trc_reader_nesting);
+ 		return;
+ 	}
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index b23f7d1044be7..51d19fc71e616 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3670,15 +3670,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
+ 						  unbound_release_work);
+ 	struct workqueue_struct *wq = pwq->wq;
+ 	struct worker_pool *pool = pwq->pool;
+-	bool is_last;
++	bool is_last = false;
+ 
+-	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+-		return;
++	/*
++	 * when @pwq is not linked, it doesn't hold any reference to the
++	 * @wq, and @wq is invalid to access.
++	 */
++	if (!list_empty(&pwq->pwqs_node)) {
++		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
++			return;
+ 
+-	mutex_lock(&wq->mutex);
+-	list_del_rcu(&pwq->pwqs_node);
+-	is_last = list_empty(&wq->pwqs);
+-	mutex_unlock(&wq->mutex);
++		mutex_lock(&wq->mutex);
++		list_del_rcu(&pwq->pwqs_node);
++		is_last = list_empty(&wq->pwqs);
++		mutex_unlock(&wq->mutex);
++	}
+ 
+ 	mutex_lock(&wq_pool_mutex);
+ 	put_unbound_pool(pool);
+diff --git a/net/802/garp.c b/net/802/garp.c
+index 400bd857e5f57..f6012f8e59f00 100644
+--- a/net/802/garp.c
++++ b/net/802/garp.c
+@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
+ 	kfree(attr);
+ }
+ 
++static void garp_attr_destroy_all(struct garp_applicant *app)
++{
++	struct rb_node *node, *next;
++	struct garp_attr *attr;
++
++	for (node = rb_first(&app->gid);
++	     next = node ? rb_next(node) : NULL, node != NULL;
++	     node = next) {
++		attr = rb_entry(node, struct garp_attr, node);
++		garp_attr_destroy(app, attr);
++	}
++}
++
+ static int garp_pdu_init(struct garp_applicant *app)
+ {
+ 	struct sk_buff *skb;
+@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
+ 
+ 	spin_lock_bh(&app->lock);
+ 	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
++	garp_attr_destroy_all(app);
+ 	garp_pdu_queue(app);
+ 	spin_unlock_bh(&app->lock);
+ 
+diff --git a/net/802/mrp.c b/net/802/mrp.c
+index bea6e43d45a0d..35e04cc5390c4 100644
+--- a/net/802/mrp.c
++++ b/net/802/mrp.c
+@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
+ 	kfree(attr);
+ }
+ 
++static void mrp_attr_destroy_all(struct mrp_applicant *app)
++{
++	struct rb_node *node, *next;
++	struct mrp_attr *attr;
++
++	for (node = rb_first(&app->mad);
++	     next = node ? rb_next(node) : NULL, node != NULL;
++	     node = next) {
++		attr = rb_entry(node, struct mrp_attr, node);
++		mrp_attr_destroy(app, attr);
++	}
++}
++
+ static int mrp_pdu_init(struct mrp_applicant *app)
+ {
+ 	struct sk_buff *skb;
+@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
+ 
+ 	spin_lock_bh(&app->lock);
+ 	mrp_mad_event(app, MRP_EVENT_TX);
++	mrp_attr_destroy_all(app);
+ 	mrp_pdu_queue(app);
+ 	spin_unlock_bh(&app->lock);
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 7de51ea15cdfc..d638c5361ed29 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1164,7 +1164,7 @@ set_sndbuf:
+ 			if (val < 0)
+ 				ret = -EINVAL;
+ 			else
+-				sk->sk_ll_usec = val;
++				WRITE_ONCE(sk->sk_ll_usec, val);
+ 		}
+ 		break;
+ #endif
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 341d0c7acc8bf..72a673a43a754 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -60,10 +60,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct net_device *dev = dst->dev;
++	unsigned int hh_len = LL_RESERVED_SPACE(dev);
++	int delta = hh_len - skb_headroom(skb);
+ 	const struct in6_addr *nexthop;
+ 	struct neighbour *neigh;
+ 	int ret;
+ 
++	/* Be paranoid, rather than too clever. */
++	if (unlikely(delta > 0) && dev->header_ops) {
++		/* pskb_expand_head() might crash, if skb is shared */
++		if (skb_shared(skb)) {
++			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
++
++			if (likely(nskb)) {
++				if (skb->sk)
++					skb_set_owner_w(nskb, skb->sk);
++				consume_skb(skb);
++			} else {
++				kfree_skb(skb);
++			}
++			skb = nskb;
++		}
++		if (skb &&
++		    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
++			kfree_skb(skb);
++			skb = NULL;
++		}
++		if (!skb) {
++			IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
++			return -ENOMEM;
++		}
++	}
++
+ 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
+ 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+ 
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 47fb87ce489fc..940f1e257a90a 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -397,7 +397,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
+ 		retval = SCTP_SCOPE_LINK;
+ 	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
+ 		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
+-		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
++		   ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
++		   ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
+ 		retval = SCTP_SCOPE_PRIVATE;
+ 	} else {
+ 		retval = SCTP_SCOPE_GLOBAL;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 39be4b52329b5..37ffa7725cee2 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1521,6 +1521,53 @@ out:
+ 	return err;
+ }
+ 
++static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
++{
++	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
++
++	/*
++	 * Garbage collection of unix sockets starts by selecting a set of
++	 * candidate sockets which have reference only from being in flight
++	 * (total_refs == inflight_refs).  This condition is checked once during
++	 * the candidate collection phase, and candidates are marked as such, so
++	 * that non-candidates can later be ignored.  While inflight_refs is
++	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
++	 * is an instantaneous decision.
++	 *
++	 * Once a candidate, however, the socket must not be reinstalled into a
++	 * file descriptor while the garbage collection is in progress.
++	 *
++	 * If the above conditions are met, then the directed graph of
++	 * candidates (*) does not change while unix_gc_lock is held.
++	 *
++	 * Any operations that changes the file count through file descriptors
++	 * (dup, close, sendmsg) does not change the graph since candidates are
++	 * not installed in fds.
++	 *
++	 * Dequeing a candidate via recvmsg would install it into an fd, but
++	 * that takes unix_gc_lock to decrement the inflight count, so it's
++	 * serialized with garbage collection.
++	 *
++	 * MSG_PEEK is special in that it does not change the inflight count,
++	 * yet does install the socket into an fd.  The following lock/unlock
++	 * pair is to ensure serialization with garbage collection.  It must be
++	 * done between incrementing the file count and installing the file into
++	 * an fd.
++	 *
++	 * If garbage collection starts after the barrier provided by the
++	 * lock/unlock, then it will see the elevated refcount and not mark this
++	 * as a candidate.  If a garbage collection is already in progress
++	 * before the file count was incremented, then the lock/unlock pair will
++	 * ensure that garbage collection is finished before progressing to
++	 * installing the fd.
++	 *
++	 * (*) A -> B where B is on the queue of A or B is on the queue of C
++	 * which is on the queue of listening socket A.
++	 */
++	spin_lock(&unix_gc_lock);
++	spin_unlock(&unix_gc_lock);
++}
++
+ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
+ {
+ 	int err = 0;
+@@ -2170,7 +2217,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ 		sk_peek_offset_fwd(sk, size);
+ 
+ 		if (UNIXCB(skb).fp)
+-			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++			unix_peek_fds(&scm, skb);
+ 	}
+ 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
+ 
+@@ -2413,7 +2460,7 @@ unlock:
+ 			/* It is questionable, see note in unix_dgram_recvmsg.
+ 			 */
+ 			if (UNIXCB(skb).fp)
+-				scm.fp = scm_fp_dup(UNIXCB(skb).fp);
++				unix_peek_fds(&scm, skb);
+ 
+ 			sk_peek_offset_fwd(sk, chunk);
+ 
+diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
+index 1358e89cdf7d6..a3f99ef6b11ba 100644
+--- a/tools/scripts/Makefile.include
++++ b/tools/scripts/Makefile.include
+@@ -39,8 +39,6 @@ EXTRA_WARNINGS += -Wundef
+ EXTRA_WARNINGS += -Wwrite-strings
+ EXTRA_WARNINGS += -Wformat
+ 
+-CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
+-
+ # Makefiles suck: This macro sets a default value of $(2) for the
+ # variable named by $(1), unless the variable has been set by
+ # environment or command line. This is necessary for CC and AR
+@@ -52,12 +50,22 @@ define allow-override
+     $(eval $(1) = $(2)))
+ endef
+ 
++ifneq ($(LLVM),)
++$(call allow-override,CC,clang)
++$(call allow-override,AR,llvm-ar)
++$(call allow-override,LD,ld.lld)
++$(call allow-override,CXX,clang++)
++$(call allow-override,STRIP,llvm-strip)
++else
+ # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
+ $(call allow-override,CC,$(CROSS_COMPILE)gcc)
+ $(call allow-override,AR,$(CROSS_COMPILE)ar)
+ $(call allow-override,LD,$(CROSS_COMPILE)ld)
+ $(call allow-override,CXX,$(CROSS_COMPILE)g++)
+ $(call allow-override,STRIP,$(CROSS_COMPILE)strip)
++endif
++
++CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
+ 
+ ifneq ($(LLVM),)
+ HOSTAR  ?= llvm-ar

