From: "Arisu Tachibana" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:6.17 commit in: /
Date: Wed, 15 Oct 2025 18:18:25 +0000 (UTC)
Message-ID: <1760552259.fa1e33670f90a800dfcb535d62e93e898178f3c2.alicef@gentoo>
commit:     fa1e33670f90a800dfcb535d62e93e898178f3c2
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 15 17:49:52 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Oct 15 18:17:39 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fa1e3367
Update BMQ and PDS io scheduler patch to v6.17-r1
Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>
 0000_README                                        |   2 +-
 ...=> 5020_BMQ-and-PDS-io-scheduler-v6.17-r1.patch | 284 ++++++++++++---------
 2 files changed, 163 insertions(+), 123 deletions(-)
diff --git a/0000_README b/0000_README
index 0aa228a9..7857b783 100644
--- a/0000_README
+++ b/0000_README
@@ -103,7 +103,7 @@ Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   More ISA levels and uarches for kernel 6.16+
 
-Patch:  5020_BMQ-and-PDS-io-scheduler-v6.17-r0.patch
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.17-r1.patch
 From:   https://gitlab.com/alfredchen/projectc
 Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incld). Inspired by the scheduler in zircon.
 
diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.17-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.17-r1.patch
similarity index 98%
rename from 5020_BMQ-and-PDS-io-scheduler-v6.17-r0.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v6.17-r1.patch
index 6b5e3269..7ce5d221 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v6.17-r0.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.17-r1.patch
@@ -1,3 +1,7 @@
+
+r2 for:
+https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=661f951e371cc134ea31c84238dbdc9a898b8403
+
 diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
 index 8b49eab937d0..c5d4901a9608 100644
 --- a/Documentation/admin-guide/sysctl/kernel.rst
@@ -723,10 +727,10 @@ index 8ae86371ddcd..a972ef1e31a7 100644
  obj-y += build_utility.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..8f03f5312e4d
+index 000000000000..db9a57681f70
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7648 @@
+@@ -0,0 +1,7645 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -801,7 +805,7 @@ index 000000000000..8f03f5312e4d
 +__read_mostly int sysctl_resched_latency_warn_ms = 100;
 +__read_mostly int sysctl_resched_latency_warn_once = 1;
 +
-+#define ALT_SCHED_VERSION "v6.17-r0"
++#define ALT_SCHED_VERSION "v6.17-r1"
 +
 +#define STOP_PRIO		(MAX_RT_PRIO - 1)
 +
@@ -842,7 +846,7 @@ index 000000000000..8f03f5312e4d
 + * the domain), this allows us to quickly tell if two cpus are in the same cache
 + * domain, see cpus_share_cache().
 + */
-+DEFINE_PER_CPU(int, sd_llc_id);
++static DEFINE_PER_CPU_READ_MOSTLY(int, sd_llc_id);
 +
 +DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 +
@@ -919,7 +923,7 @@ index 000000000000..8f03f5312e4d
 +
 +	if (prio < last_prio) {
 +		if (IDLE_TASK_SCHED_PRIO == last_prio) {
-+			rq->clear_idle_mask_func(cpu, sched_idle_mask);
++			sched_clear_idle_mask(cpu);
 +			last_prio -= 2;
 +		}
 +		CLEAR_CACHED_PREEMPT_MASK(pr, prio, last_prio, cpu);
@@ -928,7 +932,7 @@ index 000000000000..8f03f5312e4d
 +	}
 +	/* last_prio < prio */
 +	if (IDLE_TASK_SCHED_PRIO == prio) {
-+		rq->set_idle_mask_func(cpu, sched_idle_mask);
++		sched_set_idle_mask(cpu);
 +		prio -= 2;
 +	}
 +	SET_CACHED_PREEMPT_MASK(pr, last_prio, prio, cpu);
@@ -2741,7 +2745,7 @@ index 000000000000..8f03f5312e4d
 +	return cpumask_and(preempt_mask, allow_mask, mask);
 +}
 +
-+__read_mostly idle_select_func_t idle_select_func ____cacheline_aligned_in_smp = cpumask_and;
++DEFINE_STATIC_CALL(sched_idle_select_func, cpumask_and);
 +
 +static inline int select_task_rq(struct task_struct *p)
 +{
@@ -2750,7 +2754,7 @@ index 000000000000..8f03f5312e4d
 +	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
 +		return select_fallback_rq(task_cpu(p), p);
 +
-+	if (idle_select_func(&mask, &allow_mask, sched_idle_mask)	||
++	if (static_call(sched_idle_select_func)(&mask, &allow_mask, sched_idle_mask)	||
 +	    preempt_mask_check(&mask, &allow_mask, task_sched_prio(p)))
 +		return best_mask_cpu(task_cpu(p), &mask);
 +
@@ -5281,8 +5285,7 @@ index 000000000000..8f03f5312e4d
 +
 +	if (next == rq->idle) {
 +		if (!take_other_rq_tasks(rq, cpu)) {
-+			if (likely(rq->balance_func && rq->online))
-+				rq->balance_func(rq, cpu);
++			sched_cpu_topology_balance(cpu, rq);
 +
 +			schedstat_inc(rq->sched_goidle);
 +			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
@@ -7145,8 +7148,6 @@ index 000000000000..8f03f5312e4d
 +		rq->online = false;
 +		rq->cpu = i;
 +
-+		rq->clear_idle_mask_func = cpumask_clear_cpu;
-+		rq->set_idle_mask_func = cpumask_set_cpu;
 +		rq->balance_func = NULL;
 +		rq->active_balance_arg.active = 0;
 +
@@ -8377,10 +8378,10 @@ index 000000000000..8f03f5312e4d
 +#endif /* CONFIG_SCHED_MM_CID */
 diff --git a/kernel/sched/alt_core.h b/kernel/sched/alt_core.h
 new file mode 100644
-index 000000000000..bb9512c76566
+index 000000000000..55497941a22b
 --- /dev/null
 +++ b/kernel/sched/alt_core.h
-@@ -0,0 +1,177 @@
+@@ -0,0 +1,174 @@
 +#ifndef _KERNEL_SCHED_ALT_CORE_H
 +#define _KERNEL_SCHED_ALT_CORE_H
 +
@@ -8548,10 +8549,7 @@ index 000000000000..bb9512c76566
 +
 +extern struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu);
 +
-+typedef bool (*idle_select_func_t)(struct cpumask *dstp, const struct cpumask *src1p,
-+				   const struct cpumask *src2p);
-+
-+extern idle_select_func_t idle_select_func;
++DECLARE_STATIC_CALL(sched_idle_select_func, cpumask_and);
 +
 +/* balance callback */
 +extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
@@ -8598,10 +8596,10 @@ index 000000000000..1dbd7eb6a434
 +{}
 diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
 new file mode 100644
-index 000000000000..5b9a53c669f5
+index 000000000000..6cd5cfe3a332
 --- /dev/null
 +++ b/kernel/sched/alt_sched.h
-@@ -0,0 +1,1018 @@
+@@ -0,0 +1,1013 @@
 +#ifndef _KERNEL_SCHED_ALT_SCHED_H
 +#define _KERNEL_SCHED_ALT_SCHED_H
 +
@@ -8724,8 +8722,6 @@ index 000000000000..5b9a53c669f5
 +};
 +
 +typedef void (*balance_func_t)(struct rq *rq, int cpu);
-+typedef void (*set_idle_mask_func_t)(unsigned int cpu, struct cpumask *dstp);
-+typedef void (*clear_idle_mask_func_t)(int cpu, struct cpumask *dstp);
 +
 +struct balance_arg {
 +	struct task_struct	*task;
@@ -8766,9 +8762,6 @@ index 000000000000..5b9a53c669f5
 +	int membarrier_state;
 +#endif
 +
-+	set_idle_mask_func_t	set_idle_mask_func;
-+	clear_idle_mask_func_t	clear_idle_mask_func;
-+
 +	int cpu;		/* cpu of this runqueue */
 +	bool online;
 +
@@ -9622,10 +9615,10 @@ index 000000000000..5b9a53c669f5
 +#endif /* _KERNEL_SCHED_ALT_SCHED_H */
 diff --git a/kernel/sched/alt_topology.c b/kernel/sched/alt_topology.c
 new file mode 100644
-index 000000000000..376a08a5afda
+index 000000000000..590ee3cb1b49
 --- /dev/null
 +++ b/kernel/sched/alt_topology.c
-@@ -0,0 +1,347 @@
+@@ -0,0 +1,287 @@
 +#include "alt_core.h"
 +#include "alt_topology.h"
 +
@@ -9640,47 +9633,9 @@ index 000000000000..376a08a5afda
 +}
 +__setup("pcore_cpus=", sched_pcore_mask_setup);
 +
-+/*
-+ * set/clear idle mask functions
-+ */
-+#ifdef CONFIG_SCHED_SMT
-+static void set_idle_mask_smt(unsigned int cpu, struct cpumask *dstp)
-+{
-+	cpumask_set_cpu(cpu, dstp);
-+	if (cpumask_subset(cpu_smt_mask(cpu), sched_idle_mask))
-+		cpumask_or(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
-+}
-+
-+static void clear_idle_mask_smt(int cpu, struct cpumask *dstp)
-+{
-+	cpumask_clear_cpu(cpu, dstp);
-+	cpumask_andnot(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
-+}
-+#endif
-+
-+static void set_idle_mask_pcore(unsigned int cpu, struct cpumask *dstp)
-+{
-+	cpumask_set_cpu(cpu, dstp);
-+	cpumask_set_cpu(cpu, sched_pcore_idle_mask);
-+}
-+
-+static void clear_idle_mask_pcore(int cpu, struct cpumask *dstp)
-+{
-+	cpumask_clear_cpu(cpu, dstp);
-+	cpumask_clear_cpu(cpu, sched_pcore_idle_mask);
-+}
-+
-+static void set_idle_mask_ecore(unsigned int cpu, struct cpumask *dstp)
-+{
-+	cpumask_set_cpu(cpu, dstp);
-+	cpumask_set_cpu(cpu, sched_ecore_idle_mask);
-+}
-+
-+static void clear_idle_mask_ecore(int cpu, struct cpumask *dstp)
-+{
-+	cpumask_clear_cpu(cpu, dstp);
-+	cpumask_clear_cpu(cpu, sched_ecore_idle_mask);
-+}
++DEFINE_PER_CPU_READ_MOSTLY(enum cpu_topo_type, sched_cpu_topo);
++DEFINE_PER_CPU_READ_MOSTLY(enum cpu_topo_balance_type, sched_cpu_topo_balance);
++DEFINE_PER_CPU(struct balance_callback, active_balance_head);
 +
 +/*
 + * Idle cpu/rq selection functions
@@ -9785,8 +9740,6 @@ index 000000000000..376a08a5afda
 +	return 0;
 +}
 +
-+static DEFINE_PER_CPU(struct balance_callback, active_balance_head);
-+
 +#ifdef CONFIG_SCHED_SMT
 +static inline int
 +smt_pcore_source_balance(struct rq *rq, cpumask_t *single_task_mask, cpumask_t *target_mask)
@@ -9807,7 +9760,7 @@ index 000000000000..376a08a5afda
 +}
 +
 +/* smt p core balance functions */
-+static inline void smt_pcore_balance(struct rq *rq)
++void smt_pcore_balance(struct rq *rq)
 +{
 +	cpumask_t single_task_mask;
 +
@@ -9822,14 +9775,8 @@ index 000000000000..376a08a5afda
 +		return;
 +}
 +
-+static void smt_pcore_balance_func(struct rq *rq, const int cpu)
-+{
-+	if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
-+		queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_pcore_balance);
-+}
-+
 +/* smt balance functions */
-+static inline void smt_balance(struct rq *rq)
++void smt_balance(struct rq *rq)
 +{
 +	cpumask_t single_task_mask;
 +
@@ -9840,32 +9787,22 @@ index 000000000000..376a08a5afda
 +		return;
 +}
 +
-+static void smt_balance_func(struct rq *rq, const int cpu)
-+{
-+	if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
-+		queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_balance);
-+}
-+
 +/* e core balance functions */
-+static inline void ecore_balance(struct rq *rq)
++void ecore_balance(struct rq *rq)
 +{
 +	cpumask_t single_task_mask;
 +
 +	if (cpumask_andnot(&single_task_mask, cpu_active_mask, sched_idle_mask) &&
 +	    cpumask_andnot(&single_task_mask, &single_task_mask, &sched_rq_pending_mask) &&
++	    cpumask_empty(sched_pcore_idle_mask) &&
 +	    /* smt occupied p core to idle e core balance */
 +	    smt_pcore_source_balance(rq, &single_task_mask, sched_ecore_idle_mask))
 +		return;
 +}
-+
-+static void ecore_balance_func(struct rq *rq, const int cpu)
-+{
-+	queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), ecore_balance);
-+}
 +#endif /* CONFIG_SCHED_SMT */
 +
 +/* p core balance functions */
-+static inline void pcore_balance(struct rq *rq)
++void pcore_balance(struct rq *rq)
 +{
 +	cpumask_t single_task_mask;
 +
@@ -9876,34 +9813,28 @@ index 000000000000..376a08a5afda
 +		return;
 +}
 +
-+static void pcore_balance_func(struct rq *rq, const int cpu)
-+{
-+	queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), pcore_balance);
-+}
-+
 +#ifdef ALT_SCHED_DEBUG
 +#define SCHED_DEBUG_INFO(...)	printk(KERN_INFO __VA_ARGS__)
 +#else
 +#define SCHED_DEBUG_INFO(...)	do { } while(0)
 +#endif
 +
-+#define SET_IDLE_SELECT_FUNC(func)						\
++#define IDLE_SELECT_FUNC_UPDATE(func)						\
 +{										\
-+	idle_select_func = func;						\
-+	printk(KERN_INFO "sched: "#func);					\
++	static_call_update(sched_idle_select_func, &func);			\
++	printk(KERN_INFO "sched: idle select func -> "#func);			\
 +}
 +
-+#define SET_RQ_BALANCE_FUNC(rq, cpu, func)					\
++#define SET_SCHED_CPU_TOPOLOGY(cpu, topo)					\
 +{										\
-+	rq->balance_func = func;						\
-+	SCHED_DEBUG_INFO("sched: cpu#%02d -> "#func, cpu);			\
++	per_cpu(sched_cpu_topo, (cpu)) = topo;					\
++	SCHED_DEBUG_INFO("sched: cpu#%02d -> "#topo, cpu);			\
 +}
 +
-+#define SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_func, clear_func)			\
++#define SET_SCHED_CPU_TOPOLOGY_BALANCE(cpu, balance)				\
 +{										\
-+	rq->set_idle_mask_func		= set_func;				\
-+	rq->clear_idle_mask_func	= clear_func;				\
-+	SCHED_DEBUG_INFO("sched: cpu#%02d -> "#set_func" "#clear_func, cpu);	\
++	per_cpu(sched_cpu_topo_balance, (cpu)) = balance;			\
++	SCHED_DEBUG_INFO("sched: cpu#%02d -> "#balance, cpu);			\
 +}
 +
 +void sched_init_topology(void)
@@ -9926,16 +9857,17 @@ index 000000000000..376a08a5afda
 +		ecore_present = !cpumask_empty(&sched_ecore_mask);
 +	}
 +
-+#ifdef CONFIG_SCHED_SMT
 +	/* idle select function */
++#ifdef CONFIG_SCHED_SMT
 +	if (cpumask_equal(&sched_smt_mask, cpu_online_mask)) {
-+		SET_IDLE_SELECT_FUNC(p1_idle_select_func);
++		IDLE_SELECT_FUNC_UPDATE(p1_idle_select_func);
 +	} else
 +#endif
 +	if (!cpumask_empty(&sched_pcore_mask)) {
-+		SET_IDLE_SELECT_FUNC(p1p2_idle_select_func);
++		IDLE_SELECT_FUNC_UPDATE(p1p2_idle_select_func);
 +	}
 +
++	/* CPU topology setup */
 +	for_each_online_cpu(cpu) {
 +		rq = cpu_rq(cpu);
 +		/* take chance to reset time slice for idle tasks */
@@ -9943,13 +9875,13 @@ index 000000000000..376a08a5afda
 +
 +#ifdef CONFIG_SCHED_SMT
 +		if (cpumask_weight(cpu_smt_mask(cpu)) > 1) {
-+			SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_smt, clear_idle_mask_smt);
++			SET_SCHED_CPU_TOPOLOGY(cpu, CPU_TOPOLOGY_SMT);
 +
 +			if (cpumask_test_cpu(cpu, &sched_pcore_mask) &&
 +			    !cpumask_intersects(&sched_ecore_mask, &sched_smt_mask)) {
-+				SET_RQ_BALANCE_FUNC(rq, cpu, smt_pcore_balance_func);
++				SET_SCHED_CPU_TOPOLOGY_BALANCE(cpu, CPU_TOPOLOGY_BALANCE_SMT_PCORE);
 +			} else {
-+				SET_RQ_BALANCE_FUNC(rq, cpu, smt_balance_func);
++				SET_SCHED_CPU_TOPOLOGY_BALANCE(cpu, CPU_TOPOLOGY_BALANCE_SMT);
 +			}
 +
 +			continue;
@@ -9957,31 +9889,139 @@ index 000000000000..376a08a5afda
 +#endif
 +		/* !SMT or only one cpu in sg */
 +		if (cpumask_test_cpu(cpu, &sched_pcore_mask)) {
-+			SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_pcore, clear_idle_mask_pcore);
++			SET_SCHED_CPU_TOPOLOGY(cpu, CPU_TOPOLOGY_PCORE);
 +
 +			if (ecore_present)
-+				SET_RQ_BALANCE_FUNC(rq, cpu, pcore_balance_func);
++				SET_SCHED_CPU_TOPOLOGY_BALANCE(cpu, CPU_TOPOLOGY_BALANCE_PCORE);
 +
 +			continue;
 +		}
++
 +		if (cpumask_test_cpu(cpu, &sched_ecore_mask)) {
-+			SET_RQ_IDLE_MASK_FUNC(rq, cpu, set_idle_mask_ecore, clear_idle_mask_ecore);
++			SET_SCHED_CPU_TOPOLOGY(cpu, CPU_TOPOLOGY_ECORE);
 +#ifdef CONFIG_SCHED_SMT
 +			if (cpumask_intersects(&sched_pcore_mask, &sched_smt_mask))
-+				SET_RQ_BALANCE_FUNC(rq, cpu, ecore_balance_func);
++				SET_SCHED_CPU_TOPOLOGY_BALANCE(cpu, CPU_TOPOLOGY_BALANCE_ECORE);
 +#endif
 +		}
 +	}
 +}
 diff --git a/kernel/sched/alt_topology.h b/kernel/sched/alt_topology.h
 new file mode 100644
-index 000000000000..076174cd2bc6
+index 000000000000..14591a303ea5
 --- /dev/null
 +++ b/kernel/sched/alt_topology.h
-@@ -0,0 +1,6 @@
+@@ -0,0 +1,113 @@
 +#ifndef _KERNEL_SCHED_ALT_TOPOLOGY_H
 +#define _KERNEL_SCHED_ALT_TOPOLOGY_H
 +
++/*
++ * CPU topology type
++ */
++enum cpu_topo_type {
++	CPU_TOPOLOGY_DEFAULT = 0,
++	CPU_TOPOLOGY_PCORE,
++	CPU_TOPOLOGY_ECORE,
++#ifdef CONFIG_SCHED_SMT
++	CPU_TOPOLOGY_SMT,
++#endif
++};
++
++DECLARE_PER_CPU_READ_MOSTLY(enum cpu_topo_type, sched_cpu_topo);
++
++static inline void sched_set_idle_mask(const unsigned int cpu)
++{
++	cpumask_set_cpu(cpu, sched_idle_mask);
++
++	switch (per_cpu(sched_cpu_topo, cpu)) {
++	case CPU_TOPOLOGY_DEFAULT:
++		break;
++	case CPU_TOPOLOGY_PCORE:
++		cpumask_set_cpu(cpu, sched_pcore_idle_mask);
++		break;
++	case CPU_TOPOLOGY_ECORE:
++		cpumask_set_cpu(cpu, sched_ecore_idle_mask);
++		break;
++#ifdef CONFIG_SCHED_SMT
++	case CPU_TOPOLOGY_SMT:
++		if (cpumask_subset(cpu_smt_mask(cpu), sched_idle_mask))
++			cpumask_or(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
++		break;
++#endif
++	}
++}
++
++static inline void sched_clear_idle_mask(const unsigned int cpu)
++{
++	cpumask_clear_cpu(cpu, sched_idle_mask);
++
++	switch (per_cpu(sched_cpu_topo, cpu)) {
++	case CPU_TOPOLOGY_DEFAULT:
++		break;
++	case CPU_TOPOLOGY_PCORE:
++		cpumask_clear_cpu(cpu, sched_pcore_idle_mask);
++		break;
++	case CPU_TOPOLOGY_ECORE:
++		cpumask_clear_cpu(cpu, sched_ecore_idle_mask);
++		break;
++#ifdef CONFIG_SCHED_SMT
++	case CPU_TOPOLOGY_SMT:
++		cpumask_andnot(sched_sg_idle_mask, sched_sg_idle_mask, cpu_smt_mask(cpu));
++		break;
++#endif
++	}
++}
++
++/*
++ * CPU topology balance type
++ */
++enum cpu_topo_balance_type {
++	CPU_TOPOLOGY_BALANCE_NONE = 0,
++	CPU_TOPOLOGY_BALANCE_PCORE,
++#ifdef CONFIG_SCHED_SMT
++	CPU_TOPOLOGY_BALANCE_ECORE,
++	CPU_TOPOLOGY_BALANCE_SMT,
++	CPU_TOPOLOGY_BALANCE_SMT_PCORE,
++#endif
++};
++
++DECLARE_PER_CPU_READ_MOSTLY(enum cpu_topo_balance_type, sched_cpu_topo_balance);
++DECLARE_PER_CPU(struct balance_callback, active_balance_head);
++
++extern void pcore_balance(struct rq *rq);
++#ifdef CONFIG_SCHED_SMT
++extern void ecore_balance(struct rq *rq);
++extern void smt_balance(struct rq *rq);
++extern void smt_pcore_balance(struct rq *rq);
++#endif
++
++static inline void sched_cpu_topology_balance(const unsigned int cpu, struct rq *rq)
++{
++	if (!rq->online)
++		return;
++
++	switch (per_cpu(sched_cpu_topo_balance, cpu)) {
++	case CPU_TOPOLOGY_BALANCE_NONE:
++		break;
++	case CPU_TOPOLOGY_BALANCE_PCORE:
++		queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), pcore_balance);
++		break;
++#ifdef CONFIG_SCHED_SMT
++	case CPU_TOPOLOGY_BALANCE_ECORE:
++		queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), ecore_balance);
++		break;
++	case CPU_TOPOLOGY_BALANCE_SMT:
++		if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
++			queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_balance);
++		break;
++	case CPU_TOPOLOGY_BALANCE_SMT_PCORE:
++		if (cpumask_test_cpu(cpu, sched_sg_idle_mask))
++			queue_balance_callback(rq, &per_cpu(active_balance_head, cpu), smt_pcore_balance);
++		break;
++#endif
++	}
++}
++
 +extern void sched_init_topology(void);
 +
 +#endif /* _KERNEL_SCHED_ALT_TOPOLOGY_H */
@@ -11197,7 +11237,7 @@ index 6e2f54169e66..5a5031761477 100644
  static int __init setup_relax_domain_level(char *str)
  {
  	if (kstrtoint(str, 0, &default_relax_domain_level))
-@@ -1731,6 +1734,7 @@ sd_init(struct sched_domain_topology_level *tl,
+@@ -1723,6 +1726,7 @@ sd_init(struct sched_domain_topology_level *tl,
  
  	return sd;
  }
@@ -11205,15 +11245,15 @@ index 6e2f54169e66..5a5031761477 100644
  
  /*
   * Topology list, bottom-up.
-@@ -1767,6 +1771,7 @@ void __init set_sched_topology(struct sched_domain_topology_level *tl)
+@@ -1759,6 +1763,7 @@ void __init set_sched_topology(struct sched_domain_topology_level *tl)
  	sched_domain_topology_saved = NULL;
  }
  
 +#ifndef CONFIG_SCHED_ALT
  #ifdef CONFIG_NUMA
  
- static const struct cpumask *sd_numa_mask(int cpu)
-@@ -2833,3 +2838,31 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ static const struct cpumask *sd_numa_mask(struct sched_domain_topology_level *tl, int cpu)
+@@ -2825,3 +2830,31 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
  	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
  	sched_domains_mutex_unlock();
  }
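
For orientation, the core change in this revision drops the per-runqueue set/clear idle-mask and balance function pointers used in r0 in favour of per-CPU topology enums dispatched by inline helpers in kernel/sched/alt_topology.h, and converts idle_select_func into a static call. The snippet below is only an illustrative user-space analogue of that dispatch pattern, not code from the patch: cpumasks are modelled as plain bitmasks, NR_CPUS is an arbitrary stand-in, and the kernel per-CPU and static_call machinery is omitted.

#include <stdio.h>

/* Per-CPU topology classification, mirroring alt_topology.h above. */
enum cpu_topo_type {
	CPU_TOPOLOGY_DEFAULT = 0,
	CPU_TOPOLOGY_PCORE,
	CPU_TOPOLOGY_ECORE,
	CPU_TOPOLOGY_SMT,
};

#define NR_CPUS 8

/* Stand-ins for the kernel's per-CPU variable and cpumasks. */
static enum cpu_topo_type sched_cpu_topo[NR_CPUS];
static unsigned long sched_idle_mask;
static unsigned long sched_pcore_idle_mask;
static unsigned long sched_ecore_idle_mask;

/*
 * r0 kept rq->set_idle_mask_func as a per-rq function pointer; r1
 * replaces it with one helper that switches on the per-CPU topology
 * type (SMT sibling handling omitted in this sketch).
 */
static void sched_set_idle_mask(unsigned int cpu)
{
	sched_idle_mask |= 1UL << cpu;

	switch (sched_cpu_topo[cpu]) {
	case CPU_TOPOLOGY_PCORE:
		sched_pcore_idle_mask |= 1UL << cpu;
		break;
	case CPU_TOPOLOGY_ECORE:
		sched_ecore_idle_mask |= 1UL << cpu;
		break;
	default:
		break;
	}
}

int main(void)
{
	sched_cpu_topo[0] = CPU_TOPOLOGY_PCORE;	/* pretend cpu0 is a P core */
	sched_cpu_topo[4] = CPU_TOPOLOGY_ECORE;	/* pretend cpu4 is an E core */

	sched_set_idle_mask(0);
	sched_set_idle_mask(4);

	printf("idle=%#lx pcore=%#lx ecore=%#lx\n",
	       sched_idle_mask, sched_pcore_idle_mask, sched_ecore_idle_mask);
	return 0;
}

The real helpers additionally maintain sched_sg_idle_mask for whole-idle SMT groups, as shown in the alt_topology.h hunk above.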