| From 5fbca1c6f6e579b7e5bd37a23b443264ed088cab Mon Sep 17 00:00:00 2001 |
| From: Aubrey Li <aubrey.li@linux.intel.com> |
| Date: Wed, 24 Mar 2021 17:40:13 -0400 |
| Subject: [PATCH] FROMLIST: sched: Migration changes for core scheduling |
| |
| - Don't migrate if there is a cookie mismatch
|   Load balancing tries to move a task from the busiest CPU to the
|   destination CPU. When core scheduling is enabled, if the task's
|   cookie does not match the destination CPU's core cookie, the task
|   may be skipped by that CPU. This mitigates forced idle time on the
|   destination CPU.
| |
| - Select cookie-matched idle CPU
|   In the fast path of task wakeup, select the first cookie-matched
|   idle CPU instead of the first idle CPU.
| |
| - Find cookie-matched idlest CPU
|   In the slow path of task wakeup, find the idlest CPU whose core
|   cookie matches the task's cookie.
| |
| (am from |
| https://lore.kernel.org/lkml/20210422123308.860083871@infradead.org/#t) |
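| 
| The common rule behind these checks, sched_core_cookie_match() in the
| diff below, treats a destination as acceptable when core scheduling is
| disabled, when the destination's SMT core is fully idle, or when the
| core's cookie equals the task's cookie. A minimal standalone model of
| that rule (illustration only, not kernel code; the type and helper
| names below are made up):
| 
|   #include <stdbool.h>
| 
|   /* Toy stand-in for the per-core state the scheduler consults. */
|   struct core_state {
|           unsigned long cookie;        /* cookie of the running tasks */
|           bool all_siblings_idle;      /* every SMT sibling is idle */
|           bool core_sched_enabled;     /* core scheduling is on */
|   };
| 
|   static bool cookie_match(const struct core_state *c, unsigned long task_cookie)
|   {
|           if (!c->core_sched_enabled)
|                   return true;  /* matching is a no-op without core sched */
|           if (c->all_siblings_idle)
|                   return true;  /* an idle core accepts any cookie */
|           return c->cookie == task_cookie;
|   }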
| |
| BUG=b:152605392 |
| TEST=run power_VideoCall test |
| |
| Signed-off-by: Aubrey Li <aubrey.li@linux.intel.com> |
| Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> |
| Tested-by: Don Hiatt <dhiatt@digitalocean.com> |
| Signed-off-by: Joel Fernandes <joelaf@google.com> |
| Change-Id: I65d0e47c708d892b4820edafd9e0ea3536ba2ab4 |
| Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/2880790 |
| Reviewed-by: Sonny Rao <sonnyrao@chromium.org> |
| --- |
| kernel/sched/fair.c | 21 +++++++++++-- |
| kernel/sched/sched.h | 73 ++++++++++++++++++++++++++++++++++++++++++++ |
| 2 files changed, 92 insertions(+), 2 deletions(-) |
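| 
| For the cookie checks below to have any effect at run time, tasks need
| core-scheduling cookies. A minimal userspace sketch of tagging a task,
| assuming the mainline prctl(PR_SCHED_CORE) interface (v5.14) is
| available in this tree; older backports may expose cookies differently:
| 
|   #include <stdio.h>
|   #include <sys/prctl.h>
|   #include <linux/prctl.h>
| 
|   int main(void)
|   {
|           /*
|            * Create a new cookie for the whole thread group; forked
|            * children inherit it, so the wakeup and load-balance paths
|            * will prefer cookie-matched cores for them.
|            */
|           if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
|                     PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0))
|                   perror("PR_SCHED_CORE_CREATE");
|           return 0;
|   }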
| |
| diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c |
| index dd1c629c5e1d..1a5d61705ba4 100644 |
| --- a/kernel/sched/fair.c |
| +++ b/kernel/sched/fair.c |
| @@ -5895,11 +5895,15 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this |
| |
| /* Traverse only the allowed CPUs */ |
| for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { |
| + struct rq *rq = cpu_rq(i); |
| + |
| + if (!sched_core_cookie_match(rq, p)) |
| + continue; |
| + |
| if (sched_idle_cpu(i)) |
| return i; |
| |
| if (available_idle_cpu(i)) { |
| - struct rq *rq = cpu_rq(i); |
| struct cpuidle_state *idle = idle_get_state(rq); |
| if (idle && idle->exit_latency < min_exit_latency) { |
| /* |
| @@ -6060,7 +6064,8 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu |
| return __select_idle_cpu(core); |
| |
| for_each_cpu(cpu, cpu_smt_mask(core)) { |
| - if (!available_idle_cpu(cpu)) { |
| + if (!available_idle_cpu(cpu) || |
| + !sched_cpu_cookie_match(cpu_rq(cpu), p)) { |
| idle = false; |
| if (*idle_cpu == -1) { |
| if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) { |
| @@ -7527,6 +7532,14 @@ static int task_hot(struct task_struct *p, struct lb_env *env) |
| |
| if (sysctl_sched_migration_cost == -1) |
| return 1; |
| + |
| + /* |
| + * Don't migrate task if the task's cookie does not match |
| + * with the destination CPU's core cookie. |
| + */ |
| + if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) |
| + return 1; |
| + |
| if (sysctl_sched_migration_cost == 0) |
| return 0; |
| |
| @@ -8857,6 +8870,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) |
| p->cpus_ptr)) |
| continue; |
| |
| + /* Skip over this group if no cookie matched */ |
| + if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) |
| + continue; |
| + |
| local_group = cpumask_test_cpu(this_cpu, |
| sched_group_span(group)); |
| |
| diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h |
| index 33e1f081434f..465de99175c0 100644 |
| --- a/kernel/sched/sched.h |
| +++ b/kernel/sched/sched.h |
| @@ -1134,7 +1134,9 @@ static inline bool is_migration_disabled(struct task_struct *p) |
| #endif |
| } |
| |
| +struct sched_group; |
| #ifdef CONFIG_SCHED_CORE |
| +static inline struct cpumask *sched_group_span(struct sched_group *sg); |
| |
| DECLARE_STATIC_KEY_FALSE(__sched_core_enabled); |
| |
| @@ -1170,6 +1172,61 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq) |
| |
| bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi); |
| |
| +/* |
| + * Helpers to check if the CPU's core cookie matches with the task's cookie |
| + * when core scheduling is enabled. |
| + * A special case is that the task's cookie always matches with CPU's core |
| + * cookie if the CPU is in an idle core. |
| + */ |
| +static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) |
| +{ |
| + /* Ignore cookie match if core scheduler is not enabled on the CPU. */ |
| + if (!sched_core_enabled(rq)) |
| + return true; |
| + |
| + return rq->core->core_cookie == p->core_cookie; |
| +} |
| + |
| +static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) |
| +{ |
| + bool idle_core = true; |
| + int cpu; |
| + |
| + /* Ignore cookie match if core scheduler is not enabled on the CPU. */ |
| + if (!sched_core_enabled(rq)) |
| + return true; |
| + |
| + for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { |
| + if (!available_idle_cpu(cpu)) { |
| + idle_core = false; |
| + break; |
| + } |
| + } |
| + |
| + /* |
| + * A CPU in an idle core is always the best choice for tasks with |
| + * cookies. |
| + */ |
| + return idle_core || rq->core->core_cookie == p->core_cookie; |
| +} |
| + |
| +static inline bool sched_group_cookie_match(struct rq *rq, |
| + struct task_struct *p, |
| + struct sched_group *group) |
| +{ |
| + int cpu; |
| + |
| + /* Ignore cookie match if core scheduler is not enabled on the CPU. */ |
| + if (!sched_core_enabled(rq)) |
| + return true; |
| + |
| + for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { |
| + if (sched_core_cookie_match(cpu_rq(cpu), p))
| + return true; |
| + } |
| + return false; |
| +} |
| + |
| extern void queue_core_balance(struct rq *rq); |
| |
| #else /* !CONFIG_SCHED_CORE */ |
| @@ -1197,6 +1254,22 @@ static inline void queue_core_balance(struct rq *rq) |
| { |
| } |
| |
| +static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) |
| +{ |
| + return true; |
| +} |
| + |
| +static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) |
| +{ |
| + return true; |
| +} |
| + |
| +static inline bool sched_group_cookie_match(struct rq *rq, |
| + struct task_struct *p, |
| + struct sched_group *group) |
| +{ |
| + return true; |
| +} |
| #endif /* CONFIG_SCHED_CORE */ |
| |
| static inline void lockdep_assert_rq_held(struct rq *rq) |
| -- |
| 2.32.0.272.g935e593368-goog |
| |