From ba156848c96155a3f70a32f9edbf95d12fa8ce66 Mon Sep 17 00:00:00 2001
From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Date: Wed, 5 May 2021 20:38:01 -0700
Subject: [PATCH] FROMLIST: sched/fair: Carve out logic to mark a group for
 asymmetric packing

Create a separate function, sched_asym(). A subsequent changeset will
introduce logic to deal with SMT in conjunction with asymmetric
packing. Such logic will need the statistics of the scheduling
group provided as an argument; update those statistics before calling
sched_asym().
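
For illustration only (not part of the patch): a minimal standalone
sketch of the shape of this refactor. The simplified structures, the
argument list, and the lower-cpu-id-wins priority rule below are
stand-ins, not the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's structures. */
struct sg_lb_stats { unsigned int sum_h_nr_running; };
struct sched_group { int asym_prefer_cpu; };

/* Demo priority rule: lower CPU id wins. The kernel instead compares
 * arch_asym_cpu_priority() of the two CPUs. */
static bool sched_asym_prefer(int a, int b)
{
	return a < b;
}

/* The carved-out helper. sgs is unused here, but the signature already
 * carries the group statistics that the upcoming SMT logic will
 * inspect, which is why the caller updates them before this call. */
static bool sched_asym(int dst_cpu, struct sg_lb_stats *sgs,
		       struct sched_group *group)
{
	(void)sgs;
	return sched_asym_prefer(dst_cpu, group->asym_prefer_cpu);
}

int main(void)
{
	struct sg_lb_stats sgs = { .sum_h_nr_running = 2 };
	struct sched_group grp = { .asym_prefer_cpu = 4 };

	/* dst CPU 0 outranks the group's preferred CPU 4, so the group
	 * would be marked for asymmetric packing. */
	printf("group_asym_packing = %d\n", sched_asym(0, &sgs, &grp));
	return 0;
}
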
Cc: Aubrey Li <aubrey.li@intel.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Quentin Perret <qperret@google.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Reviewed-by: Len Brown <len.brown@intel.com>
Co-developed-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
(am from https://lore.kernel.org/patchwork/patch/1474505/)
TOREVERT=b:192050150
BUG=b:179699891
TEST=basic boot on brya and check that /proc/sys/kernel/sched_itmt_enabled
is 1
TEST=spin N+1 threads where N = number of physical cores - expect
N threads to run on the N physical cores and the (N+1)th thread
to run on an Atom CPU
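
A sketch of that spinner test, for illustration only; the thread-count
argument and the use of ps to observe placement are assumptions, not
part of the commit:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Busy-loop forever; volatile keeps the loop from being optimized out. */
static void *spin(void *arg)
{
	volatile unsigned long x = 0;

	(void)arg;
	for (;;)
		x++;
	return NULL;
}

int main(int argc, char **argv)
{
	/* N = number of physical cores, passed on the command line. */
	int n = (argc > 1) ? atoi(argv[1]) : 4;
	pthread_t t;

	/* Spawn N+1 spinners and let the scheduler place them; watch
	 * placement from another shell with: ps -eLo pid,psr,comm */
	for (int i = 0; i < n + 1; i++)
		pthread_create(&t, NULL, spin, NULL);

	pause();	/* keep the process alive while the spinners run */
	return 0;
}

Build with cc -pthread; with the patch applied, N spinners should land
on the N big cores and the extra one on an Atom core.
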
Change-Id: Icdbed826c5ad6f02275a14c755af175f32042578
---
Changes since v3:
* Remove a redundant check for the local group in sched_asym().
(Dietmar)
* Reworded commit message for clarity. (Len)
Changes since v2:
* Introduced this patch.
Changes since v1:
* N/A
Signed-off-by: George D Sworo <george.d.sworo@intel.com>
Change-Id: I688bb1eb29e851fa83e8c5586919be8c43b68818
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/2953069
Commit-Queue: Alex Levin <levinale@google.com>
Tested-by: George D Sworo <george.d.sworo@intel.corp-partner.google.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
Reviewed-by: Alex Levin <levinale@google.com>
Reviewed-by: Joel Fernandes <joelaf@google.com>
---
kernel/sched/fair.c | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8534,6 +8534,13 @@ group_type group_classify(unsigned int imbalance_pct,
 	return group_has_spare;
 }
 
+static inline bool
+sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
+	   struct sched_group *group)
+{
+	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
+}
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
@@ -8594,18 +8601,17 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		}
 	}
 
+	sgs->group_capacity = group->sgc->capacity;
+
+	sgs->group_weight = group->group_weight;
+
 	/* Check if dst CPU is idle and preferred to this group */
 	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
-	    env->idle != CPU_NOT_IDLE &&
-	    sgs->sum_h_nr_running &&
-	    sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) {
+	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
+	    sched_asym(env, sds, sgs, group)) {
 		sgs->group_asym_packing = 1;
 	}
 
-	sgs->group_capacity = group->sgc->capacity;
-
-	sgs->group_weight = group->group_weight;
-
 	sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
 
 	/* Computing avg_load makes sense only when group is overloaded */
--
2.33.0.259.gc128427fd7-goog