From a7ab5d8d1dd8b882ca352d7d78e26519ddf09414 Mon Sep 17 00:00:00 2001
From: Chris Redpath <chris.redpath@arm.com>
Date: Wed, 27 Mar 2019 17:15:17 +0000
Subject: [PATCH] NOUPSTREAM: ANDROID: sched: Unconditionally honor sync flag
for energy-aware wakeups
Since we don't do energy-aware wakeups while the system is
overutilized, unconditionally honoring the sync flag in the
energy-aware path does not prevent the usual wake_wide mechanics
from overruling the flag, as they normally do, once we become
overutilized.
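To make the behaviour concrete: the fast path added below boils down
to a single condition on the waker's runqueue. A minimal standalone
sketch of that logic follows (the struct and helper names are
simplified stand-ins for illustration, not the kernel's own types):

  #include <stdbool.h>

  /* Simplified model of the sync fast path: on a sync (WF_SYNC)
   * wakeup the waker is expected to sleep shortly, so if the waker
   * is the only task on its CPU and the wakee is allowed to run
   * there, pick that CPU directly and skip the energy estimation
   * loop entirely.
   */
  struct rq_model {		/* stand-in for struct rq */
  	int nr_running;		/* tasks currently queued on this CPU */
  };

  static bool take_sync_fast_path(const struct rq_model *waker_rq,
  				  bool sync, bool wakee_allowed_here)
  {
  	return sync && waker_rq->nr_running == 1 && wakee_allowed_here;
  }

When the fast path is not taken (no sync hint, a busy waker CPU, or
an affinity mismatch), find_energy_efficient_cpu() falls through to
the normal per-CPU energy comparison.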
This patch is based upon previous work to build EAS for Android
products. The sync-hint code is taken from commit 4a5e890ec60d
("sched/fair: add tunable to force selection at cpu granularity")
by Juri Lelli <juri.lelli@arm.com>.
[CPNOTE: 29/06/21] Lee: Binder related - may regress upstream workloads
Bug: 120440300
Change-Id: I4b3d79141fc8e53dc51cd63ac11096c2e3cb10f5
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
(cherry picked from commit f1ec666a62dec1083ed52fe1ddef093b84373aaf)
[ Moved the feature to find_energy_efficient_cpu() and removed the
sysctl knob ]
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
---
kernel/sched/fair.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8a5b1ae0aa55a82915f88b07c3f1958bcf8d3215..958c9d9789b75d3e3da1cc9be71a148858b761ff 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7987,7 +7987,7 @@ compute_energy(struct energy_env *eenv, struct perf_domain *pd,
  * other use-cases too. So, until someone finds a better way to solve this,
  * let's keep things simple by re-using the existing slow path.
  */
-static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
 	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
@@ -8007,6 +8007,13 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 	if (!pd)
 		goto unlock;
 
+	cpu = smp_processor_id();
+	if (sync && cpu_rq(cpu)->nr_running == 1 &&
+	    cpumask_test_cpu(cpu, p->cpus_ptr)) {
+		rcu_read_unlock();
+		return cpu;
+	}
+
 	/*
 	 * Energy-aware wake-up happens on the lowest sched_domain starting
 	 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
@@ -8207,7 +8214,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 		return cpu;
 
 	if (!is_rd_overutilized(this_rq()->rd)) {
-		new_cpu = find_energy_efficient_cpu(p, prev_cpu);
+		new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
 		if (new_cpu >= 0)
 			return new_cpu;
 		new_cpu = prev_cpu;
--
2.45.1.288.g0e0cd299f1-goog