| From d17de2e1204f332e2184de96616a44a360d136b6 Mon Sep 17 00:00:00 2001 |
| From: Quentin Perret <quentin.perret@arm.com> |
| Date: Tue, 30 Jul 2019 13:58:29 +0100 |
| Subject: [PATCH] NOUPSTREAM: ANDROID: sched: Introduce uclamp latency and |
| boost wrapper |
| |
| Introduce a simple helper to read the latency_sensitive flag from a |
| task. It is called uclamp_latency_sensitive() to match the API |
| proposed by Patrick. |
| |
| While at it, introduce uclamp_boosted() which returns true only when a |
| task has a non-zero min-clamp. |
| |
| [CPNOTE: 30/06/21] Lee: Hoping for an upstream alternative (conversation died) |
| |
| Bug: 120440300 |
| Change-Id: I5fc747da8b58625257a6604a3c88487b657fbe7a |
| Suggested-by: Patrick Bellasi <patrick.bellasi@arm.com> |
| Signed-off-by: Quentin Perret <quentin.perret@arm.com> |
| --- |
| kernel/sched/sched.h | 29 +++++++++++++++++++++++++++++ |
| 1 file changed, 29 insertions(+) |
| |
| diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h |
| index 9a1e8d40261d..1cf0b42a8d12 100644 |
| --- a/kernel/sched/sched.h |
| +++ b/kernel/sched/sched.h |
| @@ -2958,6 +2958,11 @@ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, |
| return clamp(util, min_util, max_util); |
| } |
| |
| +static inline bool uclamp_boosted(struct task_struct *p) |
| +{ |
| + return uclamp_eff_value(p, UCLAMP_MIN) > 0; |
| +} |
| + |
| /* Is the rq being capped/throttled by uclamp_max? */ |
| static inline bool uclamp_rq_is_capped(struct rq *rq) |
| { |
| @@ -2993,6 +2998,11 @@ unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, |
| return util; |
| } |
| |
| +static inline bool uclamp_boosted(struct task_struct *p) |
| +{ |
| + return false; |
| +} |
| + |
| static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } |
| |
| static inline bool uclamp_is_used(void) |
| @@ -3001,6 +3011,25 @@ static inline bool uclamp_is_used(void) |
| } |
| #endif /* CONFIG_UCLAMP_TASK */ |
| |
| +#ifdef CONFIG_UCLAMP_TASK_GROUP |
| +static inline bool uclamp_latency_sensitive(struct task_struct *p) |
| +{ |
| + struct cgroup_subsys_state *css = task_css(p, cpu_cgrp_id); |
| + struct task_group *tg; |
| + |
| + if (!css) |
| + return false; |
| + tg = container_of(css, struct task_group, css); |
| + |
| + return tg->latency_sensitive; |
| +} |
| +#else |
| +static inline bool uclamp_latency_sensitive(struct task_struct *p) |
| +{ |
| + return false; |
| +} |
| +#endif /* CONFIG_UCLAMP_TASK_GROUP */ |
| + |
| #ifdef CONFIG_HAVE_SCHED_AVG_IRQ |
| static inline unsigned long cpu_util_irq(struct rq *rq) |
| { |
| -- |
| 2.35.0 |
| |