| From 74752b1351e904148fc18e132bd1f51da98627a1 Mon Sep 17 00:00:00 2001 |
| From: Peter Zijlstra <peterz@infradead.org> |
| Date: Tue, 17 Nov 2020 18:19:31 -0500 |
| Subject: [PATCH] FROMLIST: sched: Wrap rq::lock access |
| |
| In preparation for playing games with rq->lock, abstract the lock |
| behind an accessor. |
| |
| (am from |
| https://lore.kernel.org/lkml/20210422123308.136465446@infradead.org/raw) |
| |
| BUG=b:152605392 |
| TEST=run power_VideoCall test |
| |
| Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> |
| Tested-by: Don Hiatt <dhiatt@digitalocean.com> |
| Signed-off-by: Joel Fernandes <joelaf@google.com> |
| Change-Id: Id951969c5eb6316182e52aeb731a27eb039c293d |
| Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/2880778 |
| Reviewed-by: Sonny Rao <sonnyrao@chromium.org> |
| --- |
| kernel/sched/core.c | 70 +++++++++++++-------------- |
| kernel/sched/cpuacct.c | 12 ++--- |
| kernel/sched/deadline.c | 22 ++++----- |
| kernel/sched/debug.c | 4 +- |
| kernel/sched/fair.c | 35 +++++++------- |
| kernel/sched/idle.c | 4 +- |
| kernel/sched/pelt.h | 2 +- |
| kernel/sched/rt.c | 16 +++--- |
| kernel/sched/sched.h | 105 ++++++++++++++++++++-------------------- |
| kernel/sched/topology.c | 4 +- |
| 10 files changed, 136 insertions(+), 138 deletions(-) |
| |
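| Note: below is a minimal sketch of the accessor pattern the call sites in |
| this patch are converted to. rq_lockp() returning the renamed rq->__lock is |
| visible as context in the kernel/sched/sched.h hunk; the exact |
| raw_spin_rq_lock()/raw_spin_rq_unlock() and lockdep_assert_rq_held() helpers |
| are assumed to come from earlier patches in this series, so their bodies |
| here are illustrative only. |
| |
| /* Accessor around the renamed lock field (see the sched.h hunk below). */ |
| static inline raw_spinlock_t *rq_lockp(struct rq *rq) |
| { |
| 	return &rq->__lock; |
| } |
| |
| /* |
|  * Assumed shape of the wrappers; callers switch from |
|  * raw_spin_lock(&rq->lock) / raw_spin_unlock(&rq->lock) to these. |
|  */ |
| static inline void raw_spin_rq_lock(struct rq *rq) |
| { |
| 	raw_spin_lock(rq_lockp(rq)); |
| } |
| |
| static inline void raw_spin_rq_unlock(struct rq *rq) |
| { |
| 	raw_spin_unlock(rq_lockp(rq)); |
| } |
| |
| static inline void lockdep_assert_rq_held(struct rq *rq) |
| { |
| 	lockdep_assert_held(rq_lockp(rq)); |
| } |
| |
| Funnelling every access through rq_lockp() is what lets a later patch point |
| the accessor at a different lock without touching these call sites again. |
| |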
| diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
| index 005696316e73..fd9bf93f953b 100644 |
| --- a/kernel/sched/core.c |
| +++ b/kernel/sched/core.c |
| @@ -211,12 +211,12 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
| |
| for (;;) { |
| rq = task_rq(p); |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { |
| rq_pin_lock(rq, rf); |
| return rq; |
| } |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| |
| while (unlikely(task_on_rq_migrating(p))) |
| cpu_relax(); |
| @@ -235,7 +235,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
| for (;;) { |
| raw_spin_lock_irqsave(&p->pi_lock, rf->flags); |
| rq = task_rq(p); |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| /* |
| * move_queued_task() task_rq_lock() |
| * |
| @@ -257,7 +257,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
| rq_pin_lock(rq, rf); |
| return rq; |
| } |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); |
| |
| while (unlikely(task_on_rq_migrating(p))) |
| @@ -327,7 +327,7 @@ void update_rq_clock(struct rq *rq) |
| { |
| s64 delta; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| if (rq->clock_update_flags & RQCF_ACT_SKIP) |
| return; |
| @@ -626,7 +626,7 @@ void resched_curr(struct rq *rq) |
| struct task_struct *curr = rq->curr; |
| int cpu; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| if (test_tsk_need_resched(curr)) |
| return; |
| @@ -650,10 +650,10 @@ void resched_cpu(int cpu) |
| struct rq *rq = cpu_rq(cpu); |
| unsigned long flags; |
| |
| - raw_spin_lock_irqsave(&rq->lock, flags); |
| + raw_spin_rq_lock_irqsave(rq, flags); |
| if (cpu_online(cpu) || cpu == smp_processor_id()) |
| resched_curr(rq); |
| - raw_spin_unlock_irqrestore(&rq->lock, flags); |
| + raw_spin_rq_unlock_irqrestore(rq, flags); |
| } |
| |
| #ifdef CONFIG_SMP |
| @@ -1152,7 +1152,7 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, |
| struct uclamp_se *uc_se = &p->uclamp[clamp_id]; |
| struct uclamp_bucket *bucket; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| /* Update task effective clamp */ |
| p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); |
| @@ -1192,7 +1192,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, |
| unsigned int bkt_clamp; |
| unsigned int rq_clamp; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| /* |
| * If sched_uclamp_used was enabled after task @p was enqueued, |
| @@ -1865,7 +1865,7 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) |
| static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, |
| struct task_struct *p, int new_cpu) |
| { |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| deactivate_task(rq, p, DEQUEUE_NOCLOCK); |
| set_task_cpu(p, new_cpu); |
| @@ -2039,7 +2039,7 @@ int push_cpu_stop(void *arg) |
| struct task_struct *p = arg; |
| |
| raw_spin_lock_irq(&p->pi_lock); |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| |
| if (task_rq(p) != rq) |
| goto out_unlock; |
| @@ -2069,7 +2069,7 @@ int push_cpu_stop(void *arg) |
| |
| out_unlock: |
| rq->push_busy = false; |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| raw_spin_unlock_irq(&p->pi_lock); |
| |
| put_task_struct(p); |
| @@ -2122,7 +2122,7 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 |
| * Because __kthread_bind() calls this on blocked tasks without |
| * holding rq->lock. |
| */ |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); |
| } |
| if (running) |
| @@ -2463,7 +2463,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
| * task_rq_lock(). |
| */ |
| WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || |
| - lockdep_is_held(&task_rq(p)->lock))); |
| + lockdep_is_held(rq_lockp(task_rq(p))))); |
| #endif |
| /* |
| * Clearly, migrating tasks to offline CPUs is a fairly daft thing. |
| @@ -3005,7 +3005,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, |
| { |
| int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| if (p->sched_contributes_to_load) |
| rq->nr_uninterruptible--; |
| @@ -4016,7 +4016,7 @@ static void do_balance_callbacks(struct rq *rq, struct callback_head *head) |
| void (*func)(struct rq *rq); |
| struct callback_head *next; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| while (head) { |
| func = (void (*)(struct rq *))head->func; |
| @@ -4039,7 +4039,7 @@ static inline struct callback_head *splice_balance_callbacks(struct rq *rq) |
| { |
| struct callback_head *head = rq->balance_callback; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| if (head) |
| rq->balance_callback = NULL; |
| |
| @@ -4056,9 +4056,9 @@ static inline void balance_callbacks(struct rq *rq, struct callback_head *head) |
| unsigned long flags; |
| |
| if (unlikely(head)) { |
| - raw_spin_lock_irqsave(&rq->lock, flags); |
| + raw_spin_rq_lock_irqsave(rq, flags); |
| do_balance_callbacks(rq, head); |
| - raw_spin_unlock_irqrestore(&rq->lock, flags); |
| + raw_spin_rq_unlock_irqrestore(rq, flags); |
| } |
| } |
| |
| @@ -4089,10 +4089,10 @@ prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf |
| * do an early lockdep release here: |
| */ |
| rq_unpin_lock(rq, rf); |
| - spin_release(&rq->lock.dep_map, _THIS_IP_); |
| + spin_release(&rq_lockp(rq)->dep_map, _THIS_IP_); |
| #ifdef CONFIG_DEBUG_SPINLOCK |
| /* this is a valid case when another task releases the spinlock */ |
| - rq->lock.owner = next; |
| + rq_lockp(rq)->owner = next; |
| #endif |
| } |
| |
| @@ -4103,9 +4103,9 @@ static inline void finish_lock_switch(struct rq *rq) |
| * fix up the runqueue lock - which gets 'carried over' from |
| * prev into current: |
| */ |
| - spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); |
| + spin_acquire(&rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); |
| __balance_callbacks(rq); |
| - raw_spin_unlock_irq(&rq->lock); |
| + raw_spin_rq_unlock_irq(rq); |
| } |
| |
| /* |
| @@ -5165,7 +5165,7 @@ static void __sched notrace __schedule(bool preempt) |
| |
| rq_unpin_lock(rq, &rf); |
| __balance_callbacks(rq); |
| - raw_spin_unlock_irq(&rq->lock); |
| + raw_spin_rq_unlock_irq(rq); |
| } |
| } |
| |
| @@ -5707,7 +5707,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) |
| |
| rq_unpin_lock(rq, &rf); |
| __balance_callbacks(rq); |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| |
| preempt_enable(); |
| } |
| @@ -7457,7 +7457,7 @@ void init_idle(struct task_struct *idle, int cpu) |
| __sched_fork(0, idle); |
| |
| raw_spin_lock_irqsave(&idle->pi_lock, flags); |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| |
| idle->state = TASK_RUNNING; |
| idle->se.exec_start = sched_clock(); |
| @@ -7495,7 +7495,7 @@ void init_idle(struct task_struct *idle, int cpu) |
| #ifdef CONFIG_SMP |
| idle->on_cpu = 1; |
| #endif |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| raw_spin_unlock_irqrestore(&idle->pi_lock, flags); |
| |
| /* Set the preempt count _outside_ the spinlocks! */ |
| @@ -7666,7 +7666,7 @@ static void balance_push(struct rq *rq) |
| { |
| struct task_struct *push_task = rq->curr; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| SCHED_WARN_ON(rq->cpu != smp_processor_id()); |
| |
| /* |
| @@ -7704,9 +7704,9 @@ static void balance_push(struct rq *rq) |
| */ |
| if (!rq->nr_running && !rq_has_pinned_tasks(rq) && |
| rcuwait_active(&rq->hotplug_wait)) { |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| rcuwait_wake_up(&rq->hotplug_wait); |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| } |
| return; |
| } |
| @@ -7716,7 +7716,7 @@ static void balance_push(struct rq *rq) |
| * Temporarily drop rq->lock such that we can wake-up the stop task. |
| * Both preemption and IRQs are still disabled. |
| */ |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, |
| this_cpu_ptr(&push_work)); |
| /* |
| @@ -7724,7 +7724,7 @@ static void balance_push(struct rq *rq) |
| * schedule(). The next pick is obviously going to be the stop task |
| * which kthread_is_per_cpu() and will push this task away. |
| */ |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| } |
| |
| static void balance_push_set(int cpu, bool on) |
| @@ -8014,7 +8014,7 @@ static void dump_rq_tasks(struct rq *rq, const char *loglvl) |
| struct task_struct *g, *p; |
| int cpu = cpu_of(rq); |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); |
| for_each_process_thread(g, p) { |
| @@ -8187,7 +8187,7 @@ void __init sched_init(void) |
| struct rq *rq; |
| |
| rq = cpu_rq(i); |
| - raw_spin_lock_init(&rq->lock); |
| + raw_spin_lock_init(&rq->__lock); |
| rq->nr_running = 0; |
| rq->calc_load_active = 0; |
| rq->calc_load_update = jiffies + LOAD_FREQ; |
| diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c |
| index 104a1bade14f..893eece65bfd 100644 |
| --- a/kernel/sched/cpuacct.c |
| +++ b/kernel/sched/cpuacct.c |
| @@ -112,7 +112,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu, |
| /* |
| * Take rq->lock to make 64-bit read safe on 32-bit platforms. |
| */ |
| - raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
| + raw_spin_rq_lock_irq(cpu_rq(cpu)); |
| #endif |
| |
| if (index == CPUACCT_STAT_NSTATS) { |
| @@ -126,7 +126,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu, |
| } |
| |
| #ifndef CONFIG_64BIT |
| - raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
| + raw_spin_rq_unlock_irq(cpu_rq(cpu)); |
| #endif |
| |
| return data; |
| @@ -141,14 +141,14 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) |
| /* |
| * Take rq->lock to make 64-bit write safe on 32-bit platforms. |
| */ |
| - raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
| + raw_spin_rq_lock_irq(cpu_rq(cpu)); |
| #endif |
| |
| for (i = 0; i < CPUACCT_STAT_NSTATS; i++) |
| cpuusage->usages[i] = val; |
| |
| #ifndef CONFIG_64BIT |
| - raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
| + raw_spin_rq_unlock_irq(cpu_rq(cpu)); |
| #endif |
| } |
| |
| @@ -253,13 +253,13 @@ static int cpuacct_all_seq_show(struct seq_file *m, void *V) |
| * Take rq->lock to make 64-bit read safe on 32-bit |
| * platforms. |
| */ |
| - raw_spin_lock_irq(&cpu_rq(cpu)->lock); |
| + raw_spin_rq_lock_irq(cpu_rq(cpu)); |
| #endif |
| |
| seq_printf(m, " %llu", cpuusage->usages[index]); |
| |
| #ifndef CONFIG_64BIT |
| - raw_spin_unlock_irq(&cpu_rq(cpu)->lock); |
| + raw_spin_rq_unlock_irq(cpu_rq(cpu)); |
| #endif |
| } |
| seq_puts(m, "\n"); |
| diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c |
| index 9a2989749b8d..6e99b8b37c8c 100644 |
| --- a/kernel/sched/deadline.c |
| +++ b/kernel/sched/deadline.c |
| @@ -157,7 +157,7 @@ void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) |
| { |
| u64 old = dl_rq->running_bw; |
| |
| - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); |
| + lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); |
| dl_rq->running_bw += dl_bw; |
| SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ |
| SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); |
| @@ -170,7 +170,7 @@ void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) |
| { |
| u64 old = dl_rq->running_bw; |
| |
| - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); |
| + lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); |
| dl_rq->running_bw -= dl_bw; |
| SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */ |
| if (dl_rq->running_bw > old) |
| @@ -184,7 +184,7 @@ void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) |
| { |
| u64 old = dl_rq->this_bw; |
| |
| - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); |
| + lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); |
| dl_rq->this_bw += dl_bw; |
| SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */ |
| } |
| @@ -194,7 +194,7 @@ void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) |
| { |
| u64 old = dl_rq->this_bw; |
| |
| - lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); |
| + lockdep_assert_rq_held(rq_of_dl_rq(dl_rq)); |
| dl_rq->this_bw -= dl_bw; |
| SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */ |
| if (dl_rq->this_bw > old) |
| @@ -987,7 +987,7 @@ static int start_dl_timer(struct task_struct *p) |
| ktime_t now, act; |
| s64 delta; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| /* |
| * We want the timer to fire at the deadline, but considering |
| @@ -1097,9 +1097,9 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) |
| * If the runqueue is no longer available, migrate the |
| * task elsewhere. This necessarily changes rq. |
| */ |
| - lockdep_unpin_lock(&rq->lock, rf.cookie); |
| + lockdep_unpin_lock(rq_lockp(rq), rf.cookie); |
| rq = dl_task_offline_migration(rq, p); |
| - rf.cookie = lockdep_pin_lock(&rq->lock); |
| + rf.cookie = lockdep_pin_lock(rq_lockp(rq)); |
| update_rq_clock(rq); |
| |
| /* |
| @@ -1731,7 +1731,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused |
| * from try_to_wake_up(). Hence, p->pi_lock is locked, but |
| * rq->lock is not... So, lock it |
| */ |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| if (p->dl.dl_non_contending) { |
| sub_running_bw(&p->dl, &rq->dl); |
| p->dl.dl_non_contending = 0; |
| @@ -1746,7 +1746,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused |
| put_task_struct(p); |
| } |
| sub_rq_bw(&p->dl, &rq->dl); |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| } |
| |
| static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) |
| @@ -2291,10 +2291,10 @@ static void pull_dl_task(struct rq *this_rq) |
| double_unlock_balance(this_rq, src_rq); |
| |
| if (push_task) { |
| - raw_spin_unlock(&this_rq->lock); |
| + raw_spin_rq_unlock(this_rq); |
| stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, |
| push_task, &src_rq->push_work); |
| - raw_spin_lock(&this_rq->lock); |
| + raw_spin_rq_lock(this_rq); |
| } |
| } |
| |
| diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c |
| index 9c882f20803e..3bdee5fd7d29 100644 |
| --- a/kernel/sched/debug.c |
| +++ b/kernel/sched/debug.c |
| @@ -576,7 +576,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) |
| SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", |
| SPLIT_NS(cfs_rq->exec_clock)); |
| |
| - raw_spin_lock_irqsave(&rq->lock, flags); |
| + raw_spin_rq_lock_irqsave(rq, flags); |
| if (rb_first_cached(&cfs_rq->tasks_timeline)) |
| MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime; |
| last = __pick_last_entity(cfs_rq); |
| @@ -584,7 +584,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) |
| max_vruntime = last->vruntime; |
| min_vruntime = cfs_rq->min_vruntime; |
| rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; |
| - raw_spin_unlock_irqrestore(&rq->lock, flags); |
| + raw_spin_rq_unlock_irqrestore(rq, flags); |
| SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", |
| SPLIT_NS(MIN_vruntime)); |
| SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", |
| diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c |
| index 2d152f42a0e7..52e460badf9d 100644 |
| --- a/kernel/sched/fair.c |
| +++ b/kernel/sched/fair.c |
| @@ -1107,7 +1107,7 @@ struct numa_group { |
| static struct numa_group *deref_task_numa_group(struct task_struct *p) |
| { |
| return rcu_dereference_check(p->numa_group, p == current || |
| - (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu))); |
| + (lockdep_is_held(rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu))); |
| } |
| |
| static struct numa_group *deref_curr_numa_group(struct task_struct *p) |
| @@ -5328,7 +5328,7 @@ static void __maybe_unused update_runtime_enabled(struct rq *rq) |
| { |
| struct task_group *tg; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| rcu_read_lock(); |
| list_for_each_entry_rcu(tg, &task_groups, list) { |
| @@ -5347,7 +5347,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) |
| { |
| struct task_group *tg; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| rcu_read_lock(); |
| list_for_each_entry_rcu(tg, &task_groups, list) { |
| @@ -6885,7 +6885,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) |
| * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old' |
| * rq->lock and can modify state directly. |
| */ |
| - lockdep_assert_held(&task_rq(p)->lock); |
| + lockdep_assert_rq_held(task_rq(p)); |
| detach_entity_cfs_rq(&p->se); |
| |
| } else { |
| @@ -7512,7 +7512,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env) |
| { |
| s64 delta; |
| |
| - lockdep_assert_held(&env->src_rq->lock); |
| + lockdep_assert_rq_held(env->src_rq); |
| |
| if (p->sched_class != &fair_sched_class) |
| return 0; |
| @@ -7610,7 +7610,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) |
| { |
| int tsk_cache_hot; |
| |
| - lockdep_assert_held(&env->src_rq->lock); |
| + lockdep_assert_rq_held(env->src_rq); |
| |
| /* |
| * We do not migrate tasks that are: |
| @@ -7699,7 +7699,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) |
| */ |
| static void detach_task(struct task_struct *p, struct lb_env *env) |
| { |
| - lockdep_assert_held(&env->src_rq->lock); |
| + lockdep_assert_rq_held(env->src_rq); |
| |
| deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); |
| set_task_cpu(p, env->dst_cpu); |
| @@ -7715,7 +7715,7 @@ static struct task_struct *detach_one_task(struct lb_env *env) |
| { |
| struct task_struct *p; |
| |
| - lockdep_assert_held(&env->src_rq->lock); |
| + lockdep_assert_rq_held(env->src_rq); |
| |
| list_for_each_entry_reverse(p, |
| &env->src_rq->cfs_tasks, se.group_node) { |
| @@ -7751,7 +7751,7 @@ static int detach_tasks(struct lb_env *env) |
| struct task_struct *p; |
| int detached = 0; |
| |
| - lockdep_assert_held(&env->src_rq->lock); |
| + lockdep_assert_rq_held(env->src_rq); |
| |
| /* |
| * Source run queue has been emptied by another CPU, clear |
| @@ -7881,7 +7881,7 @@ static int detach_tasks(struct lb_env *env) |
| */ |
| static void attach_task(struct rq *rq, struct task_struct *p) |
| { |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| BUG_ON(task_rq(p) != rq); |
| activate_task(rq, p, ENQUEUE_NOCLOCK); |
| @@ -9792,7 +9792,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, |
| if (need_active_balance(&env)) { |
| unsigned long flags; |
| |
| - raw_spin_lock_irqsave(&busiest->lock, flags); |
| + raw_spin_rq_lock_irqsave(busiest, flags); |
| |
| /* |
| * Don't kick the active_load_balance_cpu_stop, |
| @@ -9800,8 +9800,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, |
| * moved to this_cpu: |
| */ |
| if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { |
| - raw_spin_unlock_irqrestore(&busiest->lock, |
| - flags); |
| + raw_spin_rq_unlock_irqrestore(busiest, flags); |
| goto out_one_pinned; |
| } |
| |
| @@ -9818,7 +9817,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, |
| busiest->push_cpu = this_cpu; |
| active_balance = 1; |
| } |
| - raw_spin_unlock_irqrestore(&busiest->lock, flags); |
| + raw_spin_rq_unlock_irqrestore(busiest, flags); |
| |
| if (active_balance) { |
| stop_one_cpu_nowait(cpu_of(busiest), |
| @@ -10635,7 +10634,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) |
| goto out; |
| } |
| |
| - raw_spin_unlock(&this_rq->lock); |
| + raw_spin_rq_unlock(this_rq); |
| |
| update_blocked_averages(this_cpu); |
| rcu_read_lock(); |
| @@ -10673,7 +10672,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) |
| } |
| rcu_read_unlock(); |
| |
| - raw_spin_lock(&this_rq->lock); |
| + raw_spin_rq_lock(this_rq); |
| |
| if (curr_cost > this_rq->max_idle_balance_cost) |
| this_rq->max_idle_balance_cost = curr_cost; |
| @@ -11160,9 +11159,9 @@ void unregister_fair_sched_group(struct task_group *tg) |
| |
| rq = cpu_rq(cpu); |
| |
| - raw_spin_lock_irqsave(&rq->lock, flags); |
| + raw_spin_rq_lock_irqsave(rq, flags); |
| list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); |
| - raw_spin_unlock_irqrestore(&rq->lock, flags); |
| + raw_spin_rq_unlock_irqrestore(rq, flags); |
| } |
| } |
| |
| diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c |
| index 7ca3d3d86c2a..0194768ea9e7 100644 |
| --- a/kernel/sched/idle.c |
| +++ b/kernel/sched/idle.c |
| @@ -455,10 +455,10 @@ struct task_struct *pick_next_task_idle(struct rq *rq) |
| static void |
| dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) |
| { |
| - raw_spin_unlock_irq(&rq->lock); |
| + raw_spin_rq_unlock_irq(rq); |
| printk(KERN_ERR "bad: scheduling from the idle thread!\n"); |
| dump_stack(); |
| - raw_spin_lock_irq(&rq->lock); |
| + raw_spin_rq_lock_irq(rq); |
| } |
| |
| /* |
| diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h |
| index 1462846d244e..9ed6d8c414ad 100644 |
| --- a/kernel/sched/pelt.h |
| +++ b/kernel/sched/pelt.h |
| @@ -141,7 +141,7 @@ static inline void update_idle_rq_clock_pelt(struct rq *rq) |
| |
| static inline u64 rq_clock_pelt(struct rq *rq) |
| { |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| assert_clock_updated(rq); |
| |
| return rq->clock_pelt - rq->lost_idle_time; |
| diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c |
| index c286e5ba3c94..b3d39c3d3ab3 100644 |
| --- a/kernel/sched/rt.c |
| +++ b/kernel/sched/rt.c |
| @@ -888,7 +888,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) |
| if (skip) |
| continue; |
| |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| update_rq_clock(rq); |
| |
| if (rt_rq->rt_time) { |
| @@ -926,7 +926,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) |
| |
| if (enqueue) |
| sched_rt_rq_enqueue(rt_rq); |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| } |
| |
| if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)) |
| @@ -1894,10 +1894,10 @@ static int push_rt_task(struct rq *rq, bool pull) |
| */ |
| push_task = get_push_task(rq); |
| if (push_task) { |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| stop_one_cpu_nowait(rq->cpu, push_cpu_stop, |
| push_task, &rq->push_work); |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| } |
| |
| return 0; |
| @@ -2122,10 +2122,10 @@ void rto_push_irq_work_func(struct irq_work *work) |
| * When it gets updated, a check is made if a push is possible. |
| */ |
| if (has_pushable_tasks(rq)) { |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| while (push_rt_task(rq, true)) |
| ; |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| } |
| |
| raw_spin_lock(&rd->rto_lock); |
| @@ -2243,10 +2243,10 @@ static void pull_rt_task(struct rq *this_rq) |
| double_unlock_balance(this_rq, src_rq); |
| |
| if (push_task) { |
| - raw_spin_unlock(&this_rq->lock); |
| + raw_spin_rq_unlock(this_rq); |
| stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, |
| push_task, &src_rq->push_work); |
| - raw_spin_lock(&this_rq->lock); |
| + raw_spin_rq_lock(this_rq); |
| } |
| } |
| |
| diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h |
| index 729f734561ff..f06a37c20f16 100644 |
| --- a/kernel/sched/sched.h |
| +++ b/kernel/sched/sched.h |
| @@ -905,7 +905,7 @@ DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); |
| */ |
| struct rq { |
| /* runqueue lock: */ |
| - raw_spinlock_t lock; |
| + raw_spinlock_t __lock; |
| |
| /* |
| * nr_running and cpu_load should be in the same cacheline because |
| @@ -1115,7 +1115,7 @@ static inline bool is_migration_disabled(struct task_struct *p) |
| |
| static inline raw_spinlock_t *rq_lockp(struct rq *rq) |
| { |
| - return &rq->lock; |
| + return &rq->__lock; |
| } |
| |
| static inline void lockdep_assert_rq_held(struct rq *rq) |
| @@ -1229,7 +1229,7 @@ static inline void assert_clock_updated(struct rq *rq) |
| |
| static inline u64 rq_clock(struct rq *rq) |
| { |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| assert_clock_updated(rq); |
| |
| return rq->clock; |
| @@ -1237,7 +1237,7 @@ static inline u64 rq_clock(struct rq *rq) |
| |
| static inline u64 rq_clock_task(struct rq *rq) |
| { |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| assert_clock_updated(rq); |
| |
| return rq->clock_task; |
| @@ -1263,7 +1263,7 @@ static inline u64 rq_clock_thermal(struct rq *rq) |
| |
| static inline void rq_clock_skip_update(struct rq *rq) |
| { |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| rq->clock_update_flags |= RQCF_REQ_SKIP; |
| } |
| |
| @@ -1273,7 +1273,7 @@ static inline void rq_clock_skip_update(struct rq *rq) |
| */ |
| static inline void rq_clock_cancel_skipupdate(struct rq *rq) |
| { |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| rq->clock_update_flags &= ~RQCF_REQ_SKIP; |
| } |
| |
| @@ -1304,7 +1304,7 @@ extern struct callback_head balance_push_callback; |
| */ |
| static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) |
| { |
| - rf->cookie = lockdep_pin_lock(&rq->lock); |
| + rf->cookie = lockdep_pin_lock(rq_lockp(rq)); |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); |
| @@ -1322,12 +1322,12 @@ static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) |
| rf->clock_update_flags = RQCF_UPDATED; |
| #endif |
| |
| - lockdep_unpin_lock(&rq->lock, rf->cookie); |
| + lockdep_unpin_lock(rq_lockp(rq), rf->cookie); |
| } |
| |
| static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) |
| { |
| - lockdep_repin_lock(&rq->lock, rf->cookie); |
| + lockdep_repin_lock(rq_lockp(rq), rf->cookie); |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| /* |
| @@ -1348,7 +1348,7 @@ static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) |
| __releases(rq->lock) |
| { |
| rq_unpin_lock(rq, rf); |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| } |
| |
| static inline void |
| @@ -1357,7 +1357,7 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) |
| __releases(p->pi_lock) |
| { |
| rq_unpin_lock(rq, rf); |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); |
| } |
| |
| @@ -1365,7 +1365,7 @@ static inline void |
| rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) |
| __acquires(rq->lock) |
| { |
| - raw_spin_lock_irqsave(&rq->lock, rf->flags); |
| + raw_spin_rq_lock_irqsave(rq, rf->flags); |
| rq_pin_lock(rq, rf); |
| } |
| |
| @@ -1373,7 +1373,7 @@ static inline void |
| rq_lock_irq(struct rq *rq, struct rq_flags *rf) |
| __acquires(rq->lock) |
| { |
| - raw_spin_lock_irq(&rq->lock); |
| + raw_spin_rq_lock_irq(rq); |
| rq_pin_lock(rq, rf); |
| } |
| |
| @@ -1381,7 +1381,7 @@ static inline void |
| rq_lock(struct rq *rq, struct rq_flags *rf) |
| __acquires(rq->lock) |
| { |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| rq_pin_lock(rq, rf); |
| } |
| |
| @@ -1389,7 +1389,7 @@ static inline void |
| rq_relock(struct rq *rq, struct rq_flags *rf) |
| __acquires(rq->lock) |
| { |
| - raw_spin_lock(&rq->lock); |
| + raw_spin_rq_lock(rq); |
| rq_repin_lock(rq, rf); |
| } |
| |
| @@ -1398,7 +1398,7 @@ rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) |
| __releases(rq->lock) |
| { |
| rq_unpin_lock(rq, rf); |
| - raw_spin_unlock_irqrestore(&rq->lock, rf->flags); |
| + raw_spin_rq_unlock_irqrestore(rq, rf->flags); |
| } |
| |
| static inline void |
| @@ -1406,7 +1406,7 @@ rq_unlock_irq(struct rq *rq, struct rq_flags *rf) |
| __releases(rq->lock) |
| { |
| rq_unpin_lock(rq, rf); |
| - raw_spin_unlock_irq(&rq->lock); |
| + raw_spin_rq_unlock_irq(rq); |
| } |
| |
| static inline void |
| @@ -1414,7 +1414,7 @@ rq_unlock(struct rq *rq, struct rq_flags *rf) |
| __releases(rq->lock) |
| { |
| rq_unpin_lock(rq, rf); |
| - raw_spin_unlock(&rq->lock); |
| + raw_spin_rq_unlock(rq); |
| } |
| |
| static inline struct rq * |
| @@ -1479,7 +1479,7 @@ queue_balance_callback(struct rq *rq, |
| struct callback_head *head, |
| void (*func)(struct rq *rq)) |
| { |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) |
| return; |
| @@ -2020,7 +2020,7 @@ static inline struct task_struct *get_push_task(struct rq *rq) |
| { |
| struct task_struct *p = rq->curr; |
| |
| - lockdep_assert_held(&rq->lock); |
| + lockdep_assert_rq_held(rq); |
| |
| if (rq->push_busy) |
| return NULL; |
| @@ -2250,7 +2250,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| __acquires(busiest->lock) |
| __acquires(this_rq->lock) |
| { |
| - raw_spin_unlock(&this_rq->lock); |
| + raw_spin_rq_unlock(this_rq); |
| double_rq_lock(this_rq, busiest); |
| |
| return 1; |
| @@ -2269,20 +2269,22 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| __acquires(busiest->lock) |
| __acquires(this_rq->lock) |
| { |
| - int ret = 0; |
| - |
| - if (unlikely(!raw_spin_trylock(&busiest->lock))) { |
| - if (busiest < this_rq) { |
| - raw_spin_unlock(&this_rq->lock); |
| - raw_spin_lock(&busiest->lock); |
| - raw_spin_lock_nested(&this_rq->lock, |
| - SINGLE_DEPTH_NESTING); |
| - ret = 1; |
| - } else |
| - raw_spin_lock_nested(&busiest->lock, |
| - SINGLE_DEPTH_NESTING); |
| + if (rq_lockp(this_rq) == rq_lockp(busiest)) |
| + return 0; |
| + |
| + if (likely(raw_spin_rq_trylock(busiest))) |
| + return 0; |
| + |
| + if (rq_lockp(busiest) >= rq_lockp(this_rq)) { |
| + raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); |
| + return 0; |
| } |
| - return ret; |
| + |
| + raw_spin_rq_unlock(this_rq); |
| + raw_spin_rq_lock(busiest); |
| + raw_spin_rq_lock_nested(this_rq, SINGLE_DEPTH_NESTING); |
| + |
| + return 1; |
| } |
| |
| #endif /* CONFIG_PREEMPTION */ |
| @@ -2292,11 +2294,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| */ |
| static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| { |
| - if (unlikely(!irqs_disabled())) { |
| - /* printk() doesn't work well under rq->lock */ |
| - raw_spin_unlock(&this_rq->lock); |
| - BUG_ON(1); |
| - } |
| + lockdep_assert_irqs_disabled(); |
| |
| return _double_lock_balance(this_rq, busiest); |
| } |
| @@ -2304,8 +2302,9 @@ static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
| __releases(busiest->lock) |
| { |
| - raw_spin_unlock(&busiest->lock); |
| - lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); |
| + if (rq_lockp(this_rq) != rq_lockp(busiest)) |
| + raw_spin_rq_unlock(busiest); |
| + lock_set_subclass(&rq_lockp(this_rq)->dep_map, 0, _RET_IP_); |
| } |
| |
| static inline void double_lock(spinlock_t *l1, spinlock_t *l2) |
| @@ -2346,16 +2345,16 @@ static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| __acquires(rq2->lock) |
| { |
| BUG_ON(!irqs_disabled()); |
| - if (rq1 == rq2) { |
| - raw_spin_lock(&rq1->lock); |
| + if (rq_lockp(rq1) == rq_lockp(rq2)) { |
| + raw_spin_rq_lock(rq1); |
| __acquire(rq2->lock); /* Fake it out ;) */ |
| } else { |
| - if (rq1 < rq2) { |
| - raw_spin_lock(&rq1->lock); |
| - raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
| + if (rq_lockp(rq1) < rq_lockp(rq2)) { |
| + raw_spin_rq_lock(rq1); |
| + raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING); |
| } else { |
| - raw_spin_lock(&rq2->lock); |
| - raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
| + raw_spin_rq_lock(rq2); |
| + raw_spin_rq_lock_nested(rq1, SINGLE_DEPTH_NESTING); |
| } |
| } |
| } |
| @@ -2370,9 +2369,9 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| __releases(rq1->lock) |
| __releases(rq2->lock) |
| { |
| - raw_spin_unlock(&rq1->lock); |
| - if (rq1 != rq2) |
| - raw_spin_unlock(&rq2->lock); |
| + raw_spin_rq_unlock(rq1); |
| + if (rq_lockp(rq1) != rq_lockp(rq2)) |
| + raw_spin_rq_unlock(rq2); |
| else |
| __release(rq2->lock); |
| } |
| @@ -2395,7 +2394,7 @@ static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| { |
| BUG_ON(!irqs_disabled()); |
| BUG_ON(rq1 != rq2); |
| - raw_spin_lock(&rq1->lock); |
| + raw_spin_rq_lock(rq1); |
| __acquire(rq2->lock); /* Fake it out ;) */ |
| } |
| |
| @@ -2410,7 +2409,7 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| __releases(rq2->lock) |
| { |
| BUG_ON(rq1 != rq2); |
| - raw_spin_unlock(&rq1->lock); |
| + raw_spin_rq_unlock(rq1); |
| __release(rq2->lock); |
| } |
| |
| diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c |
| index 55a0a243e871..053115b55f89 100644 |
| --- a/kernel/sched/topology.c |
| +++ b/kernel/sched/topology.c |
| @@ -467,7 +467,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) |
| struct root_domain *old_rd = NULL; |
| unsigned long flags; |
| |
| - raw_spin_lock_irqsave(&rq->lock, flags); |
| + raw_spin_rq_lock_irqsave(rq, flags); |
| |
| if (rq->rd) { |
| old_rd = rq->rd; |
| @@ -493,7 +493,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) |
| if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) |
| set_rq_online(rq); |
| |
| - raw_spin_unlock_irqrestore(&rq->lock, flags); |
| + raw_spin_rq_unlock_irqrestore(rq, flags); |
| |
| if (old_rd) |
| call_rcu(&old_rd->rcu, free_rootdomain); |
| -- |
| 2.31.1.818.g46aad6cb9e-goog |
| |