Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r-- | kernel/sched/sched.h | 33 +++++++++++++++++++++++++--------
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 29418b8c05dd..ca30af37b906 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1133,6 +1133,10 @@ static inline bool sched_core_disabled(void)
 	return !static_branch_unlikely(&__sched_core_enabled);
 }
 
+/*
+ * Be careful with this function; not for general use. The return value isn't
+ * stable unless you actually hold a relevant rq->__lock.
+ */
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
 	if (sched_core_enabled(rq))
@@ -1141,6 +1145,14 @@ static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 	return &rq->__lock;
 }
 
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+	if (rq->core_enabled)
+		return &rq->core->__lock;
+
+	return &rq->__lock;
+}
+
 #else /* !CONFIG_SCHED_CORE */
 
 static inline bool sched_core_enabled(struct rq *rq)
@@ -1158,11 +1170,16 @@ static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 	return &rq->__lock;
 }
 
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+	return &rq->__lock;
+}
+
 #endif /* CONFIG_SCHED_CORE */
 
 static inline void lockdep_assert_rq_held(struct rq *rq)
 {
-	lockdep_assert_held(rq_lockp(rq));
+	lockdep_assert_held(__rq_lockp(rq));
 }
 
 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
@@ -1346,7 +1363,7 @@ extern struct callback_head balance_push_callback;
  */
 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
 {
-	rf->cookie = lockdep_pin_lock(rq_lockp(rq));
+	rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
 
 #ifdef CONFIG_SCHED_DEBUG
 	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
@@ -1364,12 +1381,12 @@ static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
 		rf->clock_update_flags = RQCF_UPDATED;
 #endif
 
-	lockdep_unpin_lock(rq_lockp(rq), rf->cookie);
+	lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
 }
 
 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
 {
-	lockdep_repin_lock(rq_lockp(rq), rf->cookie);
+	lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
 
 #ifdef CONFIG_SCHED_DEBUG
 	/*
@@ -2338,7 +2355,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
 {
-	if (rq_lockp(this_rq) == rq_lockp(busiest))
+	if (__rq_lockp(this_rq) == __rq_lockp(busiest))
 		return 0;
 
 	if (likely(raw_spin_rq_trylock(busiest)))
@@ -2370,9 +2387,9 @@ static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(busiest->lock)
 {
-	if (rq_lockp(this_rq) != rq_lockp(busiest))
+	if (__rq_lockp(this_rq) != __rq_lockp(busiest))
 		raw_spin_rq_unlock(busiest);
-	lock_set_subclass(&rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
+	lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
 }
 
 static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
@@ -2412,7 +2429,7 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__releases(rq1->lock)
 	__releases(rq2->lock)
 {
-	if (rq_lockp(rq1) != rq_lockp(rq2))
+	if (__rq_lockp(rq1) != __rq_lockp(rq2))
 		raw_spin_rq_unlock(rq2);
 	else
 		__release(rq2->lock);
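
The hunks above switch the lockdep pin/assert helpers and the double-lock helpers from rq_lockp() to the new __rq_lockp(): per the added comment, rq_lockp()'s return value is only stable while a relevant rq->__lock is held, whereas __rq_lockp() simply reports whichever lock currently covers the rq, which is what lockdep annotations and lock-pointer comparisons need. The following userspace sketch is not part of the patch; all names are illustrative stand-ins for the kernel's struct rq, raw_spinlock_t and __rq_lockp(). It only models why two SMT siblings resolve to the same lock once core scheduling is enabled, so helpers like double_rq_unlock() take and release a single lock for the pair.

/*
 * Userspace model (not kernel code) of the __rq_lockp() idea: with core
 * scheduling enabled, every rq of an SMT core is covered by the "core"
 * rq's lock, so lock-pointer comparisons on two sibling runqueues
 * resolve to the same lock.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct rq {
	int lock;		/* stand-in for raw_spinlock_t __lock */
	bool core_enabled;	/* is core scheduling active on this rq? */
	struct rq *core;	/* "leader" rq of the SMT core */
};

/* Mirrors the shape of __rq_lockp(): the lock currently covering this rq. */
static int *model_rq_lockp(struct rq *rq)
{
	if (rq->core_enabled)
		return &rq->core->lock;
	return &rq->lock;
}

int main(void)
{
	struct rq cpu0 = { 0 }, cpu1 = { 0 };

	cpu0.core = &cpu0;	/* cpu0 acts as the core's leader rq */
	cpu1.core = &cpu0;

	/* Core scheduling off: each rq is covered by its own lock. */
	assert(model_rq_lockp(&cpu0) != model_rq_lockp(&cpu1));

	/*
	 * Core scheduling on: both siblings resolve to cpu0's lock, so a
	 * double-lock helper only needs to take one lock for the pair.
	 */
	cpu0.core_enabled = cpu1.core_enabled = true;
	assert(model_rq_lockp(&cpu0) == model_rq_lockp(&cpu1));

	printf("siblings share a lock once core scheduling is enabled\n");
	return 0;
}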