/* kernel/sched/pelt.h — Per-Entity Load Tracking (PELT) clock and load-average helpers. */
#ifdef CONFIG_SMP

#include "sched-pelt.h"

/*
 * PELT signal update entry points (implemented in pelt.c — not visible here).
 * NOTE(review): the !CONFIG_SMP stubs at the bottom of this file return 0,
 * so these presumably return non-zero when the tracked average was updated —
 * confirm against pelt.c.
 */
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

/* Read the runqueue's tracked thermal-pressure average. */
static inline u64 thermal_load_avg(struct rq *rq)
{
	/*
	 * READ_ONCE() because the field is updated concurrently
	 * (presumably by the writer in update_thermal_load_avg() — confirm in pelt.c).
	 */
	return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
/* Thermal pressure tracking disabled: updates are no-ops, average reads as 0. */
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}
#endif
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
/* IRQ load tracking disabled: updating is a no-op reporting "unchanged". */
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif
- #define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)
- static inline u32 get_pelt_divider(struct sched_avg *avg)
- {
- return PELT_MIN_DIVIDER + avg->period_contrib;
- }
/*
 * Clear the UTIL_AVG_UNCHANGED flag from util_est.enqueued to signal that
 * the entity's util_avg has just been updated (only relevant when the
 * UTIL_EST scheduler feature is enabled).
 */
static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid the store below if the flag has already been cleared. */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Reset the flag to report that util_avg has been updated. */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
/*
 * Read the multiplied task clock (task clock with deltas left-shifted by
 * sched_pelt_lshift — see update_rq_clock_task_mult()). Caller must hold
 * the rq lock with the rq clock freshly updated.
 */
static inline u64 rq_clock_task_mult(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_task_mult;
}
/*
 * Read the PELT time base: clock_pelt minus the idle time discarded when the
 * rq went idle with a saturated utilization sum (see update_idle_rq_clock_pelt()).
 * Caller must hold the rq lock with the rq clock freshly updated.
 */
static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}
/* The rq is idle: sync the PELT clock to the (multiplied) task clock. */
static inline void _update_idle_rq_clock_pelt(struct rq *rq)
{
	rq->clock_pelt = rq_clock_task_mult(rq);

	u64_u32_store(rq->clock_idle, rq_clock(rq));
	/*
	 * Publish clock_idle before clock_pelt_idle for lockless readers.
	 * NOTE(review): presumably pairs with an smp_rmb() on the reader side
	 * (not visible in this file) — confirm before reordering anything here.
	 */
	smp_wmb();
	u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq));
}
/*
 * Advance the PELT clock by 'delta', scaled down by both the CPU's
 * microarchitectural capacity and its current frequency capacity, so PELT
 * time progresses more slowly on smaller/slower CPUs. When the CPU is idle,
 * simply resync the PELT clock to the task clock instead.
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		_update_idle_rq_clock_pelt(rq);
		return;
	}

	/* Scale by CPU capacity, then by current frequency. */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}
/* NOTE(review): presumably a runtime PELT halflife tuning knob — confirm where it is set. */
extern unsigned int sched_pelt_lshift;

/*
 * Accumulate 'delta' into the multiplied task clock, left-shifted by
 * sched_pelt_lshift, and propagate the shifted delta into the PELT clock.
 * READ_ONCE() because the shift can change concurrently.
 */
static inline void update_rq_clock_task_mult(struct rq *rq, s64 delta)
{
	delta <<= READ_ONCE(sched_pelt_lshift);
	rq->clock_task_mult += delta;

	update_rq_clock_pelt(rq, delta);
}
- static inline void update_idle_rq_clock_pelt(struct rq *rq)
- {
- u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
- u32 util_sum = rq->cfs.avg.util_sum;
- util_sum += rq->avg_rt.util_sum;
- util_sum += rq->avg_dl.util_sum;
-
- if (util_sum >= divider)
- rq->lost_idle_time += rq_clock_task_mult(rq) - rq->clock_pelt;
- _update_idle_rq_clock_pelt(rq);
- }
- #ifdef CONFIG_CFS_BANDWIDTH
- static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
- {
- u64 throttled;
- if (unlikely(cfs_rq->throttle_count))
- throttled = U64_MAX;
- else
- throttled = cfs_rq->throttled_clock_pelt_time;
- u64_u32_store(cfs_rq->throttled_pelt_idle, throttled);
- }
- static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
- {
- if (unlikely(cfs_rq->throttle_count))
- return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
- return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
- }
- #else
- static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
- static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
- {
- return rq_clock_pelt(rq_of(cfs_rq));
- }
- #endif
#else /* !CONFIG_SMP */

/*
 * UP builds do not track per-rq load averages: all update helpers are no-ops
 * returning 0, and the PELT clocks collapse onto the plain task clock.
 */
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_task_mult(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task_mult(rq);
}

static inline void
update_rq_clock_task_mult(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
#endif