
Kernel Scheduler Study, Part 8: Load Tracking

  1. Overview
    How does the scheduler update the load of a task or of a runqueue (rq)? When do these updates happen? And what metrics (indices) are used to measure task and CPU load?
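    As a short answer to the last question: the metrics are the PELT signals kept in struct sched_avg, which is embedded both in struct sched_entity (so every task has one at task_struct->se.avg) and in struct cfs_rq (the per-CPU aggregate at cfs_rq->avg). The sketch below is only an approximation of the layout in the v5.x kernels this article's code corresponds to; field names differ between kernel versions, util_est is omitted, and the typedefs are added so the snippet stands on its own.
    /*
     * Approximate sketch of the PELT bookkeeping structure
     * (roughly the v5.4-era layout in include/linux/sched.h).
     */
    typedef unsigned long long u64;
    typedef unsigned int u32;

    struct sched_avg {
        u64           last_update_time;  /* timestamp of the last PELT update, in ns */
        u64           load_sum;          /* decayed, weight-scaled runnable time */
        u64           runnable_load_sum;
        u32           util_sum;          /* decayed running time */
        u32           period_contrib;    /* contribution of the current, unfinished 1024us period */
        unsigned long load_avg;          /* load_sum normalized against LOAD_AVG_MAX */
        unsigned long runnable_load_avg;
        unsigned long util_avg;          /* util_sum normalized to the 0..1024 capacity scale */
    };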
     
  2. Code Walkthrough
      Load updates are driven mainly by the function below, update_load_avg(); a small numeric sketch of the underlying PELT decay follows the listing.
    /* Update task and its cfs_rq load average */
    static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
    {
        u64 now = cfs_rq_clock_pelt(cfs_rq);    /* get the current PELT clock */
        int decayed;

        /*
         * Track task load average for carrying it to new CPU after migrated, and
         * track group sched_entity load average for task_h_load calc in migration
         */
        if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
            __update_load_avg_se(now, cfs_rq, se);

        decayed  = update_cfs_rq_load_avg(now, cfs_rq);
        decayed |= propagate_entity_load_avg(se);

        if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
            /*
             * DO_ATTACH means we're here from enqueue_entity().
             * !last_update_time means we've passed through
             * migrate_task_rq_fair() indicating we migrated.
             *
             * IOW we're enqueueing a task on a new CPU.
             */
            attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
            update_tg_load_avg(cfs_rq, 0);
        } else if (decayed) {
            cfs_rq_util_change(cfs_rq, 0);

            if (flags & UPDATE_TG)
                update_tg_load_avg(cfs_rq, 0);
        }
    }
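    The heavy lifting is done by __update_load_avg_se() and update_cfs_rq_load_avg(), which implement the PELT sum (see kernel/sched/pelt.c): runnable/running time is accumulated in 1024us periods, and the contribution of each past period is decayed by a factor y per period, where y^32 = 1/2 (a half-life of roughly 32ms). The snippet below is only a minimal userspace sketch of that decay rule, using floating point instead of the kernel's fixed-point tables, to show how the signal of a task with a 50% duty cycle converges towards util_avg of about 512.
    /*
     * Minimal userspace sketch of the PELT decay rule -- not kernel code.
     * Each 1024us period the accumulated signal is decayed by y (y^32 = 0.5)
     * and the newest period's contribution is added on top.
     */
    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        const double y   = pow(0.5, 1.0 / 32.0); /* per-period decay factor */
        const double max = 1024.0 / (1.0 - y);   /* series limit (~LOAD_AVG_MAX, 47742 in the kernel) */
        double sum = 0.0;                        /* decayed sum of running time */

        /* a task that runs 512us out of every 1024us period (50% duty cycle) */
        for (int period = 1; period <= 320; period++) {
            sum = sum * y + 512.0;
            if (period % 32 == 0)
                printf("after %3d periods (~%3d ms): util ~= %3.0f / 1024\n",
                       period, period, 1024.0 * sum / max);
        }
        return 0;
    }
    Half of the remaining gap to the steady-state value is closed every 32 periods (~32ms), which is the time scale on which the scheduler reacts to load changes. The individual steps of update_load_avg() are walked through below, starting with how the PELT clock is obtained.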
    a. Obtain the current PELT clock:
    #ifdef CONFIG_CFS_BANDWIDTH
    /* rq->task_clock normalized against any time this cfs_rq has spent throttled */
    static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
    {
        if (unlikely(cfs_rq->throttle_count))
            return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

        return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
    }
    #else
    static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
    {
        return rq_clock_pelt(rq_of(cfs_rq));
    }
    #endif

    static inline u64 rq_clock_pelt(struct rq *rq)
    {
        lockdep_assert_held(&rq->lock);
        assert_clock_updated(rq);

        return rq->clock_pelt - rq->lost_idle_time;
    }

    /*
     * The clock_pelt scales the time to reflect the effective amount of
     * computation done during the running delta time but then sync back to
     * clock_task when rq is idle.
     *
     *
     * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
     * @ max capacity  ------******---------------******---------------
     * @ half capacity ------************---------************---------
     * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
     *
     */
    static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
    {
        if (unlikely(is_idle_task(rq->curr))) {
            /* The rq is idle, we can sync to clock_task */
            rq->clock_pelt  = rq_clock_task(rq);
            return;
        }

        /*
         * When a rq runs at a lower compute capacity, it will need
         * more time to do the same amount of work than at max
         * capacity. In order to be invariant, we scale the delta to
         * reflect how much work has been really done.
         * Running longer results in stealing idle time that will
         * disturb the load signal compared to max capacity. This
         * stolen idle time will be automatically reflected when the
         * rq will be idle and the clock will be synced with
         * rq_clock_task.
         */

        /*
         * Scale the elapsed time to reflect the real amount of
         * computation
         */
        delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
        delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

        rq->clock_pelt += delta;
    }
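    The two cap_scale() calls are what make the PELT clock frequency- and CPU-invariant: cap_scale(v, s) is defined in fair.c as ((v) * (s)) >> SCHED_CAPACITY_SHIFT, i.e. v * s / 1024 on the 0..1024 capacity scale. The standalone sketch below just replays that arithmetic with made-up capacity values.
    /*
     * Standalone sketch of the scaling done in update_rq_clock_pelt().
     * The capacity and frequency values are made-up examples; on a real
     * system they come from arch_scale_cpu_capacity()/arch_scale_freq_capacity().
     */
    #include <stdio.h>
    #include <stdint.h>

    #define SCHED_CAPACITY_SHIFT  10
    #define cap_scale(v, s)       (((v) * (s)) >> SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        int64_t delta = 4000;           /* 4000us of wall-clock running time */
        unsigned long cpu_cap  = 512;   /* a little CPU with half the max capacity */
        unsigned long freq_cap = 512;   /* currently running at half its max frequency */

        delta = cap_scale(delta, cpu_cap);    /* 4000 * 512 / 1024 = 2000 */
        delta = cap_scale(delta, freq_cap);   /* 2000 * 512 / 1024 = 1000 */

        printf("clock_pelt advances by %lld us for 4000 us of wall time\n",
               (long long)delta);
        return 0;
    }
    So 4ms of wall-clock running time on a half-capacity CPU at half of its maximum frequency only advances clock_pelt by 1ms, matching the "half capacity" row of the diagram above. The idle time "stolen" by running slower is given back when the rq becomes idle and clock_pelt is resynchronized with rq_clock_task().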
    





     