Merge branches 'pm-cpufreq', 'pm-cpuidle', 'pm-devfreq', 'pm-opp' and 'pm-tools'
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40667cbf371ba9e8732e6c30940cc146752ee0c3..7ce18f3c097ac4779eb4cf6ed0ad14ac1beb3eb5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -676,7 +676,6 @@ void init_task_runnable_average(struct task_struct *p)
 {
        u32 slice;
 
-       p->se.avg.decay_count = 0;
        slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
        p->se.avg.runnable_avg_sum = slice;
        p->se.avg.runnable_avg_period = slice;
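
This hunk and the @@ -2574 hunk below belong together: the explicit zeroing at task init goes away because __synchronize_entity_decay() is changed to clear decay_count unconditionally, so the counter can no longer carry a stale value between uses. After the removal the function only seeds the runnable averages from the task's first slice. A sketch of how it reads post-hunk; the trailing __update_task_entity_contrib() call is not visible in the context above and is filled in from memory, so treat it as an assumption:

void init_task_runnable_average(struct task_struct *p)
{
        u32 slice;

        /* Seed the averages as if the task had been runnable for one slice. */
        slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
        p->se.avg.runnable_avg_sum = slice;
        p->se.avg.runnable_avg_period = slice;
        __update_task_entity_contrib(&p->se);   /* assumed, from memory */
}
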
@@ -1730,7 +1729,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
        nodes = node_online_map;
        for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
                unsigned long max_faults = 0;
-               nodemask_t max_group;
+               nodemask_t max_group = NODE_MASK_NONE;
                int a, b;
 
                /* Are there nodes at this distance from each other? */
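
Without an initializer, max_group is a stack nodemask that is only written inside the fault-scanning loop that follows; on a pass where no node pair at the current distance accumulates faults, any later read of the mask would see stack garbage, and compilers of this era warn about exactly that. NODE_MASK_NONE gives the mask a well-defined all-zero value up front. For reference, the initializer is a plain zeroing compound literal; quoted from include/linux/nodemask.h from memory, so the exact spelling is approximate:

#define NODE_MASK_NONE                                          \
((nodemask_t) { {                                               \
        [0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL             \
} })
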
@@ -2574,11 +2573,11 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
        u64 decays = atomic64_read(&cfs_rq->decay_counter);
 
        decays -= se->avg.decay_count;
+       se->avg.decay_count = 0;
        if (!decays)
                return 0;
 
        se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
-       se->avg.decay_count = 0;
 
        return decays;
 }
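
The point of moving the assignment is the early return: previously, a call that found decays == 0 bailed out without touching decay_count, leaving a stale snapshot behind for the next caller to subtract again; with the clear hoisted above the bail-out, the snapshot is consumed exactly once. How the whole function reads after the hunk, reconstructed from the context lines above (the cfs_rq local is not visible in the context and is an assumption):

static inline u64 __synchronize_entity_decay(struct sched_entity *se)
{
        struct cfs_rq *cfs_rq = cfs_rq_of(se);  /* assumed, from memory */
        u64 decays = atomic64_read(&cfs_rq->decay_counter);

        decays -= se->avg.decay_count;
        se->avg.decay_count = 0;        /* consume the snapshot unconditionally */
        if (!decays)
                return 0;

        se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);

        return decays;
}
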
@@ -5157,7 +5156,7 @@ static void yield_task_fair(struct rq *rq)
                 * so we don't do microscopic update in schedule()
                 * and double the fastpath cost.
                 */
-                rq->skip_clock_update = 1;
+               rq_clock_skip_update(rq, true);
        }
 
        set_skip_buddy(se);
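
Here a raw write of the skip flag becomes a helper call, part of a wider scheduler-core cleanup that renamed rq->skip_clock_update and funneled all access through accessors. The helper's shape in kernels of this vintage, quoted from kernel/sched/sched.h from memory, so the field and flag names are approximate:

static inline void rq_clock_skip_update(struct rq *rq, bool skip)
{
        lockdep_assert_held(&rq->lock); /* callers must hold the rq lock */
        if (skip)
                rq->clock_skip_update |= RQCF_REQ_SKIP;
        else
                rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

Routing the write through a helper means every site requesting a clock skip is checked against rq->lock by lockdep rather than by convention; the replacement also drops the stray leading space the old line carried.
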
@@ -5949,8 +5948,8 @@ static unsigned long scale_rt_capacity(int cpu)
         */
        age_stamp = ACCESS_ONCE(rq->age_stamp);
        avg = ACCESS_ONCE(rq->rt_avg);
+       delta = __rq_clock_broken(rq) - age_stamp;
 
-       delta = rq_clock(rq) - age_stamp;
        if (unlikely(delta < 0))
                delta = 0;
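
scale_rt_capacity() samples the rq clock without holding rq->lock, which is presumably acceptable here because the result only feeds a capacity heuristic. In the same series, rq_clock() grew a lockdep_assert_held() check, so lockless readers like this one were switched to an accessor whose name advertises the unserialized read. The pair as it appears in kernel/sched/sched.h of this era, quoted from memory:

static inline u64 __rq_clock_broken(struct rq *rq)
{
        /* Lockless snapshot; the name flags the missing serialization. */
        return ACCESS_ONCE(rq->clock);
}

static inline u64 rq_clock(struct rq *rq)
{
        lockdep_assert_held(&rq->lock);
        return rq->clock;
}

Moving the delta computation up also groups it with the other two ACCESS_ONCE samples, so all three unserialized reads sit together.
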