Merge branches 'pm-cpufreq', 'pm-cpuidle', 'pm-devfreq', 'pm-opp' and 'pm-tools'
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 726470d47f87959fe530858f3e99024ecad6d63f..a027799ae130d3623ff4351f08c3cf456979bfbc 100644
@@ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
+
+       if (dl_se->dl_yielded)
+               dl_se->dl_yielded = 0;
+       if (dl_se->dl_throttled)
+               dl_se->dl_throttled = 0;
 }
 
 /*
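The hunk above moves the clearing of stale dl_yielded/dl_throttled state into replenish_dl_entity(), so every replenishment leaves the entity in a clean state no matter which path triggered it. A minimal standalone sketch of that pattern, using a hypothetical toy_dl_entity in place of the kernel's struct sched_dl_entity:

#include <stdio.h>

/* Toy stand-in for struct sched_dl_entity (hypothetical, not the kernel type). */
struct toy_dl_entity {
	long long deadline;
	long long runtime;
	int dl_throttled;
	int dl_yielded;
};

/* Mirrors the pattern above: replenishing an entity also resets any
 * stale yield/throttle state, so callers need not clear it themselves. */
static void toy_replenish(struct toy_dl_entity *dl_se, long long now,
			  long long period, long long budget)
{
	dl_se->deadline = now + period;
	dl_se->runtime = budget;

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}

int main(void)
{
	struct toy_dl_entity e = { .dl_throttled = 1, .dl_yielded = 1 };

	toy_replenish(&e, 0, 100, 30);
	printf("throttled=%d yielded=%d runtime=%lld\n",
	       e.dl_throttled, e.dl_yielded, e.runtime);
	return 0;
}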
@@ -536,23 +541,19 @@ again:
 
        sched_clock_tick();
        update_rq_clock(rq);
-       dl_se->dl_throttled = 0;
-       dl_se->dl_yielded = 0;
-       if (task_on_rq_queued(p)) {
-               enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
-               if (dl_task(rq->curr))
-                       check_preempt_curr_dl(rq, p, 0);
-               else
-                       resched_curr(rq);
+       enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
+       if (dl_task(rq->curr))
+               check_preempt_curr_dl(rq, p, 0);
+       else
+               resched_curr(rq);
 #ifdef CONFIG_SMP
-               /*
-                * Queueing this task back might have overloaded rq,
-                * check if we need to kick someone away.
-                */
-               if (has_pushable_dl_tasks(rq))
-                       push_dl_task(rq);
+       /*
+        * Queueing this task back might have overloaded rq,
+        * check if we need to kick someone away.
+        */
+       if (has_pushable_dl_tasks(rq))
+               push_dl_task(rq);
 #endif
-       }
 unlock:
        raw_spin_unlock(&rq->lock);
 
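With the flag clearing now done by the replenishment itself, the bandwidth-timer callback above no longer needs the task_on_rq_queued() special case: it enqueues unconditionally with ENQUEUE_REPLENISH and then either runs the usual deadline preemption check or forces a reschedule. A compressed, self-contained sketch of that control flow; every toy_* name and the flag value are hypothetical stand-ins for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

#define TOY_ENQUEUE_REPLENISH 0x01 /* stand-in for ENQUEUE_REPLENISH */

struct toy_task { long long deadline; bool is_dl; };
struct toy_rq { struct toy_task *curr; bool need_resched; };

/* Hypothetical helpers standing in for enqueue_task_dl()/resched_curr(). */
static void toy_enqueue(struct toy_rq *rq, struct toy_task *p, int flags)
{
	(void)rq; (void)p; (void)flags; /* queue bookkeeping elided */
}

static void toy_resched(struct toy_rq *rq) { rq->need_resched = true; }

/* Mirrors the simplified timer path: unconditional replenish-enqueue,
 * then either an EDF preemption check or a forced reschedule. */
static void toy_timer_fired(struct toy_rq *rq, struct toy_task *p)
{
	toy_enqueue(rq, p, TOY_ENQUEUE_REPLENISH);
	if (rq->curr->is_dl) {
		if (p->deadline < rq->curr->deadline) /* earlier deadline wins */
			toy_resched(rq);
	} else {
		toy_resched(rq);
	}
}

int main(void)
{
	struct toy_task curr = { .deadline = 200, .is_dl = true };
	struct toy_task woken = { .deadline = 100, .is_dl = true };
	struct toy_rq rq = { .curr = &curr };

	toy_timer_fired(&rq, &woken);
	printf("need_resched=%d\n", rq.need_resched);
	return 0;
}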
@@ -613,10 +614,9 @@ static void update_curr_dl(struct rq *rq)
 
        dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
        if (dl_runtime_exceeded(rq, dl_se)) {
+               dl_se->dl_throttled = 1;
                __dequeue_task_dl(rq, curr, 0);
-               if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
-                       dl_se->dl_throttled = 1;
-               else
+               if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 
                if (!is_leftmost(curr, &rq->dl))
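Reordering the throttle path so that dl_throttled is set before the dequeue means anything running in between observes a consistent throttled state, and the unlikely() branch keeps the inline replenishment as the fallback when the timer cannot be armed. A toy sketch of that ordering, assuming a hypothetical toy_start_timer() in place of start_dl_timer():

#include <stdbool.h>
#include <stdio.h>

struct toy_entity {
	long long runtime;
	bool throttled;
};

/* Hypothetical stand-in for start_dl_timer(): pretend the timer can
 * only be armed when some caller-provided condition holds. */
static bool toy_start_timer(bool can_arm) { return can_arm; }

static void toy_replenish(struct toy_entity *e, long long budget)
{
	e->runtime = budget;
	e->throttled = false; /* replenish clears throttle state */
}

/* Mirrors the reordered throttle path: mark throttled first, then fall
 * back to an immediate replenish only if the timer could not be armed. */
static void toy_account(struct toy_entity *e, long long delta, bool can_arm)
{
	e->runtime -= delta;
	if (e->runtime <= 0) {
		e->throttled = true;
		if (!toy_start_timer(can_arm))
			toy_replenish(e, 30);
	}
}

int main(void)
{
	struct toy_entity e = { .runtime = 10 };

	/* Overrun with the timer unavailable: falls back to inline replenish. */
	toy_account(&e, 25, false);
	printf("runtime=%lld throttled=%d\n", e.runtime, e.throttled);
	return 0;
}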
@@ -853,7 +853,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
         * its rq, the bandwidth timer callback (which clearly has not
         * run yet) will take care of this.
         */
-       if (p->dl.dl_throttled)
+       if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
                return;
 
        enqueue_dl_entity(&p->dl, pi_se, flags);
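The strengthened test lets a replenishment-driven enqueue through while still discarding ordinary enqueues of a throttled task (whose timer callback will handle it later, as the comment above notes). A small sketch of the flag-gated enqueue, with hypothetical toy names:

#include <stdbool.h>
#include <stdio.h>

#define TOY_ENQUEUE_REPLENISH 0x01 /* hypothetical flag bit */

struct toy_task { bool throttled; bool queued; };

/* Mirrors the gate above: a throttled task is only queued when the
 * caller explicitly asks for a replenishment enqueue. */
static void toy_enqueue(struct toy_task *p, int flags)
{
	if (p->throttled && !(flags & TOY_ENQUEUE_REPLENISH))
		return;
	p->queued = true;
}

int main(void)
{
	struct toy_task p = { .throttled = true };

	toy_enqueue(&p, 0);
	printf("plain enqueue queued=%d\n", p.queued);      /* stays 0 */
	toy_enqueue(&p, TOY_ENQUEUE_REPLENISH);
	printf("replenish enqueue queued=%d\n", p.queued);  /* now 1 */
	return 0;
}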
@@ -1073,7 +1073,13 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
 {
        update_curr_dl(rq);
 
-       if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
+       /*
+        * Even when we have runtime, update_curr_dl() might have resulted in us
+        * not being the leftmost task anymore. In that case NEED_RESCHED will
+        * be set and schedule() will start a new hrtick for the next task.
+        */
+       if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
+           is_leftmost(p, &rq->dl))
                start_hrtick_dl(rq, p);
 }
 
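The new is_leftmost() condition matches the added comment: remaining runtime alone is not enough, the task must also still head the deadline-ordered tree for re-arming the hrtick to make sense. A flat-array sketch of the same check, where the hypothetical toy_is_leftmost() stands in for the kernel's rbtree lookup:

#include <stdbool.h>
#include <stdio.h>

struct toy_task { long long deadline; long long runtime; };

/* Hypothetical flat stand-in for the rbtree behind is_leftmost():
 * the "leftmost" task is simply the one with the earliest deadline. */
static bool toy_is_leftmost(const struct toy_task *p,
			    const struct toy_task *tasks, int n)
{
	for (int i = 0; i < n; i++)
		if (tasks[i].deadline < p->deadline)
			return false;
	return true;
}

/* Mirrors the guarded tick: only re-arm the high-resolution tick when
 * the task still has runtime *and* is still first in deadline order. */
static bool toy_should_rearm_hrtick(const struct toy_task *p,
				    const struct toy_task *tasks, int n)
{
	return p->runtime > 0 && toy_is_leftmost(p, tasks, n);
}

int main(void)
{
	struct toy_task tasks[] = { { 100, 5 }, { 50, 8 } };

	/* tasks[0] has runtime left, but tasks[1] now has the earlier
	 * deadline, so the hrtick is not re-armed for tasks[0]. */
	printf("rearm=%d\n", toy_should_rearm_hrtick(&tasks[0], tasks, 2));
	return 0;
}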
@@ -1166,9 +1172,6 @@ static int find_later_rq(struct task_struct *task)
         * We have to consider system topology and task affinity
         * first, then we can look for a suitable cpu.
         */
-       cpumask_copy(later_mask, task_rq(task)->rd->span);
-       cpumask_and(later_mask, later_mask, cpu_active_mask);
-       cpumask_and(later_mask, later_mask, &task->cpus_allowed);
        best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
                        task, later_mask);
        if (best_cpu == -1)
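The three removed cpumask operations are not lost; the idea is that cpudl_find() now derives the candidate set itself, preferring CPUs that are both free of deadline work and allowed by the task's affinity (the free-CPU mask is maintained in the rq_online_dl()/rq_offline_dl() hunks below). A bitmask sketch of that selection, with toy_* names as hypothetical stand-ins:

#include <stdio.h>

typedef unsigned long toy_mask; /* hypothetical stand-in for a cpumask */

/* Sketch of the cpudl_find() idea: prefer a CPU that is both free of
 * deadline tasks and permitted by the task's affinity mask. */
static int toy_find_later_cpu(toy_mask free_cpus, toy_mask allowed)
{
	toy_mask candidates = free_cpus & allowed;

	for (int cpu = 0; cpu < (int)(8 * sizeof(toy_mask)); cpu++)
		if (candidates & (1UL << cpu))
			return cpu; /* first suitable free CPU */
	return -1; /* caller would then fall back to the deadline heap */
}

int main(void)
{
	toy_mask free_cpus = 0x0A; /* CPUs 1 and 3 are free of DL work */
	toy_mask allowed   = 0x0C; /* task may run on CPUs 2 and 3 */

	printf("best_cpu=%d\n", toy_find_later_cpu(free_cpus, allowed));
	return 0;
}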
@@ -1563,6 +1566,7 @@ static void rq_online_dl(struct rq *rq)
        if (rq->dl.overloaded)
                dl_set_overload(rq);
 
+       cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
        if (rq->dl.dl_nr_running > 0)
                cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
 }
@@ -1574,6 +1578,7 @@ static void rq_offline_dl(struct rq *rq)
                dl_clear_overload(rq);
 
        cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+       cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
 }
 
 void init_sched_dl_class(void)
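The paired cpudl_set_freecpu()/cpudl_clear_freecpu() calls in the two hunks above keep the free-CPU mask honest across CPU hotplug, which is what lets the simplified cpudl_find() trust it. A toy sketch of that bookkeeping, using a plain unsigned long as a hypothetical stand-in for the cpudl bitmap:

#include <stdio.h>

/* Hypothetical bitmap stand-in for the cpudl free-CPU mask. */
static unsigned long toy_free_cpus;

static void toy_set_freecpu(int cpu)   { toy_free_cpus |=  (1UL << cpu); }
static void toy_clear_freecpu(int cpu) { toy_free_cpus &= ~(1UL << cpu); }

/* Mirrors rq_online_dl()/rq_offline_dl(): a CPU joins the free set when
 * it comes online and leaves it when it goes offline, so the finder
 * never proposes a CPU that cannot take the task. */
static void toy_rq_online(int cpu)  { toy_set_freecpu(cpu); }
static void toy_rq_offline(int cpu) { toy_clear_freecpu(cpu); }

int main(void)
{
	toy_rq_online(2);
	toy_rq_online(5);
	toy_rq_offline(2);
	printf("free mask=%#lx\n", toy_free_cpus); /* only CPU 5 remains */
	return 0;
}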