Merge branch 'upstream-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/linvil...
[linux-drm-fsl-dcu.git] / kernel / sched.c
diff --git a/kernel/sched.c b/kernel/sched.c
index be2706c885a71adc0b5791fecf01a944e2b73eb5..13cdab3b4c4805712cd1039588d0938b54c8822f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -169,7 +169,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
                (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
 
 #define TASK_PREEMPTS_CURR(p, rq) \
-       (((p)->prio < (rq)->curr->prio) && ((p)->array == (rq)->active))
+       ((p)->prio < (rq)->curr->prio)
 
 #define SCALE_PRIO(x, prio) \
        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
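
The hunk above drops the "(p)->array == (rq)->active" clause, so a task queued in the expired array can now preempt the running task on priority alone. A minimal userspace model of the new test (task and runqueue are stand-ins for the kernel's task_struct and rq; as in the kernel, a lower prio value means higher priority):

        #include <stdbool.h>

        struct task { int prio; };              /* lower value = higher priority */
        struct runqueue { struct task *curr; };

        /* The simplified test: preemption depends only on relative
         * priority; the removed clause had additionally required p
         * to sit in the O(1) scheduler's active array. */
        static bool task_preempts_curr(const struct task *p,
                                       const struct runqueue *rq)
        {
                return p->prio < rq->curr->prio;
        }
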
@@ -304,7 +304,8 @@ struct rq {
        struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct rq, runqueues);
+static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline int cpu_of(struct rq *rq)
 {
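
Marking the per-CPU runqueues ____cacheline_aligned_in_smp keeps each CPU's rq on its own cache line(s), so one CPU updating its runqueue does not invalidate a line shared with a neighbouring CPU's runqueue. A userspace analogue using C11 alignment (the 64-byte line size is an assumption; the kernel macro uses the architecture's actual size):

        #include <stdalign.h>

        #define CACHE_LINE 64                   /* assumed L1 line size */

        struct rq_like {
                alignas(CACHE_LINE) long nr_running;
                /* ... rest of the per-CPU state ... */
        };

        /* sizeof(struct rq_like) is a multiple of CACHE_LINE, so each
         * element starts on its own line: no false sharing between CPUs. */
        static struct rq_like per_cpu_rq[4];
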
@@ -4076,13 +4077,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        struct prio_array *array;
        unsigned long flags;
        struct rq *rq;
-       int delta;
+       int oldprio;
 
        BUG_ON(prio < 0 || prio > MAX_PRIO);
 
        rq = task_rq_lock(p, &flags);
 
-       delta = prio - p->prio;
+       oldprio = p->prio;
        array = p->array;
        if (array)
                dequeue_task(p, array);
@@ -4098,11 +4099,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
                enqueue_task(p, array);
                /*
                 * Reschedule if we are currently running on this runqueue and
-                * our priority decreased, or if our priority became higher
-                * than the current's.
+                * our priority decreased, or if we are not currently running on
+                * this runqueue and our priority is higher than the current's.
                 */
-               if (TASK_PREEMPTS_CURR(p, rq) ||
-                               (delta > 0 && task_running(rq, p)))
+               if (task_running(rq, p)) {
+                       if (p->prio > oldprio)
+                               resched_task(rq->curr);
+               } else if (TASK_PREEMPTS_CURR(p, rq))
                        resched_task(rq->curr);
        }
        task_rq_unlock(rq, &flags);
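
Distilled, the rewritten branch asks a different question per case: a task running on this CPU only needs to yield after a priority drop, while a queued task should preempt only if it now outranks the current task. A hedged sketch of the decision (function and parameter names are illustrative; sched_setscheduler further down applies the same split):

        #include <stdbool.h>

        /* Lower prio value means higher priority, as in the kernel. */
        static bool should_resched_curr(bool p_is_running, int new_prio,
                                        int oldprio, int curr_prio)
        {
                if (p_is_running)
                        /* p owns the CPU: only a drop (numerically larger
                         * prio) can let another task run. */
                        return new_prio > oldprio;
                /* p is queued but not running: preempt only if p now
                 * beats the task currently on the CPU. */
                return new_prio < curr_prio;
        }
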
@@ -4150,12 +4153,10 @@ void set_user_nice(struct task_struct *p, long nice)
                enqueue_task(p, array);
                inc_raw_weighted_load(rq, p);
                /*
-                * Reschedule if we are currently running on this runqueue and
-                * our priority decreased, or if our priority became higher
-                * than the current's.
+                * If the task increased its priority or is running and
+                * lowered its priority, then reschedule its CPU:
                 */
-               if (TASK_PREEMPTS_CURR(p, rq) ||
-                               (delta > 0 && task_running(rq, p)))
+               if (delta < 0 || (delta > 0 && task_running(rq, p)))
                        resched_task(rq->curr);
        }
 out_unlock:
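
Here delta is the task's signed priority change (negative when its priority rose), so the condition reads straight off the new comment: a boost (delta < 0) always reschedules, a drop (delta > 0) reschedules only when the task currently holds a CPU. As a stand-alone predicate (names illustrative):

        #include <stdbool.h>

        /* delta < 0: priority rose; delta > 0: priority fell. */
        static bool should_resched_after_nice(int delta, bool running)
        {
                return delta < 0 || (delta > 0 && running);
        }
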
@@ -4382,11 +4383,13 @@ recheck:
                __activate_task(p, rq);
                /*
                 * Reschedule if we are currently running on this runqueue and
-                * our priority decreased, or our priority became higher
-                * than the current's.
+                * our priority decreased, or if we are not currently running on
+                * this runqueue and our priority is higher than the current's.
                 */
-               if (TASK_PREEMPTS_CURR(p, rq) ||
-                               (task_running(rq, p) && p->prio > oldprio))
+               if (task_running(rq, p)) {
+                       if (p->prio > oldprio)
+                               resched_task(rq->curr);
+               } else if (TASK_PREEMPTS_CURR(p, rq))
                        resched_task(rq->curr);
        }
        __task_rq_unlock(rq);
@@ -4518,13 +4521,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
        struct task_struct *p;
        int retval;
 
-       lock_cpu_hotplug();
+       mutex_lock(&sched_hotcpu_mutex);
        read_lock(&tasklist_lock);
 
        p = find_process_by_pid(pid);
        if (!p) {
                read_unlock(&tasklist_lock);
-               unlock_cpu_hotplug();
+               mutex_unlock(&sched_hotcpu_mutex);
                return -ESRCH;
        }
 
@@ -4551,7 +4554,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 
 out_unlock:
        put_task_struct(p);
-       unlock_cpu_hotplug();
+       mutex_unlock(&sched_hotcpu_mutex);
        return retval;
 }
 
@@ -4608,7 +4611,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
        struct task_struct *p;
        int retval;
 
-       lock_cpu_hotplug();
+       mutex_lock(&sched_hotcpu_mutex);
        read_lock(&tasklist_lock);
 
        retval = -ESRCH;
@@ -4624,7 +4627,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
        read_unlock(&tasklist_lock);
-       unlock_cpu_hotplug();
+       mutex_unlock(&sched_hotcpu_mutex);
        if (retval)
                return retval;
 
@@ -4772,9 +4775,7 @@ int __sched cond_resched_softirq(void)
        BUG_ON(!in_softirq());
 
        if (need_resched() && system_state == SYSTEM_RUNNING) {
-               raw_local_irq_disable();
-               _local_bh_enable();
-               raw_local_irq_enable();
+               local_bh_enable();
                __cond_resched();
                local_bh_disable();
                return 1;
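
The replaced sequence existed because _local_bh_enable() only drops the softirq count and expects to run with interrupts disabled; it never processes pending softirqs. Plain local_bh_enable() drops the count, runs any pending softirqs once the count reaches zero, and ends in a preemption check, which is exactly what a voluntary reschedule point wants. A simplified contrast, paraphrased from this era's kernel/softirq.c rather than quoted:

        /* Illustrative paraphrases, not the real source. */
        void _local_bh_enable_like(void)
        {
                sub_preempt_count(SOFTIRQ_OFFSET);      /* count only */
        }

        void local_bh_enable_like(void)
        {
                sub_preempt_count(SOFTIRQ_OFFSET);
                if (!in_interrupt() && local_softirq_pending())
                        do_softirq();                   /* run pending work */
                preempt_check_resched();                /* resched point */
        }
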
@@ -5386,7 +5387,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
        struct rq *rq;
 
        switch (action) {
+       case CPU_LOCK_ACQUIRE:
+               mutex_lock(&sched_hotcpu_mutex);
+               break;
+
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
                if (IS_ERR(p))
                        return NOTIFY_BAD;
@@ -5400,12 +5406,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                break;
 
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
                /* Strictly unnecessary, as the first user will wake it. */
                wake_up_process(cpu_rq(cpu)->migration_thread);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
                if (!cpu_rq(cpu)->migration_thread)
                        break;
                /* Unbind it from offline cpu so it can run.  Fall thru. */
@@ -5416,6 +5424,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                break;
 
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                migrate_live_tasks(cpu);
                rq = cpu_rq(cpu);
                kthread_stop(rq->migration_thread);
@@ -5431,7 +5440,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                BUG_ON(rq->nr_running != 0);
 
                /* No need to migrate the tasks: it was best-effort if
-                * they didn't do lock_cpu_hotplug().  Just wake up
+                * they didn't take sched_hotcpu_mutex.  Just wake up
                 * the requestors. */
                spin_lock_irq(&rq->lock);
                while (!list_empty(&rq->migration_queue)) {
@@ -5445,6 +5454,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                spin_unlock_irq(&rq->lock);
                break;
 #endif
+       case CPU_LOCK_RELEASE:
+               mutex_unlock(&sched_hotcpu_mutex);
+               break;
        }
        return NOTIFY_OK;
 }
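
Two patterns combine in migration_call(). CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE bracket the entire hotplug operation, so any path holding sched_hotcpu_mutex (such as sched_setaffinity() above) fully excludes a CPU appearing or vanishing underneath it. And each _FROZEN action is the base action with the CPU_TASKS_FROZEN flag bit set, so a notifier can equivalently dispatch by masking the bit off; a sketch of that alternative style (not how this file writes it), assuming the encoding CPU_xxx_FROZEN == CPU_xxx | CPU_TASKS_FROZEN:

        /* Handle suspend/resume (_FROZEN) and normal hotplug alike
         * by masking off the flag bit. */
        static int example_notifier(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
        {
                switch (action & ~CPU_TASKS_FROZEN) {
                case CPU_UP_PREPARE:            /* and CPU_UP_PREPARE_FROZEN */
                        /* set up per-CPU resources here */
                        break;
                case CPU_DEAD:                  /* and CPU_DEAD_FROZEN */
                        /* tear them down here */
                        break;
                }
                return NOTIFY_OK;
        }
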
@@ -6820,10 +6832,10 @@ int arch_reinit_sched_domains(void)
 {
        int err;
 
-       lock_cpu_hotplug();
+       mutex_lock(&sched_hotcpu_mutex);
        detach_destroy_domains(&cpu_online_map);
        err = arch_init_sched_domains(&cpu_online_map);
-       unlock_cpu_hotplug();
+       mutex_unlock(&sched_hotcpu_mutex);
 
        return err;
 }
@@ -6902,14 +6914,20 @@ static int update_sched_domains(struct notifier_block *nfb,
 {
        switch (action) {
        case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
        case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
                detach_destroy_domains(&cpu_online_map);
                return NOTIFY_OK;
 
        case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
        case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
        case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
        case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
                /*
                 * Fall through and re-initialise the domains.
                 */
@@ -6928,12 +6946,12 @@ void __init sched_init_smp(void)
 {
        cpumask_t non_isolated_cpus;
 
-       lock_cpu_hotplug();
+       mutex_lock(&sched_hotcpu_mutex);
        arch_init_sched_domains(&cpu_online_map);
        cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
        if (cpus_empty(non_isolated_cpus))
                cpu_set(smp_processor_id(), non_isolated_cpus);
-       unlock_cpu_hotplug();
+       mutex_unlock(&sched_hotcpu_mutex);
        /* XXX: Theoretical race here - CPU may be hotplugged now */
        hotcpu_notifier(update_sched_domains, 0);
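
cpus_andnot() computes the set of CPUs available for scheduling by removing the isolcpus= set from the possible map; if the administrator isolated everything, the boot CPU is forced back in so at least one CPU stays schedulable. A userspace bitmask analogue of the same computation (the 64-CPU width and the names are illustrative):

        #include <stdint.h>

        typedef uint64_t cpumask;               /* up to 64 CPUs, for the sketch */

        /* Remove isolated CPUs from the possible set, but never
         * return an empty mask. */
        static cpumask non_isolated(cpumask possible, cpumask isolated,
                                    int boot_cpu)
        {
                cpumask m = possible & ~isolated;       /* cpus_andnot() */

                if (m == 0)                             /* cpus_empty() */
                        m |= (cpumask)1 << boot_cpu;    /* cpu_set() */
                return m;
        }
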