sched/deadline: Fix up the smp-affinity mask tests
author    Peter Zijlstra <peterz@infradead.org>
Tue, 17 Dec 2013 09:03:34 +0000 (10:03 +0100)
committer Ingo Molnar <mingo@kernel.org>
Mon, 13 Jan 2014 12:47:22 +0000 (13:47 +0100)
For now, deadline tasks are not allowed to set smp affinity; however,
the current tests are wrong. Cure this.

The test in __sched_setscheduler() also uses an on-stack cpumask_t,
which is a no-no: with large NR_CPUS configurations such a mask can
burn a significant chunk of kernel stack.
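
(When a temporary mask really is needed, the kernel idiom is a
heap-backed cpumask_var_t rather than a stack cpumask_t; a sketch of
that pattern, not part of this patch:

	cpumask_var_t tmp;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;
	cpumask_and(tmp, &p->cpus_allowed, cpu_active_mask);
	/* ... use tmp, then release it ... */
	free_cpumask_var(tmp);

The fix below avoids the temporary mask altogether.)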

Change both tests to use cpumask_subset(), such that we test that the
root domain span is a subset of the cpus_allowed mask. This way we are
sure the tasks can always run on all CPUs they can be balanced over,
and have no effective affinity constraints. In sched_setaffinity() the
cpuset restriction is now applied before the deadline check, so the
subset test runs against the effective new_mask rather than the raw
user-supplied mask.
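
To make the new check concrete, a minimal userspace sketch (plain
unsigned longs stand in for struct cpumask; names and values are made
up for illustration) of why cpumask_subset() succeeds where
cpumask_equal() wrongly failed:

#include <stdio.h>

/* subset(a, b): every CPU set in a is also set in b */
static int mask_subset(unsigned long a, unsigned long b)
{
	return (a & ~b) == 0;
}

int main(void)
{
	unsigned long span    = 0x0f;	/* root domain span: CPUs 0-3 */
	unsigned long allowed = 0xff;	/* p->cpus_allowed:  CPUs 0-7 */

	/* Old test: equality fails although the task covers the whole
	 * span, so __sched_setscheduler() would return -EPERM. */
	printf("equal:  %d\n", allowed == span);		/* 0 */

	/* New test: the span is contained in cpus_allowed, i.e. the
	 * task can run everywhere it may be balanced to. */
	printf("subset: %d\n", mask_subset(span, allowed));	/* 1 */

	return 0;
}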

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-fyqtb1lapxca3lhsxv9cumdc@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e30356d6b31fed503a6383fc1a51f68810a33004..27c6375d182abd5d892009a8833565aa1d8d9b61 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3384,23 +3384,14 @@ change:
 #ifdef CONFIG_SMP
                if (dl_bandwidth_enabled() && dl_policy(policy)) {
                        cpumask_t *span = rq->rd->span;
-                       cpumask_t act_affinity;
-
-                       /*
-                        * cpus_allowed mask is statically initialized with
-                        * CPU_MASK_ALL, span is instead dynamic. Here we
-                        * compute the "dynamic" affinity of a task.
-                        */
-                       cpumask_and(&act_affinity, &p->cpus_allowed,
-                                   cpu_active_mask);
 
                        /*
                         * Don't allow tasks with an affinity mask smaller than
                         * the entire root_domain to become SCHED_DEADLINE. We
                         * will also fail if there's no bandwidth available.
                         */
-                       if (!cpumask_equal(&act_affinity, span) ||
-                                          rq->rd->dl_bw.bw == 0) {
+                       if (!cpumask_subset(span, &p->cpus_allowed) ||
+                           rq->rd->dl_bw.bw == 0) {
                                task_rq_unlock(rq, p, &flags);
                                return -EPERM;
                        }
@@ -3420,8 +3411,7 @@ change:
         * of a SCHED_DEADLINE task) we need to check if enough bandwidth
         * is available.
         */
-       if ((dl_policy(policy) || dl_task(p)) &&
-           dl_overflow(p, policy, attr)) {
+       if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
                task_rq_unlock(rq, p, &flags);
                return -EBUSY;
        }
@@ -3860,6 +3850,10 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
        if (retval)
                goto out_unlock;
 
+
+       cpuset_cpus_allowed(p, cpus_allowed);
+       cpumask_and(new_mask, in_mask, cpus_allowed);
+
        /*
         * Since bandwidth control happens on root_domain basis,
         * if admission test is enabled, we only admit -deadline
@@ -3870,16 +3864,12 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
        if (task_has_dl_policy(p)) {
                const struct cpumask *span = task_rq(p)->rd->span;
 
-               if (dl_bandwidth_enabled() &&
-                   !cpumask_equal(in_mask, span)) {
+               if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
                        retval = -EBUSY;
                        goto out_unlock;
                }
        }
 #endif
-
-       cpuset_cpus_allowed(p, cpus_allowed);
-       cpumask_and(new_mask, in_mask, cpus_allowed);
 again:
        retval = set_cpus_allowed_ptr(p, new_mask);
 
@@ -4535,7 +4525,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  * When dealing with a -deadline task, we have to check if moving it to
  * a new CPU is possible or not. In fact, this is only true iff there
  * is enough bandwidth available on such CPU, otherwise we want the
- * whole migration progedure to fail over.
+ * whole migration procedure to fail over.
  */
 static inline
 bool set_task_cpu_dl(struct task_struct *p, unsigned int cpu)
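
For context on the comment above, a hypothetical, heavily simplified
model of the bandwidth check it refers to (a single scalar budget
stands in for the kernel's dl_bw accounting; dl_bw_model and dl_fits
are made-up names):

/* Admit a deadline task only while its bandwidth (runtime/period,
 * suitably scaled) still fits within the total budget. */
struct dl_bw_model {
	unsigned long long total;	/* admissible bandwidth */
	unsigned long long used;	/* bandwidth already admitted */
};

static int dl_fits(const struct dl_bw_model *bw, unsigned long long task_bw)
{
	/* An overflow here is what makes the migration fail over. */
	return bw->used + task_bw <= bw->total;
}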