Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author	Linus Torvalds <torvalds@linux-foundation.org>
	Mon, 2 Dec 2013 18:13:44 +0000 (10:13 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Mon, 2 Dec 2013 18:13:44 +0000 (10:13 -0800)
Pull scheduler fixes from Ingo Molnar:
 "Various smaller fixlets, all over the place"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/doc: Fix generation of device-drivers
  sched: Expose preempt_schedule_irq()
  sched: Fix a trivial typo in comments
  sched: Remove unused variable in 'struct sched_domain'
  sched: Avoid NULL dereference on sd_busy
  sched: Check sched_domain before computing group power
  MAINTAINERS: Update file patterns in the lockdep and scheduler entries

MAINTAINERS
include/linux/sched.h
kernel/sched/core.c
kernel/sched/fair.c

diff --git a/MAINTAINERS b/MAINTAINERS
index e9c7b50c612d6d19edc372a1161c4fa685529db7..4afcfb4c892b76551630b3919aeed9efaedde9ed 100644
@@ -5267,7 +5267,7 @@ S:        Maintained
 F:     Documentation/lockdep*.txt
 F:     Documentation/lockstat.txt
 F:     include/linux/lockdep.h
-F:     kernel/lockdep*
+F:     kernel/locking/
 
 LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
 M:     "Richard Russon (FlatCap)" <ldm@flatcap.org>
@@ -7391,7 +7391,6 @@ S:        Maintained
 F:     kernel/sched/
 F:     include/linux/sched.h
 F:     include/uapi/linux/sched.h
-F:     kernel/wait.c
 F:     include/linux/wait.h
 
 SCORE ARCHITECTURE
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7e35d4b9e14a45cba37fd8a7f234e5a3a50ec4b5..768b037dfacb6273679c097a4abed0945d8f868d 100644
@@ -831,8 +831,6 @@ struct sched_domain {
        unsigned int balance_interval;  /* initialise to 1. units in ms. */
        unsigned int nr_balance_failed; /* initialise to 0 */
 
-       u64 last_update;
-
        /* idle_balance() stats */
        u64 max_newidle_lb_cost;
        unsigned long next_decay_max_lb_cost;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c1808606ee5f0f4e48e2fc51380369ba248bd9fe..e85cda20ab2b8ed6694d1cfa4a617b1bf231569d 100644
@@ -2660,6 +2660,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
        } while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);
+#endif /* CONFIG_PREEMPT */
 
 /*
  * this is the entry point to schedule() from kernel preemption
@@ -2693,8 +2694,6 @@ asmlinkage void __sched preempt_schedule_irq(void)
        exception_exit(prev_state);
 }
 
-#endif /* CONFIG_PREEMPT */
-
 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
                          void *key)
 {
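
Taken together, the two preempt hunks above move the #endif /* CONFIG_PREEMPT */ up so
that it closes right after preempt_schedule(), leaving preempt_schedule_irq() compiled
whether or not CONFIG_PREEMPT is set. A rough sketch of the resulting layout in
kernel/sched/core.c (bodies elided, not the full file):

    #ifdef CONFIG_PREEMPT
    /* Only preempt_schedule() remains under the CONFIG_PREEMPT guard. */
    asmlinkage void __sched notrace preempt_schedule(void)
    {
            /* ... unchanged body ... */
    }
    EXPORT_SYMBOL(preempt_schedule);
    #endif /* CONFIG_PREEMPT */

    /* Now built unconditionally, so it can be referenced outside CONFIG_PREEMPT=y builds. */
    asmlinkage void __sched preempt_schedule_irq(void)
    {
            /* ... unchanged body ... */
    }
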
@@ -4762,7 +4761,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
                cpumask_clear_cpu(rq->cpu, old_rd->span);
 
                /*
-                * If we dont want to free the old_rt yet then
+                * If we dont want to free the old_rd yet then
                 * set old_rd to NULL to skip the freeing later
                 * in this function:
                 */
@@ -4910,8 +4909,9 @@ static void update_top_cache_domain(int cpu)
        if (sd) {
                id = cpumask_first(sched_domain_span(sd));
                size = cpumask_weight(sched_domain_span(sd));
-               rcu_assign_pointer(per_cpu(sd_busy, cpu), sd->parent);
+               sd = sd->parent; /* sd_busy */
        }
+       rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);
 
        rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
        per_cpu(sd_llc_size, cpu) = size;
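
The sd_busy hunk above makes update_top_cache_domain() write per_cpu(sd_busy, cpu) on
every invocation: when there is no SD_SHARE_PKG_RESOURCES domain, sd is NULL and sd_busy
is explicitly cleared instead of being left stale. Readers can then rely on a plain RCU
NULL check. A minimal reader sketch under that assumption (hypothetical helper, not the
exact fair.c code):

    /* Hypothetical reader; relies on sd_busy being NULL when there is no LLC parent. */
    static int nr_busy_in_busy_domain(int cpu)
    {
            struct sched_domain *sd;
            int nr_busy = 0;

            rcu_read_lock();
            sd = rcu_dereference(per_cpu(sd_busy, cpu));
            if (sd)         /* may legitimately be NULL after this fix */
                    nr_busy = atomic_read(&sd->groups->sgp->nr_busy_cpus);
            rcu_read_unlock();

            return nr_busy;
    }
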
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e8b652ebe027c481e87122f629a300058cf82679..fd773ade1a3141cd4b152cb8fce905866a7c223a 100644
@@ -5379,10 +5379,31 @@ void update_group_power(struct sched_domain *sd, int cpu)
                 */
 
                for_each_cpu(cpu, sched_group_cpus(sdg)) {
-                       struct sched_group *sg = cpu_rq(cpu)->sd->groups;
+                       struct sched_group_power *sgp;
+                       struct rq *rq = cpu_rq(cpu);
 
-                       power_orig += sg->sgp->power_orig;
-                       power += sg->sgp->power;
+                       /*
+                        * build_sched_domains() -> init_sched_groups_power()
+                        * gets here before we've attached the domains to the
+                        * runqueues.
+                        *
+                        * Use power_of(), which is set irrespective of domains
+                        * in update_cpu_power().
+                        *
+                        * This avoids power/power_orig from being 0 and
+                        * causing divide-by-zero issues on boot.
+                        *
+                        * Runtime updates will correct power_orig.
+                        */
+                       if (unlikely(!rq->sd)) {
+                               power_orig += power_of(cpu);
+                               power += power_of(cpu);
+                               continue;
+                       }
+
+                       sgp = rq->sd->groups->sgp;
+                       power_orig += sgp->power_orig;
+                       power += sgp->power;
                }
        } else  {
                /*
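
For context on the fallback in the hunk above: power_of() reads the runqueue's
already-initialized cpu_power directly, so it is usable before domains are attached to
the runqueues. A sketch of what it amounts to in kernels of this vintage (assumed
definition, see kernel/sched/fair.c):

    /* Assumed accessor; the point is that it does not go through rq->sd. */
    static unsigned long power_of(int cpu)
    {
            return cpu_rq(cpu)->cpu_power;
    }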