Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/agpgart
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 1e1fa3e391a3889ec822c869c19bd9feb76e5e36..bea304d48cdbb8fdc150e9033ad0e53a3961c439 100644
@@ -38,6 +38,7 @@
 #include <linux/ptrace.h>
 #include <linux/random.h>
 #include <linux/personality.h>
+#include <linux/tick.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -48,6 +49,7 @@
 #include <asm/i387.h>
 #include <asm/desc.h>
 #include <asm/vm86.h>
+#include <asm/idle.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
 #endif
@@ -56,6 +58,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
+#include <asm/pda.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -79,6 +82,42 @@ void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
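+/*
+ * The idle notifier is an atomic chain, so it can be walked with interrupts
+ * disabled; registered callbacks are told when this CPU enters (IDLE_START)
+ * and leaves (IDLE_END) the idle loop.
+ */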
+void idle_notifier_register(struct notifier_block *n)
+{
+       atomic_notifier_chain_register(&idle_notifier, n);
+}
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+       atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+
+static DEFINE_PER_CPU(volatile unsigned long, idle_state);
+
+void enter_idle(void)
+{
+       /* needs to be atomic w.r.t. interrupts, not against other CPUs */
+       __set_bit(0, &__get_cpu_var(idle_state));
+       atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+}
+
+static void __exit_idle(void)
+{
+       /* needs to be atomic w.r.t. interrupts, not against other CPUs */
+       if (__test_and_clear_bit(0, &__get_cpu_var(idle_state)) == 0)
+               return;
+       atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+void exit_idle(void)
+{
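+       /* Callers need not be the idle task; only pid 0 can actually be idle. */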
+       if (current->pid)
+               return;
+       __exit_idle();
+}
+
 void disable_hlt(void)
 {
        hlt_counter++;
@@ -99,22 +138,23 @@ EXPORT_SYMBOL(enable_hlt);
  */
 void default_idle(void)
 {
-       local_irq_enable();
-
        if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
                current_thread_info()->status &= ~TS_POLLING;
-               smp_mb__after_clear_bit();
-               while (!need_resched()) {
-                       local_irq_disable();
-                       if (!need_resched())
-                               safe_halt();
-                       else
-                               local_irq_enable();
-               }
+               /*
+                * TS_POLLING-cleared state must be visible before we
+                * test NEED_RESCHED:
+                */
+               smp_mb();
+
+               local_irq_disable();
+               if (!need_resched())
+                       safe_halt();    /* enables interrupts racelessly */
+               else
+                       local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
        } else {
-               while (!need_resched())
-                       cpu_relax();
+               /* loop is done by the caller */
+               cpu_relax();
        }
 }
 #ifdef CONFIG_APM_MODULE
@@ -129,13 +169,7 @@ EXPORT_SYMBOL(default_idle);
 static void poll_idle (void)
 {
        local_irq_enable();
-
-       asm volatile(
-               "2:"
-               "testl %0, %1;"
-               "rep; nop;"
-               "je 2b;"
-               : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
+       cpu_relax();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -178,6 +212,7 @@ void cpu_idle(void)
 
        /* endless idle loop with no priority at all */
        while (1) {
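+               /* NO_HZ: stop the periodic tick while this CPU is idle */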
+               tick_nohz_stop_sched_tick();
                while (!need_resched()) {
                        void (*idle)(void);
 
@@ -194,8 +229,18 @@ void cpu_idle(void)
                                play_dead();
 
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
+
+                       /*
+                        * Idle routines should keep interrupts disabled
+                        * from here on until they actually enter the idle
+                        * state; otherwise idle callbacks can misfire.
+                        */
+                       local_irq_disable();
+                       enter_idle();
                        idle();
+                       __exit_idle();
                }
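+               /* Coming out of idle: restart the periodic tick before we schedule */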
+               tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
@@ -205,7 +250,7 @@ void cpu_idle(void)
 void cpu_idle_wait(void)
 {
        unsigned int cpu, this_cpu = get_cpu();
-       cpumask_t map;
+       cpumask_t map, tmp = current->cpus_allowed;
 
        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();
@@ -227,6 +272,8 @@ void cpu_idle_wait(void)
                }
                cpus_and(map, map, cpu_online_map);
        } while (!cpus_empty(map));
+
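+       /* Restore the caller's original CPU affinity before returning */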
+       set_cpus_allowed(current, tmp);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
@@ -246,7 +293,11 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
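+               /* __sti_mwait does sti immediately followed by mwait, so
+                * interrupts are re-enabled with no window before we sleep. */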
                if (!need_resched())
-                       __mwait(eax, ecx);
+                       __sti_mwait(eax, ecx);
+               else
+                       local_irq_enable();
+       } else {
+               local_irq_enable();
        }
 }
 
@@ -254,8 +305,7 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 static void mwait_idle(void)
 {
        local_irq_enable();
-       while (!need_resched())
-               mwait_idle_with_hints(0, 0);
+       mwait_idle_with_hints(0, 0);
 }
 
 void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
@@ -312,8 +362,8 @@ void show_regs(struct pt_regs * regs)
                regs->eax,regs->ebx,regs->ecx,regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
                regs->esi, regs->edi, regs->ebp);
-       printk(" DS: %04x ES: %04x\n",
-               0xffff & regs->xds,0xffff & regs->xes);
+       printk(" DS: %04x ES: %04x FS: %04x\n",
+              0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs);
 
        cr0 = read_cr0();
        cr2 = read_cr2();
@@ -344,6 +394,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
        regs.xds = __USER_DS;
        regs.xes = __USER_DS;
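+       /* In-kernel %fs now holds the per-cpu PDA segment */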
+       regs.xfs = __KERNEL_PDA;
        regs.orig_eax = -1;
        regs.eip = (unsigned long) kernel_thread_helper;
        regs.xcs = __KERNEL_CS | get_kernel_rpl();
@@ -428,7 +479,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
 
        p->thread.eip = (unsigned long) ret_from_fork;
 
-       savesegment(fs,p->thread.fs);
        savesegment(gs,p->thread.gs);
 
        tsk = current;
@@ -505,7 +555,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
        dump->regs.eax = regs->eax;
        dump->regs.ds = regs->xds;
        dump->regs.es = regs->xes;
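+       /* %fs is saved in pt_regs on kernel entry now, so read it from there */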
-       savesegment(fs,dump->regs.fs);
+       dump->regs.fs = regs->xfs;
        savesegment(gs,dump->regs.gs);
        dump->regs.orig_eax = regs->orig_eax;
        dump->regs.eip = regs->eip;
@@ -646,21 +696,26 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 
        __unlazy_fpu(prev_p);
 
+       /* we're going to use this soon, after a few expensive things */
+       if (next_p->fpu_counter > 5)
+               prefetch(&next->i387.fxsave);
+
        /*
         * Reload esp0.
         */
        load_esp0(tss, next);
 
        /*
-        * Save away %fs and %gs. No need to save %es and %ds, as
-        * those are always kernel segments while inside the kernel.
-        * Doing this before setting the new TLS descriptors avoids
-        * the situation where we temporarily have non-reloadable
-        * segments in %fs and %gs.  This could be an issue if the
-        * NMI handler ever used %fs or %gs (it does not today), or
-        * if the kernel is running inside of a hypervisor layer.
+        * Save away %gs. No need to save %fs, as it was saved on the
+        * stack on entry.  No need to save %es and %ds, as those are
+        * always kernel segments while inside the kernel.  Doing this
+        * before setting the new TLS descriptors avoids the situation
+        * where we temporarily have non-reloadable segments in %fs
+        * and %gs.  This could be an issue if the NMI handler ever
+        * used %fs or %gs (it does not today), or if the kernel is
+        * running inside of a hypervisor layer.
         */
-       savesegment(fs, prev->fs);
        savesegment(gs, prev->gs);
 
        /*
@@ -669,21 +724,12 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
        load_TLS(next, cpu);
 
        /*
-        * Restore %fs and %gs if needed.
-        *
-        * Glibc normally makes %fs be zero, and %gs is one of
-        * the TLS segments.
-        */
-       if (unlikely(prev->fs | next->fs))
-               loadsegment(fs, next->fs);
-
-       if (prev->gs | next->gs)
-               loadsegment(gs, next->gs);
-
-       /*
-        * Restore IOPL if needed.
+        * Restore IOPL if needed.  In normal use, the flags restore
+        * in the switch assembly will handle this.  But if the kernel
+        * is running virtualized at a non-zero CPL, the popf will
+        * not restore flags, so it must be done in a separate step.
         */
-       if (unlikely(prev->iopl != next->iopl))
+       if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);
 
        /*
@@ -695,6 +741,30 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 
        disable_tsc(prev_p, next_p);
 
+       /*
+        * Leave lazy mode, flushing any hypercalls made here.
+        * This must be done before restoring TLS segments so
+        * the GDT and LDT are properly updated, and must be
+        * done before math_state_restore, so the TS bit is up
+        * to date.
+        */
+       arch_leave_lazy_cpu_mode();
+
+       /* If the task has used the FPU during the last 5 timeslices, just
+        * do a full restore of the math state immediately to avoid the
+        * trap; the chances of needing the FPU again soon are obviously
+        * high now.
+        */
+       if (next_p->fpu_counter > 5)
+               math_state_restore();
+
+       /*
+        * Restore %gs if needed (which is common)
+        */
+       if (prev->gs | next->gs)
+               loadsegment(gs, next->gs);
+
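+       /* Keep the PDA's cached current-task pointer (pcurrent) in sync */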
+       write_pda(pcurrent, next_p);
+
        return prev_p;
 }