Pull thermal into release branch
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 6ab95ceaf9d4f34aa32a382a9e32cb68bb98b23b..221de38045604ef1f791a015f61e2a399ca00a89 100644
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/efi.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
 #include <asm/atomic.h>
 #include <asm/current.h>
 #include <asm/unistd.h>
 #include <asm/mca.h>
 
+/*
+ * Note: alignment of 4 entries/cacheline was empirically determined
+ * to be a good tradeoff between hot cachelines & spreading the array
+ * across too many cachelines.
+ */
+static struct local_tlb_flush_counts {
+       unsigned int count;
+} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
+
+static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+
+
 /*
  * Structure and data for smp_call_function(). This is designed to minimise static memory
  * requirements. It also looks cleaner.
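
The alignment note in the hunk above is the whole trick: on Itanium's 128-byte cachelines, padding each 4-byte counter out to a 32-byte slot leaves exactly 4 entries per line, limiting how many writers contend for one line without burning a full line per CPU. A minimal userspace sketch of the same layout (NR_CPUS_MODEL and the struct name are illustrative, not from the patch):

    #include <stdio.h>

    #define NR_CPUS_MODEL 64

    /* Pad each counter out to a 32-byte slot, as the patch does: on a
     * 128-byte cacheline this packs 4 entries per line. */
    struct flush_counter {
            unsigned int count;
    } __attribute__((__aligned__(32)));

    static struct flush_counter counters[NR_CPUS_MODEL];

    int main(void)
    {
            printf("entry size: %zu bytes\n", sizeof(struct flush_counter)); /* 32 */
            printf("entries per 128-byte line: %zu\n",
                   128 / sizeof(struct flush_counter));                      /* 4 */
            return 0;
    }
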
@@ -66,6 +79,7 @@ static volatile struct call_data_struct *call_data;
 
 #define IPI_CALL_FUNC          0
 #define IPI_CPU_STOP           1
+#define IPI_KDUMP_CPU_STOP     3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
 static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
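
ipi_operation is a per-CPU word that *senders* write, which is why it needs its own cacheline, and each IPI_* value above is a bit position within it. The receiver's dispatch (the full handle_IPI body is not shown in this hunk) follows the usual "swap out the pending word, then service each set bit" shape; a hedged userspace model of that pattern with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { IPI_CALL_FUNC = 0, IPI_CPU_STOP = 1, IPI_KDUMP_CPU_STOP = 3 };

    static _Atomic unsigned long ipi_pending;   /* models one CPU's ipi_operation */

    static void send_op(int op)                 /* sender: set the op's bit */
    {
            atomic_fetch_or(&ipi_pending, 1UL << op);
    }

    static void handle_pending(void)            /* receiver: drain, then dispatch */
    {
            unsigned long ops = atomic_exchange(&ipi_pending, 0UL);

            while (ops) {
                    int which = __builtin_ctzl(ops);   /* lowest pending op */

                    ops &= ops - 1;                    /* clear that bit */
                    printf("servicing IPI op %d\n", which);
            }
    }

    int main(void)
    {
            send_op(IPI_CPU_STOP);
            send_op(IPI_KDUMP_CPU_STOP);
            handle_pending();
            return 0;
    }
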
@@ -155,7 +169,11 @@ handle_IPI (int irq, void *dev_id)
                              case IPI_CPU_STOP:
                                stop_this_cpu();
                                break;
-
+#ifdef CONFIG_KEXEC
+                             case IPI_KDUMP_CPU_STOP:
+                               unw_init_running(kdump_cpu_freeze, NULL);
+                               break;
+#endif
                              default:
                                printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
                                break;
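
Note the kexec case does not call kdump_cpu_freeze() directly: routing it through unw_init_running() gives the callback a valid unwind frame, so the CPU's register state can be captured for the crash dump (kdump_cpu_freeze itself lives in crash.c, outside this diff). A loose userspace analogue of that "capture context, then freeze" indirection, with getcontext() standing in for the ia64 unwinder:

    #include <ucontext.h>
    #include <stdio.h>

    /* Capture the current register context, then hand it to the freeze
     * callback, mirroring unw_init_running(kdump_cpu_freeze, NULL). */
    static void run_with_context(void (*cb)(ucontext_t *, void *), void *arg)
    {
            ucontext_t ctx;

            if (getcontext(&ctx) == 0)
                    cb(&ctx, arg);
    }

    static void freeze_model(ucontext_t *ctx, void *arg)
    {
            (void)ctx;
            (void)arg;
            printf("context saved; parking this cpu for the dump\n");
    }

    int main(void)
    {
            run_with_context(freeze_model, NULL);
            return 0;
    }
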
@@ -213,6 +231,26 @@ send_IPI_self (int op)
        send_IPI_single(smp_processor_id(), op);
 }
 
+#ifdef CONFIG_KEXEC
+void
+kdump_smp_send_stop(void)
+{
+       send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
+}
+
+void
+kdump_smp_send_init(void)
+{
+       unsigned int cpu, self_cpu;
+       self_cpu = smp_processor_id();
+       for_each_online_cpu(cpu) {
+               if (cpu != self_cpu) {
+                       if (kdump_status[cpu] == 0)
+                               platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
+               }
+       }
+}
+#endif
 /*
  * Called with preemption disabled.
  */
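
kdump_smp_send_stop() above is the polite phase: a maskable IPI asking every other CPU to freeze. kdump_smp_send_init() is the escalation: any CPU whose kdump_status slot is still 0 (the freeze path in crash.c is assumed to set it to 1) gets a non-maskable INIT instead, so a wedged or interrupt-disabled CPU cannot stall the dump. A userspace model of that ack-or-escalate scan (NCPUS and all names here are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4

    /* 0 = never acknowledged the freeze IPI, 1 = frozen. */
    static _Atomic int kdump_status_model[NCPUS];

    static void send_init_to_stragglers(int self_cpu)
    {
            for (int cpu = 0; cpu < NCPUS; cpu++) {
                    if (cpu == self_cpu)
                            continue;
                    if (atomic_load(&kdump_status_model[cpu]) == 0)
                            printf("cpu %d never froze: sending INIT\n", cpu);
            }
    }

    int main(void)
    {
            atomic_store(&kdump_status_model[1], 1);  /* cpu 1 acked the IPI */
            atomic_store(&kdump_status_model[3], 1);  /* cpu 3 acked the IPI */
            send_init_to_stragglers(0);               /* cpu 0 is crashing */
            return 0;
    }
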
@@ -222,6 +260,62 @@ smp_send_reschedule (int cpu)
        platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
 
+/*
+ * Called with preemption disabled.
+ */
+static void
+smp_send_local_flush_tlb (int cpu)
+{
+       platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_local_flush_tlb(void)
+{
+       /*
+        * Use atomic ops. Otherwise, the load/increment/store sequence from
+        * a "++" operation can have the line stolen between the load & store.
+        * The overhead of the atomic op is negligible in this case & offers
+        * significant benefit for the brief periods where lots of cpus
+        * are simultaneously flushing TLBs.
+        */
+       ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
+       local_flush_tlb_all();
+}
+
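
The comment spells out why "count++" is unsafe here: it compiles to load/add/store, and if the cacheline migrates between the load and the store, an increment can be lost, so a CPU polling the counter in smp_flush_tlb_cpumask() could wait forever on a flush that already happened. One atomic read-modify-write closes the window. The same idea rendered in C11 atomics (the acquire order mirrors the "acq" completer on ia64_fetchadd; the names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int flush_count;

    /* Indivisible read-modify-write: no load/store window in which the
     * cacheline can be stolen, so waiters never miss an increment. */
    static void note_local_flush(void)
    {
            atomic_fetch_add_explicit(&flush_count, 1, memory_order_acquire);
    }

    int main(void)
    {
            note_local_flush();
            printf("flush_count = %u\n", atomic_load(&flush_count));
            return 0;
    }
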
+#define FLUSH_DELAY    5 /* Usec backoff to eliminate excessive cacheline bouncing */
+
+void
+smp_flush_tlb_cpumask(cpumask_t xcpumask)
+{
+       unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
+       cpumask_t cpumask = xcpumask;
+       int mycpu, cpu, flush_mycpu = 0;
+
+       preempt_disable();
+       mycpu = smp_processor_id();
+
+       for_each_cpu_mask(cpu, cpumask)
+               counts[cpu] = local_tlb_flush_counts[cpu].count;
+
+       mb();
+       for_each_cpu_mask(cpu, cpumask) {
+               if (cpu == mycpu)
+                       flush_mycpu = 1;
+               else
+                       smp_send_local_flush_tlb(cpu);
+       }
+
+       if (flush_mycpu)
+               smp_local_flush_tlb();
+
+       for_each_cpu_mask(cpu, cpumask)
+               while (counts[cpu] == local_tlb_flush_counts[cpu].count)
+                       udelay(FLUSH_DELAY);
+
+       preempt_enable();
+}
+
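
smp_flush_tlb_cpumask() is a snapshot/kick/wait handshake: record every target's flush count, fire the IPIs (flushing locally only after all remote kicks are out), then spin until each target's count moves past its snapshot, sleeping FLUSH_DELAY microseconds per poll so the spinners don't bounce the counter cachelines. A compressed two-"CPU" userspace model of the waiter side, using a thread as the IPI target (compile with -pthread; all names are illustrative):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <unistd.h>
    #include <stdio.h>

    #define FLUSH_DELAY_US 5

    static _Atomic unsigned int remote_count;   /* target CPU's flush counter */

    static void *target_cpu(void *arg)          /* stand-in for the IPI target */
    {
            (void)arg;
            usleep(100);                        /* simulate IPI delivery latency */
            atomic_fetch_add(&remote_count, 1); /* its smp_local_flush_tlb() */
            return NULL;
    }

    int main(void)
    {
            unsigned int snap = atomic_load(&remote_count);   /* snapshot */
            pthread_t t;

            pthread_create(&t, NULL, target_cpu, NULL);       /* "send" the IPI */

            while (atomic_load(&remote_count) == snap)        /* wait for progress */
                    usleep(FLUSH_DELAY_US);

            pthread_join(&t, NULL);
            printf("target completed its local flush\n");
            return 0;
    }
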
 void
 smp_flush_tlb_all (void)
 {