Pull thermal into release branch
[linux-drm-fsl-dcu.git] / arch/ia64/kernel/smp.c
index f4c7f7769cf7df617bdb7cecefd5be4802bb840e..221de38045604ef1f791a015f61e2a399ca00a89 100644
 #include <asm/unistd.h>
 #include <asm/mca.h>
 
+/*
+ * Note: alignment of 4 entries/cacheline was empirically determined
+ * to be a good tradeoff between hot cachelines & spreading the array
+ * across too many cachelines.
+ */
+static struct local_tlb_flush_counts {
+       unsigned int count;
+} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
+
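+/* Per-cpu scratch array used to snapshot the flush counters above. */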
+static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+
+
 /*
  * Structure and data for smp_call_function(). This is designed to minimise static memory
  * requirements. It also looks cleaner.
@@ -221,13 +233,13 @@ send_IPI_self (int op)
 
 #ifdef CONFIG_KEXEC
 void
-kdump_smp_send_stop()
+kdump_smp_send_stop(void)
 {
        send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
 }
 
 void
-kdump_smp_send_init()
+kdump_smp_send_init(void)
 {
        unsigned int cpu, self_cpu;
        self_cpu = smp_processor_id();
@@ -248,6 +260,62 @@ smp_send_reschedule (int cpu)
        platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
 
+/*
+ * Called with preemption disabled.
+ */
+static void
+smp_send_local_flush_tlb (int cpu)
+{
+       platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_local_flush_tlb(void)
+{
+       /*
+        * Use atomic ops. Otherwise, the load/increment/store sequence from
+        * a "++" operation can have the line stolen between the load & store.
+        * The overhead of the atomic op is negligible in this case & offers
+        * significant benefit for the brief periods when lots of cpus
+        * are simultaneously flushing TLBs.
+        */
+       ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
+       local_flush_tlb_all();
+}
+
+#define FLUSH_DELAY    5 /* Usec backoff to eliminate excessive cacheline bouncing */
+
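+/*
+ * Flush the TLB on every cpu in @xcpumask: snapshot each target cpu's
+ * flush counter, send the flush IPIs, then spin (with a udelay() backoff)
+ * until every counter advances, proving the remote flushes completed.
+ */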
+void
+smp_flush_tlb_cpumask(cpumask_t xcpumask)
+{
+       unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
+       cpumask_t cpumask = xcpumask;
+       int mycpu, cpu, flush_mycpu = 0;
+
+       preempt_disable();
+       mycpu = smp_processor_id();
+
+       for_each_cpu_mask(cpu, cpumask)
+               counts[cpu] = local_tlb_flush_counts[cpu].count;
+
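+       /* Make sure the snapshots are read before any flush IPI is sent. */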
+       mb();
+       for_each_cpu_mask(cpu, cpumask) {
+               if (cpu == mycpu)
+                       flush_mycpu = 1;
+               else
+                       smp_send_local_flush_tlb(cpu);
+       }
+
+       if (flush_mycpu)
+               smp_local_flush_tlb();
+
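+       /* Wait for each target cpu's counter to move past its snapshot. */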
+       for_each_cpu_mask(cpu, cpumask)
+               while (counts[cpu] == local_tlb_flush_counts[cpu].count)
+                       udelay(FLUSH_DELAY);
+
+       preempt_enable();
+}
+
 void
 smp_flush_tlb_all (void)
 {