MIPS: Netlogic: Move cores per node out of multi-node.h
author	Jayachandran C <jchandra@broadcom.com>
Wed, 7 Jan 2015 11:28:30 +0000 (16:58 +0530)
committer	Ralf Baechle <ralf@linux-mips.org>
Wed, 1 Apr 2015 15:21:50 +0000 (17:21 +0200)
Use the current_cpu_data package field to get the node of the current CPU.

This allows us to remove the xlp_cores_per_node global and move nlm_threads_per_node()
and nlm_cores_per_node() into netlogic/common.h, which simplifies the code.
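
For illustration only (not part of the patch), a minimal standalone sketch in
plain C of the arithmetic the relocated helpers implement; the is_xlp9xx flag
stands in for the PRID check that nlm_cores_per_node() does in the real code:

  #include <stdio.h>

  #define NLM_THREADS_PER_CORE  4

  static int cores_per_node(int is_xlp9xx)
  {
          return is_xlp9xx ? 32 : 8;    /* XLP9XX: 32 cores per node, other XLP/XLR: 8 */
  }

  static int threads_per_node(int is_xlp9xx)
  {
          return cores_per_node(is_xlp9xx) * NLM_THREADS_PER_CORE;
  }

  static int hwtid_to_node(int hwtid, int is_xlp9xx)
  {
          return hwtid / threads_per_node(is_xlp9xx);
  }

  int main(void)
  {
          /* hw thread 40 on a non-9XX part: 8 * 4 = 32 threads per node -> node 1 */
          printf("node %d\n", hwtid_to_node(40, 0));
          return 0;
  }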

Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8889/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/include/asm/mach-netlogic/multi-node.h
arch/mips/include/asm/netlogic/common.h
arch/mips/netlogic/common/irq.c
arch/mips/netlogic/common/smp.c
arch/mips/netlogic/xlp/setup.c
arch/mips/netlogic/xlp/wakeup.c

diff --git a/arch/mips/include/asm/mach-netlogic/multi-node.h b/arch/mips/include/asm/mach-netlogic/multi-node.h
index 9ed8dacdc37c5aa3bae8bbfc4e31cf5c6b51bed6..8bdf47e29145026f1c92001bc39b4c7103f63cba 100644
 #endif
 
 #define NLM_THREADS_PER_CORE   4
-#ifdef CONFIG_CPU_XLR
-#define nlm_cores_per_node()   8
-#else
-extern unsigned int xlp_cores_per_node;
-#define nlm_cores_per_node()   xlp_cores_per_node
-#endif
-
-#define nlm_threads_per_node() (nlm_cores_per_node() * NLM_THREADS_PER_CORE)
-#define nlm_cpuid_to_node(c)   ((c) / nlm_threads_per_node())
 
 struct nlm_soc_info {
        unsigned long   coremask;       /* cores enabled on the soc */
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
index c281f03eb312634c16a0a383e3600a0d3ee8ed50..2a4c128277e45400fa4061bff15a17a1e10fc442 100644
@@ -111,6 +111,25 @@ static inline int nlm_irq_to_xirq(int node, int irq)
        return node * NR_IRQS / NLM_NR_NODES + irq;
 }
 
-extern int nlm_cpu_ready[];
+#ifdef CONFIG_CPU_XLR
+#define nlm_cores_per_node()   8
+#else
+static inline int nlm_cores_per_node(void)
+{
+       return ((read_c0_prid() & PRID_IMP_MASK)
+                               == PRID_IMP_NETLOGIC_XLP9XX) ? 32 : 8;
+}
 #endif
+static inline int nlm_threads_per_node(void)
+{
+       return nlm_cores_per_node() * NLM_THREADS_PER_CORE;
+}
+
+static inline int nlm_hwtid_to_node(int hwtid)
+{
+       return hwtid / nlm_threads_per_node();
+}
+
+extern int nlm_cpu_ready[];
+#endif /* __ASSEMBLY__ */
 #endif /* _NETLOGIC_COMMON_H_ */
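
With these helpers in place, callers that only have a hardware thread id can
derive the node and the per-node CPU index locally, instead of going through
the removed xlp_cores_per_node global. The hunks below all follow roughly this
pattern (sketch, using the variable names from irq.c):

  cpu  = hwtid % nlm_threads_per_node();  /* thread index within the node */
  node = hwtid / nlm_threads_per_node();  /* i.e. nlm_hwtid_to_node(hwtid) */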
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index c100b9afa0abe85fe31555a06bcc25186345eacd..5f5d18b0e94d8acdccb2b8c86ac153fd784ad6bb 100644
@@ -230,16 +230,16 @@ static void nlm_init_node_irqs(int node)
        }
 }
 
-void nlm_smp_irq_init(int hwcpuid)
+void nlm_smp_irq_init(int hwtid)
 {
-       int node, cpu;
+       int cpu, node;
 
-       node = nlm_cpuid_to_node(hwcpuid);
-       cpu  = hwcpuid % nlm_threads_per_node();
+       cpu = hwtid % nlm_threads_per_node();
+       node = hwtid / nlm_threads_per_node();
 
        if (cpu == 0 && node != 0)
                nlm_init_node_irqs(node);
-       write_c0_eimr(nlm_current_node()->irqmask);
+       write_c0_eimr(nlm_get_node(node)->irqmask);
 }
 
 asmlinkage void plat_irq_dispatch(void)
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 32f15aba745a98e636c9eed9d839e23fe6979211..dc3e327fbbac105e71c6b89a039e0f79f91e6f3d 100644
 
 void nlm_send_ipi_single(int logical_cpu, unsigned int action)
 {
-       int cpu, node;
+       unsigned int hwtid;
        uint64_t picbase;
 
-       cpu = cpu_logical_map(logical_cpu);
-       node = nlm_cpuid_to_node(cpu);
-       picbase = nlm_get_node(node)->picbase;
+       /* node id is part of hwtid, and needed for send_ipi */
+       hwtid = cpu_logical_map(logical_cpu);
+       picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
 
        if (action & SMP_CALL_FUNCTION)
-               nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
+               nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_FUNCTION, 0);
        if (action & SMP_RESCHEDULE_YOURSELF)
-               nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
+               nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_RESCHEDULE, 0);
 }
 
 void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -120,7 +120,7 @@ static void nlm_init_secondary(void)
 
        hwtid = hard_smp_processor_id();
        current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
-       current_cpu_data.package = nlm_cpuid_to_node(hwtid);
+       current_cpu_data.package = nlm_nodeid();
        nlm_percpu_init(hwtid);
        nlm_smp_irq_init(hwtid);
 }
@@ -146,16 +146,18 @@ static cpumask_t phys_cpu_present_mask;
 
 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 {
-       int cpu, node;
+       uint64_t picbase;
+       int hwtid;
+
+       hwtid = cpu_logical_map(logical_cpu);
+       picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
 
-       cpu = cpu_logical_map(logical_cpu);
-       node = nlm_cpuid_to_node(logical_cpu);
        nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
        nlm_next_gp = (unsigned long)task_thread_info(idle);
 
        /* barrier for sp/gp store above */
        __sync();
-       nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1);  /* NMI */
+       nlm_pic_send_ipi(picbase, hwtid, 1, 1);  /* NMI */
 }
 
 void __init nlm_smp_setup(void)
@@ -183,7 +185,7 @@ void __init nlm_smp_setup(void)
                        __cpu_number_map[i] = num_cpus;
                        __cpu_logical_map[num_cpus] = i;
                        set_cpu_possible(num_cpus, true);
-                       node = nlm_cpuid_to_node(i);
+                       node = nlm_hwtid_to_node(i);
                        cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
                        ++num_cpus;
                }
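
Worth noting in the smp.c hunks above (annotation, not part of the patch): the
old nlm_boot_secondary() sent the NMI to the hardware thread id but derived the
node from the logical CPU number, roughly:

  cpu  = cpu_logical_map(logical_cpu);    /* hardware thread id */
  node = nlm_cpuid_to_node(logical_cpu);  /* logical id, not the hw thread id */

The new code derives both picbase and the IPI target from the same hwtid, so
the two can no longer disagree if logical and hardware numbering differ.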
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 4fdd9fd29d1d89e154d82d26e640d98a9baf6776..27113a17f18d1f324a551a7b456cc10351ffae26 100644
@@ -51,7 +51,6 @@ uint64_t nlm_io_base;
 struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
 unsigned int nlm_threads_per_core;
-unsigned int xlp_cores_per_node;
 
 static void nlm_linux_exit(void)
 {
@@ -163,10 +162,6 @@ void __init prom_init(void)
        void *reset_vec;
 
        nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
-       if (cpu_is_xlp9xx())
-               xlp_cores_per_node = 32;
-       else
-               xlp_cores_per_node = 8;
        nlm_init_boot_cpu();
        xlp_mmu_init();
        nlm_node_init(0);
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index 26d82f79ef2979c729084063779b455892550927..87d7846af2d00fee4c24da23a180d99a834634ea 100644
@@ -111,7 +111,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
        struct nlm_soc_info *nodep;
        uint64_t syspcibase, fusebase;
        uint32_t syscoremask, mask, fusemask;
-       int core, n, cpu;
+       int core, n, cpu, ncores;
 
        for (n = 0; n < NLM_NR_NODES; n++) {
                if (n != 0) {
@@ -168,7 +168,8 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
                syscoremask = (1 << hweight32(~fusemask & mask)) - 1;
 
                pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
-               for (core = 0; core < nlm_cores_per_node(); core++) {
+               ncores = nlm_cores_per_node();
+               for (core = 0; core < ncores; core++) {
                        /* we will be on node 0 core 0 */
                        if (n == 0 && core == 0)
                                continue;
@@ -178,8 +179,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
                                continue;
 
                        /* see if at least the first hw thread is enabled */
-                       cpu = (n * nlm_cores_per_node() + core)
-                                               * NLM_THREADS_PER_CORE;
+                       cpu = (n * ncores + core) * NLM_THREADS_PER_CORE;
                        if (!cpumask_test_cpu(cpu, wakeup_mask))
                                continue;
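
As a worked example of the index computed above, with the pre-XLP9XX values
ncores = 8 and NLM_THREADS_PER_CORE = 4: core 2 on node 1 starts at
cpu = (1 * 8 + 2) * 4 = 40, and that is the bit tested in wakeup_mask to decide
whether the core's first hardware thread should be brought up.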