Pull button into test branch
arch/i386/kernel/cpu/intel_cacheinfo.c
index c8547a6fa7e6179f41d0c1be2771d34448b33711..80b4c5d421b1366915028b2002af39cdae851c95 100644 (file)
@@ -4,6 +4,7 @@
  *      Changes:
  *      Venkatesh Pallipadi    : Adding cache identification through cpuid(4)
  *             Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
+ *     Andi Kleen              : CPUID4 emulation on AMD.
  */
 
 #include <linux/init.h>
@@ -130,25 +131,111 @@ struct _cpuid4_info {
        cpumask_t shared_cpu_map;
 };
 
-static unsigned short                  num_cache_leaves;
+unsigned short                 num_cache_leaves;
+
+/*
+ * AMD doesn't have CPUID4.  Emulate it here to report the same
+ * information to the user.  This makes some assumptions about the
+ * machine: no L3, L2 not shared, no SMT etc., which is currently
+ * true on AMD CPUs.
+ *
+ * In theory the TLBs could be reported as a fake cache type too
+ * (they end up in "dummy").  Maybe later.
+ */
+union l1_cache {
+       struct {
+               unsigned line_size : 8;
+               unsigned lines_per_tag : 8;
+               unsigned assoc : 8;
+               unsigned size_in_kb : 8;
+       };
+       unsigned val;
+};
+
+union l2_cache {
+       struct {
+               unsigned line_size : 8;
+               unsigned lines_per_tag : 4;
+               unsigned assoc : 4;
+               unsigned size_in_kb : 16;
+       };
+       unsigned val;
+};
+
+static const unsigned short assocs[] = {
+       [1] = 1, [2] = 2, [4] = 4, [6] = 8,
+       [8] = 16,
+       [0xf] = 0xffff  /* fully associative - no way to show this currently */
+};
+static const unsigned char levels[] = { 1, 1, 2 };
+static const unsigned char types[] = { 1, 2, 3 };
+
+static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
+                      union _cpuid4_leaf_ebx *ebx,
+                      union _cpuid4_leaf_ecx *ecx)
+{
+       unsigned dummy;
+       unsigned line_size, lines_per_tag, assoc, size_in_kb;
+       union l1_cache l1i, l1d;
+       union l2_cache l2;
+
+       eax->full = 0;
+       ebx->full = 0;
+       ecx->full = 0;
+
+       cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
+       cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy);
+
+       if (leaf > 2 || !l1d.val || !l1i.val || !l2.val)
+               return;
+
+       eax->split.is_self_initializing = 1;
+       eax->split.type = types[leaf];
+       eax->split.level = levels[leaf];
+       eax->split.num_threads_sharing = 0;
+       eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+
+       if (leaf <= 1) {
+               union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
+               assoc = l1->assoc;
+               line_size = l1->line_size;
+               lines_per_tag = l1->lines_per_tag;
+               size_in_kb = l1->size_in_kb;
+       } else {
+               assoc = l2.assoc;
+               line_size = l2.line_size;
+               lines_per_tag = l2.lines_per_tag;
+               /* cpu_data has errata corrections for K7 applied */
+               size_in_kb = current_cpu_data.x86_cache_size;
+       }
+
+       if (assoc == 0xf)
+               eax->split.is_fully_associative = 1;
+       ebx->split.coherency_line_size = line_size - 1;
+       ebx->split.ways_of_associativity = assocs[assoc] - 1;
+       ebx->split.physical_line_partition = lines_per_tag - 1;
+       ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
+               (ebx->split.ways_of_associativity + 1) - 1;
+}
 
 static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 {
-       unsigned int            eax, ebx, ecx, edx;
-       union _cpuid4_leaf_eax  cache_eax;
+       union _cpuid4_leaf_eax  eax;
+       union _cpuid4_leaf_ebx  ebx;
+       union _cpuid4_leaf_ecx  ecx;
+       unsigned                edx;
 
-       cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
-       cache_eax.full = eax;
-       if (cache_eax.split.type == CACHE_TYPE_NULL)
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+               amd_cpuid4(index, &eax, &ebx, &ecx);
+       else
+               cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
+       if (eax.split.type == CACHE_TYPE_NULL)
                return -EIO; /* better error ? */
 
-       this_leaf->eax.full = eax;
-       this_leaf->ebx.full = ebx;
-       this_leaf->ecx.full = ecx;
-       this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) *
-               (this_leaf->ebx.split.coherency_line_size + 1) *
-               (this_leaf->ebx.split.physical_line_partition + 1) *
-               (this_leaf->ebx.split.ways_of_associativity + 1);
+       this_leaf->eax = eax;
+       this_leaf->ebx = ebx;
+       this_leaf->ecx = ecx;
+       this_leaf->size = (ecx.split.number_of_sets + 1) *
+               (ebx.split.coherency_line_size + 1) *
+               (ebx.split.physical_line_partition + 1) *
+               (ebx.split.ways_of_associativity + 1);
        return 0;
 }
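
For reference, a minimal userspace sketch (not part of the patch) of what the
unions above decode: it reads the same AMD CPUID leaf 0x80000005 and reproduces
the CPUID4-style geometry plus the size identity used by cpuid4_cache_lookup().
__get_cpuid() is the GCC/clang builtin from <cpuid.h>, not a kernel API, and
the program assumes it runs on an AMD CPU:

#include <stdio.h>
#include <cpuid.h>

/* Same layout as the kernel's union l1_cache above
   (CPUID 0x80000005: ECX = L1 data, EDX = L1 instruction). */
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};

int main(void)
{
	unsigned eax, ebx, ecx, edx;
	union l1_cache l1d, l1i;
	unsigned ways, sets;

	if (!__get_cpuid(0x80000005, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID 0x80000005 not supported\n");
		return 1;
	}
	l1d.val = ecx;
	l1i.val = edx;

	if (!l1d.val || !l1d.line_size || !l1d.assoc)
		return 1;	/* not an AMD-style report, give up */

	/* The L1 assoc field is the literal associativity (0xff would be
	   fully associative); the patch's CPUID4-style fields store each
	   value minus one.  number_of_sets follows from the identity
	   size = sets * line_size * ways (lines_per_tag is 1 here). */
	ways = l1d.assoc;
	sets = (l1d.size_in_kb * 1024) / l1d.line_size / ways;

	printf("L1d: %uK, %u-way, %uB lines, %u sets\n",
	       l1d.size_in_kb, ways, l1d.line_size, sets);
	printf("L1i: %uK, %u-way, %uB lines\n",
	       l1i.size_in_kb, l1i.assoc, l1i.line_size);
	printf("round trip: %u bytes\n", sets * l1d.line_size * ways);
	return 0;
}

On a K8, for instance, the 64K, 2-way, 64-byte-line L1d decodes to
64*1024/64/2 = 512 sets, and 512 * 64 * 2 rounds back to 65536 bytes.
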
 
@@ -174,7 +261,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
        unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
        unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
        unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
        unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
 #endif
 
@@ -296,14 +383,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
        if (new_l2) {
                l2 = new_l2;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
                cpu_llc_id[cpu] = l2_id;
 #endif
        }
 
        if (new_l3) {
                l3 = new_l3;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
                cpu_llc_id[cpu] = l3_id;
 #endif
        }
@@ -393,12 +480,10 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
        if (num_cache_leaves == 0)
                return -ENOENT;
 
-       cpuid4_info[cpu] = kmalloc(
+       cpuid4_info[cpu] = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(cpuid4_info[cpu] == NULL))
                return -ENOMEM;
-       memset(cpuid4_info[cpu], 0,
-           sizeof(struct _cpuid4_info) * num_cache_leaves);
 
        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
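
The kmalloc()+memset() pair here collapses to kzalloc(), which hands back
zeroed memory in one call; behavior is unchanged, including the NULL return
on failure.  A minimal module sketch of the equivalence (assuming same-era
kernel headers; all names here are illustrative):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static int __init kzalloc_demo_init(void)
{
	char *old_way, *new_way;

	/* Old pattern, as removed above: allocate, then zero by hand. */
	old_way = kmalloc(64, GFP_KERNEL);
	if (!old_way)
		return -ENOMEM;
	memset(old_way, 0, 64);

	/* New pattern: one call, memory arrives already zeroed. */
	new_way = kzalloc(64, GFP_KERNEL);
	if (!new_way) {
		kfree(old_way);
		return -ENOMEM;
	}

	kfree(old_way);
	kfree(new_way);
	return 0;
}

static void __exit kzalloc_demo_exit(void)
{
}

module_init(kzalloc_demo_init);
module_exit(kzalloc_demo_exit);
MODULE_LICENSE("GPL");
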
@@ -571,17 +656,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
                return -ENOENT;
 
        /* Allocate all required memory */
-       cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL);
+       cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (unlikely(cache_kobject[cpu] == NULL))
                goto err_out;
-       memset(cache_kobject[cpu], 0, sizeof(struct kobject));
 
-       index_kobject[cpu] = kmalloc(
+       index_kobject[cpu] = kzalloc(
            sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(index_kobject[cpu] == NULL))
                goto err_out;
-       memset(index_kobject[cpu], 0,
-           sizeof(struct _index_kobject) * num_cache_leaves);
 
        return 0;
 
@@ -642,7 +724,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
        return;
 }
 
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
@@ -660,7 +742,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-static struct notifier_block cacheinfo_cpu_notifier =
+static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
 {
     .notifier_call = cacheinfo_cpu_callback,
 };
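
The __cpuinit/__cpuinitdata annotations pair with the
register_hotcpu_notifier() switch in the next hunk: when CONFIG_HOTPLUG_CPU
is off, register_hotcpu_notifier() is a no-op stub, so the callback and the
notifier block can be discarded after boot instead of pinning .text and
.data.  A minimal module sketch of this era's pattern (names are
illustrative; later kernels replaced this API with cpuhp states):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit demo_cpu_callback(struct notifier_block *nfb,
				       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		printk(KERN_INFO "demo: cpu %u came online\n", cpu);
		break;
	case CPU_DEAD:
		printk(KERN_INFO "demo: cpu %u went down\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata demo_cpu_notifier = {
	.notifier_call = demo_cpu_callback,
};

static int __init demo_init(void)
{
	/* No-op when CONFIG_HOTPLUG_CPU is off. */
	register_hotcpu_notifier(&demo_cpu_notifier);
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_hotcpu_notifier(&demo_cpu_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
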
@@ -672,7 +754,7 @@ static int __cpuinit cache_sysfs_init(void)
        if (num_cache_leaves == 0)
                return 0;
 
-       register_cpu_notifier(&cacheinfo_cpu_notifier);
+       register_hotcpu_notifier(&cacheinfo_cpu_notifier);
 
        for_each_online_cpu(i) {
                cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,