/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its rwlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);

#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
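
/*
 * Example (editor's sketch, not part of the original file): a governor
 * would typically call get_cpu_idle_time() twice and work on the deltas
 * to estimate load. The prev_idle/prev_wall variables are hypothetical.
 *
 *      u64 idle, wall, idle_delta, wall_delta;
 *      unsigned int load;
 *
 *      idle = get_cpu_idle_time(cpu, &wall, 0);
 *      idle_delta = idle - prev_idle;
 *      wall_delta = wall - prev_wall;
 *      load = 100 * (wall_delta - idle_delta) / wall_delta;
 */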

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate and show the frequency table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                return ret;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
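
/*
 * Example (editor's sketch, not from the original file): for an SMP
 * system with one shared clock, a driver's ->init() callback can be
 * little more than a call to this helper. foo_freq_table and the
 * 300000 ns transition latency are hypothetical.
 *
 *      static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *      {
 *              return cpufreq_generic_init(policy, foo_freq_table, 300000);
 *      }
 */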

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
                return NULL;

        if (!down_read_trylock(&cpufreq_rwsem))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = per_cpu(cpufreq_cpu_data, cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy)
                up_read(&cpufreq_rwsem);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        if (cpufreq_disabled())
                return;

        kobject_put(&policy->kobj);
        up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
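
/*
 * Example (editor's sketch): cpufreq_cpu_get() and cpufreq_cpu_put() must
 * be used as a balanced pair; the get side takes a reference on the policy
 * kobject and pins the driver module via cpufreq_rwsem.
 *
 *      struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *      if (policy) {
 *              pr_info("cpu%u last set to %u kHz\n", cpu, policy->cur);
 *              cpufreq_cpu_put(policy);
 *      }
 */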

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                        l_p_j_ref, l_p_j_ref_freq);
        }
        if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                        loops_per_jiffy, ci->new);
        }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif
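
/*
 * Editor's note: on !SMP the scaling above amounts (up to rounding in
 * cpufreq_scale()) to
 *
 *      loops_per_jiffy = l_p_j_ref * ci->new / l_p_j_ref_freq;
 *
 * i.e. the delay loop is assumed to scale linearly with the CPU clock,
 * which is why drivers whose delay loops run at a constant rate set
 * CPUFREQ_CONST_LOOPS and skip this adjustment entirely.
 */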

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                        freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu\n", (unsigned long)freqs->new,
                        (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
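
/*
 * Example (editor's sketch): a driver's frequency-setting path is expected
 * to bracket the actual hardware change with the two notifications. The
 * foo_set_rate() helper is hypothetical.
 *
 *      struct cpufreq_freqs freqs;
 *
 *      freqs.old = policy->cur;
 *      freqs.new = target_freq;
 *      cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *      foo_set_rate(target_freq);
 *      cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */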


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
                if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strnicmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else if (has_target()) {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = __find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = __find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}
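
/*
 * Example (editor's sketch): parsing "ondemand" first scans
 * cpufreq_governor_list and, on a miss, tries
 * request_module("cpufreq_ondemand") before scanning again, so governors
 * built as modules are loaded on demand:
 *
 *      unsigned int pol;
 *      struct cpufreq_governor *gov;
 *
 *      if (!cpufreq_parse_governor("ondemand", &pol, &gov))
 *              pr_debug("resolved governor %s\n", gov->name);
 */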

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
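
/*
 * Editor's note: each show_one() line above expands to a small accessor;
 * show_one(scaling_min_freq, min), for instance, becomes:
 *
 *      static ssize_t show_scaling_min_freq
 *      (struct cpufreq_policy *policy, char *buf)
 *      {
 *              return sprintf(buf, "%u\n", policy->min);
 *      }
 */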

static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret;                                                        \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = cpufreq_set_policy(policy, &new_policy);                  \
        policy->user_policy.object = policy->object;                    \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        int ret;
        char    str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        ret = cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
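
/*
 * Editor's note: for a mask covering CPUs 0-3 the buffer is rendered as
 * "0 1 2 3\n"; this is the format exposed by the affected_cpus and
 * related_cpus attributes below.
 */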

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;

        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        if (!down_read_trylock(&cpufreq_rwsem))
                return -EINVAL;

        down_read(&policy->rwsem);

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        up_read(&policy->rwsem);
        up_read(&cpufreq_rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();

        if (!cpu_online(policy->cpu))
                goto unlock;

        if (!down_read_trylock(&cpufreq_rwsem))
                goto unlock;

        down_write(&policy->rwsem);

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        up_write(&policy->rwsem);

        up_read(&cpufreq_rwsem);
unlock:
        put_online_cpus();

        return ret;
}

697
698 static void cpufreq_sysfs_release(struct kobject *kobj)
699 {
700         struct cpufreq_policy *policy = to_policy(kobj);
701         pr_debug("last reference is dropped\n");
702         complete(&policy->kobj_unregister);
703 }
704
705 static const struct sysfs_ops sysfs_ops = {
706         .show   = show,
707         .store  = store,
708 };
709
710 static struct kobj_type ktype_cpufreq = {
711         .sysfs_ops      = &sysfs_ops,
712         .default_attrs  = default_attrs,
713         .release        = cpufreq_sysfs_release,
714 };
715
716 struct kobject *cpufreq_global_kobject;
717 EXPORT_SYMBOL(cpufreq_global_kobject);
718
719 static int cpufreq_global_kobject_usage;
720
721 int cpufreq_get_global_kobject(void)
722 {
723         if (!cpufreq_global_kobject_usage++)
724                 return kobject_add(cpufreq_global_kobject,
725                                 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
726
727         return 0;
728 }
729 EXPORT_SYMBOL(cpufreq_get_global_kobject);
730
731 void cpufreq_put_global_kobject(void)
732 {
733         if (!--cpufreq_global_kobject_usage)
734                 kobject_del(cpufreq_global_kobject);
735 }
736 EXPORT_SYMBOL(cpufreq_put_global_kobject);
737
738 int cpufreq_sysfs_create_file(const struct attribute *attr)
739 {
740         int ret = cpufreq_get_global_kobject();
741
742         if (!ret) {
743                 ret = sysfs_create_file(cpufreq_global_kobject, attr);
744                 if (ret)
745                         cpufreq_put_global_kobject();
746         }
747
748         return ret;
749 }
750 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
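
/*
 * Example (editor's sketch): a governor with a single global tunable can
 * publish it under /sys/devices/system/cpu/cpufreq with this helper. The
 * attribute name foo_rate and its show/store handlers are hypothetical.
 *
 *      define_one_global_rw(foo_rate);
 *      ...
 *      ret = cpufreq_sysfs_create_file(&foo_rate.attr);
 */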

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
        sysfs_remove_file(cpufreq_global_kobject, attr);
        cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct device *cpu_dev;

                if (j == policy->cpu)
                        continue;

                pr_debug("Adding link for CPU: %u\n", j);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret)
                        break;
        }
        return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* prepare interface data */
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   &dev->kobj, "cpufreq");
        if (ret)
                return ret;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        goto err_out_kobj_put;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (has_target()) {
                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        goto err_out_kobj_put;
        }

        ret = cpufreq_add_dev_symlink(policy);
        if (ret)
                goto err_out_kobj_put;

        return ret;

err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);
        return ret;
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_policy new_policy;
        int ret = 0;

        memcpy(&new_policy, policy, sizeof(*policy));
        /* assure that the starting sequence is run in cpufreq_set_policy */
        policy->governor = NULL;

        /* set default policy */
        ret = cpufreq_set_policy(policy, &new_policy);
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                                  unsigned int cpu, struct device *dev,
                                  bool frozen)
{
        int ret = 0;
        unsigned long flags;

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        down_write(&policy->rwsem);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        up_write(&policy->rwsem);

        if (has_target()) {
                if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
                        (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
        }

        /* Don't touch sysfs links during light-weight init */
        if (!frozen)
                ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");

        return ret;
}
#endif

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        read_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
        struct cpufreq_policy *policy;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);

        return policy;

err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        if (WARN_ON(cpu == policy->cpu))
                return;

        down_write(&policy->rwsem);

        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;

        up_write(&policy->rwsem);

        cpufreq_frequency_table_update_policy_cpu(policy);
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
}

static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                             bool frozen)
{
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_policy *tpolicy;
        struct cpufreq_governor *gov;
#endif

        if (cpu_is_offline(cpu))
                return 0;

        pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
        /* check whether a different CPU already registered this
         * CPU because it is in the same boat. */
        policy = cpufreq_cpu_get(cpu);
        if (unlikely(policy)) {
                cpufreq_cpu_put(policy);
                return 0;
        }
#endif

        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;

#ifdef CONFIG_HOTPLUG_CPU
        /* Check if this cpu was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
                if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
                        up_read(&cpufreq_rwsem);
                        return ret;
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

        if (frozen)
                /* Restore the saved policy when doing light-weight init */
                policy = cpufreq_policy_restore(cpu);
        else
                policy = cpufreq_policy_alloc();

        if (!policy)
                goto nomem_out;

        /*
         * In the resume path, since we restore a saved policy, the assignment
         * to policy->cpu is like an update of the existing policy, rather than
         * the creation of a brand new one. So we need to perform this update
         * by invoking update_policy_cpu().
         */
        if (frozen && cpu != policy->cpu)
                update_policy_cpu(policy, cpu);
        else
                policy->cpu = cpu;

        policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        cpumask_copy(policy->cpus, cpumask_of(cpu));

        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        /* call driver. From then on the cpufreq driver must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto err_set_policy_cpu;
        }

        if (cpufreq_driver->get) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto err_get_freq;
                }
        }

        /* related_cpus should at least include policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

        /*
         * The affected CPUs must always be the ones that are online. We
         * aren't managing offline CPUs here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        policy->user_policy.min = policy->min;
        policy->user_policy.max = policy->max;

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
        gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
        if (gov) {
                policy->governor = gov;
                pr_debug("Restoring governor %s for cpu %d\n",
                       policy->governor->name, cpu);
        }
#endif

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!frozen) {
                ret = cpufreq_add_dev_interface(policy, dev);
                if (ret)
                        goto err_out_unregister;
        }

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_add(&policy->policy_list, &cpufreq_policy_list);
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_init_policy(policy);

        kobject_uevent(&policy->kobj, KOBJ_ADD);
        up_read(&cpufreq_rwsem);

        pr_debug("initialization complete\n");

        return 0;

err_out_unregister:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

err_get_freq:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
err_set_policy_cpu:
        cpufreq_policy_free(policy);
nomem_out:
        up_read(&cpufreq_rwsem);

        return ret;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with CPU hotplugging and all hell will break loose. Tried
 * to clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        return __cpufreq_add_dev(dev, sif, false);
}

static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
                                           unsigned int old_cpu, bool frozen)
{
        struct device *cpu_dev;
        int ret;

        /* first sibling now owns the new sysfs dir */
        cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

        /* Don't touch sysfs files during light-weight tear-down */
        if (frozen)
                return cpu_dev->id;

        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
        if (ret) {
                pr_err("%s: Failed to move kobj: %d\n", __func__, ret);

                down_write(&policy->rwsem);
                cpumask_set_cpu(old_cpu, policy->cpus);
                up_write(&policy->rwsem);

                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");

                return -EINVAL;
        }

        return cpu_dev->id;
}

static int __cpufreq_remove_dev_prepare(struct device *dev,
                                        struct subsys_interface *sif,
                                        bool frozen)
{
        unsigned int cpu = dev->id, cpus;
        int new_cpu, ret;
        unsigned long flags;
        struct cpufreq_policy *policy;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data, cpu);

        /* Save the policy somewhere when doing a light-weight tear-down */
        if (frozen)
                per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

#ifdef CONFIG_HOTPLUG_CPU
        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        policy->governor->name, CPUFREQ_NAME_LEN);
#endif

        down_read(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);
        up_read(&policy->rwsem);

        if (cpu != policy->cpu) {
                if (!frozen)
                        sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
                new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
                if (new_cpu >= 0) {
                        update_policy_cpu(policy, new_cpu);

                        if (!frozen) {
                                pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
                                                __func__, new_cpu, cpu);
                        }
                }
        }

        return 0;
}

static int __cpufreq_remove_dev_finish(struct device *dev,
                                       struct subsys_interface *sif,
                                       bool frozen)
{
        unsigned int cpu = dev->id, cpus;
        int ret;
        unsigned long flags;
        struct cpufreq_policy *policy;
        struct kobject *kobj;
        struct completion *cmp;

        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        down_write(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);

        if (cpus > 1)
                cpumask_clear_cpu(cpu, policy->cpus);
        up_write(&policy->rwsem);

        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
                if (has_target()) {
                        ret = __cpufreq_governor(policy,
                                        CPUFREQ_GOV_POLICY_EXIT);
                        if (ret) {
                                pr_err("%s: Failed to exit governor\n",
                                                __func__);
                                return ret;
                        }
                }

                if (!frozen) {
                        down_read(&policy->rwsem);
                        kobj = &policy->kobj;
                        cmp = &policy->kobj_unregister;
                        up_read(&policy->rwsem);
                        kobject_put(kobj);

                        /*
                         * We need to make sure that the underlying kobj is
                         * actually not referenced anymore by anybody before we
                         * proceed with unloading.
                         */
                        pr_debug("waiting for dropping of refcount\n");
                        wait_for_completion(cmp);
                        pr_debug("wait complete\n");
                }

                /*
                 * Perform the ->exit() even during light-weight tear-down,
                 * since this is a core component, and is essential for the
                 * subsequent light-weight ->init() to succeed.
                 */
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);

                /* Remove policy from list of active policies */
                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_del(&policy->policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);

                if (!frozen)
                        cpufreq_policy_free(policy);
        } else {
                if (has_target()) {
                        if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
                                        (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
                                pr_err("%s: Failed to start governor\n",
                                                __func__);
                                return ret;
                        }
                }
        }

        per_cpu(cpufreq_cpu_data, cpu) = NULL;
        return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int ret;

        if (cpu_is_offline(cpu))
                return 0;

        ret = __cpufreq_remove_dev_prepare(dev, sif, false);

        if (!ret)
                ret = __cpufreq_remove_dev_finish(dev, sif, false);

        return ret;
}

static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);
        unsigned int cpu = policy->cpu;
        pr_debug("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
}

/**
 *      cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
 *      in deep trouble.
 *      @cpu: cpu number
 *      @old_freq: CPU frequency the kernel thinks the CPU runs at
 *      @new_freq: CPU frequency the CPU actually runs at
 *
 *      We adjust to the current frequency first, and need to clean up later.
 *      So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
                                unsigned int new_freq)
{
        struct cpufreq_policy *policy;
        struct cpufreq_freqs freqs;
        unsigned long flags;

        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
                old_freq, new_freq);

        freqs.old = old_freq;
        freqs.new = new_freq;

        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be the same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int ret_freq = 0;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                return cpufreq_driver->get(cpu);

        policy = cpufreq_cpu_get(cpu);
        if (policy) {
                ret_freq = policy->cur;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int ret_freq = 0;

        if (policy) {
                ret_freq = policy->max;
                cpufreq_cpu_put(policy);
        }

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
        unsigned int ret_freq = 0;

        if (!cpufreq_driver->get)
                return ret_freq;

        ret_freq = cpufreq_driver->get(cpu);

        if (ret_freq && policy->cur &&
                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify no discrepancy between actual and
                 * saved value exists
                 */
                if (unlikely(ret_freq != policy->cur)) {
                        cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
                        schedule_work(&policy->update);
                }
        }

        return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current CPU frequency.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
        unsigned int ret_freq = 0;

        if (cpufreq_disabled() || !cpufreq_driver)
                return -ENOENT;

        BUG_ON(!policy);

        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;

        down_read(&policy->rwsem);

        ret_freq = __cpufreq_get(cpu);

        up_read(&policy->rwsem);
        up_read(&cpufreq_rwsem);

        return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
        .name           = "cpufreq",
        .subsys         = &cpu_subsys,
        .add_dev        = cpufreq_add_dev,
        .remove_dev     = cpufreq_remove_dev,
};

void cpufreq_suspend(void)
{
        struct cpufreq_policy *policy;

        if (!has_target())
                return;

        pr_debug("%s: Suspending Governors\n", __func__);

        list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
                if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
                        pr_err("%s: Failed to stop governor for policy: %p\n",
                                __func__, policy);

        cpufreq_suspended = true;
}

void cpufreq_resume(void)
{
        struct cpufreq_policy *policy;

        if (!has_target())
                return;

        pr_debug("%s: Resuming Governors\n", __func__);

        cpufreq_suspended = false;

        list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
                if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
                    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
                        pr_err("%s: Failed to start governor for policy: %p\n",
                                __func__, policy);
}

/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
        int ret = 0;

        int cpu = smp_processor_id();
        struct cpufreq_policy *policy;

        pr_debug("suspending cpu %u\n", cpu);

        /* If there's no policy for the boot CPU, we have nothing to do. */
        policy = cpufreq_cpu_get(cpu);
        if (!policy)
                return 0;

        if (cpufreq_driver->suspend) {
                ret = cpufreq_driver->suspend(policy);
                if (ret)
                        printk(KERN_ERR "cpufreq: suspend failed in ->suspend step on CPU %u\n",
                                        policy->cpu);
        }

        cpufreq_cpu_put(policy);
        return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *      1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *      2.) schedule a call to cpufreq_update_policy() ASAP once interrupts
 *          are restored. It will verify that the current freq is in sync
 *          with what we believe it to be. This is a bit later than when it
 *          should be, but nonetheless it's better than calling
 *          cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
        int ret = 0;

        int cpu = smp_processor_id();
        struct cpufreq_policy *policy;

        pr_debug("resuming cpu %u\n", cpu);

        /* If there's no policy for the boot CPU, we have nothing to do. */
        policy = cpufreq_cpu_get(cpu);
        if (!policy)
                return;

        if (cpufreq_driver->resume) {
                ret = cpufreq_driver->resume(policy);
                if (ret) {
                        printk(KERN_ERR "cpufreq: resume failed in ->resume step on CPU %u\n",
                                        policy->cpu);
                        goto fail;
                }
        }

        schedule_work(&policy->update);

fail:
        cpufreq_cpu_put(policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
        .suspend        = cpufreq_bp_suspend,
        .resume         = cpufreq_bp_resume,
};
1581
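/*
 * Illustrative sketch (not part of the original file): the ->suspend() and
 * ->resume() hooks invoked above are supplied by the low-level driver.  A
 * hypothetical driver that must reprogram a PLL around suspend might look
 * like this; my_driver_save_pll()/my_driver_restore_pll() are made-up
 * helpers.
 */
#if 0	/* example only, not compiled */
static int my_cpufreq_suspend(struct cpufreq_policy *policy)
{
        /* Save hardware state that is lost across suspend. */
        return my_driver_save_pll(policy->cpu);
}

static int my_cpufreq_resume(struct cpufreq_policy *policy)
{
        /* Restore the PLL; the core re-syncs the frequency afterwards. */
        return my_driver_restore_pll(policy->cpu);
}
#endif
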
1582 /**
1583  *      cpufreq_get_current_driver - return current driver's name
1584  *
1585  *      Return the name string of the currently loaded cpufreq driver
1586  *      or NULL, if none.
1587  */
1588 const char *cpufreq_get_current_driver(void)
1589 {
1590         if (cpufreq_driver)
1591                 return cpufreq_driver->name;
1592
1593         return NULL;
1594 }
1595 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1596
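/*
 * Illustrative sketch (not part of the original file): a caller can use
 * cpufreq_get_current_driver() to report which low-level driver is active,
 * handling the NULL case when none is registered.
 */
#if 0	/* example only, not compiled */
static void report_cpufreq_driver(void)
{
        const char *name = cpufreq_get_current_driver();

        pr_info("active cpufreq driver: %s\n", name ? name : "none");
}
#endif
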
1597 /*********************************************************************
1598  *                     NOTIFIER LISTS INTERFACE                      *
1599  *********************************************************************/
1600
1601 /**
1602  *      cpufreq_register_notifier - register a driver with cpufreq
1603  *      @nb: notifier function to register
1604  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1605  *
1606  *      Add a driver to one of two lists: either a list of drivers that
1607  *      are notified about clock rate changes (once before and once after
1608  *      the transition), or a list of drivers that are notified about
1609  *      changes in cpufreq policy.
1610  *
1611  *      This function may sleep, and has the same return conditions as
1612  *      blocking_notifier_chain_register.
1613  */
1614 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1615 {
1616         int ret;
1617
1618         if (cpufreq_disabled())
1619                 return -EINVAL;
1620
1621         WARN_ON(!init_cpufreq_transition_notifier_list_called);
1622
1623         switch (list) {
1624         case CPUFREQ_TRANSITION_NOTIFIER:
1625                 ret = srcu_notifier_chain_register(
1626                                 &cpufreq_transition_notifier_list, nb);
1627                 break;
1628         case CPUFREQ_POLICY_NOTIFIER:
1629                 ret = blocking_notifier_chain_register(
1630                                 &cpufreq_policy_notifier_list, nb);
1631                 break;
1632         default:
1633                 ret = -EINVAL;
1634         }
1635
1636         return ret;
1637 }
1638 EXPORT_SYMBOL(cpufreq_register_notifier);
1639
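/*
 * Illustrative sketch (not part of the original file): a module that wants
 * to react to frequency transitions registers a CPUFREQ_TRANSITION_NOTIFIER.
 * The callback receives a struct cpufreq_freqs and runs once with
 * CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE per transition.  The
 * callback and notifier_block names are hypothetical.
 */
#if 0	/* example only, not compiled */
static int my_freq_transition(struct notifier_block *nb, unsigned long val,
                              void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (val == CPUFREQ_POSTCHANGE)
                pr_info("cpu%u: %u kHz -> %u kHz\n",
                        freqs->cpu, freqs->old, freqs->new);

        return NOTIFY_OK;
}

static struct notifier_block my_freq_nb = {
        .notifier_call = my_freq_transition,
};

/* From module init:
 *      cpufreq_register_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */
#endif
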
1640 /**
1641  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1642  *      @nb: notifier block to be unregistered
1643  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1644  *
1645  *      Remove a driver from the CPU frequency notifier list.
1646  *
1647  *      This function may sleep, and has the same return conditions as
1648  *      blocking_notifier_chain_unregister.
1649  */
1650 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1651 {
1652         int ret;
1653
1654         if (cpufreq_disabled())
1655                 return -EINVAL;
1656
1657         switch (list) {
1658         case CPUFREQ_TRANSITION_NOTIFIER:
1659                 ret = srcu_notifier_chain_unregister(
1660                                 &cpufreq_transition_notifier_list, nb);
1661                 break;
1662         case CPUFREQ_POLICY_NOTIFIER:
1663                 ret = blocking_notifier_chain_unregister(
1664                                 &cpufreq_policy_notifier_list, nb);
1665                 break;
1666         default:
1667                 ret = -EINVAL;
1668         }
1669
1670         return ret;
1671 }
1672 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1673
1674
1675 /*********************************************************************
1676  *                              GOVERNORS                            *
1677  *********************************************************************/
1678
1679 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1680                             unsigned int target_freq,
1681                             unsigned int relation)
1682 {
1683         int retval = -EINVAL;
1684         unsigned int old_target_freq = target_freq;
1685
1686         if (cpufreq_disabled())
1687                 return -ENODEV;
1688
1689         /* Make sure that target_freq is within supported range */
1690         if (target_freq > policy->max)
1691                 target_freq = policy->max;
1692         if (target_freq < policy->min)
1693                 target_freq = policy->min;
1694
1695         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1696                         policy->cpu, target_freq, relation, old_target_freq);
1697
1698         /*
1699          * This might look like a redundant call, as we check it again after
1700          * finding the index. But it is left in intentionally for cases where
1701          * exactly the same frequency is requested again, so that we can save
1702          * a few function calls.
1703          */
1704         if (target_freq == policy->cur)
1705                 return 0;
1706
1707         if (cpufreq_driver->target)
1708                 retval = cpufreq_driver->target(policy, target_freq, relation);
1709         else if (cpufreq_driver->target_index) {
1710                 struct cpufreq_frequency_table *freq_table;
1711                 struct cpufreq_freqs freqs;
1712                 bool notify;
1713                 int index;
1714
1715                 freq_table = cpufreq_frequency_get_table(policy->cpu);
1716                 if (unlikely(!freq_table)) {
1717                         pr_err("%s: Unable to find freq_table\n", __func__);
1718                         goto out;
1719                 }
1720
1721                 retval = cpufreq_frequency_table_target(policy, freq_table,
1722                                 target_freq, relation, &index);
1723                 if (unlikely(retval)) {
1724                         pr_err("%s: Unable to find matching freq\n", __func__);
1725                         goto out;
1726                 }
1727
1728                 if (freq_table[index].frequency == policy->cur) {
1729                         retval = 0;
1730                         goto out;
1731                 }
1732
1733                 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1734
1735                 if (notify) {
1736                         freqs.old = policy->cur;
1737                         freqs.new = freq_table[index].frequency;
1738                         freqs.flags = 0;
1739
1740                         pr_debug("%s: cpu: %d, old freq: %u, new freq: %u\n",
1741                                         __func__, policy->cpu, freqs.old,
1742                                         freqs.new);
1743
1744                         cpufreq_notify_transition(policy, &freqs,
1745                                         CPUFREQ_PRECHANGE);
1746                 }
1747
1748                 retval = cpufreq_driver->target_index(policy, index);
1749                 if (retval)
1750                         pr_err("%s: Failed to change cpu frequency: %d\n",
1751                                         __func__, retval);
1752
1753                 if (notify) {
1754                         /*
1755                          * Notify with old freq in case we failed to change
1756                          * frequency
1757                          */
1758                         if (retval)
1759                                 freqs.new = freqs.old;
1760
1761                         cpufreq_notify_transition(policy, &freqs,
1762                                         CPUFREQ_POSTCHANGE);
1763                 }
1764         }
1765
1766 out:
1767         return retval;
1768 }
1769 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1770
1771 int cpufreq_driver_target(struct cpufreq_policy *policy,
1772                           unsigned int target_freq,
1773                           unsigned int relation)
1774 {
1775         int ret = -EINVAL;
1776
1777         down_write(&policy->rwsem);
1778
1779         ret = __cpufreq_driver_target(policy, target_freq, relation);
1780
1781         up_write(&policy->rwsem);
1782
1783         return ret;
1784 }
1785 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1786
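/*
 * Illustrative sketch (not part of the original file): a governor asks for
 * a target frequency through cpufreq_driver_target(), which takes the policy
 * rwsem around __cpufreq_driver_target().  CPUFREQ_RELATION_L selects the
 * lowest table frequency at or above the request, CPUFREQ_RELATION_H the
 * highest at or below it.  The function name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void my_gov_raise_to_max(struct cpufreq_policy *policy)
{
        /* Jump straight to the policy's maximum. */
        cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}
#endif
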
1787 /*
1788  * Pass a governor event (e.g. CPUFREQ_GOV_LIMITS) to the policy's governor.
1789  */
1790
1791 static int __cpufreq_governor(struct cpufreq_policy *policy,
1792                                         unsigned int event)
1793 {
1794         int ret;
1795
1796         /*
1797          * 'gov' need only be set when the default governor has latency
1798          * restrictions (e.g. conservative/ondemand); Kconfig ensures this.
1799          */
1800 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1801         struct cpufreq_governor *gov = &cpufreq_gov_performance;
1802 #else
1803         struct cpufreq_governor *gov = NULL;
1804 #endif
1805
1806         /* Don't start any governor operations if we are entering suspend */
1807         if (cpufreq_suspended)
1808                 return 0;
1809
1810         if (policy->governor->max_transition_latency &&
1811             policy->cpuinfo.transition_latency >
1812             policy->governor->max_transition_latency) {
1813                 if (!gov)
1814                         return -EINVAL;
1815
1816                 pr_warn("%s governor failed, too long transition latency of HW, falling back to %s governor\n",
1817                         policy->governor->name, gov->name);
1818                 policy->governor = gov;
1823         }
1824
1825         if (event == CPUFREQ_GOV_POLICY_INIT)
1826                 if (!try_module_get(policy->governor->owner))
1827                         return -EINVAL;
1828
1829         pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1830                                                 policy->cpu, event);
1831
1832         mutex_lock(&cpufreq_governor_lock);
1833         if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1834             || (!policy->governor_enabled
1835             && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1836                 mutex_unlock(&cpufreq_governor_lock);
1837                 return -EBUSY;
1838         }
1839
1840         if (event == CPUFREQ_GOV_STOP)
1841                 policy->governor_enabled = false;
1842         else if (event == CPUFREQ_GOV_START)
1843                 policy->governor_enabled = true;
1844
1845         mutex_unlock(&cpufreq_governor_lock);
1846
1847         ret = policy->governor->governor(policy, event);
1848
1849         if (!ret) {
1850                 if (event == CPUFREQ_GOV_POLICY_INIT)
1851                         policy->governor->initialized++;
1852                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1853                         policy->governor->initialized--;
1854         } else {
1855                 /* Restore original values */
1856                 mutex_lock(&cpufreq_governor_lock);
1857                 if (event == CPUFREQ_GOV_STOP)
1858                         policy->governor_enabled = true;
1859                 else if (event == CPUFREQ_GOV_START)
1860                         policy->governor_enabled = false;
1861                 mutex_unlock(&cpufreq_governor_lock);
1862         }
1863
1864         if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1865                         ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1866                 module_put(policy->governor->owner);
1867
1868         return ret;
1869 }
1870
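/*
 * Illustrative sketch (not part of the original file): the core drives a
 * governor through its lifecycle with the following event sequence (error
 * handling omitted for brevity); the function wrapping it is hypothetical.
 */
#if 0	/* example only, not compiled */
static void governor_lifecycle(struct cpufreq_policy *policy)
{
        __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); /* takes module ref */
        __cpufreq_governor(policy, CPUFREQ_GOV_START);       /* begin sampling */
        __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);      /* apply min/max */
        /* ... policy active ... */
        __cpufreq_governor(policy, CPUFREQ_GOV_STOP);        /* halt sampling */
        __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); /* drops module ref */
}
#endif
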
1871 int cpufreq_register_governor(struct cpufreq_governor *governor)
1872 {
1873         int err;
1874
1875         if (!governor)
1876                 return -EINVAL;
1877
1878         if (cpufreq_disabled())
1879                 return -ENODEV;
1880
1881         mutex_lock(&cpufreq_governor_mutex);
1882
1883         governor->initialized = 0;
1884         err = -EBUSY;
1885         if (__find_governor(governor->name) == NULL) {
1886                 err = 0;
1887                 list_add(&governor->governor_list, &cpufreq_governor_list);
1888         }
1889
1890         mutex_unlock(&cpufreq_governor_mutex);
1891         return err;
1892 }
1893 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1894
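/*
 * Illustrative sketch (not part of the original file): a minimal governor
 * registers a struct cpufreq_governor whose ->governor() callback
 * multiplexes the GOV_* events.  "myperf" and my_gov_events() are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static int my_gov_events(struct cpufreq_policy *policy, unsigned int event)
{
        /* Pin the CPU at its maximum whenever started or limits change. */
        if (event == CPUFREQ_GOV_START || event == CPUFREQ_GOV_LIMITS)
                return __cpufreq_driver_target(policy, policy->max,
                                               CPUFREQ_RELATION_H);
        return 0;
}

static struct cpufreq_governor cpufreq_gov_myperf = {
        .name           = "myperf",
        .governor       = my_gov_events,
        .owner          = THIS_MODULE,
};

/* From module init: cpufreq_register_governor(&cpufreq_gov_myperf); */
#endif
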
1895 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1896 {
1897 #ifdef CONFIG_HOTPLUG_CPU
1898         int cpu;
1899 #endif
1900
1901         if (!governor)
1902                 return;
1903
1904         if (cpufreq_disabled())
1905                 return;
1906
1907 #ifdef CONFIG_HOTPLUG_CPU
1908         for_each_present_cpu(cpu) {
1909                 if (cpu_online(cpu))
1910                         continue;
1911                 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1912                         strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1913         }
1914 #endif
1915
1916         mutex_lock(&cpufreq_governor_mutex);
1917         list_del(&governor->governor_list);
1918         mutex_unlock(&cpufreq_governor_mutex);
1920 }
1921 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1922
1923
1924 /*********************************************************************
1925  *                          POLICY INTERFACE                         *
1926  *********************************************************************/
1927
1928 /**
1929  * cpufreq_get_policy - get the current cpufreq_policy
1930  * @policy: struct cpufreq_policy into which the current cpufreq_policy
1931  *      is written
1932  * @cpu: CPU whose current policy is to be read
1933  *
1933  * Reads the current cpufreq policy.
1934  */
1935 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1936 {
1937         struct cpufreq_policy *cpu_policy;
1938         if (!policy)
1939                 return -EINVAL;
1940
1941         cpu_policy = cpufreq_cpu_get(cpu);
1942         if (!cpu_policy)
1943                 return -EINVAL;
1944
1945         memcpy(policy, cpu_policy, sizeof(*policy));
1946
1947         cpufreq_cpu_put(cpu_policy);
1948         return 0;
1949 }
1950 EXPORT_SYMBOL(cpufreq_get_policy);
1951
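/*
 * Illustrative sketch (not part of the original file): callers pass a
 * caller-owned struct cpufreq_policy that cpufreq_get_policy() fills with a
 * snapshot, so no reference needs to be released afterwards.  The function
 * name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void show_limits(unsigned int cpu)
{
        struct cpufreq_policy snapshot;

        if (!cpufreq_get_policy(&snapshot, cpu))
                pr_info("cpu%u: %u..%u kHz\n", cpu,
                        snapshot.min, snapshot.max);
}
#endif
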
1952 /*
1953  * policy: current policy.
1954  * new_policy: policy to be set.
1955  */
1956 static int cpufreq_set_policy(struct cpufreq_policy *policy,
1957                                 struct cpufreq_policy *new_policy)
1958 {
1959         int ret = 0, failed = 1;
1960
1961         pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1962                 new_policy->min, new_policy->max);
1963
1964         memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1965
1966         if (new_policy->min > policy->max || new_policy->max < policy->min) {
1967                 ret = -EINVAL;
1968                 goto error_out;
1969         }
1970
1971         /* verify the cpu speed can be set within this limit */
1972         ret = cpufreq_driver->verify(new_policy);
1973         if (ret)
1974                 goto error_out;
1975
1976         /* adjust if necessary - all reasons */
1977         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1978                         CPUFREQ_ADJUST, new_policy);
1979
1980         /* adjust if necessary - hardware incompatibility */
1981         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1982                         CPUFREQ_INCOMPATIBLE, new_policy);
1983
1984         /*
1985          * verify the cpu speed can be set within this limit, which might be
1986          * different to the first one
1987          */
1988         ret = cpufreq_driver->verify(new_policy);
1989         if (ret)
1990                 goto error_out;
1991
1992         /* notification of the new policy */
1993         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1994                         CPUFREQ_NOTIFY, new_policy);
1995
1996         policy->min = new_policy->min;
1997         policy->max = new_policy->max;
1998
1999         pr_debug("new min and max freqs are %u - %u kHz\n",
2000                                         policy->min, policy->max);
2001
2002         if (cpufreq_driver->setpolicy) {
2003                 policy->policy = new_policy->policy;
2004                 pr_debug("setting range\n");
2005                 ret = cpufreq_driver->setpolicy(new_policy);
2006         } else {
2007                 if (new_policy->governor != policy->governor) {
2008                         /* save old, working values */
2009                         struct cpufreq_governor *old_gov = policy->governor;
2010
2011                         pr_debug("governor switch\n");
2012
2013                         /* end old governor */
2014                         if (policy->governor) {
2015                                 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2016                                 up_write(&policy->rwsem);
2017                                 __cpufreq_governor(policy,
2018                                                 CPUFREQ_GOV_POLICY_EXIT);
2019                                 down_write(&policy->rwsem);
2020                         }
2021
2022                         /* start new governor */
2023                         policy->governor = new_policy->governor;
2024                         if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2025                                 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
2026                                         failed = 0;
2027                                 } else {
2028                                         up_write(&policy->rwsem);
2029                                         __cpufreq_governor(policy,
2030                                                         CPUFREQ_GOV_POLICY_EXIT);
2031                                         down_write(&policy->rwsem);
2032                                 }
2033                         }
2034
2035                         if (failed) {
2036                                 /* new governor failed, so re-start old one */
2037                                 pr_debug("starting governor %s failed\n",
2038                                                         policy->governor->name);
2039                                 if (old_gov) {
2040                                         policy->governor = old_gov;
2041                                         __cpufreq_governor(policy,
2042                                                         CPUFREQ_GOV_POLICY_INIT);
2043                                         __cpufreq_governor(policy,
2044                                                            CPUFREQ_GOV_START);
2045                                 }
2046                                 ret = -EINVAL;
2047                                 goto error_out;
2048                         }
2049                         /* might be a policy change, too, so fall through */
2050                 }
2051                 pr_debug("governor: change or update limits\n");
2052                 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2053         }
2054
2055 error_out:
2056         return ret;
2057 }
2058
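/*
 * Illustrative sketch (not part of the original file): the CPUFREQ_ADJUST
 * step above is where policy notifiers may tighten the limits before the
 * second ->verify() pass.  A hypothetical thermal notifier capping the
 * maximum frequency could look like this; my_thermal_cap_khz is a made-up
 * variable.
 */
#if 0	/* example only, not compiled */
static int my_thermal_policy(struct notifier_block *nb, unsigned long val,
                             void *data)
{
        struct cpufreq_policy *policy = data;

        if (val == CPUFREQ_ADJUST)
                cpufreq_verify_within_limits(policy, 0, my_thermal_cap_khz);

        return NOTIFY_OK;
}
#endif
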
2059 /**
2060  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
2061  *      @cpu: CPU which shall be re-evaluated
2062  *
2063  *      Useful for policy notifiers whose constraints on the policy
2064  *      change over time.
2065  */
2066 int cpufreq_update_policy(unsigned int cpu)
2067 {
2068         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2069         struct cpufreq_policy new_policy;
2070         int ret;
2071
2072         if (!policy) {
2073                 ret = -ENODEV;
2074                 goto no_policy;
2075         }
2076
2077         down_write(&policy->rwsem);
2078
2079         pr_debug("updating policy for CPU %u\n", cpu);
2080         memcpy(&new_policy, policy, sizeof(*policy));
2081         new_policy.min = policy->user_policy.min;
2082         new_policy.max = policy->user_policy.max;
2083         new_policy.policy = policy->user_policy.policy;
2084         new_policy.governor = policy->user_policy.governor;
2085
2086         /*
2087          * BIOS might change freq behind our back
2088          * -> ask driver for current freq and notify governors about a change
2089          */
2090         if (cpufreq_driver->get) {
2091                 new_policy.cur = cpufreq_driver->get(cpu);
2092                 if (!policy->cur) {
2093                         pr_debug("Driver did not initialize current freq\n");
2094                         policy->cur = new_policy.cur;
2095                 } else {
2096                         if (policy->cur != new_policy.cur && has_target())
2097                                 cpufreq_out_of_sync(cpu, policy->cur,
2098                                                                 new_policy.cur);
2099                 }
2100         }
2101
2102         ret = cpufreq_set_policy(policy, &new_policy);
2103
2104         up_write(&policy->rwsem);
2105
2106         cpufreq_cpu_put(policy);
2107 no_policy:
2108         return ret;
2109 }
2110 EXPORT_SYMBOL(cpufreq_update_policy);
2111
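/*
 * Illustrative sketch (not part of the original file): after a notifier's
 * constraints change (e.g. the hypothetical thermal cap above), its owner
 * nudges the core to rerun cpufreq_set_policy() on each affected CPU.
 */
#if 0	/* example only, not compiled */
static void my_thermal_cap_changed(void)
{
        unsigned int cpu;

        for_each_online_cpu(cpu)
                cpufreq_update_policy(cpu);
}
#endif
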
2112 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2113                                         unsigned long action, void *hcpu)
2114 {
2115         unsigned int cpu = (unsigned long)hcpu;
2116         struct device *dev;
2117         bool frozen = false;
2118
2119         dev = get_cpu_device(cpu);
2120         if (dev) {
2121                 switch (action & ~CPU_TASKS_FROZEN) {
2123                 case CPU_ONLINE:
2124                         __cpufreq_add_dev(dev, NULL, frozen);
2125                         cpufreq_update_policy(cpu);
2126                         break;
2127
2128                 case CPU_DOWN_PREPARE:
2129                         __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2130                         break;
2131
2132                 case CPU_POST_DEAD:
2133                         __cpufreq_remove_dev_finish(dev, NULL, frozen);
2134                         break;
2135
2136                 case CPU_DOWN_FAILED:
2137                         __cpufreq_add_dev(dev, NULL, frozen);
2138                         break;
2139                 }
2140         }
2141         return NOTIFY_OK;
2142 }
2143
2144 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2145         .notifier_call = cpufreq_cpu_callback,
2146 };
2147
2148 /*********************************************************************
2149  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2150  *********************************************************************/
2151
2152 /**
2153  * cpufreq_register_driver - register a CPU Frequency driver
2154  * @driver_data: A struct cpufreq_driver containing the values
2155  * submitted by the CPU Frequency driver.
2156  *
2157  * Registers a CPU Frequency driver to this core code. This code
2158  * returns zero on success, -EBUSY when another driver got here first
2159  * (and isn't unregistered in the meantime).
2160  *
2161  */
2162 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2163 {
2164         unsigned long flags;
2165         int ret;
2166
2167         if (cpufreq_disabled())
2168                 return -ENODEV;
2169
2170         if (!driver_data || !driver_data->verify || !driver_data->init ||
2171             !(driver_data->setpolicy || driver_data->target_index ||
2172                     driver_data->target))
2173                 return -EINVAL;
2174
2175         pr_debug("trying to register driver %s\n", driver_data->name);
2176
2177         if (driver_data->setpolicy)
2178                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2179
2180         write_lock_irqsave(&cpufreq_driver_lock, flags);
2181         if (cpufreq_driver) {
2182                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2183                 return -EEXIST;
2184         }
2185         cpufreq_driver = driver_data;
2186         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2187
2188         ret = subsys_interface_register(&cpufreq_interface);
2189         if (ret)
2190                 goto err_null_driver;
2191
2192         if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2193                 int i;
2194                 ret = -ENODEV;
2195
2196                 /* check for at least one working CPU */
2197                 for (i = 0; i < nr_cpu_ids; i++)
2198                         if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2199                                 ret = 0;
2200                                 break;
2201                         }
2202
2203                 /* if all ->init() calls failed, unregister */
2204                 if (ret) {
2205                         pr_debug("no CPU initialized for driver %s\n",
2206                                                         driver_data->name);
2207                         goto err_if_unreg;
2208                 }
2209         }
2210
2211         register_hotcpu_notifier(&cpufreq_cpu_notifier);
2212         pr_debug("driver %s up and running\n", driver_data->name);
2213
2214         return 0;
2215 err_if_unreg:
2216         subsys_interface_unregister(&cpufreq_interface);
2217 err_null_driver:
2218         write_lock_irqsave(&cpufreq_driver_lock, flags);
2219         cpufreq_driver = NULL;
2220         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2221         return ret;
2222 }
2223 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2224
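/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->target_index driver supplies init/verify/target_index plus a frequency
 * table, then registers from module init.  The "mychip" names and
 * my_hw_set_opp() are hypothetical.
 */
#if 0	/* example only, not compiled */
static struct cpufreq_frequency_table mychip_freqs[] = {
        { .frequency = 500000 },                /* kHz */
        { .frequency = 1000000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int mychip_init(struct cpufreq_policy *policy)
{
        policy->cpuinfo.transition_latency = 100000;    /* ns */
        return cpufreq_table_validate_and_show(policy, mychip_freqs);
}

static int mychip_target_index(struct cpufreq_policy *policy,
                               unsigned int index)
{
        /* Program the hardware for the chosen table entry. */
        return my_hw_set_opp(mychip_freqs[index].frequency);
}

static struct cpufreq_driver mychip_driver = {
        .name           = "mychip",
        .init           = mychip_init,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = mychip_target_index,
        .attr           = cpufreq_generic_attr,
};

/* Module init/exit would call cpufreq_register_driver(&mychip_driver) and
 * cpufreq_unregister_driver(&mychip_driver) respectively.
 */
#endif
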
2225 /**
2226  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2227  *
2228  * Unregister the current CPUFreq driver. Only call this if you have
2229  * the right to do so, i.e. if you have succeeded in initialising before!
2230  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2231  * currently not initialised.
2232  */
2233 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2234 {
2235         unsigned long flags;
2236
2237         if (!cpufreq_driver || (driver != cpufreq_driver))
2238                 return -EINVAL;
2239
2240         pr_debug("unregistering driver %s\n", driver->name);
2241
2242         subsys_interface_unregister(&cpufreq_interface);
2243         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2244
2245         down_write(&cpufreq_rwsem);
2246         write_lock_irqsave(&cpufreq_driver_lock, flags);
2247
2248         cpufreq_driver = NULL;
2249
2250         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2251         up_write(&cpufreq_rwsem);
2252
2253         return 0;
2254 }
2255 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2256
2257 static int __init cpufreq_core_init(void)
2258 {
2259         if (cpufreq_disabled())
2260                 return -ENODEV;
2261
2262         cpufreq_global_kobject = kobject_create();
2263         BUG_ON(!cpufreq_global_kobject);
2264         register_syscore_ops(&cpufreq_syscore_ops);
2265
2266         return 0;
2267 }
2268 core_initcall(cpufreq_core_init);