/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);
static LIST_HEAD(cpufreq_policy_list);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
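
/*
 * Example (illustrative sketch, not part of this file): governors typically
 * derive CPU load from two successive get_cpu_idle_time() samples. The
 * helper below is hypothetical and only shows the arithmetic:
 *
 *	static unsigned int sample_load(unsigned int cpu, u64 *prev_wall,
 *					u64 *prev_idle)
 *	{
 *		u64 wall, idle = get_cpu_idle_time(cpu, &wall, 0);
 *		u64 wall_delta = wall - *prev_wall;
 *		u64 idle_delta = idle - *prev_idle;
 *
 *		*prev_wall = wall;
 *		*prev_idle = idle;
 *
 *		// Load in percent: busy time over elapsed wall time.
 *		return wall_delta ?
 *			div64_u64(100 * (wall_delta - idle_delta),
 *				  wall_delta) : 0;
 *	}
 */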

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the frequency table passed in
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        int ret;

        ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("%s: invalid frequency table: %d\n", __func__, ret);
                return ret;
        }

        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);

        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
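
/*
 * Example (illustrative sketch, not part of this file): a minimal driver
 * ->init() callback built on cpufreq_generic_init(). The "foo" names, the
 * table values and the 300 us latency are all hypothetical:
 *
 *	static struct cpufreq_frequency_table foo_freq_table[] = {
 *		{ .frequency = 500000 },		// 500 MHz, in kHz
 *		{ .frequency = 1000000 },		// 1 GHz
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = foo_clk;			// clk acquired at probe
 *		return cpufreq_generic_init(policy, foo_freq_table,
 *					    300 * 1000);
 *	}
 */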

unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        return per_cpu(cpufreq_cpu_data, cpu);
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
                return NULL;

        if (!down_read_trylock(&cpufreq_rwsem))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = per_cpu(cpufreq_cpu_data, cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy)
                up_read(&cpufreq_rwsem);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        if (cpufreq_disabled())
                return;

        kobject_put(&policy->kobj);
        up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
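
/*
 * Example (illustrative sketch, not part of this file): every successful
 * cpufreq_cpu_get() must be balanced by cpufreq_cpu_put(), since the pair
 * holds both a kobject reference and cpufreq_rwsem:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u: %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */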

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                /*
                 * Detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                         freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu\n",
                         (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there is a chance that the transition failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{
        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (unlikely(WARN_ON(!policy->transition_ongoing)))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
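
/*
 * Example (illustrative sketch, not part of this file): a synchronous driver
 * ->target_index() callback wrapping its hardware write with the transition
 * begin/end pair. foo_write_freq() and foo_freq_table are hypothetical:
 *
 *	static int foo_target_index(struct cpufreq_policy *policy,
 *				    unsigned int index)
 *	{
 *		struct cpufreq_freqs freqs = {
 *			.old = policy->cur,
 *			.new = foo_freq_table[index].frequency,
 *		};
 *		int ret;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = foo_write_freq(freqs.new);	// program the PLL
 *		cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 *		return ret;
 *	}
 */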

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strncasecmp(str_governor, "powersave",
                                                CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else if (has_target()) {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = __find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = __find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(
        struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;

        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        int ret, temp;                                                  \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        temp = new_policy.object;                                       \
        ret = cpufreq_set_policy(policy, &new_policy);                  \
        if (!ret)                                                       \
                policy->user_policy.object = temp;                      \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        int ret;
        char    str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        ret = cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;

        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        if (!down_read_trylock(&cpufreq_rwsem))
                return -EINVAL;

        down_read(&policy->rwsem);

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        up_read(&policy->rwsem);
        up_read(&cpufreq_rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        get_online_cpus();

        if (!cpu_online(policy->cpu))
                goto unlock;

        if (!down_read_trylock(&cpufreq_rwsem))
                goto unlock;

        down_write(&policy->rwsem);

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        up_write(&policy->rwsem);

        up_read(&cpufreq_rwsem);
unlock:
        put_online_cpus();

        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);

        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
        if (!cpufreq_global_kobject_usage++)
                return kobject_add(cpufreq_global_kobject,
                                &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

        return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
        if (!--cpufreq_global_kobject_usage)
                kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
        int ret = cpufreq_get_global_kobject();

        if (!ret) {
                ret = sysfs_create_file(cpufreq_global_kobject, attr);
                if (ret)
                        cpufreq_put_global_kobject();
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
        sysfs_remove_file(cpufreq_global_kobject, attr);
        cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
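
/*
 * Example (illustrative sketch, not part of this file): publishing a global
 * attribute under /sys/devices/system/cpu/cpufreq with the helpers above.
 * The attribute name "example" and its show routine are hypothetical:
 *
 *	static ssize_t show_example(struct kobject *kobj,
 *				    struct attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "42\n");
 *	}
 *	define_one_global_ro(example);
 *
 *	// At init: creates the file, taking a global-kobject reference.
 *	ret = cpufreq_sysfs_create_file(&example.attr);
 *	// At exit: removes the file and drops the reference.
 *	cpufreq_sysfs_remove_file(&example.attr);
 */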

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct device *cpu_dev;

                if (j == policy->cpu)
                        continue;

                pr_debug("Adding link for CPU: %u\n", j);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret)
                        break;
        }
        return ret;
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return cpufreq_add_dev_symlink(policy);
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;
        int ret = 0;

        memcpy(&new_policy, policy, sizeof(*policy));

        /* Update governor of new_policy to the governor used before hotplug */
        gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
        if (gov)
                pr_debug("Restoring governor %s for cpu %d\n",
                                policy->governor->name, policy->cpu);
        else
                gov = CPUFREQ_DEFAULT_GOVERNOR;

        new_policy.governor = gov;

        /* Use the default policy if it is valid. */
        if (cpufreq_driver->setpolicy)
                cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

        /* set default policy */
        ret = cpufreq_set_policy(policy, &new_policy);
        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                                  unsigned int cpu, struct device *dev)
{
        int ret = 0;
        unsigned long flags;

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        down_write(&policy->rwsem);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        up_write(&policy->rwsem);

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
        }

        return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        read_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (policy)
                policy->governor = NULL;

        return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
        struct cpufreq_policy *policy;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);

        return policy;

err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
        struct kobject *kobj;
        struct completion *cmp;

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_REMOVE_POLICY, policy);

        down_read(&policy->rwsem);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_read(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}

static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
                             struct device *cpu_dev)
{
        int ret;

        if (WARN_ON(cpu == policy->cpu))
                return 0;

        /* Move kobject to the new policy->cpu */
        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
        if (ret) {
                pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
                return ret;
        }

        down_write(&policy->rwsem);

        policy->last_cpu = policy->cpu;
        policy->cpu = cpu;

        up_write(&policy->rwsem);

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);

        return 0;
}

static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
        bool recover_policy = cpufreq_suspended;
#ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_policy *tpolicy;
#endif

        if (cpu_is_offline(cpu))
                return 0;

        pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
        /*
         * Check whether a different CPU already registered this
         * CPU because it is in the same boat.
         */
        policy = cpufreq_cpu_get(cpu);
        if (unlikely(policy)) {
                cpufreq_cpu_put(policy);
                return 0;
        }
#endif

        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;

#ifdef CONFIG_HOTPLUG_CPU
        /* Check if this CPU was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
                if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
                        ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
                        up_read(&cpufreq_rwsem);
                        return ret;
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

        /*
         * Restore the saved policy when doing light-weight init and fall back
         * to the full init if that fails.
         */
        policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
        if (!policy) {
                recover_policy = false;
                policy = cpufreq_policy_alloc();
                if (!policy)
                        goto nomem_out;
        }

        /*
         * In the resume path, since we restore a saved policy, the assignment
         * to policy->cpu is like an update of the existing policy, rather than
         * the creation of a brand new one. So we need to perform this update
         * by invoking update_policy_cpu().
         */
        if (recover_policy && cpu != policy->cpu)
                WARN_ON(update_policy_cpu(policy, cpu, dev));
        else
                policy->cpu = cpu;

        cpumask_copy(policy->cpus, cpumask_of(cpu));

        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        /*
         * Call the driver. From then on the driver must be able to accept
         * all calls to ->verify and ->setpolicy for this CPU.
         */
        ret = cpufreq_driver->init(policy);
        if (ret) {
                pr_debug("initialization failed\n");
                goto err_set_policy_cpu;
        }

        down_write(&policy->rwsem);

        /* related_cpus must at least include policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

        /*
         * Affected cpus must always be the ones that are online. We aren't
         * managing offline cpus here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        if (!recover_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;

                /* prepare interface data */
                ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                           &dev->kobj, "cpufreq");
                if (ret) {
                        pr_err("%s: failed to init policy->kobj: %d\n",
                               __func__, ret);
                        goto err_init_policy_kobj;
                }
        }

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto err_get_freq;
                }
        }

        /*
         * Sometimes boot loaders set the CPU frequency to a value outside of
         * the frequency table known to the cpufreq core. In such cases the
         * CPU might be unstable if it has to run at that frequency for a
         * long time, so it is better to set it to a frequency which is
         * specified in the freq-table. This also makes cpufreq stats
         * inconsistent, as cpufreq-stats would fail to register because the
         * current frequency of the CPU isn't found in the freq-table.
         *
         * Because we don't want this change to affect the boot process badly,
         * we go for the next freq which is >= policy->cur ('cur' must be set
         * by now, otherwise we will end up setting freq to the lowest of the
         * table as 'cur' is initialized to zero).
         *
         * We are passing target-freq as "policy->cur - 1" otherwise
         * __cpufreq_driver_target() would simply fail, as policy->cur will be
         * equal to target-freq.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at an unknown frequency? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here after boot in a few seconds may not
                         * mean that system will remain stable at "unknown"
                         * frequency for longer duration. Hence, a BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);

        if (!recover_policy) {
                ret = cpufreq_add_dev_interface(policy, dev);
                if (ret)
                        goto err_out_unregister;
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);
        }

        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_add(&policy->policy_list, &cpufreq_policy_list);
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        cpufreq_init_policy(policy);

        if (!recover_policy) {
                policy->user_policy.policy = policy->policy;
                policy->user_policy.governor = policy->governor;
        }
        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);

        up_read(&cpufreq_rwsem);

        /* Callback for handling stuff after policy is ready */
        if (cpufreq_driver->ready)
                cpufreq_driver->ready(policy);

        pr_debug("initialization complete\n");

        return 0;

err_out_unregister:
err_get_freq:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus)
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!recover_policy) {
                kobject_put(&policy->kobj);
                wait_for_completion(&policy->kobj_unregister);
        }
err_init_policy_kobj:
        up_write(&policy->rwsem);

        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
err_set_policy_cpu:
        if (recover_policy) {
                /* Do not leave stale fallback data behind. */
                per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
                cpufreq_policy_put_kobj(policy);
        }
        cpufreq_policy_free(policy);

nomem_out:
        up_read(&cpufreq_rwsem);

        return ret;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with CPU hotplugging and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
        return __cpufreq_add_dev(dev, sif);
}

static int __cpufreq_remove_dev_prepare(struct device *dev,
                                        struct subsys_interface *sif)
{
        unsigned int cpu = dev->id, cpus;
        int ret;
        unsigned long flags;
        struct cpufreq_policy *policy;

        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data, cpu);

        /* Save the policy somewhere when doing a light-weight tear-down */
        if (cpufreq_suspended)
                per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
                if (ret) {
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
        }

        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        policy->governor->name, CPUFREQ_NAME_LEN);

        down_read(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);
        up_read(&policy->rwsem);

        if (cpu != policy->cpu) {
                sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
                /* Nominate new CPU */
                int new_cpu = cpumask_any_but(policy->cpus, cpu);
                struct device *cpu_dev = get_cpu_device(new_cpu);

                sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
                ret = update_policy_cpu(policy, new_cpu, cpu_dev);
                if (ret) {
                        if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                              "cpufreq"))
                                pr_err("%s: Failed to restore kobj link to cpu:%d\n",
                                       __func__, cpu_dev->id);
                        return ret;
                }

                if (!cpufreq_suspended)
                        pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
                                 __func__, new_cpu, cpu);
        } else if (cpufreq_driver->stop_cpu) {
                cpufreq_driver->stop_cpu(policy);
        }

        return 0;
}

static int __cpufreq_remove_dev_finish(struct device *dev,
                                       struct subsys_interface *sif)
{
        unsigned int cpu = dev->id, cpus;
        int ret;
        unsigned long flags;
        struct cpufreq_policy *policy;

        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
                return -EINVAL;
        }

        down_write(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);

        if (cpus > 1)
                cpumask_clear_cpu(cpu, policy->cpus);
        up_write(&policy->rwsem);

        /* If cpu is last user of policy, free policy */
        if (cpus == 1) {
                if (has_target()) {
                        ret = __cpufreq_governor(policy,
                                        CPUFREQ_GOV_POLICY_EXIT);
                        if (ret) {
                                pr_err("%s: Failed to exit governor\n",
                                       __func__);
                                return ret;
                        }
                }

                if (!cpufreq_suspended)
                        cpufreq_policy_put_kobj(policy);

                /*
                 * Perform the ->exit() even during light-weight tear-down,
                 * since this is a core component, and is essential for the
                 * subsequent light-weight ->init() to succeed.
                 */
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);

                /* Remove policy from list of active policies */
                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_del(&policy->policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);

                if (!cpufreq_suspended)
                        cpufreq_policy_free(policy);
        } else if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
                if (!ret)
                        ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

                if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
        }

        per_cpu(cpufreq_cpu_data, cpu) = NULL;
        return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        int ret;

        if (cpu_is_offline(cpu))
                return 0;

        ret = __cpufreq_remove_dev_prepare(dev, sif);

        if (!ret)
                ret = __cpufreq_remove_dev_finish(dev, sif);

        return ret;
}
1500
1501 static void handle_update(struct work_struct *work)
1502 {
1503         struct cpufreq_policy *policy =
1504                 container_of(work, struct cpufreq_policy, update);
1505         unsigned int cpu = policy->cpu;
1506         pr_debug("handle_update for cpu %u called\n", cpu);
1507         cpufreq_update_policy(cpu);
1508 }
1509
1510 /**
1511  *      cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1512  *      in deep trouble.
1513  *      @cpu: cpu number
1514  *      @old_freq: CPU frequency the kernel thinks the CPU runs at
1515  *      @new_freq: CPU frequency the CPU actually runs at
1516  *
1517  *      We adjust to current frequency first, and need to clean up later.
1518  *      So either call to cpufreq_update_policy() or schedule handle_update()).
1519  */
1520 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1521                                 unsigned int new_freq)
1522 {
1523         struct cpufreq_policy *policy;
1524         struct cpufreq_freqs freqs;
1525         unsigned long flags;
1526
1527         pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1528                  old_freq, new_freq);
1529
1530         freqs.old = old_freq;
1531         freqs.new = new_freq;
1532
1533         read_lock_irqsave(&cpufreq_driver_lock, flags);
1534         policy = per_cpu(cpufreq_cpu_data, cpu);
1535         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1536
1537         cpufreq_freq_transition_begin(policy, &freqs);
1538         cpufreq_freq_transition_end(policy, &freqs, 0);
1539 }
1540
1541 /**
1542  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1543  * @cpu: CPU number
1544  *
1545  * This is the last known freq, without actually getting it from the driver.
1546  * The return value is the same as what is shown in scaling_cur_freq in sysfs.
1547  */
1548 unsigned int cpufreq_quick_get(unsigned int cpu)
1549 {
1550         struct cpufreq_policy *policy;
1551         unsigned int ret_freq = 0;
1552
1553         if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1554                 return cpufreq_driver->get(cpu);
1555
1556         policy = cpufreq_cpu_get(cpu);
1557         if (policy) {
1558                 ret_freq = policy->cur;
1559                 cpufreq_cpu_put(policy);
1560         }
1561
1562         return ret_freq;
1563 }
1564 EXPORT_SYMBOL(cpufreq_quick_get);
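/*
 * Example (editor's sketch, not part of this file): a diagnostic helper
 * could sample the cached frequency of every online CPU without calling
 * into the driver; "my_dump_freqs" is an illustrative name.
 */
static void my_dump_freqs(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		pr_info("cpu%u: last known freq %u kHz\n",
			cpu, cpufreq_quick_get(cpu));
}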
1565
1566 /**
1567  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1568  * @cpu: CPU number
1569  *
1570  * Return the maximum frequency allowed by the current policy of the CPU.
1571  */
1572 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1573 {
1574         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1575         unsigned int ret_freq = 0;
1576
1577         if (policy) {
1578                 ret_freq = policy->max;
1579                 cpufreq_cpu_put(policy);
1580         }
1581
1582         return ret_freq;
1583 }
1584 EXPORT_SYMBOL(cpufreq_quick_get_max);
1585
1586 static unsigned int __cpufreq_get(unsigned int cpu)
1587 {
1588         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1589         unsigned int ret_freq = 0;
1590
1591         if (!cpufreq_driver->get)
1592                 return ret_freq;
1593
1594         ret_freq = cpufreq_driver->get(cpu);
1595
1596         if (ret_freq && policy->cur &&
1597                 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1598                 /* verify that no discrepancy between the actual and the
1599                  * saved value exists */
1600                 if (unlikely(ret_freq != policy->cur)) {
1601                         cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1602                         schedule_work(&policy->update);
1603                 }
1604         }
1605
1606         return ret_freq;
1607 }
1608
1609 /**
1610  * cpufreq_get - get the current CPU frequency (in kHz)
1611  * @cpu: CPU number
1612  *
1613  * Get the current frequency of the CPU, as read from the driver.
1614  */
1615 unsigned int cpufreq_get(unsigned int cpu)
1616 {
1617         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1618         unsigned int ret_freq = 0;
1619
1620         if (policy) {
1621                 down_read(&policy->rwsem);
1622                 ret_freq = __cpufreq_get(cpu);
1623                 up_read(&policy->rwsem);
1624
1625                 cpufreq_cpu_put(policy);
1626         }
1627
1628         return ret_freq;
1629 }
1630 EXPORT_SYMBOL(cpufreq_get);
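/*
 * Example (hypothetical): unlike cpufreq_quick_get(), cpufreq_get()
 * takes policy->rwsem and may invoke the driver's ->get() callback, so
 * it can sleep and must not be used in atomic context. A sketch of a
 * consistency check built on both; "my_check_cpu_freq" is illustrative.
 */
static void my_check_cpu_freq(unsigned int cpu)
{
	unsigned int cached = cpufreq_quick_get(cpu);
	unsigned int actual = cpufreq_get(cpu);

	if (cached && actual && cached != actual)
		pr_warn("cpu%u: cached %u kHz, hardware reports %u kHz\n",
			cpu, cached, actual);
}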
1631
1632 static struct subsys_interface cpufreq_interface = {
1633         .name           = "cpufreq",
1634         .subsys         = &cpu_subsys,
1635         .add_dev        = cpufreq_add_dev,
1636         .remove_dev     = cpufreq_remove_dev,
1637 };
1638
1639 /*
1640  * In case the platform wants some specific frequency to be configured
1641  * during suspend.
1642  */
1643 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1644 {
1645         int ret;
1646
1647         if (!policy->suspend_freq) {
1648                 pr_err("%s: suspend_freq can't be zero\n", __func__);
1649                 return -EINVAL;
1650         }
1651
1652         pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1653                         policy->suspend_freq);
1654
1655         ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1656                         CPUFREQ_RELATION_H);
1657         if (ret)
1658                 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1659                                 __func__, policy->suspend_freq, ret);
1660
1661         return ret;
1662 }
1663 EXPORT_SYMBOL(cpufreq_generic_suspend);
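/*
 * Example (hypothetical driver fragment): a platform driver opts into
 * the generic helper by setting policy->suspend_freq from its ->init()
 * callback and pointing ->suspend at cpufreq_generic_suspend().
 * MY_SAFE_SUSPEND_FREQ and the my_* names are illustrative.
 */
#define MY_SAFE_SUSPEND_FREQ	800000	/* kHz */

static int my_cpufreq_init(struct cpufreq_policy *policy)
{
	/* ... clock and frequency-table setup elided ... */
	policy->suspend_freq = MY_SAFE_SUSPEND_FREQ;
	return 0;
}

static struct cpufreq_driver my_suspend_aware_driver = {
	.name		= "my-cpufreq",
	.init		= my_cpufreq_init,
	.suspend	= cpufreq_generic_suspend,
	/* ->verify and ->target_index elided */
};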
1664
1665 /**
1666  * cpufreq_suspend() - Suspend CPUFreq governors
1667  *
1668  * Called during system-wide suspend/hibernate cycles to suspend governors,
1669  * as some platforms can't change frequency after this point in the suspend
1670  * cycle: the devices (e.g. i2c, regulators) they use for changing the
1671  * frequency are themselves suspended shortly afterwards.
1672  */
1673 void cpufreq_suspend(void)
1674 {
1675         struct cpufreq_policy *policy;
1676
1677         if (!cpufreq_driver)
1678                 return;
1679
1680         if (!has_target())
1681                 goto suspend;
1682
1683         pr_debug("%s: Suspending Governors\n", __func__);
1684
1685         list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1686                 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1687                         pr_err("%s: Failed to stop governor for policy: %p\n",
1688                                 __func__, policy);
1689                 else if (cpufreq_driver->suspend
1690                     && cpufreq_driver->suspend(policy))
1691                         pr_err("%s: Failed to suspend driver: %p\n", __func__,
1692                                 policy);
1693         }
1694
1695 suspend:
1696         cpufreq_suspended = true;
1697 }
1698
1699 /**
1700  * cpufreq_resume() - Resume CPUFreq governors
1701  *
1702  * Called during system-wide suspend/hibernate cycles to resume governors that
1703  * are suspended with cpufreq_suspend().
1704  */
1705 void cpufreq_resume(void)
1706 {
1707         struct cpufreq_policy *policy;
1708
1709         if (!cpufreq_driver)
1710                 return;
1711
1712         cpufreq_suspended = false;
1713
1714         if (!has_target())
1715                 return;
1716
1717         pr_debug("%s: Resuming Governors\n", __func__);
1718
1719         list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1720                 if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1721                         pr_err("%s: Failed to resume driver: %p\n", __func__,
1722                                 policy);
1723                 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1724                     || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1725                         pr_err("%s: Failed to start governor for policy: %p\n",
1726                                 __func__, policy);
1727
1728                 /*
1729                  * Schedule a call to cpufreq_update_policy() for the boot CPU,
1730                  * i.e. the last policy in the list. It will verify that the
1731                  * current freq is in sync with what we believe it to be.
1732                  */
1733                 if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1734                         schedule_work(&policy->update);
1735         }
1736 }
1737
1738 /**
1739  *      cpufreq_get_current_driver - return current driver's name
1740  *
1741  *      Return the name string of the currently loaded cpufreq driver
1742  *      or NULL, if none.
1743  */
1744 const char *cpufreq_get_current_driver(void)
1745 {
1746         if (cpufreq_driver)
1747                 return cpufreq_driver->name;
1748
1749         return NULL;
1750 }
1751 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1752
1753 /**
1754  *      cpufreq_get_driver_data - return current driver data
1755  *
1756  *      Return the private data of the currently loaded cpufreq
1757  *      driver, or NULL if no cpufreq driver is loaded.
1758  */
1759 void *cpufreq_get_driver_data(void)
1760 {
1761         if (cpufreq_driver)
1762                 return cpufreq_driver->driver_data;
1763
1764         return NULL;
1765 }
1766 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1767
1768 /*********************************************************************
1769  *                     NOTIFIER LISTS INTERFACE                      *
1770  *********************************************************************/
1771
1772 /**
1773  *      cpufreq_register_notifier - register a driver with cpufreq
1774  *      @nb: notifier function to register
1775  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1776  *
1777  *      Add a driver to one of two lists: either a list of drivers that
1778  *      are notified about clock rate changes (once before and once after
1779  *      the transition), or a list of drivers that are notified about
1780  *      changes in cpufreq policy.
1781  *
1782  *      This function may sleep, and has the same return conditions as
1783  *      blocking_notifier_chain_register.
1784  */
1785 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1786 {
1787         int ret;
1788
1789         if (cpufreq_disabled())
1790                 return -EINVAL;
1791
1792         WARN_ON(!init_cpufreq_transition_notifier_list_called);
1793
1794         switch (list) {
1795         case CPUFREQ_TRANSITION_NOTIFIER:
1796                 ret = srcu_notifier_chain_register(
1797                                 &cpufreq_transition_notifier_list, nb);
1798                 break;
1799         case CPUFREQ_POLICY_NOTIFIER:
1800                 ret = blocking_notifier_chain_register(
1801                                 &cpufreq_policy_notifier_list, nb);
1802                 break;
1803         default:
1804                 ret = -EINVAL;
1805         }
1806
1807         return ret;
1808 }
1809 EXPORT_SYMBOL(cpufreq_register_notifier);
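/*
 * Example (hypothetical): a driver whose device scales with the CPU
 * clock can register a transition notifier. The callback is invoked
 * with CPUFREQ_PRECHANGE before and CPUFREQ_POSTCHANGE after every
 * frequency switch; the my_* names are illustrative.
 */
static int my_transition_cb(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block my_transition_nb = {
	.notifier_call = my_transition_cb,
};

static int __init my_notifier_init(void)
{
	return cpufreq_register_notifier(&my_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}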
1810
1811 /**
1812  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1813  *      @nb: notifier block to be unregistered
1814  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1815  *
1816  *      Remove a driver from the CPU frequency notifier list.
1817  *
1818  *      This function may sleep, and has the same return conditions as
1819  *      blocking_notifier_chain_unregister.
1820  */
1821 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1822 {
1823         int ret;
1824
1825         if (cpufreq_disabled())
1826                 return -EINVAL;
1827
1828         switch (list) {
1829         case CPUFREQ_TRANSITION_NOTIFIER:
1830                 ret = srcu_notifier_chain_unregister(
1831                                 &cpufreq_transition_notifier_list, nb);
1832                 break;
1833         case CPUFREQ_POLICY_NOTIFIER:
1834                 ret = blocking_notifier_chain_unregister(
1835                                 &cpufreq_policy_notifier_list, nb);
1836                 break;
1837         default:
1838                 ret = -EINVAL;
1839         }
1840
1841         return ret;
1842 }
1843 EXPORT_SYMBOL(cpufreq_unregister_notifier);
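/*
 * Example (hypothetical): policy notifiers are typically used to clamp
 * the policy, e.g. by a thermal driver. CPUFREQ_ADJUST is delivered
 * from cpufreq_set_policy() below, and cpufreq_verify_within_limits()
 * narrows the proposed range; "my_cap_khz" is illustrative.
 */
static unsigned int my_cap_khz = 1200000;

static int my_policy_cb(struct notifier_block *nb,
			unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;

	if (val == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(policy, 0, my_cap_khz);

	return NOTIFY_OK;
}

static struct notifier_block my_policy_nb = {
	.notifier_call = my_policy_cb,
};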
1844
1845
1846 /*********************************************************************
1847  *                              GOVERNORS                            *
1848  *********************************************************************/
1849
1850 /* Must set freqs->new to intermediate frequency */
1851 static int __target_intermediate(struct cpufreq_policy *policy,
1852                                  struct cpufreq_freqs *freqs, int index)
1853 {
1854         int ret;
1855
1856         freqs->new = cpufreq_driver->get_intermediate(policy, index);
1857
1858         /* We don't need to switch to intermediate freq */
1859         if (!freqs->new)
1860                 return 0;
1861
1862         pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1863                  __func__, policy->cpu, freqs->old, freqs->new);
1864
1865         cpufreq_freq_transition_begin(policy, freqs);
1866         ret = cpufreq_driver->target_intermediate(policy, index);
1867         cpufreq_freq_transition_end(policy, freqs, ret);
1868
1869         if (ret)
1870                 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1871                        __func__, ret);
1872
1873         return ret;
1874 }
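/*
 * Example (hypothetical driver fragment): the ->get_intermediate() /
 * ->target_intermediate() pair lets a driver park the CPU on a stable
 * clock (e.g. a PLL bypass) while the main PLL relocks. Returning 0
 * from ->get_intermediate() skips the intermediate step entirely.
 * MY_BYPASS_FREQ and the my_* names are illustrative.
 */
#define MY_BYPASS_FREQ	24000	/* kHz */

static unsigned int my_get_intermediate(struct cpufreq_policy *policy,
					unsigned int index)
{
	/* no intermediate step needed when already on the bypass clock */
	if (policy->cur == MY_BYPASS_FREQ)
		return 0;

	return MY_BYPASS_FREQ;
}

static int my_target_intermediate(struct cpufreq_policy *policy,
				  unsigned int index)
{
	/* re-parent the CPU clock to the bypass source here */
	return 0;
}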
1875
1876 static int __target_index(struct cpufreq_policy *policy,
1877                           struct cpufreq_frequency_table *freq_table, int index)
1878 {
1879         struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1880         unsigned int intermediate_freq = 0;
1881         int retval = -EINVAL;
1882         bool notify;
1883
1884         notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1885         if (notify) {
1886                 /* Handle switching to intermediate frequency */
1887                 if (cpufreq_driver->get_intermediate) {
1888                         retval = __target_intermediate(policy, &freqs, index);
1889                         if (retval)
1890                                 return retval;
1891
1892                         intermediate_freq = freqs.new;
1893                         /* Set old freq to intermediate */
1894                         if (intermediate_freq)
1895                                 freqs.old = freqs.new;
1896                 }
1897
1898                 freqs.new = freq_table[index].frequency;
1899                 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1900                          __func__, policy->cpu, freqs.old, freqs.new);
1901
1902                 cpufreq_freq_transition_begin(policy, &freqs);
1903         }
1904
1905         retval = cpufreq_driver->target_index(policy, index);
1906         if (retval)
1907                 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1908                        retval);
1909
1910         if (notify) {
1911                 cpufreq_freq_transition_end(policy, &freqs, retval);
1912
1913                 /*
1914                  * Failed after switching to the intermediate freq? The driver
1915                  * should have reverted to the initial frequency and so should
1916                  * we. Check for intermediate_freq instead of get_intermediate,
1917                  * in case we haven't switched to the intermediate freq at all.
1918                  */
1919                 if (unlikely(retval && intermediate_freq)) {
1920                         freqs.old = intermediate_freq;
1921                         freqs.new = policy->restore_freq;
1922                         cpufreq_freq_transition_begin(policy, &freqs);
1923                         cpufreq_freq_transition_end(policy, &freqs, 0);
1924                 }
1925         }
1926
1927         return retval;
1928 }
1929
1930 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1931                             unsigned int target_freq,
1932                             unsigned int relation)
1933 {
1934         unsigned int old_target_freq = target_freq;
1935         int retval = -EINVAL;
1936
1937         if (cpufreq_disabled())
1938                 return -ENODEV;
1939
1940         /* Make sure that target_freq is within supported range */
1941         if (target_freq > policy->max)
1942                 target_freq = policy->max;
1943         if (target_freq < policy->min)
1944                 target_freq = policy->min;
1945
1946         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1947                  policy->cpu, target_freq, relation, old_target_freq);
1948
1949         /*
1950          * This might look like a redundant call as we are checking it again
1951          * after finding the index. But it is kept intentionally for the case
1952          * where exactly the same freq is requested again, so that we can
1953          * save a few function calls.
1954          */
1955         if (target_freq == policy->cur)
1956                 return 0;
1957
1958         /* Save last value to restore later on errors */
1959         policy->restore_freq = policy->cur;
1960
1961         if (cpufreq_driver->target)
1962                 retval = cpufreq_driver->target(policy, target_freq, relation);
1963         else if (cpufreq_driver->target_index) {
1964                 struct cpufreq_frequency_table *freq_table;
1965                 int index;
1966
1967                 freq_table = cpufreq_frequency_get_table(policy->cpu);
1968                 if (unlikely(!freq_table)) {
1969                         pr_err("%s: Unable to find freq_table\n", __func__);
1970                         goto out;
1971                 }
1972
1973                 retval = cpufreq_frequency_table_target(policy, freq_table,
1974                                 target_freq, relation, &index);
1975                 if (unlikely(retval)) {
1976                         pr_err("%s: Unable to find matching freq\n", __func__);
1977                         goto out;
1978                 }
1979
1980                 if (freq_table[index].frequency == policy->cur) {
1981                         retval = 0;
1982                         goto out;
1983                 }
1984
1985                 retval = __target_index(policy, freq_table, index);
1986         }
1987
1988 out:
1989         return retval;
1990 }
1991 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1992
1993 int cpufreq_driver_target(struct cpufreq_policy *policy,
1994                           unsigned int target_freq,
1995                           unsigned int relation)
1996 {
1997         int ret = -EINVAL;
1998
1999         down_write(&policy->rwsem);
2000
2001         ret = __cpufreq_driver_target(policy, target_freq, relation);
2002
2003         up_write(&policy->rwsem);
2004
2005         return ret;
2006 }
2007 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
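/*
 * Example (hypothetical): code outside the governor path uses this
 * locked wrapper, while governors, which already run under
 * policy->rwsem, call __cpufreq_driver_target() directly.
 * CPUFREQ_RELATION_L selects the lowest frequency at or above the
 * target; CPUFREQ_RELATION_H the highest at or below it.
 */
static int my_pin_to_min(struct cpufreq_policy *policy)
{
	return cpufreq_driver_target(policy, policy->min,
				     CPUFREQ_RELATION_L);
}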
2008
2009 /*
2010  * Notify the policy's governor of an event such as CPUFREQ_GOV_LIMITS.
2011  */
2012
2013 static int __cpufreq_governor(struct cpufreq_policy *policy,
2014                                         unsigned int event)
2015 {
2016         int ret;
2017
2018         /* The fallback governor only needs to be defined when the default
2019            governor is known to have latency restrictions, e.g. conservative
2020            or ondemand. Kconfig already ensures that this is the case.
2021         */
2022 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2023         struct cpufreq_governor *gov = &cpufreq_gov_performance;
2024 #else
2025         struct cpufreq_governor *gov = NULL;
2026 #endif
2027
2028         /* Don't start any governor operations if we are entering suspend */
2029         if (cpufreq_suspended)
2030                 return 0;
2031         /*
2032          * The governor might not be initialized here if an ACPI _PPC change
2033          * notification happened, so check for it.
2034          */
2035         if (!policy->governor)
2036                 return -EINVAL;
2037
2038         if (policy->governor->max_transition_latency &&
2039             policy->cpuinfo.transition_latency >
2040             policy->governor->max_transition_latency) {
2041                 if (!gov)
2042                         return -EINVAL;
2043                 else {
2044                         pr_warn("%s governor failed: HW transition latency too long, falling back to %s governor\n",
2045                                 policy->governor->name, gov->name);
2046                         policy->governor = gov;
2047                 }
2048         }
2049
2050         if (event == CPUFREQ_GOV_POLICY_INIT)
2051                 if (!try_module_get(policy->governor->owner))
2052                         return -EINVAL;
2053
2054         pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2055                  policy->cpu, event);
2056
2057         mutex_lock(&cpufreq_governor_lock);
2058         if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2059             || (!policy->governor_enabled
2060             && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2061                 mutex_unlock(&cpufreq_governor_lock);
2062                 return -EBUSY;
2063         }
2064
2065         if (event == CPUFREQ_GOV_STOP)
2066                 policy->governor_enabled = false;
2067         else if (event == CPUFREQ_GOV_START)
2068                 policy->governor_enabled = true;
2069
2070         mutex_unlock(&cpufreq_governor_lock);
2071
2072         ret = policy->governor->governor(policy, event);
2073
2074         if (!ret) {
2075                 if (event == CPUFREQ_GOV_POLICY_INIT)
2076                         policy->governor->initialized++;
2077                 else if (event == CPUFREQ_GOV_POLICY_EXIT)
2078                         policy->governor->initialized--;
2079         } else {
2080                 /* Restore original values */
2081                 mutex_lock(&cpufreq_governor_lock);
2082                 if (event == CPUFREQ_GOV_STOP)
2083                         policy->governor_enabled = true;
2084                 else if (event == CPUFREQ_GOV_START)
2085                         policy->governor_enabled = false;
2086                 mutex_unlock(&cpufreq_governor_lock);
2087         }
2088
2089         if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2090                         ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2091                 module_put(policy->governor->owner);
2092
2093         return ret;
2094 }
2095
2096 int cpufreq_register_governor(struct cpufreq_governor *governor)
2097 {
2098         int err;
2099
2100         if (!governor)
2101                 return -EINVAL;
2102
2103         if (cpufreq_disabled())
2104                 return -ENODEV;
2105
2106         mutex_lock(&cpufreq_governor_mutex);
2107
2108         governor->initialized = 0;
2109         err = -EBUSY;
2110         if (__find_governor(governor->name) == NULL) {
2111                 err = 0;
2112                 list_add(&governor->governor_list, &cpufreq_governor_list);
2113         }
2114
2115         mutex_unlock(&cpufreq_governor_mutex);
2116         return err;
2117 }
2118 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
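/*
 * Example (hypothetical, heavily abbreviated): a governor is a named
 * object with one ->governor() callback multiplexed on the event
 * argument. Real governors also handle POLICY_INIT/EXIT and STOP and
 * keep per-policy state; the my_* names are illustrative.
 */
static int my_governor_fn(struct cpufreq_policy *policy, unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* pin the CPU to the lowest allowed frequency */
		__cpufreq_driver_target(policy, policy->min,
					CPUFREQ_RELATION_L);
		break;
	default:
		break;
	}
	return 0;
}

static struct cpufreq_governor my_governor = {
	.name		= "my_min",
	.governor	= my_governor_fn,
	.owner		= THIS_MODULE,
};

/* module init would call cpufreq_register_governor(&my_governor); */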
2119
2120 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2121 {
2122         int cpu;
2123
2124         if (!governor)
2125                 return;
2126
2127         if (cpufreq_disabled())
2128                 return;
2129
2130         for_each_present_cpu(cpu) {
2131                 if (cpu_online(cpu))
2132                         continue;
2133                 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2134                         strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2135         }
2136
2137         mutex_lock(&cpufreq_governor_mutex);
2138         list_del(&governor->governor_list);
2139         mutex_unlock(&cpufreq_governor_mutex);
2140         return;
2141 }
2142 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2143
2144
2145 /*********************************************************************
2146  *                          POLICY INTERFACE                         *
2147  *********************************************************************/
2148
2149 /**
2150  * cpufreq_get_policy - get the current cpufreq_policy
2151  * @policy: struct cpufreq_policy into which the current policy is written
2152  * @cpu: CPU whose policy is requested
2153  *
2154  * Reads the current cpufreq policy.
2155  */
2156 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2157 {
2158         struct cpufreq_policy *cpu_policy;
2159         if (!policy)
2160                 return -EINVAL;
2161
2162         cpu_policy = cpufreq_cpu_get(cpu);
2163         if (!cpu_policy)
2164                 return -EINVAL;
2165
2166         memcpy(policy, cpu_policy, sizeof(*policy));
2167
2168         cpufreq_cpu_put(cpu_policy);
2169         return 0;
2170 }
2171 EXPORT_SYMBOL(cpufreq_get_policy);
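/*
 * Example (hypothetical): callers receive a *copy* of the policy, so
 * it can be inspected without holding any cpufreq locks afterwards;
 * "my_report_limits" is illustrative.
 */
static void my_report_limits(unsigned int cpu)
{
	struct cpufreq_policy pol;

	if (cpufreq_get_policy(&pol, cpu))
		return;

	pr_info("cpu%u: %u - %u kHz, governor %s\n", cpu, pol.min, pol.max,
		pol.governor ? pol.governor->name : "none");
}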
2172
2173 /*
2174  * policy: current policy.
2175  * new_policy: policy to be set.
2176  */
2177 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2178                                 struct cpufreq_policy *new_policy)
2179 {
2180         struct cpufreq_governor *old_gov;
2181         int ret;
2182
2183         pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2184                  new_policy->cpu, new_policy->min, new_policy->max);
2185
2186         memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2187
2188         if (new_policy->min > policy->max || new_policy->max < policy->min)
2189                 return -EINVAL;
2190
2191         /* verify the cpu speed can be set within this limit */
2192         ret = cpufreq_driver->verify(new_policy);
2193         if (ret)
2194                 return ret;
2195
2196         /* adjust if necessary - all reasons */
2197         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2198                         CPUFREQ_ADJUST, new_policy);
2199
2200         /* adjust if necessary - hardware incompatibility */
2201         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2202                         CPUFREQ_INCOMPATIBLE, new_policy);
2203
2204         /*
2205          * verify the cpu speed can be set within this limit, which might be
2206  * different from the first one
2207          */
2208         ret = cpufreq_driver->verify(new_policy);
2209         if (ret)
2210                 return ret;
2211
2212         /* notification of the new policy */
2213         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2214                         CPUFREQ_NOTIFY, new_policy);
2215
2216         policy->min = new_policy->min;
2217         policy->max = new_policy->max;
2218
2219         pr_debug("new min and max freqs are %u - %u kHz\n",
2220                  policy->min, policy->max);
2221
2222         if (cpufreq_driver->setpolicy) {
2223                 policy->policy = new_policy->policy;
2224                 pr_debug("setting range\n");
2225                 return cpufreq_driver->setpolicy(new_policy);
2226         }
2227
2228         if (new_policy->governor == policy->governor)
2229                 goto out;
2230
2231         pr_debug("governor switch\n");
2232
2233         /* save old, working values */
2234         old_gov = policy->governor;
2235         /* end old governor */
2236         if (old_gov) {
2237                 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2238                 up_write(&policy->rwsem);
2239                 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2240                 down_write(&policy->rwsem);
2241         }
2242
2243         /* start new governor */
2244         policy->governor = new_policy->governor;
2245         if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2246                 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2247                         goto out;
2248
2249                 up_write(&policy->rwsem);
2250                 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2251                 down_write(&policy->rwsem);
2252         }
2253
2254         /* new governor failed, so re-start old one */
2255         pr_debug("starting governor %s failed\n", policy->governor->name);
2256         if (old_gov) {
2257                 policy->governor = old_gov;
2258                 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2259                 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2260         }
2261
2262         return -EINVAL;
2263
2264  out:
2265         pr_debug("governor: change or update limits\n");
2266         return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2267 }
2268
2269 /**
2270  *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
2271  *      @cpu: CPU which shall be re-evaluated
2272  *
2273  *      Useful for policy notifiers which have different necessities
2274  *      at different times.
2275  */
2276 int cpufreq_update_policy(unsigned int cpu)
2277 {
2278         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2279         struct cpufreq_policy new_policy;
2280         int ret;
2281
2282         if (!policy)
2283                 return -ENODEV;
2284
2285         down_write(&policy->rwsem);
2286
2287         pr_debug("updating policy for CPU %u\n", cpu);
2288         memcpy(&new_policy, policy, sizeof(*policy));
2289         new_policy.min = policy->user_policy.min;
2290         new_policy.max = policy->user_policy.max;
2291         new_policy.policy = policy->user_policy.policy;
2292         new_policy.governor = policy->user_policy.governor;
2293
2294         /*
2295          * BIOS might change freq behind our back
2296          * -> ask driver for current freq and notify governors about a change
2297          */
2298         if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2299                 new_policy.cur = cpufreq_driver->get(cpu);
2300                 if (WARN_ON(!new_policy.cur)) {
2301                         ret = -EIO;
2302                         goto unlock;
2303                 }
2304
2305                 if (!policy->cur) {
2306                         pr_debug("Driver did not initialize current freq\n");
2307                         policy->cur = new_policy.cur;
2308                 } else {
2309                         if (policy->cur != new_policy.cur && has_target())
2310                                 cpufreq_out_of_sync(cpu, policy->cur,
2311                                                                 new_policy.cur);
2312                 }
2313         }
2314
2315         ret = cpufreq_set_policy(policy, &new_policy);
2316
2317 unlock:
2318         up_write(&policy->rwsem);
2319
2320         cpufreq_cpu_put(policy);
2321         return ret;
2322 }
2323 EXPORT_SYMBOL(cpufreq_update_policy);
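/*
 * Example (hypothetical): platform code that learns of a new firmware
 * limit (e.g. an ACPI _PPC change notification) re-evaluates the policy
 * so that the notifier chain and the governor see the new constraint;
 * "my_firmware_limit_changed" is illustrative.
 */
static void my_firmware_limit_changed(unsigned int cpu)
{
	cpufreq_update_policy(cpu);
}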
2324
2325 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2326                                         unsigned long action, void *hcpu)
2327 {
2328         unsigned int cpu = (unsigned long)hcpu;
2329         struct device *dev;
2330
2331         dev = get_cpu_device(cpu);
2332         if (dev) {
2333                 switch (action & ~CPU_TASKS_FROZEN) {
2334                 case CPU_ONLINE:
2335                         __cpufreq_add_dev(dev, NULL);
2336                         break;
2337
2338                 case CPU_DOWN_PREPARE:
2339                         __cpufreq_remove_dev_prepare(dev, NULL);
2340                         break;
2341
2342                 case CPU_POST_DEAD:
2343                         __cpufreq_remove_dev_finish(dev, NULL);
2344                         break;
2345
2346                 case CPU_DOWN_FAILED:
2347                         __cpufreq_add_dev(dev, NULL);
2348                         break;
2349                 }
2350         }
2351         return NOTIFY_OK;
2352 }
2353
2354 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2355         .notifier_call = cpufreq_cpu_callback,
2356 };
2357
2358 /*********************************************************************
2359  *               BOOST                                               *
2360  *********************************************************************/
2361 static int cpufreq_boost_set_sw(int state)
2362 {
2363         struct cpufreq_frequency_table *freq_table;
2364         struct cpufreq_policy *policy;
2365         int ret = -EINVAL;
2366
2367         list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2368                 freq_table = cpufreq_frequency_get_table(policy->cpu);
2369                 if (freq_table) {
2370                         ret = cpufreq_frequency_table_cpuinfo(policy,
2371                                                         freq_table);
2372                         if (ret) {
2373                                 pr_err("%s: Policy frequency update failed\n",
2374                                        __func__);
2375                                 break;
2376                         }
2377                         policy->user_policy.max = policy->max;
2378                         __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2379                 }
2380         }
2381
2382         return ret;
2383 }
2384
2385 int cpufreq_boost_trigger_state(int state)
2386 {
2387         unsigned long flags;
2388         int ret = 0;
2389
2390         if (cpufreq_driver->boost_enabled == state)
2391                 return 0;
2392
2393         write_lock_irqsave(&cpufreq_driver_lock, flags);
2394         cpufreq_driver->boost_enabled = state;
2395         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2396
2397         ret = cpufreq_driver->set_boost(state);
2398         if (ret) {
2399                 write_lock_irqsave(&cpufreq_driver_lock, flags);
2400                 cpufreq_driver->boost_enabled = !state;
2401                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2402
2403                 pr_err("%s: Cannot %s BOOST\n",
2404                        __func__, state ? "enable" : "disable");
2405         }
2406
2407         return ret;
2408 }
2409
2410 int cpufreq_boost_supported(void)
2411 {
2412         if (likely(cpufreq_driver))
2413                 return cpufreq_driver->boost_supported;
2414
2415         return 0;
2416 }
2417 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2418
2419 int cpufreq_boost_enabled(void)
2420 {
2421         return cpufreq_driver->boost_enabled;
2422 }
2423 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
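/*
 * Example (hypothetical driver fragment): a driver advertises boost
 * support via .boost_supported; if it leaves .set_boost NULL, the core
 * falls back to cpufreq_boost_set_sw() at registration time (see
 * cpufreq_register_driver() below).
 */
static struct cpufreq_driver my_boost_driver = {
	.name		 = "my-boost-cpufreq",
	.boost_supported = true,
	/* mandatory ->verify/->init/->target_index callbacks elided */
};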
2424
2425 /*********************************************************************
2426  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2427  *********************************************************************/
2428
2429 /**
2430  * cpufreq_register_driver - register a CPU Frequency driver
2431  * @driver_data: A struct cpufreq_driver containing the values
2432  * submitted by the CPU Frequency driver.
2433  *
2434  * Registers a CPU Frequency driver to this core code. This code
2435  * returns zero on success, -EEXIST when another driver is already
2436  * registered (and wasn't unregistered in the meantime).
2437  *
2438  */
2439 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2440 {
2441         unsigned long flags;
2442         int ret;
2443
2444         if (cpufreq_disabled())
2445                 return -ENODEV;
2446
2447         if (!driver_data || !driver_data->verify || !driver_data->init ||
2448             !(driver_data->setpolicy || driver_data->target_index ||
2449                     driver_data->target) ||
2450              (driver_data->setpolicy && (driver_data->target_index ||
2451                     driver_data->target)) ||
2452              (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2453                 return -EINVAL;
2454
2455         pr_debug("trying to register driver %s\n", driver_data->name);
2456
2457         if (driver_data->setpolicy)
2458                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2459
2460         write_lock_irqsave(&cpufreq_driver_lock, flags);
2461         if (cpufreq_driver) {
2462                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2463                 return -EEXIST;
2464         }
2465         cpufreq_driver = driver_data;
2466         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2467
2468         if (cpufreq_boost_supported()) {
2469                 /*
2470                  * Check if driver provides function to enable boost -
2471                  * if not, use cpufreq_boost_set_sw as default
2472                  */
2473                 if (!cpufreq_driver->set_boost)
2474                         cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2475
2476                 ret = cpufreq_sysfs_create_file(&boost.attr);
2477                 if (ret) {
2478                         pr_err("%s: cannot register global BOOST sysfs file\n",
2479                                __func__);
2480                         goto err_null_driver;
2481                 }
2482         }
2483
2484         ret = subsys_interface_register(&cpufreq_interface);
2485         if (ret)
2486                 goto err_boost_unreg;
2487
2488         if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2489                 int i;
2490                 ret = -ENODEV;
2491
2492                 /* check for at least one working CPU */
2493                 for (i = 0; i < nr_cpu_ids; i++)
2494                         if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2495                                 ret = 0;
2496                                 break;
2497                         }
2498
2499                 /* if all ->init() calls failed, unregister */
2500                 if (ret) {
2501                         pr_debug("no CPU initialized for driver %s\n",
2502                                  driver_data->name);
2503                         goto err_if_unreg;
2504                 }
2505         }
2506
2507         register_hotcpu_notifier(&cpufreq_cpu_notifier);
2508         pr_debug("driver %s up and running\n", driver_data->name);
2509
2510         return 0;
2511 err_if_unreg:
2512         subsys_interface_unregister(&cpufreq_interface);
2513 err_boost_unreg:
2514         if (cpufreq_boost_supported())
2515                 cpufreq_sysfs_remove_file(&boost.attr);
2516 err_null_driver:
2517         write_lock_irqsave(&cpufreq_driver_lock, flags);
2518         cpufreq_driver = NULL;
2519         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2520         return ret;
2521 }
2522 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
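/*
 * Example (hypothetical, abbreviated): the sanity checks above demand
 * ->verify and ->init plus exactly one of ->setpolicy or
 * ->target/->target_index. A minimal table-based skeleton, with
 * illustrative my_* names and frequencies:
 */
static struct cpufreq_frequency_table my_table[] = {
	{ .frequency = 400000 },
	{ .frequency = 800000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int my_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.transition_latency = 300 * 1000;	/* ns */
	return cpufreq_table_validate_and_show(policy, my_table);
}

static int my_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* program PLLs/dividers for my_table[index].frequency here */
	return 0;
}

static struct cpufreq_driver my_driver = {
	.name		= "my-cpufreq",
	.verify		= cpufreq_generic_frequency_table_verify,
	.init		= my_init,
	.target_index	= my_target_index,
};

/* module init would call cpufreq_register_driver(&my_driver); */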
2523
2524 /**
2525  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2526  *
2527  * Unregister the current CPUFreq driver. Only call this if you have
2528  * the right to do so, i.e. if you have succeeded in initialising before!
2529  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2530  * currently not initialised.
2531  */
2532 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2533 {
2534         unsigned long flags;
2535
2536         if (!cpufreq_driver || (driver != cpufreq_driver))
2537                 return -EINVAL;
2538
2539         pr_debug("unregistering driver %s\n", driver->name);
2540
2541         subsys_interface_unregister(&cpufreq_interface);
2542         if (cpufreq_boost_supported())
2543                 cpufreq_sysfs_remove_file(&boost.attr);
2544
2545         unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2546
2547         down_write(&cpufreq_rwsem);
2548         write_lock_irqsave(&cpufreq_driver_lock, flags);
2549
2550         cpufreq_driver = NULL;
2551
2552         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2553         up_write(&cpufreq_rwsem);
2554
2555         return 0;
2556 }
2557 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2558
2559 static int __init cpufreq_core_init(void)
2560 {
2561         if (cpufreq_disabled())
2562                 return -ENODEV;
2563
2564         cpufreq_global_kobject = kobject_create();
2565         BUG_ON(!cpufreq_global_kobject);
2566
2567         return 0;
2568 }
2569 core_initcall(cpufreq_core_init);