2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
35 * The "cpufreq driver" - the arch- or hardware-dependent low
36 * level driver of CPUFreq support, and its spinlock. This lock
37 * also protects the cpufreq_cpu_data array.
39 static struct cpufreq_driver *cpufreq_driver;
/* Per-CPU pointer to the policy currently managing that CPU (NULL if none). */
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
/* Stashed policy used to restore state across suspend/resume light tear-down. */
41 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
42 static DEFINE_RWLOCK(cpufreq_driver_lock);
/* Not static: serializes governor state transitions across the subsystem. */
43 DEFINE_MUTEX(cpufreq_governor_lock);
/* List of all active policies; walked when hot-adding a sibling CPU. */
44 static LIST_HEAD(cpufreq_policy_list);
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
49 /* Flag to suspend/resume CPUFreq governors */
50 static bool cpufreq_suspended;
52 static inline bool has_target(void)
54 return cpufreq_driver->target_index || cpufreq_driver->target;
58 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
61 static DECLARE_RWSEM(cpufreq_rwsem);
63 /* internal prototypes */
64 static int __cpufreq_governor(struct cpufreq_policy *policy,
66 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
67 static void handle_update(struct work_struct *work);
70 * Two notifier lists: the "policy" list is involved in the
71 * validation process for a new CPU frequency policy; the
72 * "transition" list for kernel code that needs to handle
73 * changes to devices when the CPU clock speed changes.
74 * The mutex locks both lists.
76 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
77 static struct srcu_notifier_head cpufreq_transition_notifier_list;
79 static bool init_cpufreq_transition_notifier_list_called;
80 static int __init init_cpufreq_transition_notifier_list(void)
82 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
83 init_cpufreq_transition_notifier_list_called = true;
86 pure_initcall(init_cpufreq_transition_notifier_list);
88 static int off __read_mostly;
89 static int cpufreq_disabled(void)
93 void disable_cpufreq(void)
97 static LIST_HEAD(cpufreq_governor_list);
98 static DEFINE_MUTEX(cpufreq_governor_mutex);
100 bool have_governor_per_policy(void)
102 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
104 EXPORT_SYMBOL_GPL(have_governor_per_policy);
106 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
108 if (have_governor_per_policy())
109 return &policy->kobj;
111 return cpufreq_global_kobject;
113 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
/*
 * get_cpu_idle_time_jiffy - derive idle time for @cpu from jiffies-based
 * cpustat accounting; wall time (usecs) is returned through @wall.
 * NOTE(review): extraction dropped lines here (local declarations of
 * cur_wall_time/busy_time/idle_time, braces, and possibly an "if (wall)"
 * guard) — confirm against the full file.
 */
115 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
121 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
/* Busy time = sum of every non-idle cpustat bucket for this CPU. */
123 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
128 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
/* Idle is whatever wall time was not accounted as busy. */
130 idle_time = cur_wall_time - busy_time;
/* Report wall-clock time in microseconds via the out-parameter. */
132 *wall = cputime_to_usecs(cur_wall_time);
134 return cputime_to_usecs(idle_time);
/*
 * get_cpu_idle_time - idle time of @cpu in usecs; @wall receives the wall
 * time. @io_busy selects whether iowait counts as busy.
 */
137 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
139 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
/* -1ULL: NO_HZ idle accounting unavailable — fall back to jiffies stats. */
141 if (idle_time == -1ULL)
142 return get_cpu_idle_time_jiffy(cpu, wall);
/*
 * NOTE(review): a dropped line likely guards the next statement with
 * "else if (!io_busy)" (iowait counts as idle only then) — confirm.
 */
144 idle_time += get_cpu_iowait_time_us(cpu, wall);
148 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
151 * This is a generic cpufreq init() routine which can be used by cpufreq
152 * drivers of SMP systems. It will do following:
153 * - validate & show freq table passed
154 * - set policies transition latency
155 * - policy->cpus with all possible CPUs
157 int cpufreq_generic_init(struct cpufreq_policy *policy,
158 struct cpufreq_frequency_table *table,
159 unsigned int transition_latency)
163 ret = cpufreq_table_validate_and_show(policy, table);
/* NOTE(review): a dropped line presumably tests ret and returns it here. */
165 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
169 policy->cpuinfo.transition_latency = transition_latency;
172 * The driver only supports the SMP configuration where all processors
173 * share the same clock and voltage.
/* All possible CPUs share one policy, hence one cpumask covering them all. */
175 cpumask_setall(policy->cpus);
179 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
/*
 * cpufreq_generic_get - generic ->get() callback: read the current
 * frequency of @cpu from its clk handle, in kHz.
 */
181 unsigned int cpufreq_generic_get(unsigned int cpu)
183 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
/* Bail out when either the policy or its clk handle is missing/broken. */
185 if (!policy || IS_ERR(policy->clk)) {
186 pr_err("%s: No %s associated to cpu: %d\n",
187 __func__, policy ? "clk" : "policy", cpu);
/* NOTE(review): a dropped line presumably returns 0 here — confirm. */
/* clk_get_rate() reports Hz; cpufreq works in kHz. */
191 return clk_get_rate(policy->clk) / 1000;
193 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
195 /* Only for cpufreq core internal use */
196 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
198 return per_cpu(cpufreq_cpu_data, cpu);
/*
 * cpufreq_cpu_get - get a referenced policy for @cpu, or NULL.
 * Takes cpufreq_rwsem (released only on failure) and a kobject reference
 * on the policy; pair with cpufreq_cpu_put().
 * NOTE(review): several lines were dropped by extraction (flags
 * declaration, NULL-policy check around kobject_get, error-path
 * unlock/return layout) — confirm against the full file.
 */
201 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
203 struct cpufreq_policy *policy = NULL;
206 if (cpu >= nr_cpu_ids)
/* Driver module may be unloading; refuse rather than block. */
209 if (!down_read_trylock(&cpufreq_rwsem))
212 /* get the cpufreq driver */
213 read_lock_irqsave(&cpufreq_driver_lock, flags);
215 if (cpufreq_driver) {
217 policy = per_cpu(cpufreq_cpu_data, cpu);
/* Pin the policy so it cannot be freed while the caller uses it. */
219 kobject_get(&policy->kobj);
222 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* On failure only: drop the rwsem taken above. */
225 up_read(&cpufreq_rwsem);
229 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/*
 * cpufreq_cpu_put - release the references taken by a successful
 * cpufreq_cpu_get(): the policy kobject ref and cpufreq_rwsem.
 */
231 void cpufreq_cpu_put(struct cpufreq_policy *policy)
233 kobject_put(&policy->kobj);
234 up_read(&cpufreq_rwsem);
236 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
238 /*********************************************************************
239 *                     EXTERNALLY AFFECTING FREQUENCY CHANGES        *
240 *********************************************************************/
243 * adjust_jiffies - adjust the system "loops_per_jiffy"
245 * This function alters the system "loops_per_jiffy" for the clock
246 * speed change. Note that loops_per_jiffy cannot be updated on SMP
247 * systems as each CPU might be scaled differently. So, use the arch
248 * per-CPU loops_per_jiffy value wherever possible.
250 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
/* Reference lpj/freq captured once, on the first transition ever seen. */
253 static unsigned long l_p_j_ref;
254 static unsigned int l_p_j_ref_freq;
/* Drivers with constant loops (e.g. TSC-invariant) need no adjustment. */
256 if (ci->flags & CPUFREQ_CONST_LOOPS)
259 if (!l_p_j_ref_freq) {
260 l_p_j_ref = loops_per_jiffy;
261 l_p_j_ref_freq = ci->old;
262 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
263 l_p_j_ref, l_p_j_ref_freq);
/* Rescale only after the change has actually happened (POSTCHANGE). */
265 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
266 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
268 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
269 loops_per_jiffy, ci->new);
/*
 * __cpufreq_notify_transition - notify one CPU's listeners about a
 * frequency transition @state (PRECHANGE or POSTCHANGE) and keep
 * loops_per_jiffy and policy->cur in sync.
 * NOTE(review): extraction dropped the switch statement scaffolding
 * (switch/case braces, breaks) around the two cases below — confirm.
 */
274 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
275 struct cpufreq_freqs *freqs, unsigned int state)
/* Notifier chains and adjust_jiffies() must run in process context. */
277 BUG_ON(irqs_disabled());
279 if (cpufreq_disabled())
282 freqs->flags = cpufreq_driver->flags;
283 pr_debug("notification %u of frequency transition to %u kHz\n",
288 case CPUFREQ_PRECHANGE:
289 /* detect if the driver reported a value as "old frequency"
290 * which is not equal to what the cpufreq core thinks is
293 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
294 if ((policy) && (policy->cpu == freqs->cpu) &&
295 (policy->cur) && (policy->cur != freqs->old)) {
296 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
297 freqs->old, policy->cur);
/* Trust the core's view of the old frequency over the driver's. */
298 freqs->old = policy->cur;
301 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
302 CPUFREQ_PRECHANGE, freqs);
303 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
306 case CPUFREQ_POSTCHANGE:
307 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
308 pr_debug("FREQ: %lu - CPU: %lu\n",
309 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
310 trace_cpu_frequency(freqs->new, freqs->cpu);
311 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
312 CPUFREQ_POSTCHANGE, freqs);
/* Record the new frequency only for the policy's nominated CPU. */
313 if (likely(policy) && likely(policy->cpu == freqs->cpu))
314 policy->cur = freqs->new;
320 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
321 * on frequency transition.
323 * This function calls the transition notifiers and the "adjust_jiffies"
324 * function. It is called twice on all CPU frequency changes that have
327 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
328 struct cpufreq_freqs *freqs, unsigned int state)
/* Notify once per CPU in the policy; freqs->cpu identifies each one. */
330 for_each_cpu(freqs->cpu, policy->cpus)
331 __cpufreq_notify_transition(policy, freqs, state);
334 /* Do post notifications when there are chances that transition has failed */
335 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
336 struct cpufreq_freqs *freqs, int transition_failed)
338 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
/* NOTE(review): a dropped line presumably returns here on success. */
339 if (!transition_failed)
/*
 * The transition failed: announce a "reverse" transition back to the
 * old frequency so listeners end up with a consistent view.
 */
342 swap(freqs->old, freqs->new);
343 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
344 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
/*
 * cpufreq_freq_transition_begin - mark a transition in flight (serialized
 * per policy via transition_lock/transition_wait) and send PRECHANGE
 * notifications. Must be paired with cpufreq_freq_transition_end().
 * NOTE(review): the original has a "wait:" label before the wait_event()
 * and a "goto wait" in the contended branch; those lines were dropped
 * by extraction — confirm against the full file.
 */
347 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
348 struct cpufreq_freqs *freqs)
352 * Catch double invocations of _begin() which lead to self-deadlock.
353 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
354 * doesn't invoke _begin() on their behalf, and hence the chances of
355 * double invocations are very low. Moreover, there are scenarios
356 * where these checks can emit false-positive warnings in these
357 * drivers; so we avoid that by skipping them altogether.
359 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
360 && current == policy->transition_task);
/* Block until any in-flight transition on this policy completes. */
363 wait_event(policy->transition_wait, !policy->transition_ongoing);
365 spin_lock(&policy->transition_lock);
/* Re-check under the lock: another task may have won the race. */
367 if (unlikely(policy->transition_ongoing)) {
368 spin_unlock(&policy->transition_lock);
/* Record ownership so _end() and the WARN_ON above can verify it. */
372 policy->transition_ongoing = true;
373 policy->transition_task = current;
375 spin_unlock(&policy->transition_lock);
377 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
379 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
/*
 * cpufreq_freq_transition_end - finish a transition started by
 * cpufreq_freq_transition_begin(): send POSTCHANGE (or failure rollback)
 * notifications, clear ownership and wake any waiter.
 */
381 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
382 struct cpufreq_freqs *freqs, int transition_failed)
/* Unbalanced call: nothing to end. NOTE(review): dropped line returns. */
384 if (unlikely(WARN_ON(!policy->transition_ongoing)))
387 cpufreq_notify_post_transition(policy, freqs, transition_failed);
389 policy->transition_ongoing = false;
390 policy->transition_task = NULL;
/* Let the next transition (waiting in _begin()) proceed. */
392 wake_up(&policy->transition_wait);
394 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
397 /*********************************************************************
399 *********************************************************************/
/* Global sysfs "boost" attribute: show current boost-enabled state. */
400 static ssize_t show_boost(struct kobject *kobj,
401 struct attribute *attr, char *buf)
403 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
/*
 * Parse "0"/"1" and toggle boost accordingly.
 * NOTE(review): dropped lines include the ret/enable declarations and
 * the error/count returns — confirm against the full file.
 */
406 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
407 const char *buf, size_t count)
411 ret = sscanf(buf, "%d", &enable);
/* Accept exactly one integer that is 0 or 1. */
412 if (ret != 1 || enable < 0 || enable > 1)
415 if (cpufreq_boost_trigger_state(enable)) {
416 pr_err("%s: Cannot %s BOOST!\n",
417 __func__, enable ? "enable" : "disable");
421 pr_debug("%s: cpufreq BOOST %s\n",
422 __func__, enable ? "enabled" : "disabled");
426 define_one_global_rw(boost);
/*
 * find_governor - look up a registered governor by (case-insensitive)
 * name. Caller must hold cpufreq_governor_mutex.
 * NOTE(review): the dropped tail returns the matching entry, and NULL
 * when no governor matches — confirm against the full file.
 */
428 static struct cpufreq_governor *find_governor(const char *str_governor)
430 struct cpufreq_governor *t;
432 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
433 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
440 * cpufreq_parse_governor - parse a governor string
442 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
443 struct cpufreq_governor **governor)
450 if (cpufreq_driver->setpolicy) {
451 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
452 *policy = CPUFREQ_POLICY_PERFORMANCE;
454 } else if (!strncasecmp(str_governor, "powersave",
456 *policy = CPUFREQ_POLICY_POWERSAVE;
460 struct cpufreq_governor *t;
462 mutex_lock(&cpufreq_governor_mutex);
464 t = find_governor(str_governor);
469 mutex_unlock(&cpufreq_governor_mutex);
470 ret = request_module("cpufreq_%s", str_governor);
471 mutex_lock(&cpufreq_governor_mutex);
474 t = find_governor(str_governor);
482 mutex_unlock(&cpufreq_governor_mutex);
489 * cpufreq_per_cpu_attr_read() / show_##file_name() -
490 * print out cpufreq information
492 * Write out information from cpufreq_driver->policy[cpu]; object must be
496 #define show_one(file_name, object) \
497 static ssize_t show_##file_name \
498 (struct cpufreq_policy *policy, char *buf) \
500 return sprintf(buf, "%u\n", policy->object); \
503 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
504 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
505 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
506 show_one(scaling_min_freq, min);
507 show_one(scaling_max_freq, max);
509 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
513 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
514 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
516 ret = sprintf(buf, "%u\n", policy->cur);
520 static int cpufreq_set_policy(struct cpufreq_policy *policy,
521 struct cpufreq_policy *new_policy);
524 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
526 #define store_one(file_name, object) \
527 static ssize_t store_##file_name \
528 (struct cpufreq_policy *policy, const char *buf, size_t count) \
531 struct cpufreq_policy new_policy; \
533 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
537 ret = sscanf(buf, "%u", &new_policy.object); \
541 temp = new_policy.object; \
542 ret = cpufreq_set_policy(policy, &new_policy); \
544 policy->user_policy.object = temp; \
546 return ret ? ret : count; \
549 store_one(scaling_min_freq, min);
550 store_one(scaling_max_freq, max);
553 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
555 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
558 unsigned int cur_freq = __cpufreq_get(policy);
560 return sprintf(buf, "<unknown>");
561 return sprintf(buf, "%u\n", cur_freq);
565 * show_scaling_governor - show the current policy for the specified CPU
567 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
569 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
570 return sprintf(buf, "powersave\n");
571 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
572 return sprintf(buf, "performance\n");
573 else if (policy->governor)
574 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
575 policy->governor->name);
580 * store_scaling_governor - store policy for the specified CPU
582 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
583 const char *buf, size_t count)
586 char str_governor[16];
587 struct cpufreq_policy new_policy;
589 ret = cpufreq_get_policy(&new_policy, policy->cpu);
593 ret = sscanf(buf, "%15s", str_governor);
597 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
598 &new_policy.governor))
601 ret = cpufreq_set_policy(policy, &new_policy);
603 policy->user_policy.policy = policy->policy;
604 policy->user_policy.governor = policy->governor;
613 * show_scaling_driver - show the cpufreq driver currently loaded
615 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
617 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
621 * show_scaling_available_governors - show the available CPUfreq governors
623 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
627 struct cpufreq_governor *t;
630 i += sprintf(buf, "performance powersave");
634 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
635 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
636 - (CPUFREQ_NAME_LEN + 2)))
638 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
641 i += sprintf(&buf[i], "\n");
645 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
650 for_each_cpu(cpu, mask) {
652 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
653 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
654 if (i >= (PAGE_SIZE - 5))
657 i += sprintf(&buf[i], "\n");
660 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
663 * show_related_cpus - show the CPUs affected by each transition even if
664 * hw coordination is in use
666 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
668 return cpufreq_show_cpus(policy->related_cpus, buf);
672 * show_affected_cpus - show the CPUs affected by each transition
674 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
676 return cpufreq_show_cpus(policy->cpus, buf);
679 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
680 const char *buf, size_t count)
682 unsigned int freq = 0;
685 if (!policy->governor || !policy->governor->store_setspeed)
688 ret = sscanf(buf, "%u", &freq);
692 policy->governor->store_setspeed(policy, freq);
697 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
699 if (!policy->governor || !policy->governor->show_setspeed)
700 return sprintf(buf, "<unsupported>\n");
702 return policy->governor->show_setspeed(policy, buf);
706 * show_bios_limit - show the current cpufreq HW/BIOS limitation
708 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
712 if (cpufreq_driver->bios_limit) {
713 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
715 return sprintf(buf, "%u\n", limit);
717 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
720 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
721 cpufreq_freq_attr_ro(cpuinfo_min_freq);
722 cpufreq_freq_attr_ro(cpuinfo_max_freq);
723 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
724 cpufreq_freq_attr_ro(scaling_available_governors);
725 cpufreq_freq_attr_ro(scaling_driver);
726 cpufreq_freq_attr_ro(scaling_cur_freq);
727 cpufreq_freq_attr_ro(bios_limit);
728 cpufreq_freq_attr_ro(related_cpus);
729 cpufreq_freq_attr_ro(affected_cpus);
730 cpufreq_freq_attr_rw(scaling_min_freq);
731 cpufreq_freq_attr_rw(scaling_max_freq);
732 cpufreq_freq_attr_rw(scaling_governor);
733 cpufreq_freq_attr_rw(scaling_setspeed);
735 static struct attribute *default_attrs[] = {
736 &cpuinfo_min_freq.attr,
737 &cpuinfo_max_freq.attr,
738 &cpuinfo_transition_latency.attr,
739 &scaling_min_freq.attr,
740 &scaling_max_freq.attr,
743 &scaling_governor.attr,
744 &scaling_driver.attr,
745 &scaling_available_governors.attr,
746 &scaling_setspeed.attr,
750 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
751 #define to_attr(a) container_of(a, struct freq_attr, attr)
753 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
755 struct cpufreq_policy *policy = to_policy(kobj);
756 struct freq_attr *fattr = to_attr(attr);
759 if (!down_read_trylock(&cpufreq_rwsem))
762 down_read(&policy->rwsem);
765 ret = fattr->show(policy, buf);
769 up_read(&policy->rwsem);
770 up_read(&cpufreq_rwsem);
775 static ssize_t store(struct kobject *kobj, struct attribute *attr,
776 const char *buf, size_t count)
778 struct cpufreq_policy *policy = to_policy(kobj);
779 struct freq_attr *fattr = to_attr(attr);
780 ssize_t ret = -EINVAL;
784 if (!cpu_online(policy->cpu))
787 if (!down_read_trylock(&cpufreq_rwsem))
790 down_write(&policy->rwsem);
793 ret = fattr->store(policy, buf, count);
797 up_write(&policy->rwsem);
799 up_read(&cpufreq_rwsem);
806 static void cpufreq_sysfs_release(struct kobject *kobj)
808 struct cpufreq_policy *policy = to_policy(kobj);
809 pr_debug("last reference is dropped\n");
810 complete(&policy->kobj_unregister);
813 static const struct sysfs_ops sysfs_ops = {
818 static struct kobj_type ktype_cpufreq = {
819 .sysfs_ops = &sysfs_ops,
820 .default_attrs = default_attrs,
821 .release = cpufreq_sysfs_release,
824 struct kobject *cpufreq_global_kobject;
825 EXPORT_SYMBOL(cpufreq_global_kobject);
827 static int cpufreq_global_kobject_usage;
829 int cpufreq_get_global_kobject(void)
831 if (!cpufreq_global_kobject_usage++)
832 return kobject_add(cpufreq_global_kobject,
833 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
837 EXPORT_SYMBOL(cpufreq_get_global_kobject);
839 void cpufreq_put_global_kobject(void)
841 if (!--cpufreq_global_kobject_usage)
842 kobject_del(cpufreq_global_kobject);
844 EXPORT_SYMBOL(cpufreq_put_global_kobject);
846 int cpufreq_sysfs_create_file(const struct attribute *attr)
848 int ret = cpufreq_get_global_kobject();
851 ret = sysfs_create_file(cpufreq_global_kobject, attr);
853 cpufreq_put_global_kobject();
858 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
860 void cpufreq_sysfs_remove_file(const struct attribute *attr)
862 sysfs_remove_file(cpufreq_global_kobject, attr);
863 cpufreq_put_global_kobject();
865 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
867 /* symlink affected CPUs */
868 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
873 for_each_cpu(j, policy->cpus) {
874 struct device *cpu_dev;
876 if (j == policy->cpu)
879 pr_debug("Adding link for CPU: %u\n", j);
880 cpu_dev = get_cpu_device(j);
881 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
889 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
892 struct freq_attr **drv_attr;
895 /* set up files for this cpu device */
896 drv_attr = cpufreq_driver->attr;
897 while (drv_attr && *drv_attr) {
898 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
903 if (cpufreq_driver->get) {
904 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
909 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
913 if (cpufreq_driver->bios_limit) {
914 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
919 return cpufreq_add_dev_symlink(policy);
922 static void cpufreq_init_policy(struct cpufreq_policy *policy)
924 struct cpufreq_governor *gov = NULL;
925 struct cpufreq_policy new_policy;
928 memcpy(&new_policy, policy, sizeof(*policy));
930 /* Update governor of new_policy to the governor used before hotplug */
931 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
933 pr_debug("Restoring governor %s for cpu %d\n",
934 policy->governor->name, policy->cpu);
936 gov = CPUFREQ_DEFAULT_GOVERNOR;
938 new_policy.governor = gov;
940 /* Use the default policy if its valid. */
941 if (cpufreq_driver->setpolicy)
942 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
944 /* set default policy */
945 ret = cpufreq_set_policy(policy, &new_policy);
947 pr_debug("setting policy failed\n");
948 if (cpufreq_driver->exit)
949 cpufreq_driver->exit(policy);
953 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
954 unsigned int cpu, struct device *dev)
960 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
962 pr_err("%s: Failed to stop governor\n", __func__);
967 down_write(&policy->rwsem);
969 write_lock_irqsave(&cpufreq_driver_lock, flags);
971 cpumask_set_cpu(cpu, policy->cpus);
972 per_cpu(cpufreq_cpu_data, cpu) = policy;
973 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
975 up_write(&policy->rwsem);
978 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
980 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
983 pr_err("%s: Failed to start governor\n", __func__);
988 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
991 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
993 struct cpufreq_policy *policy;
996 read_lock_irqsave(&cpufreq_driver_lock, flags);
998 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
1000 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1003 policy->governor = NULL;
/*
 * cpufreq_policy_alloc - allocate and initialize a fresh policy object:
 * zeroed struct, both cpumasks, list head, locks, waitqueue, completion
 * and the deferred-update work item. Returns NULL on allocation failure.
 * NOTE(review): dropped lines include the NULL checks after kzalloc,
 * the "return policy", and the err_free_* labels/kfree — confirm.
 */
1008 static struct cpufreq_policy *cpufreq_policy_alloc(void)
1010 struct cpufreq_policy *policy;
1012 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1016 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1017 goto err_free_policy;
1019 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1020 goto err_free_cpumask;
1022 INIT_LIST_HEAD(&policy->policy_list);
1023 init_rwsem(&policy->rwsem);
1024 spin_lock_init(&policy->transition_lock);
1025 init_waitqueue_head(&policy->transition_wait);
1026 init_completion(&policy->kobj_unregister);
1027 INIT_WORK(&policy->update, handle_update);
/* Error unwind: free in reverse order of acquisition. */
1032 free_cpumask_var(policy->cpus);
/*
 * cpufreq_policy_put_kobj - notify REMOVE_POLICY, drop the policy's
 * kobject and block until its last reference is gone, so the policy
 * memory can be freed safely afterwards.
 * NOTE(review): the kobject_put(kobj) between the up_read() and the
 * wait appears to have been dropped by extraction — confirm.
 */
1039 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1041 struct kobject *kobj;
1042 struct completion *cmp;
1044 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1045 CPUFREQ_REMOVE_POLICY, policy);
/* Snapshot kobj/cmp under the rwsem; they are used after it is dropped. */
1047 down_read(&policy->rwsem);
1048 kobj = &policy->kobj;
1049 cmp = &policy->kobj_unregister;
1050 up_read(&policy->rwsem);
1054 * We need to make sure that the underlying kobj is
1055 * actually not referenced anymore by anybody before we
1056 * proceed with unloading.
1058 pr_debug("waiting for dropping of refcount\n");
/* Completed by cpufreq_sysfs_release() when the refcount hits zero. */
1059 wait_for_completion(cmp);
1060 pr_debug("wait complete\n");
/*
 * cpufreq_policy_free - release a policy's cpumasks (and, per the
 * dropped trailing line, kfree() the policy itself — TODO confirm).
 */
1063 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1065 free_cpumask_var(policy->related_cpus);
1066 free_cpumask_var(policy->cpus);
/*
 * update_policy_cpu - re-home @policy onto @cpu: move its kobject under
 * the new CPU's device and update policy->cpu (the assignment itself is
 * on a line dropped by extraction — TODO confirm). Returns 0 or the
 * kobject_move() error.
 */
1070 static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1071 struct device *cpu_dev)
/* Nothing to do if the policy is already homed on @cpu. */
1075 if (WARN_ON(cpu == policy->cpu))
1078 /* Move kobject to the new policy->cpu */
1079 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1081 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1085 down_write(&policy->rwsem);
1087 up_write(&policy->rwsem);
1092 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1094 unsigned int j, cpu = dev->id;
1096 struct cpufreq_policy *policy;
1097 unsigned long flags;
1098 bool recover_policy = cpufreq_suspended;
1100 if (cpu_is_offline(cpu))
1103 pr_debug("adding CPU %u\n", cpu);
1105 /* check whether a different CPU already registered this
1106 * CPU because it is in the same boat. */
1107 policy = cpufreq_cpu_get_raw(cpu);
1108 if (unlikely(policy))
1111 if (!down_read_trylock(&cpufreq_rwsem))
1114 /* Check if this cpu was hot-unplugged earlier and has siblings */
1115 read_lock_irqsave(&cpufreq_driver_lock, flags);
1116 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1117 if (cpumask_test_cpu(cpu, policy->related_cpus)) {
1118 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1119 ret = cpufreq_add_policy_cpu(policy, cpu, dev);
1120 up_read(&cpufreq_rwsem);
1124 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1127 * Restore the saved policy when doing light-weight init and fall back
1128 * to the full init if that fails.
1130 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1132 recover_policy = false;
1133 policy = cpufreq_policy_alloc();
1139 * In the resume path, since we restore a saved policy, the assignment
1140 * to policy->cpu is like an update of the existing policy, rather than
1141 * the creation of a brand new one. So we need to perform this update
1142 * by invoking update_policy_cpu().
1144 if (recover_policy && cpu != policy->cpu)
1145 WARN_ON(update_policy_cpu(policy, cpu, dev));
1149 cpumask_copy(policy->cpus, cpumask_of(cpu));
1151 /* call driver. From then on the cpufreq must be able
1152 * to accept all calls to ->verify and ->setpolicy for this CPU
1154 ret = cpufreq_driver->init(policy);
1156 pr_debug("initialization failed\n");
1157 goto err_set_policy_cpu;
1160 down_write(&policy->rwsem);
1162 /* related cpus should atleast have policy->cpus */
1163 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1166 * affected cpus must always be the one, which are online. We aren't
1167 * managing offline cpus here.
1169 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1171 if (!recover_policy) {
1172 policy->user_policy.min = policy->min;
1173 policy->user_policy.max = policy->max;
1175 /* prepare interface data */
1176 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1177 &dev->kobj, "cpufreq");
1179 pr_err("%s: failed to init policy->kobj: %d\n",
1181 goto err_init_policy_kobj;
1185 write_lock_irqsave(&cpufreq_driver_lock, flags);
1186 for_each_cpu(j, policy->cpus)
1187 per_cpu(cpufreq_cpu_data, j) = policy;
1188 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1190 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1191 policy->cur = cpufreq_driver->get(policy->cpu);
1193 pr_err("%s: ->get() failed\n", __func__);
1199 * Sometimes boot loaders set CPU frequency to a value outside of
1200 * frequency table present with cpufreq core. In such cases CPU might be
1201 * unstable if it has to run on that frequency for long duration of time
1202 * and so its better to set it to a frequency which is specified in
1203 * freq-table. This also makes cpufreq stats inconsistent as
1204 * cpufreq-stats would fail to register because current frequency of CPU
1205 * isn't found in freq-table.
1207 * Because we don't want this change to effect boot process badly, we go
1208 * for the next freq which is >= policy->cur ('cur' must be set by now,
1209 * otherwise we will end up setting freq to lowest of the table as 'cur'
1210 * is initialized to zero).
1212 * We are passing target-freq as "policy->cur - 1" otherwise
1213 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1214 * equal to target-freq.
1216 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1218 /* Are we running at unknown frequency ? */
1219 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1220 if (ret == -EINVAL) {
1221 /* Warn user and fix it */
1222 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1223 __func__, policy->cpu, policy->cur);
1224 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1225 CPUFREQ_RELATION_L);
1228 * Reaching here after boot in a few seconds may not
1229 * mean that system will remain stable at "unknown"
1230 * frequency for longer duration. Hence, a BUG_ON().
1233 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1234 __func__, policy->cpu, policy->cur);
1238 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1239 CPUFREQ_START, policy);
1241 if (!recover_policy) {
1242 ret = cpufreq_add_dev_interface(policy, dev);
1244 goto err_out_unregister;
1245 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1246 CPUFREQ_CREATE_POLICY, policy);
1249 write_lock_irqsave(&cpufreq_driver_lock, flags);
1250 list_add(&policy->policy_list, &cpufreq_policy_list);
1251 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1253 cpufreq_init_policy(policy);
1255 if (!recover_policy) {
1256 policy->user_policy.policy = policy->policy;
1257 policy->user_policy.governor = policy->governor;
1259 up_write(&policy->rwsem);
1261 kobject_uevent(&policy->kobj, KOBJ_ADD);
1263 up_read(&cpufreq_rwsem);
1265 /* Callback for handling stuff after policy is ready */
1266 if (cpufreq_driver->ready)
1267 cpufreq_driver->ready(policy);
1269 pr_debug("initialization complete\n");
1275 write_lock_irqsave(&cpufreq_driver_lock, flags);
1276 for_each_cpu(j, policy->cpus)
1277 per_cpu(cpufreq_cpu_data, j) = NULL;
1278 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1280 if (!recover_policy) {
1281 kobject_put(&policy->kobj);
1282 wait_for_completion(&policy->kobj_unregister);
1284 err_init_policy_kobj:
1285 up_write(&policy->rwsem);
1287 if (cpufreq_driver->exit)
1288 cpufreq_driver->exit(policy);
1290 if (recover_policy) {
1291 /* Do not leave stale fallback data behind. */
1292 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1293 cpufreq_policy_put_kobj(policy);
1295 cpufreq_policy_free(policy);
1298 up_read(&cpufreq_rwsem);
1304 * cpufreq_add_dev - add a CPU device
1306 * Adds the cpufreq interface for a CPU device.
1308 * The Oracle says: try running cpufreq registration/unregistration concurrently
1309 * with cpu hotplugging and all hell will break loose. Tried to clean this
1310 * mess up, but more thorough testing is needed. - Mathieu
1312 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
/* Thin subsys_interface ->add_dev hook; all real work is in __cpufreq_add_dev(). */
1314 	return __cpufreq_add_dev(dev, sif);
/*
 * First stage of CPU removal: stop the governor, remember its name for a
 * possible later re-add, and re-home the policy's sysfs objects if the CPU
 * being removed currently owns them.  __cpufreq_remove_dev_finish() below
 * completes the teardown.
 */
1317 static int __cpufreq_remove_dev_prepare(struct device *dev,
1318 					struct subsys_interface *sif)
1320 	unsigned int cpu = dev->id, cpus;
1322 	unsigned long flags;
1323 	struct cpufreq_policy *policy;
1325 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1327 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1329 	policy = per_cpu(cpufreq_cpu_data, cpu);
1331 	/* Save the policy somewhere when doing a light-weight tear-down */
1332 	if (cpufreq_suspended)
1333 		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1335 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1338 		pr_debug("%s: No cpu_data found\n", __func__);
1343 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1345 			pr_err("%s: Failed to stop governor\n", __func__);
/* Record the governor name so a re-added CPU can restore it (see cpufreq_cpu_governor). */
1349 		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1350 			policy->governor->name, CPUFREQ_NAME_LEN);
/* Snapshot how many CPUs still share this policy. */
1353 	down_read(&policy->rwsem);
1354 	cpus = cpumask_weight(policy->cpus);
1355 	up_read(&policy->rwsem);
1357 	if (cpu != policy->cpu) {
/* Not the policy owner: just drop this CPU's "cpufreq" symlink. */
1358 		sysfs_remove_link(&dev->kobj, "cpufreq");
1359 	} else if (cpus > 1) {
1360 		/* Nominate new CPU */
1361 		int new_cpu = cpumask_any_but(policy->cpus, cpu);
1362 		struct device *cpu_dev = get_cpu_device(new_cpu);
1364 		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1365 		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1367 			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1369 				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1370 				       __func__, cpu_dev->id);
1374 		if (!cpufreq_suspended)
1375 			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1376 				 __func__, new_cpu, cpu);
1377 	} else if (cpufreq_driver->stop_cpu) {
/* Last CPU using the policy: let the driver quiesce it. */
1378 		cpufreq_driver->stop_cpu(policy);
/*
 * Second stage of CPU removal: clear the per-CPU policy pointer and, when the
 * last CPU of the policy goes away, exit the governor, call the driver's
 * ->exit() and free the policy — except during a suspend light-weight
 * tear-down, where kobject/policy are kept for the later re-add.
 */
1384 static int __cpufreq_remove_dev_finish(struct device *dev,
1385 				       struct subsys_interface *sif)
1387 	unsigned int cpu = dev->id, cpus;
1389 	unsigned long flags;
1390 	struct cpufreq_policy *policy;
1392 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1393 	policy = per_cpu(cpufreq_cpu_data, cpu);
1394 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
1395 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1398 		pr_debug("%s: No cpu_data found\n", __func__);
1402 	down_write(&policy->rwsem);
1403 	cpus = cpumask_weight(policy->cpus);
1406 		cpumask_clear_cpu(cpu, policy->cpus);
1407 	up_write(&policy->rwsem);
1409 	/* If cpu is last user of policy, free policy */
1412 			ret = __cpufreq_governor(policy,
1413 					CPUFREQ_GOV_POLICY_EXIT);
1415 				pr_err("%s: Failed to exit governor\n",
1421 		if (!cpufreq_suspended)
1422 			cpufreq_policy_put_kobj(policy);
1425 		 * Perform the ->exit() even during light-weight tear-down,
1426 		 * since this is a core component, and is essential for the
1427 		 * subsequent light-weight ->init() to succeed.
1429 		if (cpufreq_driver->exit)
1430 			cpufreq_driver->exit(policy);
1432 		/* Remove policy from list of active policies */
1433 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1434 		list_del(&policy->policy_list);
1435 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1437 		if (!cpufreq_suspended)
1438 			cpufreq_policy_free(policy);
1439 	} else if (has_target()) {
/* Policy still has users: restart its governor with the reduced cpus mask. */
1440 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1442 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1445 			pr_err("%s: Failed to start governor\n", __func__);
1454 * cpufreq_remove_dev - remove a CPU device
1456 * Removes the cpufreq interface for a CPU device.
1458 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1460 	unsigned int cpu = dev->id;
/* Offline CPUs are presumably already handled via cpufreq_cpu_callback() — see hotplug notifier below. */
1463 	if (cpu_is_offline(cpu))
/* Two-stage teardown: prepare (stop governor / re-home sysfs), then finish (free). */
1466 	ret = __cpufreq_remove_dev_prepare(dev, sif);
1469 		ret = __cpufreq_remove_dev_finish(dev, sif);
/* Deferred-work handler (policy->update): re-evaluate the owning CPU's policy. */
1474 static void handle_update(struct work_struct *work)
1476 	struct cpufreq_policy *policy =
1477 		container_of(work, struct cpufreq_policy, update);
1478 	unsigned int cpu = policy->cpu;
1479 	pr_debug("handle_update for cpu %u called\n", cpu);
1480 	cpufreq_update_policy(cpu);
1484 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1486 * @policy: policy managing CPUs
1487 * @new_freq: CPU frequency the CPU actually runs at
1489 * We adjust to current frequency first, and need to clean up later.
1490 * So either call to cpufreq_update_policy() or schedule handle_update()).
1492 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1493 				unsigned int new_freq)
1495 	struct cpufreq_freqs freqs;
1497 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1498 		 policy->cur, new_freq);
1500 	freqs.old = policy->cur;
1501 	freqs.new = new_freq;
/* Announce a synthetic "transition" to the real frequency so notifiers resync. */
1503 	cpufreq_freq_transition_begin(policy, &freqs);
1504 	cpufreq_freq_transition_end(policy, &freqs, 0);
1508 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1511 * This is the last known freq, without actually getting it from the driver.
1512 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1514 unsigned int cpufreq_quick_get(unsigned int cpu)
1516 	struct cpufreq_policy *policy;
1517 	unsigned int ret_freq = 0;
/* setpolicy drivers manage frequency themselves — query the hardware directly. */
1519 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1520 		return cpufreq_driver->get(cpu);
1522 	policy = cpufreq_cpu_get(cpu);
/* Returns 0 when no policy exists for @cpu. */
1524 		ret_freq = policy->cur;
1525 		cpufreq_cpu_put(policy);
1530 EXPORT_SYMBOL(cpufreq_quick_get);
1533 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1536 * Just return the max possible frequency for a given CPU.
1538 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1540 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1541 	unsigned int ret_freq = 0;
/* Returns 0 when no policy exists for @cpu. */
1544 		ret_freq = policy->max;
1545 		cpufreq_cpu_put(policy);
1550 EXPORT_SYMBOL(cpufreq_quick_get_max);
/*
 * __cpufreq_get - read the current frequency from the driver and, if the
 * hardware disagrees with our cached policy->cur, kick off a resync.
 * Caller holds policy->rwsem (see cpufreq_get()).
 */
1552 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1554 	unsigned int ret_freq = 0;
1556 	if (!cpufreq_driver->get)
1559 	ret_freq = cpufreq_driver->get(policy->cpu);
/* Resync check skipped for CPUFREQ_CONST_LOOPS drivers. */
1561 	if (ret_freq && policy->cur &&
1562 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1563 		/* verify no discrepancy between actual and
1564 					saved value exists */
1565 		if (unlikely(ret_freq != policy->cur)) {
1566 			cpufreq_out_of_sync(policy, ret_freq);
/* Defer the full policy re-evaluation to process context. */
1567 			schedule_work(&policy->update);
1575 * cpufreq_get - get the current CPU frequency (in kHz)
1578 * Get the CPU current (static) CPU frequency
1580 unsigned int cpufreq_get(unsigned int cpu)
1582 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1583 	unsigned int ret_freq = 0;
/* Serialize against concurrent policy updates while querying the driver. */
1586 		down_read(&policy->rwsem);
1587 		ret_freq = __cpufreq_get(policy);
1588 		up_read(&policy->rwsem);
1590 		cpufreq_cpu_put(policy);
1595 EXPORT_SYMBOL(cpufreq_get);
/* Wires cpufreq into the CPU subsystem's device add/remove path. */
1597 static struct subsys_interface cpufreq_interface = {
1599 	.subsys		= &cpu_subsys,
1600 	.add_dev	= cpufreq_add_dev,
1601 	.remove_dev	= cpufreq_remove_dev,
1605 * In case platform wants some specific frequency to be configured
/*
 * Generic ->suspend helper for drivers: pin the policy to the driver-chosen
 * policy->suspend_freq before the system suspends.
 */
1608 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
/* Nothing sensible to do unless the driver populated policy->suspend_freq. */
1612 	if (!policy->suspend_freq) {
1613 		pr_err("%s: suspend_freq can't be zero\n", __func__);
1617 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1618 			policy->suspend_freq);
1620 	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1621 			CPUFREQ_RELATION_H);
1623 		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1624 				__func__, policy->suspend_freq, ret);
1628 EXPORT_SYMBOL(cpufreq_generic_suspend);
1631 * cpufreq_suspend() - Suspend CPUFreq governors
1633 * Called during system wide Suspend/Hibernate cycles for suspending governors
1634 * as some platforms can't change frequency after this point in suspend cycle.
1635 * Because some of the devices (like: i2c, regulators, etc) they use for
1636 * changing frequency are suspended quickly after this point.
1638 void cpufreq_suspend(void)
1640 	struct cpufreq_policy *policy;
1642 	if (!cpufreq_driver)
1648 	pr_debug("%s: Suspending Governors\n", __func__);
/* Stop every governor, then give the driver a chance to save its state. */
1650 	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1651 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1652 			pr_err("%s: Failed to stop governor for policy: %p\n",
1654 		else if (cpufreq_driver->suspend
1655 		    && cpufreq_driver->suspend(policy))
1656 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
/* From here on governor operations become no-ops (checked in __cpufreq_governor()). */
1661 	cpufreq_suspended = true;
1665 * cpufreq_resume() - Resume CPUFreq governors
1667 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1668 * are suspended with cpufreq_suspend().
1670 void cpufreq_resume(void)
1672 	struct cpufreq_policy *policy;
1674 	if (!cpufreq_driver)
/* Re-enable governor operations before trying to restart them. */
1677 	cpufreq_suspended = false;
1682 	pr_debug("%s: Resuming Governors\n", __func__);
1684 	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1685 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1686 			pr_err("%s: Failed to resume driver: %p\n", __func__,
1688 		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1689 		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1690 			pr_err("%s: Failed to start governor for policy: %p\n",
1694 	 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
1695 	 * policy in list. It will verify that the current freq is in
1696 	 * sync with what we believe it to be.
1698 	if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1699 		schedule_work(&policy->update);
1704 * cpufreq_get_current_driver - return current driver's name
1706 * Return the name string of the currently loaded cpufreq driver
1709 const char *cpufreq_get_current_driver(void)
/* NOTE(review): presumably returns NULL when no driver is registered — confirm guard. */
1712 		return cpufreq_driver->name;
1716 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1719 * cpufreq_get_driver_data - return current driver data
1721 * Return the private data of the currently loaded cpufreq
1722 * driver, or NULL if no cpufreq driver is loaded.
1724 void *cpufreq_get_driver_data(void)
/* Per the kernel-doc above: NULL when no driver is loaded. */
1727 		return cpufreq_driver->driver_data;
1731 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1733 /*********************************************************************
1734 * NOTIFIER LISTS INTERFACE *
1735 *********************************************************************/
1738 * cpufreq_register_notifier - register a driver with cpufreq
1739 * @nb: notifier function to register
1740 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1742 * Add a driver to one of two lists: either a list of drivers that
1743 * are notified about clock rate changes (once before and once after
1744 * the transition), or a list of drivers that are notified about
1745 * changes in cpufreq policy.
1747 * This function may sleep, and has the same return conditions as
1748 * blocking_notifier_chain_register.
1750 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1754 	if (cpufreq_disabled())
1757 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
/* Transition notifiers live on an SRCU chain; policy notifiers on a blocking chain. */
1760 	case CPUFREQ_TRANSITION_NOTIFIER:
1761 		ret = srcu_notifier_chain_register(
1762 				&cpufreq_transition_notifier_list, nb);
1764 	case CPUFREQ_POLICY_NOTIFIER:
1765 		ret = blocking_notifier_chain_register(
1766 				&cpufreq_policy_notifier_list, nb);
1774 EXPORT_SYMBOL(cpufreq_register_notifier);
1777 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1778 * @nb: notifier block to be unregistered
1779 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1781 * Remove a driver from the CPU frequency notifier list.
1783 * This function may sleep, and has the same return conditions as
1784 * blocking_notifier_chain_unregister.
1786 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1790 	if (cpufreq_disabled())
/* Mirror of cpufreq_register_notifier(): remove @nb from the matching chain. */
1794 	case CPUFREQ_TRANSITION_NOTIFIER:
1795 		ret = srcu_notifier_chain_unregister(
1796 				&cpufreq_transition_notifier_list, nb);
1798 	case CPUFREQ_POLICY_NOTIFIER:
1799 		ret = blocking_notifier_chain_unregister(
1800 				&cpufreq_policy_notifier_list, nb);
1808 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1811 /*********************************************************************
1813 *********************************************************************/
1815 /* Must set freqs->new to intermediate frequency */
1816 static int __target_intermediate(struct cpufreq_policy *policy,
1817 				 struct cpufreq_freqs *freqs, int index)
/* Ask the driver which intermediate frequency this target index needs. */
1821 	freqs->new = cpufreq_driver->get_intermediate(policy, index);
1823 	/* We don't need to switch to intermediate freq */
1827 	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1828 		 __func__, policy->cpu, freqs->old, freqs->new);
/* Wrap the driver's switch in begin/end notifications; end() reports the result. */
1830 	cpufreq_freq_transition_begin(policy, freqs);
1831 	ret = cpufreq_driver->target_intermediate(policy, index);
1832 	cpufreq_freq_transition_end(policy, freqs, ret);
1835 		pr_err("%s: Failed to change to intermediate frequency: %d\n",
/*
 * Switch the CPU to freq_table[index], optionally via an intermediate
 * frequency for drivers that provide get_intermediate/target_intermediate.
 */
1841 static int __target_index(struct cpufreq_policy *policy,
1842 			  struct cpufreq_frequency_table *freq_table, int index)
1844 	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1845 	unsigned int intermediate_freq = 0;
1846 	int retval = -EINVAL;
/* ASYNC_NOTIFICATION drivers issue their own transition notifications. */
1849 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1851 	/* Handle switching to intermediate frequency */
1852 	if (cpufreq_driver->get_intermediate) {
1853 		retval = __target_intermediate(policy, &freqs, index);
1857 		intermediate_freq = freqs.new;
1858 		/* Set old freq to intermediate */
1859 		if (intermediate_freq)
1860 			freqs.old = freqs.new;
1863 	freqs.new = freq_table[index].frequency;
1864 	pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1865 		 __func__, policy->cpu, freqs.old, freqs.new);
1867 		cpufreq_freq_transition_begin(policy, &freqs);
1870 	retval = cpufreq_driver->target_index(policy, index);
1872 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1876 		cpufreq_freq_transition_end(policy, &freqs, retval);
1879 	 * Failed after setting to intermediate freq? Driver should have
1880 	 * reverted back to initial frequency and so should we. Check
1881 	 * here for intermediate_freq instead of get_intermediate, in
1882 	 * case we haven't switched to intermediate freq at all.
1884 	if (unlikely(retval && intermediate_freq)) {
1885 		freqs.old = intermediate_freq;
1886 		freqs.new = policy->restore_freq;
1887 		cpufreq_freq_transition_begin(policy, &freqs);
1888 		cpufreq_freq_transition_end(policy, &freqs, 0);
/*
 * Core frequency-change entry point.  Clamps @target_freq to the policy
 * limits, then dispatches to the driver's ->target() or, for table-based
 * drivers, resolves an index and goes through __target_index().
 * Caller must hold policy->rwsem (see cpufreq_driver_target()).
 */
1895 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1896 			    unsigned int target_freq,
1897 			    unsigned int relation)
1899 	unsigned int old_target_freq = target_freq;
1900 	int retval = -EINVAL;
1902 	if (cpufreq_disabled())
1905 	/* Make sure that target_freq is within supported range */
1906 	if (target_freq > policy->max)
1907 		target_freq = policy->max;
1908 	if (target_freq < policy->min)
1909 		target_freq = policy->min;
1911 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1912 		 policy->cpu, target_freq, relation, old_target_freq);
1915 	 * This might look like a redundant call as we are checking it again
1916 	 * after finding index. But it is left intentionally for cases where
1917 	 * exactly same freq is called again and so we can save on few function
1920 	if (target_freq == policy->cur)
1923 	/* Save last value to restore later on errors */
1924 	policy->restore_freq = policy->cur;
1926 	if (cpufreq_driver->target)
1927 		retval = cpufreq_driver->target(policy, target_freq, relation);
1928 	else if (cpufreq_driver->target_index) {
1929 		struct cpufreq_frequency_table *freq_table;
1932 		freq_table = cpufreq_frequency_get_table(policy->cpu);
1933 		if (unlikely(!freq_table)) {
1934 			pr_err("%s: Unable to find freq_table\n", __func__);
1938 		retval = cpufreq_frequency_table_target(policy, freq_table,
1939 				target_freq, relation, &index);
1940 		if (unlikely(retval)) {
1941 			pr_err("%s: Unable to find matching freq\n", __func__);
/* Already at the resolved table frequency: nothing to do. */
1945 		if (freq_table[index].frequency == policy->cur) {
1950 		retval = __target_index(policy, freq_table, index);
1956 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
/* Locked wrapper: takes policy->rwsem around __cpufreq_driver_target(). */
1958 int cpufreq_driver_target(struct cpufreq_policy *policy,
1959 			  unsigned int target_freq,
1960 			  unsigned int relation)
1964 	down_write(&policy->rwsem);
1966 	ret = __cpufreq_driver_target(policy, target_freq, relation);
1968 	up_write(&policy->rwsem);
1972 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
/*
 * Drive a governor state transition (@event: INIT/START/STOP/LIMITS/EXIT)
 * for @policy, with bookkeeping of governor_enabled, module refcounting and
 * the per-governor initialized counter.
 */
1974 static int __cpufreq_governor(struct cpufreq_policy *policy,
1979 	/* Only must be defined when default governor is known to have latency
1980 	   restrictions, like e.g. conservative or ondemand.
1981 	   That this is the case is already ensured in Kconfig
1983 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1984 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
1986 	struct cpufreq_governor *gov = NULL;
1989 	/* Don't start any governor operations if we are entering suspend */
1990 	if (cpufreq_suspended)
1993 	 * Governor might not be initiated here if ACPI _PPC changed
1994 	 * notification happened, so check it.
1996 	if (!policy->governor)
/* Governor too slow for this hardware's transition latency: fall back. */
1999 	if (policy->governor->max_transition_latency &&
2000 	    policy->cpuinfo.transition_latency >
2001 	    policy->governor->max_transition_latency) {
2005 			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2006 				policy->governor->name, gov->name);
2007 			policy->governor = gov;
/* Pin the governor module for the lifetime of the policy binding. */
2011 	if (event == CPUFREQ_GOV_POLICY_INIT)
2012 		if (!try_module_get(policy->governor->owner))
2015 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2016 		 policy->cpu, event);
/* Reject a redundant START or a STOP/LIMITS on an already-stopped governor. */
2018 	mutex_lock(&cpufreq_governor_lock);
2019 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2020 	    || (!policy->governor_enabled
2021 	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2022 		mutex_unlock(&cpufreq_governor_lock);
2026 	if (event == CPUFREQ_GOV_STOP)
2027 		policy->governor_enabled = false;
2028 	else if (event == CPUFREQ_GOV_START)
2029 		policy->governor_enabled = true;
2031 	mutex_unlock(&cpufreq_governor_lock);
2033 	ret = policy->governor->governor(policy, event);
2036 		if (event == CPUFREQ_GOV_POLICY_INIT)
2037 			policy->governor->initialized++;
2038 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
2039 			policy->governor->initialized--;
2041 		/* Restore original values */
2042 		mutex_lock(&cpufreq_governor_lock);
2043 		if (event == CPUFREQ_GOV_STOP)
2044 			policy->governor_enabled = true;
2045 		else if (event == CPUFREQ_GOV_START)
2046 			policy->governor_enabled = false;
2047 		mutex_unlock(&cpufreq_governor_lock);
/* Drop the module ref on failed INIT or successful EXIT. */
2050 	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2051 			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2052 		module_put(policy->governor->owner);
/* Add @governor to the global governor list, unless the name is taken. */
2057 int cpufreq_register_governor(struct cpufreq_governor *governor)
2064 	if (cpufreq_disabled())
2067 	mutex_lock(&cpufreq_governor_mutex);
2069 	governor->initialized = 0;
/* Names must be unique: only register if no governor with this name exists. */
2071 	if (!find_governor(governor->name)) {
2073 		list_add(&governor->governor_list, &cpufreq_governor_list);
2076 	mutex_unlock(&cpufreq_governor_mutex);
2079 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
/* Remove @governor from the global list and scrub any saved references to it. */
2081 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2088 	if (cpufreq_disabled())
/* Clear the saved "previous governor" of offlined CPUs if it names @governor. */
2091 	for_each_present_cpu(cpu) {
2092 		if (cpu_online(cpu))
2094 		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2095 			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2098 	mutex_lock(&cpufreq_governor_mutex);
2099 	list_del(&governor->governor_list);
2100 	mutex_unlock(&cpufreq_governor_mutex);
2103 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2106 /*********************************************************************
2107 * POLICY INTERFACE *
2108 *********************************************************************/
2111 * cpufreq_get_policy - get the current cpufreq_policy
2112 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2115 * Reads the current cpufreq policy.
2117 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2119 	struct cpufreq_policy *cpu_policy;
2123 	cpu_policy = cpufreq_cpu_get(cpu);
/* Hand the caller a snapshot copy; the reference is dropped immediately after. */
2127 	memcpy(policy, cpu_policy, sizeof(*policy));
2129 	cpufreq_cpu_put(cpu_policy);
2132 EXPORT_SYMBOL(cpufreq_get_policy);
2135 * policy : current policy.
2136 * new_policy: policy to be set.
/*
 * Validate @new_policy against the driver and policy notifiers, apply the new
 * min/max limits, and perform a governor switch if the governor changed.
 * Caller holds policy->rwsem for writing.
 */
2138 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2139 				struct cpufreq_policy *new_policy)
2141 	struct cpufreq_governor *old_gov;
2144 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2145 		 new_policy->cpu, new_policy->min, new_policy->max);
2147 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
/* Requested range must overlap the currently active limits. */
2149 	if (new_policy->min > policy->max || new_policy->max < policy->min)
2152 	/* verify the cpu speed can be set within this limit */
2153 	ret = cpufreq_driver->verify(new_policy);
2157 	/* adjust if necessary - all reasons */
2158 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2159 			CPUFREQ_ADJUST, new_policy);
2161 	/* adjust if necessary - hardware incompatibility*/
2162 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2163 			CPUFREQ_INCOMPATIBLE, new_policy);
2166 	 * verify the cpu speed can be set within this limit, which might be
2167 	 * different to the first one
2169 	ret = cpufreq_driver->verify(new_policy);
2173 	/* notification of the new policy */
2174 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2175 			CPUFREQ_NOTIFY, new_policy);
2177 	policy->min = new_policy->min;
2178 	policy->max = new_policy->max;
2180 	pr_debug("new min and max freqs are %u - %u kHz\n",
2181 		 policy->min, policy->max);
/* setpolicy drivers pick frequencies themselves; no governor involved. */
2183 	if (cpufreq_driver->setpolicy) {
2184 		policy->policy = new_policy->policy;
2185 		pr_debug("setting range\n");
2186 		return cpufreq_driver->setpolicy(new_policy);
2189 	if (new_policy->governor == policy->governor)
2192 	pr_debug("governor switch\n");
2194 	/* save old, working values */
2195 	old_gov = policy->governor;
2196 	/* end old governor */
/*
 * NOTE(review): the rwsem is dropped around POLICY_EXIT/POLICY_INIT —
 * presumably to avoid deadlocking against governor-internal locking; confirm.
 */
2198 		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2199 		up_write(&policy->rwsem);
2200 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2201 		down_write(&policy->rwsem);
2204 	/* start new governor */
2205 	policy->governor = new_policy->governor;
2206 	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2207 		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2210 		up_write(&policy->rwsem);
2211 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2212 		down_write(&policy->rwsem);
2215 	/* new governor failed, so re-start old one */
2216 	pr_debug("starting governor %s failed\n", policy->governor->name);
2218 		policy->governor = old_gov;
2219 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2220 		__cpufreq_governor(policy, CPUFREQ_GOV_START);
2226 	pr_debug("governor: change or update limits\n");
2227 	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2231 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2232 * @cpu: CPU which shall be re-evaluated
2234 * Useful for policy notifiers which have different necessities
2235 * at different times.
2237 int cpufreq_update_policy(unsigned int cpu)
2239 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2240 	struct cpufreq_policy new_policy;
2246 	down_write(&policy->rwsem);
2248 	pr_debug("updating policy for CPU %u\n", cpu);
/* Rebuild the candidate policy from the user-requested settings. */
2249 	memcpy(&new_policy, policy, sizeof(*policy));
2250 	new_policy.min = policy->user_policy.min;
2251 	new_policy.max = policy->user_policy.max;
2252 	new_policy.policy = policy->user_policy.policy;
2253 	new_policy.governor = policy->user_policy.governor;
2256 	 * BIOS might change freq behind our back
2257 	 * -> ask driver for current freq and notify governors about a change
2259 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2260 		new_policy.cur = cpufreq_driver->get(cpu);
2261 		if (WARN_ON(!new_policy.cur)) {
2267 			pr_debug("Driver did not initialize current freq\n");
2268 			policy->cur = new_policy.cur;
/* Cached freq stale: resync notifiers before applying the policy. */
2270 			if (policy->cur != new_policy.cur && has_target())
2271 				cpufreq_out_of_sync(policy, new_policy.cur);
2275 	ret = cpufreq_set_policy(policy, &new_policy);
2278 	up_write(&policy->rwsem);
2280 	cpufreq_cpu_put(policy);
2283 EXPORT_SYMBOL(cpufreq_update_policy);
/*
 * CPU hotplug notifier: mirror online/offline events onto the cpufreq
 * add/remove paths (TASKS_FROZEN variants are folded in by the mask).
 */
2285 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2286 					unsigned long action, void *hcpu)
2288 	unsigned int cpu = (unsigned long)hcpu;
2291 		dev = get_cpu_device(cpu);
2293 		switch (action & ~CPU_TASKS_FROZEN) {
2295 			__cpufreq_add_dev(dev, NULL);
2298 		case CPU_DOWN_PREPARE:
2299 			__cpufreq_remove_dev_prepare(dev, NULL);
2303 			__cpufreq_remove_dev_finish(dev, NULL);
/* Down was aborted: bring the interface back. */
2306 		case CPU_DOWN_FAILED:
2307 			__cpufreq_add_dev(dev, NULL);
/* Registered in cpufreq_register_driver(); removed on driver unregistration. */
2314 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2315 	.notifier_call = cpufreq_cpu_callback,
2318 /*********************************************************************
2320 *********************************************************************/
/*
 * Software boost toggle (default ->set_boost): recompute every policy's
 * limits from its frequency table and push them to the governors.
 */
2321 static int cpufreq_boost_set_sw(int state)
2323 	struct cpufreq_frequency_table *freq_table;
2324 	struct cpufreq_policy *policy;
2327 	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2328 		freq_table = cpufreq_frequency_get_table(policy->cpu);
2330 			ret = cpufreq_frequency_table_cpuinfo(policy,
2333 				pr_err("%s: Policy frequency update failed\n",
/* Track the new max as the user-visible limit, then re-apply limits. */
2337 			policy->user_policy.max = policy->max;
2338 			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
/*
 * Flip the driver's boost_enabled flag and ask the driver to apply it,
 * rolling the flag back if the driver call fails.
 */
2345 int cpufreq_boost_trigger_state(int state)
2347 	unsigned long flags;
2350 	if (cpufreq_driver->boost_enabled == state)
2353 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2354 	cpufreq_driver->boost_enabled = state;
2355 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2357 	ret = cpufreq_driver->set_boost(state);
/* Driver rejected the change: restore the previous flag value. */
2359 		write_lock_irqsave(&cpufreq_driver_lock, flags);
2360 		cpufreq_driver->boost_enabled = !state;
2361 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2363 		pr_err("%s: Cannot %s BOOST\n",
2364 		       __func__, state ? "enable" : "disable");
/* Whether the registered driver supports boost; false if no driver loaded. */
2370 int cpufreq_boost_supported(void)
2372 	if (likely(cpufreq_driver))
2373 		return cpufreq_driver->boost_supported;
2377 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
/* Current boost state; assumes a driver is registered (no NULL check here). */
2379 	return cpufreq_driver->boost_enabled;
2383 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2385 /*********************************************************************
2386 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2387 *********************************************************************/
2390 * cpufreq_register_driver - register a CPU Frequency driver
2391 * @driver_data: A struct cpufreq_driver containing the values
2392 * submitted by the CPU Frequency driver.
2394 * Registers a CPU Frequency driver to this core code. This code
2395 * returns zero on success, -EBUSY when another driver got here first
2396 * (and isn't unregistered in the meantime).
2399 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2401 	unsigned long flags;
2404 	if (cpufreq_disabled())
/*
 * Sanity-check the callback combination: verify+init are mandatory;
 * exactly one of setpolicy or target/target_index; get_intermediate
 * and target_intermediate must come as a pair.
 */
2407 	if (!driver_data || !driver_data->verify || !driver_data->init ||
2408 	    !(driver_data->setpolicy || driver_data->target_index ||
2409 		    driver_data->target) ||
2410 	     (driver_data->setpolicy && (driver_data->target_index ||
2411 		    driver_data->target)) ||
2412 	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2415 	pr_debug("trying to register driver %s\n", driver_data->name);
/* Only one cpufreq driver may be registered at a time. */
2417 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2418 	if (cpufreq_driver) {
2419 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2422 	cpufreq_driver = driver_data;
2423 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2425 	if (driver_data->setpolicy)
2426 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2428 	if (cpufreq_boost_supported()) {
2430 		 * Check if driver provides function to enable boost -
2431 		 * if not, use cpufreq_boost_set_sw as default
2433 		if (!cpufreq_driver->set_boost)
2434 			cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2436 		ret = cpufreq_sysfs_create_file(&boost.attr);
2438 			pr_err("%s: cannot register global BOOST sysfs file\n",
2440 			goto err_null_driver;
2444 	ret = subsys_interface_register(&cpufreq_interface);
2446 		goto err_boost_unreg;
/* Non-sticky driver with zero successfully initialized policies: back out. */
2448 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2449 	    list_empty(&cpufreq_policy_list)) {
2450 		/* if all ->init() calls failed, unregister */
2451 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2456 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
2457 	pr_debug("driver %s up and running\n", driver_data->name);
/* Error unwind: undo subsys registration, boost file, then the driver slot. */
2461 	subsys_interface_unregister(&cpufreq_interface);
2463 	if (cpufreq_boost_supported())
2464 		cpufreq_sysfs_remove_file(&boost.attr);
2466 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2467 	cpufreq_driver = NULL;
2468 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2474 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2476 * Unregister the current CPUFreq driver. Only call this if you have
2477 * the right to do so, i.e. if you have succeeded in initialising before!
2478 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2479 * currently not initialised.
2481 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2483 	unsigned long flags;
/* Refuse unless @driver is the one currently registered. */
2485 	if (!cpufreq_driver || (driver != cpufreq_driver))
2488 	pr_debug("unregistering driver %s\n", driver->name);
2490 	subsys_interface_unregister(&cpufreq_interface);
2491 	if (cpufreq_boost_supported())
2492 		cpufreq_sysfs_remove_file(&boost.attr);
2494 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
/* cpufreq_rwsem excludes in-flight users of the driver while we clear it. */
2496 	down_write(&cpufreq_rwsem);
2497 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2499 	cpufreq_driver = NULL;
2501 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2502 	up_write(&cpufreq_rwsem);
2506 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2509 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2510 * or mutexes when secondary CPUs are halted.
2512 static struct syscore_ops cpufreq_syscore_ops = {
2513 	.shutdown = cpufreq_suspend,
/* Core bring-up: create the global cpufreq kobject and hook shutdown. */
2516 static int __init cpufreq_core_init(void)
2518 	if (cpufreq_disabled())
2521 	cpufreq_global_kobject = kobject_create();
/* Without the global kobject nothing else can work; fail loudly at boot. */
2522 	BUG_ON(!cpufreq_global_kobject);
2524 	register_syscore_ops(&cpufreq_syscore_ops);
2528 core_initcall(cpufreq_core_init);