Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 81e9d4412db8584b6e97086cf9a257edb2c7056f..8d19f7c06010c364ff6ee758c15096c252f9f4bd 100644
@@ -26,7 +26,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
@@ -48,9 +47,6 @@ static LIST_HEAD(cpufreq_policy_list);
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
 
-/* Flag to suspend/resume CPUFreq governors */
-static bool cpufreq_suspended;
-
 static inline bool has_target(void)
 {
        return cpufreq_driver->target_index || cpufreq_driver->target;
@@ -832,14 +828,17 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
        int ret = 0;
 
        memcpy(&new_policy, policy, sizeof(*policy));
+
+       /* Use the default policy if it's valid. */
+       if (cpufreq_driver->setpolicy)
+               cpufreq_parse_governor(policy->governor->name,
+                                       &new_policy.policy, NULL);
+
        /* ensure that the starting sequence is run in cpufreq_set_policy */
        policy->governor = NULL;
 
        /* set default policy */
        ret = cpufreq_set_policy(policy, &new_policy);
-       policy->user_policy.policy = policy->policy;
-       policy->user_policy.governor = policy->governor;
-
        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
@@ -849,8 +848,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
 
 #ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
-                                 unsigned int cpu, struct device *dev,
-                                 bool frozen)
+                                 unsigned int cpu, struct device *dev)
 {
        int ret = 0;
        unsigned long flags;
@@ -881,11 +879,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                }
        }
 
-       /* Don't touch sysfs links during light-weight init */
-       if (!frozen)
-               ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-
-       return ret;
+       return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 }
 #endif
 
@@ -930,6 +924,27 @@ err_free_policy:
        return NULL;
 }
 
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+       struct kobject *kobj;
+       struct completion *cmp;
+
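+       /*
+        * Read the kobject pointers under the policy rwsem before dropping
+        * our reference outside of the lock.
+        */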
+       down_read(&policy->rwsem);
+       kobj = &policy->kobj;
+       cmp = &policy->kobj_unregister;
+       up_read(&policy->rwsem);
+       kobject_put(kobj);
+
+       /*
+        * We need to make sure that the underlying kobj is
+        * actually not referenced anymore by anybody before we
+        * proceed with unloading.
+        */
+       pr_debug("waiting for dropping of refcount\n");
+       wait_for_completion(cmp);
+       pr_debug("wait complete\n");
+}
+
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
        free_cpumask_var(policy->related_cpus);
@@ -990,7 +1005,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
        list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
                if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-                       ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+                       ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
                        up_read(&cpufreq_rwsem);
                        return ret;
                }
@@ -998,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-       if (frozen)
-               /* Restore the saved policy when doing light-weight init */
-               policy = cpufreq_policy_restore(cpu);
-       else
+       /*
+        * Restore the saved policy when doing light-weight init and fall back
+        * to the full init if that fails.
+        */
+       policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+       if (!policy) {
+               frozen = false;
                policy = cpufreq_policy_alloc();
-
-       if (!policy)
-               goto nomem_out;
-
+               if (!policy)
+                       goto nomem_out;
+       }
 
        /*
         * In the resume path, since we restore a saved policy, the assignment
@@ -1051,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-       policy->user_policy.min = policy->min;
-       policy->user_policy.max = policy->max;
+       if (!frozen) {
+               policy->user_policy.min = policy->min;
+               policy->user_policy.max = policy->max;
+       }
 
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);
@@ -1083,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 
        cpufreq_init_policy(policy);
 
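+       /* Remember the chosen policy and governor only on a full init. */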
+       if (!frozen) {
+               policy->user_policy.policy = policy->policy;
+               policy->user_policy.governor = policy->governor;
+       }
+
        kobject_uevent(&policy->kobj, KOBJ_ADD);
        up_read(&cpufreq_rwsem);
 
@@ -1100,7 +1124,13 @@ err_get_freq:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 err_set_policy_cpu:
+       if (frozen) {
+               /* Do not leave stale fallback data behind. */
+               per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
+               cpufreq_policy_put_kobj(policy);
+       }
        cpufreq_policy_free(policy);
+
 nomem_out:
        up_read(&cpufreq_rwsem);
 
@@ -1122,7 +1152,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
-                                          unsigned int old_cpu, bool frozen)
+                                          unsigned int old_cpu)
 {
        struct device *cpu_dev;
        int ret;
@@ -1130,10 +1160,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
        /* first sibling now owns the new sysfs dir */
        cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
 
-       /* Don't touch sysfs files during light-weight tear-down */
-       if (frozen)
-               return cpu_dev->id;
-
        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
        if (ret) {
@@ -1200,7 +1226,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
                if (!frozen)
                        sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
-               new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+               new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
                if (new_cpu >= 0) {
                        update_policy_cpu(policy, new_cpu);
 
@@ -1222,8 +1248,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
        int ret;
        unsigned long flags;
        struct cpufreq_policy *policy;
-       struct kobject *kobj;
-       struct completion *cmp;
 
        read_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1253,22 +1277,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
                        }
                }
 
-               if (!frozen) {
-                       down_read(&policy->rwsem);
-                       kobj = &policy->kobj;
-                       cmp = &policy->kobj_unregister;
-                       up_read(&policy->rwsem);
-                       kobject_put(kobj);
-
-                       /*
-                        * We need to make sure that the underlying kobj is
-                        * actually not referenced anymore by anybody before we
-                        * proceed with unloading.
-                        */
-                       pr_debug("waiting for dropping of refcount\n");
-                       wait_for_completion(cmp);
-                       pr_debug("wait complete\n");
-               }
+               if (!frozen)
+                       cpufreq_policy_put_kobj(policy);
 
                /*
                 * Perform the ->exit() even during light-weight tear-down,
@@ -1466,41 +1476,6 @@ static struct subsys_interface cpufreq_interface = {
        .remove_dev     = cpufreq_remove_dev,
 };
 
-void cpufreq_suspend(void)
-{
-       struct cpufreq_policy *policy;
-
-       if (!has_target())
-               return;
-
-       pr_debug("%s: Suspending Governors\n", __func__);
-
-       list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
-               if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
-                       pr_err("%s: Failed to stop governor for policy: %p\n",
-                               __func__, policy);
-
-       cpufreq_suspended = true;
-}
-
-void cpufreq_resume(void)
-{
-       struct cpufreq_policy *policy;
-
-       if (!has_target())
-               return;
-
-       pr_debug("%s: Resuming Governors\n", __func__);
-
-       cpufreq_suspended = false;
-
-       list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
-               if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
-                   || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
-                       pr_err("%s: Failed to start governor for policy: %p\n",
-                               __func__, policy);
-}
-
 /**
  * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
  *
@@ -1803,10 +1778,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
        struct cpufreq_governor *gov = NULL;
 #endif
 
-       /* Don't start any governor operations if we are entering suspend */
-       if (cpufreq_suspended)
-               return 0;
-
        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
            policy->governor->max_transition_latency) {
@@ -2119,6 +2090,9 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
        dev = get_cpu_device(cpu);
        if (dev) {
 
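+               /*
+                * Tasks are frozen during suspend/resume, so take the
+                * light-weight init/teardown paths below.
+                */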
+               if (action & CPU_TASKS_FROZEN)
+                       frozen = true;
+
                switch (action & ~CPU_TASKS_FROZEN) {
                case CPU_ONLINE:
                        __cpufreq_add_dev(dev, NULL, frozen);