/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)          \
({                                                              \
        type (*__routine)(struct device *__d);                  \
        type __ret = (type)0;                                   \
                                                                \
        __routine = genpd->dev_ops.callback;                    \
        if (__routine) {                                        \
                __ret = __routine(dev);                         \
        } else {                                                \
                __routine = dev_gpd_data(dev)->ops.callback;    \
                if (__routine)                                  \
                        __ret = __routine(dev);                 \
        }                                                       \
        __ret;                                                  \
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)       \
({                                                                              \
        ktime_t __start = ktime_get();                                          \
        type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);         \
        s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));           \
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
                dev_dbg(dev, name " latency exceeded, new value %lld ns\n",     \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
        }                                                                       \
        __retval;                                                               \
})
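
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * lookup in GENPD_DEV_CALLBACK() prefers the domain-wide callback in
 * genpd->dev_ops and falls back to the per-device one in
 * dev_gpd_data(dev)->ops.  Hypothetical platform code might wire it up as:
 *
 *      static int my_stop_dev(struct device *dev)
 *      {
 *              clk_disable_unprepare(dev_get_drvdata(dev));
 *              return 0;
 *      }
 *
 *      my_domain.dev_ops.stop  = my_stop_dev;  // tried first for all devices
 *      my_domain.dev_ops.start = my_start_dev;
 */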

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
        struct generic_pm_domain *genpd = NULL, *gpd;

        if (IS_ERR_OR_NULL(domain_name))
                return NULL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (!strcmp(gpd->name, domain_name)) {
                        genpd = gpd;
                        break;
                }
        }
        mutex_unlock(&gpd_list_lock);
        return genpd;
}

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev->pm_domain))
                return ERR_PTR(-EINVAL);

        return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
                                        stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
                                        start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
        bool ret = false;

        if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
                ret = !!atomic_dec_and_test(&genpd->sd_count);

        return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
        atomic_inc(&genpd->sd_count);
        smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
        DEFINE_WAIT(wait);

        mutex_lock(&genpd->lock);
        /*
         * Wait for the domain to transition into either the active,
         * or the power off state.
         */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status == GPD_STATE_ACTIVE
                    || genpd->status == GPD_STATE_POWER_OFF)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
        mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
        if (genpd->resume_count == 0)
                genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
        s64 usecs64;

        if (!genpd->cpu_data)
                return;

        usecs64 = genpd->power_on_latency_ns;
        do_div(usecs64, NSEC_PER_USEC);
        usecs64 += genpd->cpu_data->saved_exit_latency;
        genpd->cpu_data->idle_state->exit_latency = usecs64;
}

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct gpd_link *link;
        DEFINE_WAIT(wait);
        int ret = 0;

        /* If the domain's master is being waited for, we have to wait too. */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status != GPD_STATE_WAIT_MASTER)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);

        if (genpd->status == GPD_STATE_ACTIVE
            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                return 0;

        if (genpd->status != GPD_STATE_POWER_OFF) {
                genpd_set_active(genpd);
                return 0;
        }

        if (genpd->cpu_data) {
                cpuidle_pause_and_lock();
                genpd->cpu_data->idle_state->disabled = true;
                cpuidle_resume_and_unlock();
                goto out;
        }

        /*
         * The list is guaranteed not to change while the loop below is being
         * executed, unless one of the masters' .power_on() callbacks fiddles
         * with it.
         */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_inc(link->master);
                genpd->status = GPD_STATE_WAIT_MASTER;

                mutex_unlock(&genpd->lock);

                ret = pm_genpd_poweron(link->master);

                mutex_lock(&genpd->lock);

                /*
                 * The "wait for master" status is guaranteed not to change
                 * while the master is powering on.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                wake_up_all(&genpd->status_wait_queue);
                if (ret) {
                        genpd_sd_counter_dec(link->master);
                        goto err;
                }
        }

        if (genpd->power_on) {
                ktime_t time_start = ktime_get();
                s64 elapsed_ns;

                ret = genpd->power_on(genpd);
                if (ret)
                        goto err;

                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_on_latency_ns) {
                        genpd->power_on_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        genpd_recalc_cpu_exit_latency(genpd);
                        if (genpd->name)
                                pr_warning("%s: Power-on latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

 out:
        genpd_set_active(genpd);

        return 0;

 err:
        list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
                genpd_sd_counter_dec(link->master);

        return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
        int ret;

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        mutex_unlock(&genpd->lock);
        return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
        struct generic_pm_domain *genpd;

        genpd = pm_genpd_lookup_name(domain_name);
        return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
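
/*
 * Illustrative call site (hypothetical, not from this file): platform setup
 * code can force a named domain on before using devices in it, e.g.
 *
 *      if (pm_genpd_name_poweron("A4R"))
 *              pr_err("Failed to power on the A4R domain\n");
 *
 * "A4R" stands in for whatever name the platform registered its domain with.
 */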

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
                                     struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
                                        save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
                                        restore_state_latency_ns,
                                        "state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                     unsigned long val, void *ptr)
{
        struct generic_pm_domain_data *gpd_data;
        struct device *dev;

        gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

        mutex_lock(&gpd_data->lock);
        dev = gpd_data->base.dev;
        if (!dev) {
                mutex_unlock(&gpd_data->lock);
                return NOTIFY_DONE;
        }
        mutex_unlock(&gpd_data->lock);

        for (;;) {
                struct generic_pm_domain *genpd;
                struct pm_domain_data *pdd;

                spin_lock_irq(&dev->power.lock);

                pdd = dev->power.subsys_data ?
                                dev->power.subsys_data->domain_data : NULL;
                if (pdd && pdd->dev) {
                        to_gpd_data(pdd)->td.constraint_changed = true;
                        genpd = dev_to_genpd(dev);
                } else {
                        genpd = ERR_PTR(-ENODATA);
                }

                spin_unlock_irq(&dev->power.lock);

                if (!IS_ERR(genpd)) {
                        mutex_lock(&genpd->lock);
                        genpd->max_off_time_changed = true;
                        mutex_unlock(&genpd->lock);
                }

                dev = dev->parent;
                if (!dev || dev->power.ignore_children)
                        break;
        }

        return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
                                  struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int ret = 0;

        if (gpd_data->need_restore)
                return 0;

        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        ret = genpd_save_dev(genpd, dev);
        genpd_stop_dev(genpd, dev);

        mutex_lock(&genpd->lock);

        if (!ret)
                gpd_data->need_restore = true;

        return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
                                      struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        bool need_restore = gpd_data->need_restore;

        gpd_data->need_restore = false;
        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        if (need_restore)
                genpd_restore_dev(genpd, dev);

        mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is nonzero, which means that one of
 * its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
        return genpd->status == GPD_STATE_WAIT_MASTER
                || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
        queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct pm_domain_data *pdd;
        struct gpd_link *link;
        unsigned int not_suspended;
        int ret = 0;

 start:
        /*
         * Do not try to power off the domain in the following situations:
         * (1) The domain is already in the "power off" state.
         * (2) The domain is waiting for its master to power up.
         * (3) One of the domain's devices is being resumed right now.
         * (4) System suspend is in progress.
         */
        if (genpd->status == GPD_STATE_POWER_OFF
            || genpd->status == GPD_STATE_WAIT_MASTER
            || genpd->resume_count > 0 || genpd->prepared_count > 0)
                return 0;

        if (atomic_read(&genpd->sd_count) > 0)
                return -EBUSY;

        not_suspended = 0;
        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
                enum pm_qos_flags_status stat;

                stat = dev_pm_qos_flags(pdd->dev,
                                        PM_QOS_FLAG_NO_POWER_OFF
                                                | PM_QOS_FLAG_REMOTE_WAKEUP);
                if (stat > PM_QOS_FLAGS_NONE)
                        return -EBUSY;

                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
                    || pdd->dev->power.irq_safe))
                        not_suspended++;
        }

        if (not_suspended > genpd->in_progress)
                return -EBUSY;

        if (genpd->poweroff_task) {
                /*
                 * Another instance of pm_genpd_poweroff() is executing
                 * callbacks, so tell it to start over and return.
                 */
                genpd->status = GPD_STATE_REPEAT;
                return 0;
        }

        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        genpd->status = GPD_STATE_BUSY;
        genpd->poweroff_task = current;

        list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
                ret = atomic_read(&genpd->sd_count) == 0 ?
                        __pm_genpd_save_device(pdd, genpd) : -EBUSY;

                if (genpd_abort_poweroff(genpd))
                        goto out;

                if (ret) {
                        genpd_set_active(genpd);
                        goto out;
                }

                if (genpd->status == GPD_STATE_REPEAT) {
                        genpd->poweroff_task = NULL;
                        goto start;
                }
        }

        if (genpd->cpu_data) {
                /*
                 * If cpu_data is set, cpuidle should turn the domain off when
                 * the CPU in it is idle.  In that case we don't decrement the
                 * subdomain counts of the master domains, so that power is not
                 * removed from the current domain prematurely as a result of
                 * cutting off the masters' power.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                cpuidle_pause_and_lock();
                genpd->cpu_data->idle_state->disabled = false;
                cpuidle_resume_and_unlock();
                goto out;
        }

        if (genpd->power_off) {
                ktime_t time_start;
                s64 elapsed_ns;

                if (atomic_read(&genpd->sd_count) > 0) {
                        ret = -EBUSY;
                        goto out;
                }

                time_start = ktime_get();

                /*
                 * If sd_count > 0 at this point, one of the subdomains hasn't
                 * managed to call pm_genpd_poweron() for the master yet after
                 * incrementing it.  In that case pm_genpd_poweron() will wait
                 * for us to drop the lock, so we can call .power_off() and let
                 * pm_genpd_poweron() restore power for us (this shouldn't
                 * happen very often).
                 */
                ret = genpd->power_off(genpd);
                if (ret == -EBUSY) {
                        genpd_set_active(genpd);
                        goto out;
                }

                elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
                if (elapsed_ns > genpd->power_off_latency_ns) {
                        genpd->power_off_latency_ns = elapsed_ns;
                        genpd->max_off_time_changed = true;
                        if (genpd->name)
                                pr_warning("%s: Power-off latency exceeded, "
                                        "new value %lld ns\n", genpd->name,
                                        elapsed_ns);
                }
        }

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                genpd_queue_power_off_work(link->master);
        }

 out:
        genpd->poweroff_task = NULL;
        wake_up_all(&genpd->status_wait_queue);
        return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
        struct generic_pm_domain *genpd;

        genpd = container_of(work, struct generic_pm_domain, power_off_work);

        genpd_acquire_lock(genpd);
        pm_genpd_poweroff(genpd);
        genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool (*stop_ok)(struct device *__dev);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        might_sleep_if(!genpd->dev_irq_safe);

        stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
        if (stop_ok && !stop_ok(dev))
                return -EBUSY;

        ret = genpd_stop_dev(genpd, dev);
        if (ret)
                return ret;

        /*
         * If power.irq_safe is set, this routine will be run with interrupts
         * off, so it can't use mutexes.
         */
        if (dev->power.irq_safe)
                return 0;

        mutex_lock(&genpd->lock);
        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
        mutex_unlock(&genpd->lock);

        return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        DEFINE_WAIT(wait);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        might_sleep_if(!genpd->dev_irq_safe);

        /* If power.irq_safe, the PM domain is never powered off. */
        if (dev->power.irq_safe)
                return genpd_start_dev_no_timing(genpd, dev);

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        if (ret) {
                mutex_unlock(&genpd->lock);
                return ret;
        }
        genpd->status = GPD_STATE_BUSY;
        genpd->resume_count++;
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                /*
                 * If current is the powering off task, we have been called
                 * reentrantly from one of the device callbacks, so we should
                 * not wait.
                 */
                if (!genpd->poweroff_task || genpd->poweroff_task == current)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
        __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
        genpd->resume_count--;
        genpd_set_active(genpd);
        wake_up_all(&genpd->status_wait_queue);
        mutex_unlock(&genpd->lock);

        return 0;
}

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
        struct generic_pm_domain *genpd;

        mutex_lock(&gpd_list_lock);

        list_for_each_entry(genpd, &gpd_list, gpd_list_node)
                genpd_queue_power_off_work(genpd);

        mutex_unlock(&gpd_list_lock);
}
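
/*
 * Typical call site (a sketch under the assumption of a platform late
 * initcall; my_pm_late_init() is a hypothetical name):
 *
 *      static int __init my_pm_late_init(void)
 *      {
 *              pm_genpd_poweroff_unused();  // queue power-off for idle domains
 *              return 0;
 *      }
 *      late_initcall(my_pm_late_init);
 */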

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                            unsigned long val, void *ptr)
{
        return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend        NULL
#define pm_genpd_runtime_resume         NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
        struct generic_pm_domain *gpd;

        if (IS_ERR_OR_NULL(genpd))
                return false;

        list_for_each_entry(gpd, &gpd_list, gpd_list_node)
                if (gpd == genpd)
                        return true;

        return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                    struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status == GPD_STATE_POWER_OFF)
                return;

        if (genpd->suspended_count != genpd->device_count
            || atomic_read(&genpd->sd_count) > 0)
                return;

        if (genpd->power_off)
                genpd->power_off(genpd);

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                pm_genpd_sync_poweroff(link->master);
        }
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status != GPD_STATE_POWER_OFF)
                return;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                pm_genpd_sync_poweron(link->master);
                genpd_sd_counter_inc(link->master);
        }

        if (genpd->power_on)
                genpd->power_on(genpd);

        genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
        bool active_wakeup;

        if (!device_can_wakeup(dev))
                return false;

        active_wakeup = genpd_dev_active_wakeup(genpd, dev);
        return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * If a wakeup request is pending for the device, it should be woken up
         * at this point and a system wakeup event should be reported if it's
         * set up to wake up the system from sleep states.
         */
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put(dev);
                return -EBUSY;
        }

        if (resume_needed(dev, genpd))
                pm_runtime_resume(dev);

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count++ == 0) {
                genpd->suspended_count = 0;
                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
        }

        genpd_release_lock(genpd);

        if (genpd->suspend_power_off) {
                pm_runtime_put_noidle(dev);
                return 0;
        }

        /*
         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
         * so pm_genpd_poweron() will return immediately, but if the device
         * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
         * to make it operational.
         */
        pm_runtime_resume(dev);
        __pm_runtime_disable(dev, false);

        ret = pm_generic_prepare(dev);
        if (ret) {
                mutex_lock(&genpd->lock);

                if (--genpd->prepared_count == 0)
                        genpd->suspend_power_off = false;

                mutex_unlock(&genpd->lock);
                pm_runtime_enable(dev);
        }

        pm_runtime_put(dev);
        return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        genpd_stop_dev(genpd, dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        pm_genpd_sync_poweron(genpd);
        genpd->suspended_count--;

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Complete the freezing of a device under the assumption that its pm_domain
 * field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         *
         * At this point suspended_count == 0 means we are being run for the
         * first time for the given domain in the present cycle.
         */
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might have put the domain into an arbitrary
                 * state, so make it appear as powered off to
                 * pm_genpd_sync_poweron(), which will then try to power it on
                 * in case it was really off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                if (genpd->suspend_power_off) {
                        /*
                         * If the domain was off before the hibernation, make
                         * sure it will be off going forward.
                         */
                        if (genpd->power_off)
                                genpd->power_off(genpd);

                        return 0;
                }
        }

        if (genpd->suspend_power_off)
                return 0;

        pm_genpd_sync_poweron(genpd);

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool run_complete;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        mutex_lock(&genpd->lock);

        run_complete = !genpd->suspend_power_off;
        if (--genpd->prepared_count == 0)
                genpd->suspend_power_off = false;

        mutex_unlock(&genpd->lock);

        if (run_complete) {
                pm_generic_complete(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
                pm_request_idle(dev);
        }
}

/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to remove power from (true) or restore it to (false) the
 *   device's PM domain.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
        struct generic_pm_domain *genpd;

        genpd = dev_to_genpd(dev);
        if (!pm_genpd_present(genpd))
                return;

        if (suspend) {
                genpd->suspended_count++;
                pm_genpd_sync_poweroff(genpd);
        } else {
                pm_genpd_sync_poweron(genpd);
                genpd->suspended_count--;
        }
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
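
/*
 * Usage sketch (hypothetical): a clock-event device that must keep ticking
 * across the "noirq" phases can switch its "always on" domain from syscore
 * operations; my_timer_dev below is an illustrative device pointer.
 *
 *      static int my_timer_syscore_suspend(void)
 *      {
 *              pm_genpd_syscore_switch(my_timer_dev, true);   // remove power
 *              return 0;
 *      }
 *
 *      static void my_timer_syscore_resume(void)
 *      {
 *              pm_genpd_syscore_switch(my_timer_dev, false);  // restore power
 *      }
 */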

#else

#define pm_genpd_prepare                NULL
#define pm_genpd_suspend                NULL
#define pm_genpd_suspend_late           NULL
#define pm_genpd_suspend_noirq          NULL
#define pm_genpd_resume_early           NULL
#define pm_genpd_resume_noirq           NULL
#define pm_genpd_resume                 NULL
#define pm_genpd_freeze                 NULL
#define pm_genpd_freeze_late            NULL
#define pm_genpd_freeze_noirq           NULL
#define pm_genpd_thaw_early             NULL
#define pm_genpd_thaw_noirq             NULL
#define pm_genpd_thaw                   NULL
#define pm_genpd_restore_noirq          NULL
#define pm_genpd_complete               NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;

        gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
        if (!gpd_data)
                return NULL;

        mutex_init(&gpd_data->lock);
        gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
        dev_pm_qos_add_notifier(dev, &gpd_data->nb);
        return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
                                     struct generic_pm_domain_data *gpd_data)
{
        dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
        kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
                          struct gpd_timing_data *td)
{
        struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
        struct pm_domain_data *pdd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        gpd_data_new = __pm_genpd_alloc_dev_data(dev);
        if (!gpd_data_new)
                return -ENOMEM;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        list_for_each_entry(pdd, &genpd->dev_list, list_node)
                if (pdd->dev == dev) {
                        ret = -EINVAL;
                        goto out;
                }

        ret = dev_pm_get_subsys_data(dev);
        if (ret)
                goto out;

        genpd->device_count++;
        genpd->max_off_time_changed = true;

        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = &genpd->domain;
        if (dev->power.subsys_data->domain_data) {
                gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        } else {
                gpd_data = gpd_data_new;
                dev->power.subsys_data->domain_data = &gpd_data->base;
        }
        gpd_data->refcount++;
        if (td)
                gpd_data->td = *td;

        spin_unlock_irq(&dev->power.lock);

        mutex_lock(&gpd_data->lock);
        gpd_data->base.dev = dev;
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
        gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = -1;
        mutex_unlock(&gpd_data->lock);

 out:
        genpd_release_lock(genpd);

        if (gpd_data != gpd_data_new)
                __pm_genpd_free_dev_data(dev, gpd_data_new);

        return ret;
}
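
/*
 * Registration sketch (hypothetical device and latencies): a platform that
 * has measured its stop/start overheads can pass them in as timing data,
 * e.g. from a probe or init path:
 *
 *      struct gpd_timing_data td = {
 *              .stop_latency_ns  = 250,
 *              .start_latency_ns = 250,
 *      };
 *
 *      ret = __pm_genpd_add_device(&my_domain, &pdev->dev, &td);
 *
 * Passing td = NULL keeps the zero-initialized timing data and lets the
 * timed callbacks above learn the latencies at run time.
 */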

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node representing the PM domain to which the
 *   device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
                             struct gpd_timing_data *td)
{
        struct generic_pm_domain *genpd = NULL, *gpd;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (gpd->of_node == genpd_node) {
                        genpd = gpd;
                        break;
                }
        }
        mutex_unlock(&gpd_list_lock);

        if (!genpd)
                return -EINVAL;

        return __pm_genpd_add_device(genpd, dev, td);
}

/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
                               struct gpd_timing_data *td)
{
        return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                           struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;
        struct pm_domain_data *pdd;
        bool remove = false;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
            ||  IS_ERR_OR_NULL(dev->pm_domain)
            ||  pd_to_genpd(dev->pm_domain) != genpd)
                return -EINVAL;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        genpd->device_count--;
        genpd->max_off_time_changed = true;

        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = NULL;
        pdd = dev->power.subsys_data->domain_data;
        list_del_init(&pdd->list_node);
        gpd_data = to_gpd_data(pdd);
        if (--gpd_data->refcount == 0) {
                dev->power.subsys_data->domain_data = NULL;
                remove = true;
        }

        spin_unlock_irq(&dev->power.lock);

        mutex_lock(&gpd_data->lock);
        pdd->dev = NULL;
        mutex_unlock(&gpd_data->lock);

        genpd_release_lock(genpd);

        dev_pm_put_subsys_data(dev);
        if (remove)
                __pm_genpd_free_dev_data(dev, gpd_data);

        return 0;

 out:
        genpd_release_lock(genpd);

        return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
        struct pm_subsys_data *psd;
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        psd = dev_to_psd(dev);
        if (psd && psd->domain_data)
                to_gpd_data(psd->domain_data)->need_restore = val;

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
1598
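/*
 * Usage sketch (editor's addition): a driver that has just reset or
 * reprogrammed its hardware can tell genpd's runtime resume path whether the
 * device context must be restored on the next power-up of the domain:
 *
 *	pm_genpd_dev_need_restore(dev, true);
 *
 * Passing false marks the context as already valid, so the domain's
 * restore_state handling can be skipped.
 */
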
/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                           struct generic_pm_domain *subdomain)
{
        struct gpd_link *link;
        int ret = 0;

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
            || genpd == subdomain)
                return -EINVAL;

 start:
        genpd_acquire_lock(genpd);
        mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

        if (subdomain->status != GPD_STATE_POWER_OFF
            && subdomain->status != GPD_STATE_ACTIVE) {
                mutex_unlock(&subdomain->lock);
                genpd_release_lock(genpd);
                goto start;
        }

        if (genpd->status == GPD_STATE_POWER_OFF
            &&  subdomain->status != GPD_STATE_POWER_OFF) {
                ret = -EINVAL;
                goto out;
        }

        list_for_each_entry(link, &genpd->master_links, master_node) {
                if (link->slave == subdomain && link->master == genpd) {
                        ret = -EINVAL;
                        goto out;
                }
        }

        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link) {
                ret = -ENOMEM;
                goto out;
        }
        link->master = genpd;
        list_add_tail(&link->master_node, &genpd->master_links);
        link->slave = subdomain;
        list_add_tail(&link->slave_node, &subdomain->slave_links);
        if (subdomain->status != GPD_STATE_POWER_OFF)
                genpd_sd_counter_inc(genpd);

 out:
        mutex_unlock(&subdomain->lock);
        genpd_release_lock(genpd);

        return ret;
}

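/*
 * Usage sketch (editor's addition; soc_pd and gfx_pd are hypothetical domain
 * objects): platform setup code nests one domain inside another so that the
 * master stays powered while the subdomain is in use:
 *
 *	ret = pm_genpd_add_subdomain(&soc_pd, &gfx_pd);
 *
 * The call fails with -EINVAL if the link already exists or if the master is
 * off while the subdomain is still powered, and with -ENOMEM if the link
 * object cannot be allocated.
 */
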
/**
 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
 * @master_name: Name of the master PM domain to add the subdomain to.
 * @subdomain_name: Name of the subdomain to be added.
 */
int pm_genpd_add_subdomain_names(const char *master_name,
                                 const char *subdomain_name)
{
        struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;

        if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
                return -EINVAL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (!master && !strcmp(gpd->name, master_name))
                        master = gpd;

                if (!subdomain && !strcmp(gpd->name, subdomain_name))
                        subdomain = gpd;

                if (master && subdomain)
                        break;
        }
        mutex_unlock(&gpd_list_lock);

        return pm_genpd_add_subdomain(master, subdomain);
}

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                              struct generic_pm_domain *subdomain)
{
        struct gpd_link *link;
        int ret = -EINVAL;

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
                return -EINVAL;

 start:
        genpd_acquire_lock(genpd);

        list_for_each_entry(link, &genpd->master_links, master_node) {
                if (link->slave != subdomain)
                        continue;

                mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

                if (subdomain->status != GPD_STATE_POWER_OFF
                    && subdomain->status != GPD_STATE_ACTIVE) {
                        mutex_unlock(&subdomain->lock);
                        genpd_release_lock(genpd);
                        goto start;
                }

                list_del(&link->master_node);
                list_del(&link->slave_node);
                kfree(link);
                if (subdomain->status != GPD_STATE_POWER_OFF)
                        genpd_sd_counter_dec(genpd);

                mutex_unlock(&subdomain->lock);

                ret = 0;
                break;
        }

        genpd_release_lock(genpd);

        return ret;
}

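/*
 * Usage sketch (editor's addition, reusing the hypothetical domains from the
 * sketch above): the link is torn down symmetrically:
 *
 *	ret = pm_genpd_remove_subdomain(&soc_pd, &gfx_pd);
 *
 * -EINVAL is returned when no master/subdomain link between the two domains
 * exists.
 */
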
/**
 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
 * @dev: Device to add the callbacks to.
 * @ops: Set of callbacks to add.
 * @td: Timing data to add to the device along with the callbacks (optional).
 *
 * Every call to this routine should be balanced with a call to
 * __pm_genpd_remove_callbacks() and they must not be nested.
 */
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
                           struct gpd_timing_data *td)
{
        struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
        int ret = 0;

        if (!(dev && ops))
                return -EINVAL;

        gpd_data_new = __pm_genpd_alloc_dev_data(dev);
        if (!gpd_data_new)
                return -ENOMEM;

        pm_runtime_disable(dev);
        device_pm_lock();

        ret = dev_pm_get_subsys_data(dev);
        if (ret)
                goto out;

        spin_lock_irq(&dev->power.lock);

        if (dev->power.subsys_data->domain_data) {
                gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        } else {
                gpd_data = gpd_data_new;
                dev->power.subsys_data->domain_data = &gpd_data->base;
        }
        gpd_data->refcount++;
        gpd_data->ops = *ops;
        if (td)
                gpd_data->td = *td;

        spin_unlock_irq(&dev->power.lock);

 out:
        device_pm_unlock();
        pm_runtime_enable(dev);

        if (gpd_data != gpd_data_new)
                __pm_genpd_free_dev_data(dev, gpd_data_new);

        return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);

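/*
 * Usage sketch (editor's addition; my_save_state() and my_restore_state() are
 * hypothetical driver callbacks): a driver can override the domain's default
 * save/restore handling for its own device:
 *
 *	static struct gpd_dev_ops my_gpd_ops = {
 *		.save_state = my_save_state,
 *		.restore_state = my_restore_state,
 *	};
 *
 *	ret = pm_genpd_add_callbacks(dev, &my_gpd_ops, NULL);
 *
 * Passing NULL for @td keeps the device's existing timing data.
 */
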
/**
 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
 * @dev: Device to remove the callbacks from.
 * @clear_td: If set, clear the device's timing data too.
 *
 * This routine can only be called after pm_genpd_add_callbacks().
 */
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
        struct generic_pm_domain_data *gpd_data = NULL;
        bool remove = false;
        int ret = 0;

        if (!(dev && dev->power.subsys_data))
                return -EINVAL;

        pm_runtime_disable(dev);
        device_pm_lock();

        spin_lock_irq(&dev->power.lock);

        if (dev->power.subsys_data->domain_data) {
                gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
                gpd_data->ops = (struct gpd_dev_ops){ NULL };
                if (clear_td)
                        gpd_data->td = (struct gpd_timing_data){ 0 };

                if (--gpd_data->refcount == 0) {
                        dev->power.subsys_data->domain_data = NULL;
                        remove = true;
                }
        } else {
                ret = -EINVAL;
        }

        spin_unlock_irq(&dev->power.lock);

        device_pm_unlock();
        pm_runtime_enable(dev);

        if (ret)
                return ret;

        dev_pm_put_subsys_data(dev);
        if (remove)
                __pm_genpd_free_dev_data(dev, gpd_data);

        return 0;
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);

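/*
 * Usage sketch (editor's addition): balancing a prior pm_genpd_add_callbacks()
 * call; passing true for @clear_td resets the timing data as well:
 *
 *	ret = __pm_genpd_remove_callbacks(dev, true);
 */
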
/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
        struct cpuidle_driver *cpuidle_drv;
        struct gpd_cpu_data *cpu_data;
        struct cpuidle_state *idle_state;
        int ret = 0;

        if (IS_ERR_OR_NULL(genpd) || state < 0)
                return -EINVAL;

        genpd_acquire_lock(genpd);

        if (genpd->cpu_data) {
                ret = -EEXIST;
                goto out;
        }
        cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
        if (!cpu_data) {
                ret = -ENOMEM;
                goto out;
        }
        cpuidle_drv = cpuidle_driver_ref();
        if (!cpuidle_drv) {
                ret = -ENODEV;
                goto err_drv;
        }
        if (cpuidle_drv->state_count <= state) {
                ret = -EINVAL;
                goto err;
        }
        idle_state = &cpuidle_drv->states[state];
        if (!idle_state->disabled) {
                ret = -EAGAIN;
                goto err;
        }
        cpu_data->idle_state = idle_state;
        cpu_data->saved_exit_latency = idle_state->exit_latency;
        genpd->cpu_data = cpu_data;
        genpd_recalc_cpu_exit_latency(genpd);

 out:
        genpd_release_lock(genpd);
        return ret;

 err:
        cpuidle_driver_unref();

 err_drv:
        kfree(cpu_data);
        goto out;
}

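/*
 * Usage sketch (editor's addition; cpu_pd is a hypothetical CPU domain):
 * platform init code can delegate powering the domain off to cpuidle state 1.
 * The state must currently be disabled in the cpuidle driver, otherwise
 * -EAGAIN is returned:
 *
 *	ret = pm_genpd_attach_cpuidle(&cpu_pd, 1);
 *
 * Afterwards, instead of calling the domain's power_off() routine, genpd
 * enables the idle state and lets cpuidle power the domain down.
 */
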
/**
 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
 * @name: Name of the domain to connect to cpuidle.
 * @state: cpuidle state this domain can manipulate.
 */
int pm_genpd_name_attach_cpuidle(const char *name, int state)
{
        return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
}

/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
        struct gpd_cpu_data *cpu_data;
        struct cpuidle_state *idle_state;
        int ret = 0;

        if (IS_ERR_OR_NULL(genpd))
                return -EINVAL;

        genpd_acquire_lock(genpd);

        cpu_data = genpd->cpu_data;
        if (!cpu_data) {
                ret = -ENODEV;
                goto out;
        }
        idle_state = cpu_data->idle_state;
        if (!idle_state->disabled) {
                ret = -EAGAIN;
                goto out;
        }
        idle_state->exit_latency = cpu_data->saved_exit_latency;
        cpuidle_driver_unref();
        genpd->cpu_data = NULL;
        kfree(cpu_data);

 out:
        genpd_release_lock(genpd);
        return ret;
}

/**
 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
 * @name: Name of the domain to disconnect cpuidle from.
 */
int pm_genpd_name_detach_cpuidle(const char *name)
{
        return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
}

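/*
 * Usage sketch (editor's addition): severing the connection again, by domain
 * pointer or by name ("A4S" is an arbitrary example):
 *
 *	ret = pm_genpd_detach_cpuidle(&cpu_pd);
 *	ret = pm_genpd_name_detach_cpuidle("A4S");
 *
 * The idle state's saved exit latency is restored before the reference to
 * the cpuidle driver is dropped.
 */
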
/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
        int (*cb)(struct device *__dev);

        cb = dev_gpd_data(dev)->ops.save_state;
        if (cb)
                return cb(dev);

        if (dev->type && dev->type->pm)
                cb = dev->type->pm->runtime_suspend;
        else if (dev->class && dev->class->pm)
                cb = dev->class->pm->runtime_suspend;
        else if (dev->bus && dev->bus->pm)
                cb = dev->bus->pm->runtime_suspend;
        else
                cb = NULL;

        if (!cb && dev->driver && dev->driver->pm)
                cb = dev->driver->pm->runtime_suspend;

        return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
        int (*cb)(struct device *__dev);

        cb = dev_gpd_data(dev)->ops.restore_state;
        if (cb)
                return cb(dev);

        if (dev->type && dev->type->pm)
                cb = dev->type->pm->runtime_resume;
        else if (dev->class && dev->class->pm)
                cb = dev->class->pm->runtime_resume;
        else if (dev->bus && dev->bus->pm)
                cb = dev->bus->pm->runtime_resume;
        else
                cb = NULL;

        if (!cb && dev->driver && dev->driver->pm)
                cb = dev->driver->pm->runtime_resume;

        return cb ? cb(dev) : 0;
}

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend(struct device *dev)
{
        int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;

        return cb ? cb(dev) : pm_generic_suspend(dev);
}

/**
 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend_late(struct device *dev)
{
        int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;

        return cb ? cb(dev) : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume_early(struct device *dev)
{
        int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;

        return cb ? cb(dev) : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_default_resume - Default "device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume(struct device *dev)
{
        int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;

        return cb ? cb(dev) : pm_generic_resume(dev);
}

/**
 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze(struct device *dev)
{
        int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;

        return cb ? cb(dev) : pm_generic_freeze(dev);
}

/**
 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze_late(struct device *dev)
{
        int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;

        return cb ? cb(dev) : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw_early(struct device *dev)
{
        int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;

        return cb ? cb(dev) : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw(struct device *dev)
{
        int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;

        return cb ? cb(dev) : pm_generic_thaw(dev);
}

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_default_suspend        NULL
#define pm_genpd_default_suspend_late   NULL
#define pm_genpd_default_resume_early   NULL
#define pm_genpd_default_resume         NULL
#define pm_genpd_default_freeze         NULL
#define pm_genpd_default_freeze_late    NULL
#define pm_genpd_default_thaw_early     NULL
#define pm_genpd_default_thaw           NULL

#endif /* !CONFIG_PM_SLEEP */

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain: true if it starts powered off,
 *   false if it starts active.
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
                   struct dev_power_governor *gov, bool is_off)
{
        if (IS_ERR_OR_NULL(genpd))
                return;

        INIT_LIST_HEAD(&genpd->master_links);
        INIT_LIST_HEAD(&genpd->slave_links);
        INIT_LIST_HEAD(&genpd->dev_list);
        mutex_init(&genpd->lock);
        genpd->gov = gov;
        INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
        genpd->in_progress = 0;
        atomic_set(&genpd->sd_count, 0);
        genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
        init_waitqueue_head(&genpd->status_wait_queue);
        genpd->poweroff_task = NULL;
        genpd->resume_count = 0;
        genpd->device_count = 0;
        genpd->max_off_time_ns = -1;
        genpd->max_off_time_changed = true;
        genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
        genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
        genpd->domain.ops.prepare = pm_genpd_prepare;
        genpd->domain.ops.suspend = pm_genpd_suspend;
        genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
        genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
        genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
        genpd->domain.ops.resume_early = pm_genpd_resume_early;
        genpd->domain.ops.resume = pm_genpd_resume;
        genpd->domain.ops.freeze = pm_genpd_freeze;
        genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
        genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
        genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
        genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
        genpd->domain.ops.thaw = pm_genpd_thaw;
        genpd->domain.ops.poweroff = pm_genpd_suspend;
        genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
        genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
        genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
        genpd->domain.ops.restore_early = pm_genpd_resume_early;
        genpd->domain.ops.restore = pm_genpd_resume;
        genpd->domain.ops.complete = pm_genpd_complete;
        genpd->dev_ops.save_state = pm_genpd_default_save_state;
        genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
        genpd->dev_ops.suspend = pm_genpd_default_suspend;
        genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
        genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
        genpd->dev_ops.resume = pm_genpd_default_resume;
        genpd->dev_ops.freeze = pm_genpd_default_freeze;
        genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
        genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
        genpd->dev_ops.thaw = pm_genpd_default_thaw;
        mutex_lock(&gpd_list_lock);
        list_add(&genpd->gpd_list_node, &gpd_list);
        mutex_unlock(&gpd_list_lock);
}
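
/*
 * Usage sketch (editor's addition; my_pd and its power handlers are
 * hypothetical, and pm_genpd_add_device() is the no-timing-data wrapper
 * declared in linux/pm_domain.h): a platform typically defines a domain with
 * its power handlers, initializes it as powered on, and then registers its
 * devices and subdomains:
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "MY-PD",
 *		.power_off = my_pd_power_off,
 *		.power_on = my_pd_power_on,
 *	};
 *
 *	pm_genpd_init(&my_pd, NULL, false);
 *	ret = pm_genpd_add_device(&my_pd, dev);
 *
 * Passing NULL for @gov means no governor is consulted before the domain is
 * powered off.
 */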