/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
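
/*
 * Devices migrate between these lists as a sleep transition progresses:
 * dpm_prepare() moves them from dpm_list to dpm_prepared_list, dpm_suspend()
 * to dpm_suspended_list, dpm_suspend_late() to dpm_late_early_list and
 * dpm_suspend_noirq() to dpm_noirq_list; the resume path walks them back in
 * the opposite direction.
 */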

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        /* Mark it complete so the first dpm_wait() on this device won't block. */
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error, pm_message_t state, char *info)
{
        ktime_t rettime;
        s64 nsecs;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

        if (pm_print_times_enabled) {
                /* ">> 10" cheaply approximates dividing nanoseconds by 1000. */
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)nsecs >> 10);
        }

        trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
                                    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}
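
/*
 * Ordering note: __device_suspend() uses dpm_wait_for_children() so that a
 * device is suspended only after all of its children, while device_resume()
 * waits on the parent instead, so parents are resumed before their children.
 */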

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
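
/*
 * Illustration (not part of this file): a driver typically provides the
 * callbacks consulted above through a struct dev_pm_ops, for example
 *
 *      static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * where foo_suspend() and foo_resume() are hypothetical driver functions.
 * pm_op() then selects ->suspend, ->resume, ->freeze etc. from that table
 * according to the transition in progress.
 */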

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the target device while the returned callback
 * is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The target device's driver will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
        struct device           *dev;
        struct task_struct      *tsk;
        struct timer_list       timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
        struct dpm_watchdog *wd = (void *)data;

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL);
        panic("%s %s: unrecoverable failure\n",
                dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        init_timer_on_stack(timer);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        timer->function = dpm_watchdog_handler;
        timer->data = (unsigned long)wd;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/
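
/*
 * Note on callback selection in the functions below: for each phase the PM
 * core checks dev->pm_domain, dev->type, dev->class and dev->bus, in that
 * order, for a dev_pm_ops table, and falls back to dev->driver->pm only if
 * none of them supplied a callback for the phase.  The suspend path uses
 * the same precedence.
 */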

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
        cpufreq_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);

        /* Balance the __pm_runtime_disable() done in device_suspend_late(). */
        pm_runtime_enable(dev);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}
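
/*
 * Illustration (not part of this file): a driver or bus can opt a device in
 * to asynchronous suspend/resume, typically at probe time, with
 *
 *      device_enable_async_suspend(dev);
 *
 * which sets dev->power.async_suspend and makes is_async() true for that
 * device whenever the global pm_async_enabled knob is on.
 */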

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        /* Drop the runtime PM reference taken in device_prepare(). */
        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}
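
/*
 * For example, if dpm_suspend_noirq() fails part-way through hibernation's
 * freeze phase (PMSG_FREEZE), the devices already suspended are brought back
 * with resume_event(PMSG_FREEZE) == PMSG_RECOVER.
 */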

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpufreq_suspend();
        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        __pm_runtime_disable(dev, false);

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
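
/*
 * Putting the pieces together: a full system sleep transition runs
 * dpm_prepare() and dpm_suspend() (via dpm_suspend_start()), then
 * dpm_suspend_late() and dpm_suspend_noirq() (via dpm_suspend_end());
 * wakeup reverses the order with dpm_resume_start() and dpm_resume_end().
 */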

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of the caller, used for reporting.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend,
                                                "legacy class ");
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                                "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        if (error)
                pm_runtime_put(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
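
/*
 * Illustration (not part of this file): a hypothetical caller could log every
 * device on dpm_list like this:
 *
 *      static void show_one(struct device *dev, void *data)
 *      {
 *              pr_info("PM: on dpm_list: %s\n", dev_name(dev));
 *      }
 *
 *      dpm_for_each_dev(NULL, show_one);
 */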