[linux-drm-fsl-dcu.git] drivers/gpu/drm/radeon/radeon_pm.c
1 /*
2  * Permission is hereby granted, free of charge, to any person obtaining a
3  * copy of this software and associated documentation files (the "Software"),
4  * to deal in the Software without restriction, including without limitation
5  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6  * and/or sell copies of the Software, and to permit persons to whom the
7  * Software is furnished to do so, subject to the following conditions:
8  *
9  * The above copyright notice and this permission notice shall be included in
10  * all copies or substantial portions of the Software.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18  * OTHER DEALINGS IN THE SOFTWARE.
19  *
20  * Authors: Rafał Miłecki <zajec5@gmail.com>
21  *          Alex Deucher <alexdeucher@gmail.com>
22  */
23 #include <drm/drmP.h>
24 #include "radeon.h"
25 #include "avivod.h"
26 #include "atom.h"
27 #include <linux/power_supply.h>
28 #include <linux/hwmon.h>
29 #include <linux/hwmon-sysfs.h>
30
31 #define RADEON_IDLE_LOOP_MS 100
32 #define RADEON_RECLOCK_DELAY_MS 200
33 #define RADEON_WAIT_VBLANK_TIMEOUT 200
34
35 static const char *radeon_pm_state_type_name[5] = {
36         "",
37         "Powersave",
38         "Battery",
39         "Balanced",
40         "Performance",
41 };
42
43 static void radeon_dynpm_idle_work_handler(struct work_struct *work);
44 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
45 static bool radeon_pm_in_vbl(struct radeon_device *rdev);
46 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
47 static void radeon_pm_update_profile(struct radeon_device *rdev);
48 static void radeon_pm_set_clocks(struct radeon_device *rdev);
49
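/* Return the index of the instance'th power state of the given type, or the
 * default power state index if no matching state is found.
 */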
50 int radeon_pm_get_type_index(struct radeon_device *rdev,
51                              enum radeon_pm_state_type ps_type,
52                              int instance)
53 {
54         int i;
55         int found_instance = -1;
56
57         for (i = 0; i < rdev->pm.num_power_states; i++) {
58                 if (rdev->pm.power_state[i].type == ps_type) {
59                         found_instance++;
60                         if (found_instance == instance)
61                                 return i;
62                 }
63         }
64         /* return default if no match */
65         return rdev->pm.default_power_state_index;
66 }
67
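/* Called when an ACPI power-source (AC/DC) event is received: update the dpm
 * ac_power flag, or re-evaluate the automatic profile when the profile
 * method is in use.
 */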
68 void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
69 {
70         if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
71                 mutex_lock(&rdev->pm.mutex);
72                 if (power_supply_is_system_supplied() > 0)
73                         rdev->pm.dpm.ac_power = true;
74                 else
75                         rdev->pm.dpm.ac_power = false;
76                 if (rdev->family == CHIP_ARUBA) {
77                         if (rdev->asic->dpm.enable_bapm)
78                                 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
79                 }
80                 mutex_unlock(&rdev->pm.mutex);
81         } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
82                 if (rdev->pm.profile == PM_PROFILE_AUTO) {
83                         mutex_lock(&rdev->pm.mutex);
84                         radeon_pm_update_profile(rdev);
85                         radeon_pm_set_clocks(rdev);
86                         mutex_unlock(&rdev->pm.mutex);
87                 }
88         }
89 }
90
91 static void radeon_pm_update_profile(struct radeon_device *rdev)
92 {
93         switch (rdev->pm.profile) {
94         case PM_PROFILE_DEFAULT:
95                 rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
96                 break;
97         case PM_PROFILE_AUTO:
98                 if (power_supply_is_system_supplied() > 0) {
99                         if (rdev->pm.active_crtc_count > 1)
100                                 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
101                         else
102                                 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
103                 } else {
104                         if (rdev->pm.active_crtc_count > 1)
105                                 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
106                         else
107                                 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
108                 }
109                 break;
110         case PM_PROFILE_LOW:
111                 if (rdev->pm.active_crtc_count > 1)
112                         rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
113                 else
114                         rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
115                 break;
116         case PM_PROFILE_MID:
117                 if (rdev->pm.active_crtc_count > 1)
118                         rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
119                 else
120                         rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
121                 break;
122         case PM_PROFILE_HIGH:
123                 if (rdev->pm.active_crtc_count > 1)
124                         rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
125                 else
126                         rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
127                 break;
128         }
129
130         if (rdev->pm.active_crtc_count == 0) {
131                 rdev->pm.requested_power_state_index =
132                         rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
133                 rdev->pm.requested_clock_mode_index =
134                         rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
135         } else {
136                 rdev->pm.requested_power_state_index =
137                         rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
138                 rdev->pm.requested_clock_mode_index =
139                         rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
140         }
141 }
142
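/* Invalidate CPU mappings of buffer objects placed in VRAM; they are
 * re-established on the next CPU access, after reclocking has finished.
 */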
143 static void radeon_unmap_vram_bos(struct radeon_device *rdev)
144 {
145         struct radeon_bo *bo, *n;
146
147         if (list_empty(&rdev->gem.objects))
148                 return;
149
150         list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
151                 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
152                         ttm_bo_unmap_virtual(&bo->tbo);
153         }
154 }
155
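/* Wait (with a timeout) for a vblank on one of the active crtcs, so that
 * reclocking can start near a vblank.
 */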
156 static void radeon_sync_with_vblank(struct radeon_device *rdev)
157 {
158         if (rdev->pm.active_crtcs) {
159                 rdev->pm.vblank_sync = false;
160                 wait_event_timeout(
161                         rdev->irq.vblank_queue, rdev->pm.vblank_sync,
162                         msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
163         }
164 }
165
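/* Program the requested power state: if the GUI engine is idle, sync to
 * vblank and reprogram voltage, engine clock and memory clock as needed.
 */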
166 static void radeon_set_power_state(struct radeon_device *rdev)
167 {
168         u32 sclk, mclk;
169         bool misc_after = false;
170
171         if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
172             (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
173                 return;
174
175         if (radeon_gui_idle(rdev)) {
176                 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
177                         clock_info[rdev->pm.requested_clock_mode_index].sclk;
178                 if (sclk > rdev->pm.default_sclk)
179                         sclk = rdev->pm.default_sclk;
180
181                 /* Starting with BTC, there is one state that is used for both
182                  * multi-head (MH) and single-head (SH).  The difference is that we
183                  * always use the high clock index for mclk and vddci.
184                  */
185                 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
186                     (rdev->family >= CHIP_BARTS) &&
187                     rdev->pm.active_crtc_count &&
188                     ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
189                      (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
190                         mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
191                                 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
192                 else
193                         mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
194                                 clock_info[rdev->pm.requested_clock_mode_index].mclk;
195
196                 if (mclk > rdev->pm.default_mclk)
197                         mclk = rdev->pm.default_mclk;
198
199                 /* upvolt before raising clocks, downvolt after lowering clocks */
200                 if (sclk < rdev->pm.current_sclk)
201                         misc_after = true;
202
203                 radeon_sync_with_vblank(rdev);
204
205                 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
206                         if (!radeon_pm_in_vbl(rdev))
207                                 return;
208                 }
209
210                 radeon_pm_prepare(rdev);
211
212                 if (!misc_after)
213                         /* voltage, pcie lanes, etc. */
214                         radeon_pm_misc(rdev);
215
216                 /* set engine clock */
217                 if (sclk != rdev->pm.current_sclk) {
218                         radeon_pm_debug_check_in_vbl(rdev, false);
219                         radeon_set_engine_clock(rdev, sclk);
220                         radeon_pm_debug_check_in_vbl(rdev, true);
221                         rdev->pm.current_sclk = sclk;
222                         DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
223                 }
224
225                 /* set memory clock */
226                 if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
227                         radeon_pm_debug_check_in_vbl(rdev, false);
228                         radeon_set_memory_clock(rdev, mclk);
229                         radeon_pm_debug_check_in_vbl(rdev, true);
230                         rdev->pm.current_mclk = mclk;
231                         DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
232                 }
233
234                 if (misc_after)
235                         /* voltage, pcie lanes, etc. */
236                         radeon_pm_misc(rdev);
237
238                 radeon_pm_finish(rdev);
239
240                 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
241                 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
242         } else
243                 DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
244 }
245
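/* Top-level reclocking path for the non-dpm methods: drain the rings, unmap
 * VRAM buffer objects, apply the requested power state and update the
 * display watermarks.
 */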
246 static void radeon_pm_set_clocks(struct radeon_device *rdev)
247 {
248         int i, r;
249
250         /* no need to take locks, etc. if nothing's going to change */
251         if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
252             (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
253                 return;
254
255         mutex_lock(&rdev->ddev->struct_mutex);
256         down_write(&rdev->pm.mclk_lock);
257         mutex_lock(&rdev->ring_lock);
258
259         /* wait for the rings to drain */
260         for (i = 0; i < RADEON_NUM_RINGS; i++) {
261                 struct radeon_ring *ring = &rdev->ring[i];
262                 if (!ring->ready) {
263                         continue;
264                 }
265                 r = radeon_fence_wait_empty(rdev, i);
266                 if (r) {
267                         /* needs a GPU reset, don't reset it here */
268                         mutex_unlock(&rdev->ring_lock);
269                         up_write(&rdev->pm.mclk_lock);
270                         mutex_unlock(&rdev->ddev->struct_mutex);
271                         return;
272                 }
273         }
274
275         radeon_unmap_vram_bos(rdev);
276
277         if (rdev->irq.installed) {
278                 for (i = 0; i < rdev->num_crtc; i++) {
279                         if (rdev->pm.active_crtcs & (1 << i)) {
280                                 rdev->pm.req_vblank |= (1 << i);
281                                 drm_vblank_get(rdev->ddev, i);
282                         }
283                 }
284         }
285
286         radeon_set_power_state(rdev);
287
288         if (rdev->irq.installed) {
289                 for (i = 0; i < rdev->num_crtc; i++) {
290                         if (rdev->pm.req_vblank & (1 << i)) {
291                                 rdev->pm.req_vblank &= ~(1 << i);
292                                 drm_vblank_put(rdev->ddev, i);
293                         }
294                 }
295         }
296
297         /* update display watermarks based on new power state */
298         radeon_update_bandwidth_info(rdev);
299         if (rdev->pm.active_crtc_count)
300                 radeon_bandwidth_update(rdev);
301
302         rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
303
304         mutex_unlock(&rdev->ring_lock);
305         up_write(&rdev->pm.mclk_lock);
306         mutex_unlock(&rdev->ddev->struct_mutex);
307 }
308
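/* Dump the parsed power states and their clock modes to the kernel debug log. */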
309 static void radeon_pm_print_states(struct radeon_device *rdev)
310 {
311         int i, j;
312         struct radeon_power_state *power_state;
313         struct radeon_pm_clock_info *clock_info;
314
315         DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
316         for (i = 0; i < rdev->pm.num_power_states; i++) {
317                 power_state = &rdev->pm.power_state[i];
318                 DRM_DEBUG_DRIVER("State %d: %s\n", i,
319                         radeon_pm_state_type_name[power_state->type]);
320                 if (i == rdev->pm.default_power_state_index)
321                         DRM_DEBUG_DRIVER("\tDefault");
322                 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
323                         DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
324                 if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
325                         DRM_DEBUG_DRIVER("\tSingle display only\n");
326                 DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
327                 for (j = 0; j < power_state->num_clock_modes; j++) {
328                         clock_info = &(power_state->clock_info[j]);
329                         if (rdev->flags & RADEON_IS_IGP)
330                                 DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
331                                                  j,
332                                                  clock_info->sclk * 10);
333                         else
334                                 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
335                                                  j,
336                                                  clock_info->sclk * 10,
337                                                  clock_info->mclk * 10,
338                                                  clock_info->voltage.voltage);
339                 }
340         }
341 }
342
343 static ssize_t radeon_get_pm_profile(struct device *dev,
344                                      struct device_attribute *attr,
345                                      char *buf)
346 {
347         struct drm_device *ddev = dev_get_drvdata(dev);
348         struct radeon_device *rdev = ddev->dev_private;
349         int cp = rdev->pm.profile;
350
351         return snprintf(buf, PAGE_SIZE, "%s\n",
352                         (cp == PM_PROFILE_AUTO) ? "auto" :
353                         (cp == PM_PROFILE_LOW) ? "low" :
354                         (cp == PM_PROFILE_MID) ? "mid" :
355                         (cp == PM_PROFILE_HIGH) ? "high" : "default");
356 }
357
358 static ssize_t radeon_set_pm_profile(struct device *dev,
359                                      struct device_attribute *attr,
360                                      const char *buf,
361                                      size_t count)
362 {
363         struct drm_device *ddev = dev_get_drvdata(dev);
364         struct radeon_device *rdev = ddev->dev_private;
365
366         /* Can't set profile when the card is off */
367         if  ((rdev->flags & RADEON_IS_PX) &&
368              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
369                 return -EINVAL;
370
371         mutex_lock(&rdev->pm.mutex);
372         if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
373                 if (strncmp("default", buf, strlen("default")) == 0)
374                         rdev->pm.profile = PM_PROFILE_DEFAULT;
375                 else if (strncmp("auto", buf, strlen("auto")) == 0)
376                         rdev->pm.profile = PM_PROFILE_AUTO;
377                 else if (strncmp("low", buf, strlen("low")) == 0)
378                         rdev->pm.profile = PM_PROFILE_LOW;
379                 else if (strncmp("mid", buf, strlen("mid")) == 0)
380                         rdev->pm.profile = PM_PROFILE_MID;
381                 else if (strncmp("high", buf, strlen("high")) == 0)
382                         rdev->pm.profile = PM_PROFILE_HIGH;
383                 else {
384                         count = -EINVAL;
385                         goto fail;
386                 }
387                 radeon_pm_update_profile(rdev);
388                 radeon_pm_set_clocks(rdev);
389         } else
390                 count = -EINVAL;
391
392 fail:
393         mutex_unlock(&rdev->pm.mutex);
394
395         return count;
396 }
397
398 static ssize_t radeon_get_pm_method(struct device *dev,
399                                     struct device_attribute *attr,
400                                     char *buf)
401 {
402         struct drm_device *ddev = dev_get_drvdata(dev);
403         struct radeon_device *rdev = ddev->dev_private;
404         int pm = rdev->pm.pm_method;
405
406         return snprintf(buf, PAGE_SIZE, "%s\n",
407                         (pm == PM_METHOD_DYNPM) ? "dynpm" :
408                         (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
409 }
410
411 static ssize_t radeon_set_pm_method(struct device *dev,
412                                     struct device_attribute *attr,
413                                     const char *buf,
414                                     size_t count)
415 {
416         struct drm_device *ddev = dev_get_drvdata(dev);
417         struct radeon_device *rdev = ddev->dev_private;
418
419         /* Can't set method when the card is off */
420         if  ((rdev->flags & RADEON_IS_PX) &&
421              (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
422                 count = -EINVAL;
423                 goto fail;
424         }
425
426         /* we don't support the legacy modes with dpm */
427         if (rdev->pm.pm_method == PM_METHOD_DPM) {
428                 count = -EINVAL;
429                 goto fail;
430         }
431
432         if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
433                 mutex_lock(&rdev->pm.mutex);
434                 rdev->pm.pm_method = PM_METHOD_DYNPM;
435                 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
436                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
437                 mutex_unlock(&rdev->pm.mutex);
438         } else if (strncmp("profile", buf, strlen("profile")) == 0) {
439                 mutex_lock(&rdev->pm.mutex);
440                 /* disable dynpm */
441                 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
442                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
443                 rdev->pm.pm_method = PM_METHOD_PROFILE;
444                 mutex_unlock(&rdev->pm.mutex);
445                 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
446         } else {
447                 count = -EINVAL;
448                 goto fail;
449         }
450         radeon_pm_compute_clocks(rdev);
451 fail:
452         return count;
453 }
454
455 static ssize_t radeon_get_dpm_state(struct device *dev,
456                                     struct device_attribute *attr,
457                                     char *buf)
458 {
459         struct drm_device *ddev = dev_get_drvdata(dev);
460         struct radeon_device *rdev = ddev->dev_private;
461         enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
462
463         return snprintf(buf, PAGE_SIZE, "%s\n",
464                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
465                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
466 }
467
468 static ssize_t radeon_set_dpm_state(struct device *dev,
469                                     struct device_attribute *attr,
470                                     const char *buf,
471                                     size_t count)
472 {
473         struct drm_device *ddev = dev_get_drvdata(dev);
474         struct radeon_device *rdev = ddev->dev_private;
475
476         mutex_lock(&rdev->pm.mutex);
477         if (strncmp("battery", buf, strlen("battery")) == 0)
478                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
479         else if (strncmp("balanced", buf, strlen("balanced")) == 0)
480                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
481         else if (strncmp("performance", buf, strlen("performance")) == 0)
482                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
483         else {
484                 mutex_unlock(&rdev->pm.mutex);
485                 count = -EINVAL;
486                 goto fail;
487         }
488         mutex_unlock(&rdev->pm.mutex);
489
490         /* Can't set dpm state when the card is off */
491         if (!(rdev->flags & RADEON_IS_PX) ||
492             (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
493                 radeon_pm_compute_clocks(rdev);
494
495 fail:
496         return count;
497 }
498
499 static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
500                                                        struct device_attribute *attr,
501                                                        char *buf)
502 {
503         struct drm_device *ddev = dev_get_drvdata(dev);
504         struct radeon_device *rdev = ddev->dev_private;
505         enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
506
507         if  ((rdev->flags & RADEON_IS_PX) &&
508              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
509                 return snprintf(buf, PAGE_SIZE, "off\n");
510
511         return snprintf(buf, PAGE_SIZE, "%s\n",
512                         (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
513                         (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
514 }
515
516 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
517                                                        struct device_attribute *attr,
518                                                        const char *buf,
519                                                        size_t count)
520 {
521         struct drm_device *ddev = dev_get_drvdata(dev);
522         struct radeon_device *rdev = ddev->dev_private;
523         enum radeon_dpm_forced_level level;
524         int ret = 0;
525
526         /* Can't force performance level when the card is off */
527         if  ((rdev->flags & RADEON_IS_PX) &&
528              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
529                 return -EINVAL;
530
531         mutex_lock(&rdev->pm.mutex);
532         if (strncmp("low", buf, strlen("low")) == 0) {
533                 level = RADEON_DPM_FORCED_LEVEL_LOW;
534         } else if (strncmp("high", buf, strlen("high")) == 0) {
535                 level = RADEON_DPM_FORCED_LEVEL_HIGH;
536         } else if (strncmp("auto", buf, strlen("auto")) == 0) {
537                 level = RADEON_DPM_FORCED_LEVEL_AUTO;
538         } else {
539                 count = -EINVAL;
540                 goto fail;
541         }
542         if (rdev->asic->dpm.force_performance_level) {
543                 if (rdev->pm.dpm.thermal_active) {
544                         count = -EINVAL;
545                         goto fail;
546                 }
547                 ret = radeon_dpm_force_performance_level(rdev, level);
548                 if (ret)
549                         count = -EINVAL;
550         }
551 fail:
552         mutex_unlock(&rdev->pm.mutex);
553
554         return count;
555 }
556
557 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
558 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
559 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
560 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
561                    radeon_get_dpm_forced_performance_level,
562                    radeon_set_dpm_forced_performance_level);
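/* sysfs interface exposed on the drm device.  Typical usage (card0 is only an
 * example, the index depends on the system; power_profile applies to the
 * profile method, the dpm files require the dpm method):
 *   echo profile > /sys/class/drm/card0/device/power_method
 *   echo high    > /sys/class/drm/card0/device/power_profile
 *   echo battery > /sys/class/drm/card0/device/power_dpm_state
 *   echo low     > /sys/class/drm/card0/device/power_dpm_force_performance_level
 */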
563
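/* hwmon "temp1_input" read handler: report the GPU temperature returned by
 * the ASIC-specific get_temperature() callback (0 if not implemented).
 */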
564 static ssize_t radeon_hwmon_show_temp(struct device *dev,
565                                       struct device_attribute *attr,
566                                       char *buf)
567 {
568         struct radeon_device *rdev = dev_get_drvdata(dev);
569         struct drm_device *ddev = rdev->ddev;
570         int temp;
571
572         /* Can't get temperature when the card is off */
573         if  ((rdev->flags & RADEON_IS_PX) &&
574              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
575                 return -EINVAL;
576
577         if (rdev->asic->pm.get_temperature)
578                 temp = radeon_get_temperature(rdev);
579         else
580                 temp = 0;
581
582         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
583 }
584
585 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
586                                              struct device_attribute *attr,
587                                              char *buf)
588 {
589         struct radeon_device *rdev = dev_get_drvdata(dev);
590         int hyst = to_sensor_dev_attr(attr)->index;
591         int temp;
592
593         if (hyst)
594                 temp = rdev->pm.dpm.thermal.min_temp;
595         else
596                 temp = rdev->pm.dpm.thermal.max_temp;
597
598         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
599 }
600
601 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
602 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
603 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
604
605 static struct attribute *hwmon_attributes[] = {
606         &sensor_dev_attr_temp1_input.dev_attr.attr,
607         &sensor_dev_attr_temp1_crit.dev_attr.attr,
608         &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
609         NULL
610 };
611
612 static umode_t hwmon_attributes_visible(struct kobject *kobj,
613                                         struct attribute *attr, int index)
614 {
615         struct device *dev = container_of(kobj, struct device, kobj);
616         struct radeon_device *rdev = dev_get_drvdata(dev);
617
618         /* Skip limit attributes if DPM is not enabled */
619         if (rdev->pm.pm_method != PM_METHOD_DPM &&
620             (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
621              attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
622                 return 0;
623
624         return attr->mode;
625 }
626
627 static const struct attribute_group hwmon_attrgroup = {
628         .attrs = hwmon_attributes,
629         .is_visible = hwmon_attributes_visible,
630 };
631
632 static const struct attribute_group *hwmon_groups[] = {
633         &hwmon_attrgroup,
634         NULL
635 };
636
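/* Register a "radeon" hwmon device for ASICs with an internal thermal sensor,
 * exposing temp1_input and, when dpm is enabled, the critical temperature
 * limits.
 */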
637 static int radeon_hwmon_init(struct radeon_device *rdev)
638 {
639         int err = 0;
640
641         switch (rdev->pm.int_thermal_type) {
642         case THERMAL_TYPE_RV6XX:
643         case THERMAL_TYPE_RV770:
644         case THERMAL_TYPE_EVERGREEN:
645         case THERMAL_TYPE_NI:
646         case THERMAL_TYPE_SUMO:
647         case THERMAL_TYPE_SI:
648         case THERMAL_TYPE_CI:
649         case THERMAL_TYPE_KV:
650                 if (rdev->asic->pm.get_temperature == NULL)
651                         return err;
652                 rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
653                                                                            "radeon", rdev,
654                                                                            hwmon_groups);
655                 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
656                         err = PTR_ERR(rdev->pm.int_hwmon_dev);
657                         dev_err(rdev->dev,
658                                 "Unable to register hwmon device: %d\n", err);
659                 }
660                 break;
661         default:
662                 break;
663         }
664
665         return err;
666 }
667
668 static void radeon_hwmon_fini(struct radeon_device *rdev)
669 {
670         if (rdev->pm.int_hwmon_dev)
671                 hwmon_device_unregister(rdev->pm.int_hwmon_dev);
672 }
673
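/* Worker that reacts to thermal events: switch to the internal thermal state
 * while the GPU is too hot, and back to the user state once the temperature
 * drops below the minimum threshold.
 */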
674 static void radeon_dpm_thermal_work_handler(struct work_struct *work)
675 {
676         struct radeon_device *rdev =
677                 container_of(work, struct radeon_device,
678                              pm.dpm.thermal.work);
679         /* switch to the thermal state */
680         enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
681
682         if (!rdev->pm.dpm_enabled)
683                 return;
684
685         if (rdev->asic->pm.get_temperature) {
686                 int temp = radeon_get_temperature(rdev);
687
688                 if (temp < rdev->pm.dpm.thermal.min_temp)
689                         /* switch back to the user state */
690                         dpm_state = rdev->pm.dpm.user_state;
691         } else {
692                 if (rdev->pm.dpm.thermal.high_to_low)
693                         /* switch back to the user state */
694                         dpm_state = rdev->pm.dpm.user_state;
695         }
696         mutex_lock(&rdev->pm.mutex);
697         if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
698                 rdev->pm.dpm.thermal_active = true;
699         else
700                 rdev->pm.dpm.thermal_active = false;
701         rdev->pm.dpm.state = dpm_state;
702         mutex_unlock(&rdev->pm.mutex);
703
704         radeon_pm_compute_clocks(rdev);
705 }
706
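/* Select the best matching dpm power state for the requested state type,
 * honouring single-display-only states and falling back through related
 * state types (e.g. UVD SD -> UVD HD -> performance) if no exact match
 * exists.
 */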
707 static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
708                                                      enum radeon_pm_state_type dpm_state)
709 {
710         int i;
711         struct radeon_ps *ps;
712         u32 ui_class;
713         bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2);
715
716         /* check if the vblank period is too short to adjust the mclk */
717         if (single_display && rdev->asic->dpm.vblank_too_short) {
718                 if (radeon_dpm_vblank_too_short(rdev))
719                         single_display = false;
720         }
721
722         /* certain older asics have a separate 3D performance state,
723          * so try that first if the user selected performance
724          */
725         if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
726                 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
727         /* balanced states don't exist at the moment */
728         if (dpm_state == POWER_STATE_TYPE_BALANCED)
729                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
730
731 restart_search:
732         /* Pick the best power state based on current conditions */
733         for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
734                 ps = &rdev->pm.dpm.ps[i];
735                 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
736                 switch (dpm_state) {
737                 /* user states */
738                 case POWER_STATE_TYPE_BATTERY:
739                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
740                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
741                                         if (single_display)
742                                                 return ps;
743                                 } else
744                                         return ps;
745                         }
746                         break;
747                 case POWER_STATE_TYPE_BALANCED:
748                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
749                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
750                                         if (single_display)
751                                                 return ps;
752                                 } else
753                                         return ps;
754                         }
755                         break;
756                 case POWER_STATE_TYPE_PERFORMANCE:
757                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
758                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
759                                         if (single_display)
760                                                 return ps;
761                                 } else
762                                         return ps;
763                         }
764                         break;
765                 /* internal states */
766                 case POWER_STATE_TYPE_INTERNAL_UVD:
767                         if (rdev->pm.dpm.uvd_ps)
768                                 return rdev->pm.dpm.uvd_ps;
769                         else
770                                 break;
771                 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
772                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
773                                 return ps;
774                         break;
775                 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
776                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
777                                 return ps;
778                         break;
779                 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
780                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
781                                 return ps;
782                         break;
783                 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
784                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
785                                 return ps;
786                         break;
787                 case POWER_STATE_TYPE_INTERNAL_BOOT:
788                         return rdev->pm.dpm.boot_ps;
789                 case POWER_STATE_TYPE_INTERNAL_THERMAL:
790                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
791                                 return ps;
792                         break;
793                 case POWER_STATE_TYPE_INTERNAL_ACPI:
794                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
795                                 return ps;
796                         break;
797                 case POWER_STATE_TYPE_INTERNAL_ULV:
798                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
799                                 return ps;
800                         break;
801                 case POWER_STATE_TYPE_INTERNAL_3DPERF:
802                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
803                                 return ps;
804                         break;
805                 default:
806                         break;
807                 }
808         }
809         /* use a fallback state if we didn't match */
810         switch (dpm_state) {
811         case POWER_STATE_TYPE_INTERNAL_UVD_SD:
812                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
813                 goto restart_search;
814         case POWER_STATE_TYPE_INTERNAL_UVD_HD:
815         case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
816         case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
817                 if (rdev->pm.dpm.uvd_ps) {
818                         return rdev->pm.dpm.uvd_ps;
819                 } else {
820                         dpm_state = POWER_STATE_TYPE_PERFORMANCE;
821                         goto restart_search;
822                 }
823         case POWER_STATE_TYPE_INTERNAL_THERMAL:
824                 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
825                 goto restart_search;
826         case POWER_STATE_TYPE_INTERNAL_ACPI:
827                 dpm_state = POWER_STATE_TYPE_BATTERY;
828                 goto restart_search;
829         case POWER_STATE_TYPE_BATTERY:
830         case POWER_STATE_TYPE_BALANCED:
831         case POWER_STATE_TYPE_INTERNAL_3DPERF:
832                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
833                 goto restart_search;
834         default:
835                 break;
836         }
837
838         return NULL;
839 }
840
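/* Re-evaluate and program the dpm power state; the caller is expected to
 * hold rdev->pm.mutex (hence the _locked suffix).
 */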
841 static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
842 {
843         int i;
844         struct radeon_ps *ps;
845         enum radeon_pm_state_type dpm_state;
846         int ret;
847
848         /* if dpm init failed */
849         if (!rdev->pm.dpm_enabled)
850                 return;
851
852         if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
853                 /* add other state override checks here */
854                 if ((!rdev->pm.dpm.thermal_active) &&
855                     (!rdev->pm.dpm.uvd_active))
856                         rdev->pm.dpm.state = rdev->pm.dpm.user_state;
857         }
858         dpm_state = rdev->pm.dpm.state;
859
860         ps = radeon_dpm_pick_power_state(rdev, dpm_state);
861         if (ps)
862                 rdev->pm.dpm.requested_ps = ps;
863         else
864                 return;
865
866         /* no need to reprogram if nothing changed unless we are on BTC+ */
867         if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
868                 /* vce just modifies an existing state so force a change */
869                 if (ps->vce_active != rdev->pm.dpm.vce_active)
870                         goto force;
871                 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
872                         /* for pre-BTC and APUs, if the num crtcs changed but the state is
873                          * the same, all we need to do is update the display configuration.
874                          */
875                         if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
876                                 /* update display watermarks based on new power state */
877                                 radeon_bandwidth_update(rdev);
878                                 /* update displays */
879                                 radeon_dpm_display_configuration_changed(rdev);
880                                 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
881                                 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
882                         }
883                         return;
884                 } else {
885                         /* for BTC+: if the active crtcs haven't changed and the state is
886                          * the same, there is nothing to do; if the crtcs changed but old
887                          * and new both use more than one crtc, just update the displays.
888                          */
889                         if (rdev->pm.dpm.new_active_crtcs ==
890                             rdev->pm.dpm.current_active_crtcs) {
891                                 return;
892                         } else {
893                                 if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
894                                     (rdev->pm.dpm.new_active_crtc_count > 1)) {
895                                         /* update display watermarks based on new power state */
896                                         radeon_bandwidth_update(rdev);
897                                         /* update displays */
898                                         radeon_dpm_display_configuration_changed(rdev);
899                                         rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
900                                         rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
901                                         return;
902                                 }
903                         }
904                 }
905         }
906
907 force:
908         if (radeon_dpm == 1) {
909                 printk("switching from power state:\n");
910                 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
911                 printk("switching to power state:\n");
912                 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
913         }
914
915         mutex_lock(&rdev->ddev->struct_mutex);
916         down_write(&rdev->pm.mclk_lock);
917         mutex_lock(&rdev->ring_lock);
918
919         /* update whether vce is active */
920         ps->vce_active = rdev->pm.dpm.vce_active;
921
922         ret = radeon_dpm_pre_set_power_state(rdev);
923         if (ret)
924                 goto done;
925
926         /* update display watermarks based on new power state */
927         radeon_bandwidth_update(rdev);
928         /* update displays */
929         radeon_dpm_display_configuration_changed(rdev);
930
931         rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
932         rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
933
934         /* wait for the rings to drain */
935         for (i = 0; i < RADEON_NUM_RINGS; i++) {
936                 struct radeon_ring *ring = &rdev->ring[i];
937                 if (ring->ready)
938                         radeon_fence_wait_empty(rdev, i);
939         }
940
941         /* program the new power state */
942         radeon_dpm_set_power_state(rdev);
943
944         /* update current power state */
945         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
946
947         radeon_dpm_post_set_power_state(rdev);
948
949         if (rdev->asic->dpm.force_performance_level) {
950                 if (rdev->pm.dpm.thermal_active) {
951                         enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
952                         /* force low perf level for thermal */
953                         radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
954                         /* save the user's level */
955                         rdev->pm.dpm.forced_level = level;
956                 } else {
957                         /* otherwise, user selected level */
958                         radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
959                 }
960         }
961
962 done:
963         mutex_unlock(&rdev->ring_lock);
964         up_write(&rdev->pm.mclk_lock);
965         mutex_unlock(&rdev->ddev->struct_mutex);
966 }
967
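/* Handle UVD power management: on ASICs with UVD powergating, gate/ungate the
 * block (keeping it ungated while streams are active); otherwise switch to or
 * from the internal UVD dpm state.
 */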
968 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
969 {
970         enum radeon_pm_state_type dpm_state;
971
972         if (rdev->asic->dpm.powergate_uvd) {
973                 mutex_lock(&rdev->pm.mutex);
974                 /* don't powergate anything if we
975                    have active but paused streams */
976                 enable |= rdev->pm.dpm.sd > 0;
977                 enable |= rdev->pm.dpm.hd > 0;
978                 /* enable/disable UVD */
979                 radeon_dpm_powergate_uvd(rdev, !enable);
980                 mutex_unlock(&rdev->pm.mutex);
981         } else {
982                 if (enable) {
983                         mutex_lock(&rdev->pm.mutex);
984                         rdev->pm.dpm.uvd_active = true;
985                         /* disable this for now */
986 #if 0
987                         if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
988                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
989                         else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
990                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
991                         else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
992                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
993                         else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
994                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
995                         else
996 #endif
997                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
998                         rdev->pm.dpm.state = dpm_state;
999                         mutex_unlock(&rdev->pm.mutex);
1000                 } else {
1001                         mutex_lock(&rdev->pm.mutex);
1002                         rdev->pm.dpm.uvd_active = false;
1003                         mutex_unlock(&rdev->pm.mutex);
1004                 }
1005
1006                 radeon_pm_compute_clocks(rdev);
1007         }
1008 }
1009
1010 void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
1011 {
1012         if (enable) {
1013                 mutex_lock(&rdev->pm.mutex);
1014                 rdev->pm.dpm.vce_active = true;
1015                 /* XXX select vce level based on ring/task */
1016                 rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
1017                 mutex_unlock(&rdev->pm.mutex);
1018         } else {
1019                 mutex_lock(&rdev->pm.mutex);
1020                 rdev->pm.dpm.vce_active = false;
1021                 mutex_unlock(&rdev->pm.mutex);
1022         }
1023
1024         radeon_pm_compute_clocks(rdev);
1025 }
1026
1027 static void radeon_pm_suspend_old(struct radeon_device *rdev)
1028 {
1029         mutex_lock(&rdev->pm.mutex);
1030         if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1031                 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1032                         rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1033         }
1034         mutex_unlock(&rdev->pm.mutex);
1035
1036         cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1037 }
1038
1039 static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
1040 {
1041         mutex_lock(&rdev->pm.mutex);
1042         /* disable dpm */
1043         radeon_dpm_disable(rdev);
1044         /* reset the power state */
1045         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1046         rdev->pm.dpm_enabled = false;
1047         mutex_unlock(&rdev->pm.mutex);
1048 }
1049
1050 void radeon_pm_suspend(struct radeon_device *rdev)
1051 {
1052         if (rdev->pm.pm_method == PM_METHOD_DPM)
1053                 radeon_pm_suspend_dpm(rdev);
1054         else
1055                 radeon_pm_suspend_old(rdev);
1056 }
1057
1058 static void radeon_pm_resume_old(struct radeon_device *rdev)
1059 {
1060         /* set up the default clocks if the MC ucode is loaded */
1061         if ((rdev->family >= CHIP_BARTS) &&
1062             (rdev->family <= CHIP_CAYMAN) &&
1063             rdev->mc_fw) {
1064                 if (rdev->pm.default_vddc)
1065                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1066                                                 SET_VOLTAGE_TYPE_ASIC_VDDC);
1067                 if (rdev->pm.default_vddci)
1068                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1069                                                 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1070                 if (rdev->pm.default_sclk)
1071                         radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1072                 if (rdev->pm.default_mclk)
1073                         radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1074         }
1075         /* asic init will reset the default power state */
1076         mutex_lock(&rdev->pm.mutex);
1077         rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
1078         rdev->pm.current_clock_mode_index = 0;
1079         rdev->pm.current_sclk = rdev->pm.default_sclk;
1080         rdev->pm.current_mclk = rdev->pm.default_mclk;
1081         if (rdev->pm.power_state) {
1082                 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
1083                 rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
1084         }
1085         if (rdev->pm.pm_method == PM_METHOD_DYNPM
1086             && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
1087                 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1088                 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1089                                       msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1090         }
1091         mutex_unlock(&rdev->pm.mutex);
1092         radeon_pm_compute_clocks(rdev);
1093 }
1094
1095 static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1096 {
1097         int ret;
1098
1099         /* asic init will reset to the boot state */
1100         mutex_lock(&rdev->pm.mutex);
1101         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1102         radeon_dpm_setup_asic(rdev);
1103         ret = radeon_dpm_enable(rdev);
1104         mutex_unlock(&rdev->pm.mutex);
1105         if (ret)
1106                 goto dpm_resume_fail;
1107         rdev->pm.dpm_enabled = true;
1108         return;
1109
1110 dpm_resume_fail:
1111         DRM_ERROR("radeon: dpm resume failed\n");
1112         if ((rdev->family >= CHIP_BARTS) &&
1113             (rdev->family <= CHIP_CAYMAN) &&
1114             rdev->mc_fw) {
1115                 if (rdev->pm.default_vddc)
1116                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1117                                                 SET_VOLTAGE_TYPE_ASIC_VDDC);
1118                 if (rdev->pm.default_vddci)
1119                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1120                                                 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1121                 if (rdev->pm.default_sclk)
1122                         radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1123                 if (rdev->pm.default_mclk)
1124                         radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1125         }
1126 }
1127
1128 void radeon_pm_resume(struct radeon_device *rdev)
1129 {
1130         if (rdev->pm.pm_method == PM_METHOD_DPM)
1131                 radeon_pm_resume_dpm(rdev);
1132         else
1133                 radeon_pm_resume_old(rdev);
1134 }
1135
1136 static int radeon_pm_init_old(struct radeon_device *rdev)
1137 {
1138         int ret;
1139
1140         rdev->pm.profile = PM_PROFILE_DEFAULT;
1141         rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1142         rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1143         rdev->pm.dynpm_can_upclock = true;
1144         rdev->pm.dynpm_can_downclock = true;
1145         rdev->pm.default_sclk = rdev->clock.default_sclk;
1146         rdev->pm.default_mclk = rdev->clock.default_mclk;
1147         rdev->pm.current_sclk = rdev->clock.default_sclk;
1148         rdev->pm.current_mclk = rdev->clock.default_mclk;
1149         rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1150
1151         if (rdev->bios) {
1152                 if (rdev->is_atom_bios)
1153                         radeon_atombios_get_power_modes(rdev);
1154                 else
1155                         radeon_combios_get_power_modes(rdev);
1156                 radeon_pm_print_states(rdev);
1157                 radeon_pm_init_profile(rdev);
1158                 /* set up the default clocks if the MC ucode is loaded */
1159                 if ((rdev->family >= CHIP_BARTS) &&
1160                     (rdev->family <= CHIP_CAYMAN) &&
1161                     rdev->mc_fw) {
1162                         if (rdev->pm.default_vddc)
1163                                 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1164                                                         SET_VOLTAGE_TYPE_ASIC_VDDC);
1165                         if (rdev->pm.default_vddci)
1166                                 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1167                                                         SET_VOLTAGE_TYPE_ASIC_VDDCI);
1168                         if (rdev->pm.default_sclk)
1169                                 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1170                         if (rdev->pm.default_mclk)
1171                                 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1172                 }
1173         }
1174
1175         /* set up the internal thermal sensor if applicable */
1176         ret = radeon_hwmon_init(rdev);
1177         if (ret)
1178                 return ret;
1179
1180         INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
1181
1182         if (rdev->pm.num_power_states > 1) {
1183                 /* where's the best place to put these? */
1184                 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1185                 if (ret)
1186                         DRM_ERROR("failed to create device file for power profile\n");
1187                 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1188                 if (ret)
1189                         DRM_ERROR("failed to create device file for power method\n");
1190
1191                 if (radeon_debugfs_pm_init(rdev)) {
1192                         DRM_ERROR("Failed to register debugfs file for PM!\n");
1193                 }
1194
1195                 DRM_INFO("radeon: power management initialized\n");
1196         }
1197
1198         return 0;
1199 }
1200
1201 static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1202 {
1203         int i;
1204
1205         for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1206                 printk("== power state %d ==\n", i);
1207                 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1208         }
1209 }
1210
1211 static int radeon_pm_init_dpm(struct radeon_device *rdev)
1212 {
1213         int ret;
1214
1215         /* default to balanced state */
1216         rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
1217         rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
1218         rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1219         rdev->pm.default_sclk = rdev->clock.default_sclk;
1220         rdev->pm.default_mclk = rdev->clock.default_mclk;
1221         rdev->pm.current_sclk = rdev->clock.default_sclk;
1222         rdev->pm.current_mclk = rdev->clock.default_mclk;
1223         rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1224
1225         if (rdev->bios && rdev->is_atom_bios)
1226                 radeon_atombios_get_power_modes(rdev);
1227         else
1228                 return -EINVAL;
1229
1230         /* set up the internal thermal sensor if applicable */
1231         ret = radeon_hwmon_init(rdev);
1232         if (ret)
1233                 return ret;
1234
1235         INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
1236         mutex_lock(&rdev->pm.mutex);
1237         radeon_dpm_init(rdev);
1238         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1239         if (radeon_dpm == 1)
1240                 radeon_dpm_print_power_states(rdev);
1241         radeon_dpm_setup_asic(rdev);
1242         ret = radeon_dpm_enable(rdev);
1243         mutex_unlock(&rdev->pm.mutex);
1244         if (ret)
1245                 goto dpm_failed;
1246         rdev->pm.dpm_enabled = true;
1247
1248         ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1249         if (ret)
1250                 DRM_ERROR("failed to create device file for dpm state\n");
1251         ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1252         if (ret)
1253                 DRM_ERROR("failed to create device file for dpm force performance level\n");
1254         /* XXX: these are noops for dpm but are here for backwards compat */
1255         ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1256         if (ret)
1257                 DRM_ERROR("failed to create device file for power profile\n");
1258         ret = device_create_file(rdev->dev, &dev_attr_power_method);
1259         if (ret)
1260                 DRM_ERROR("failed to create device file for power method\n");
1261
1262         if (radeon_debugfs_pm_init(rdev)) {
1263                 DRM_ERROR("Failed to register debugfs file for dpm!\n");
1264         }
1265
1266         DRM_INFO("radeon: dpm initialized\n");
1267
1268         return 0;
1269
1270 dpm_failed:
1271         rdev->pm.dpm_enabled = false;
1272         if ((rdev->family >= CHIP_BARTS) &&
1273             (rdev->family <= CHIP_CAYMAN) &&
1274             rdev->mc_fw) {
1275                 if (rdev->pm.default_vddc)
1276                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1277                                                 SET_VOLTAGE_TYPE_ASIC_VDDC);
1278                 if (rdev->pm.default_vddci)
1279                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1280                                                 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1281                 if (rdev->pm.default_sclk)
1282                         radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1283                 if (rdev->pm.default_mclk)
1284                         radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1285         }
1286         DRM_ERROR("radeon: dpm initialization failed\n");
1287         return ret;
1288 }
1289
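/* PCI vendor/device and subsystem IDs of boards with known dpm
 * stability problems; matching boards default to the profile method
 * unless dpm is explicitly requested with radeon_dpm=1.
 */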
1290 struct radeon_dpm_quirk {
1291         u32 chip_vendor;
1292         u32 chip_device;
1293         u32 subsys_vendor;
1294         u32 subsys_device;
1295 };
1296
1297 /* cards with dpm stability problems */
1298 static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
1299         /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
1300         { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
1301         /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
1302         { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
1303         { 0, 0, 0, 0 },
1304 };
1305
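/* Top-level PM init: apply the dpm quirk list, pick dpm or the old
 * profile method based on ASIC family, available RLC/SMC firmware and
 * the radeon_dpm module parameter, then call the matching init path.
 */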
1306 int radeon_pm_init(struct radeon_device *rdev)
1307 {
1308         struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
1309         bool disable_dpm = false;
1310
1311         /* Apply dpm quirks */
1312         while (p && p->chip_device != 0) {
1313                 if (rdev->pdev->vendor == p->chip_vendor &&
1314                     rdev->pdev->device == p->chip_device &&
1315                     rdev->pdev->subsystem_vendor == p->subsys_vendor &&
1316                     rdev->pdev->subsystem_device == p->subsys_device) {
1317                         disable_dpm = true;
1318                         break;
1319                 }
1320                 ++p;
1321         }
1322
1323         /* enable dpm on rv6xx+ */
1324         switch (rdev->family) {
1325         case CHIP_RV610:
1326         case CHIP_RV630:
1327         case CHIP_RV620:
1328         case CHIP_RV635:
1329         case CHIP_RV670:
1330         case CHIP_RS780:
1331         case CHIP_RS880:
1332         case CHIP_RV770:
1333                 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1334                 if (!rdev->rlc_fw)
1335                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1336                 else if ((rdev->family >= CHIP_RV770) &&
1337                          (!(rdev->flags & RADEON_IS_IGP)) &&
1338                          (!rdev->smc_fw))
1339                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1340                 else if (radeon_dpm == 1)
1341                         rdev->pm.pm_method = PM_METHOD_DPM;
1342                 else
1343                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1344                 break;
1345         case CHIP_RV730:
1346         case CHIP_RV710:
1347         case CHIP_RV740:
1348         case CHIP_CEDAR:
1349         case CHIP_REDWOOD:
1350         case CHIP_JUNIPER:
1351         case CHIP_CYPRESS:
1352         case CHIP_HEMLOCK:
1353         case CHIP_PALM:
1354         case CHIP_SUMO:
1355         case CHIP_SUMO2:
1356         case CHIP_BARTS:
1357         case CHIP_TURKS:
1358         case CHIP_CAICOS:
1359         case CHIP_CAYMAN:
1360         case CHIP_ARUBA:
1361         case CHIP_TAHITI:
1362         case CHIP_PITCAIRN:
1363         case CHIP_VERDE:
1364         case CHIP_OLAND:
1365         case CHIP_HAINAN:
1366         case CHIP_BONAIRE:
1367         case CHIP_KABINI:
1368         case CHIP_KAVERI:
1369         case CHIP_HAWAII:
1370         case CHIP_MULLINS:
1371                 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1372                 if (!rdev->rlc_fw)
1373                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1374                 else if ((rdev->family >= CHIP_RV770) &&
1375                          (!(rdev->flags & RADEON_IS_IGP)) &&
1376                          (!rdev->smc_fw))
1377                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1378                 else if (disable_dpm && (radeon_dpm == -1))
1379                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1380                 else if (radeon_dpm == 0)
1381                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1382                 else
1383                         rdev->pm.pm_method = PM_METHOD_DPM;
1384                 break;
1385         default:
1386                 /* default to profile method */
1387                 rdev->pm.pm_method = PM_METHOD_PROFILE;
1388                 break;
1389         }
1390
1391         if (rdev->pm.pm_method == PM_METHOD_DPM)
1392                 return radeon_pm_init_dpm(rdev);
1393         else
1394                 return radeon_pm_init_old(rdev);
1395 }
1396
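/* Finish dpm enablement late in device init; nothing to do for the
 * profile/dynpm methods.
 */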
1397 int radeon_pm_late_init(struct radeon_device *rdev)
1398 {
1399         int ret = 0;
1400
1401         if (rdev->pm.pm_method == PM_METHOD_DPM) {
1402                 mutex_lock(&rdev->pm.mutex);
1403                 ret = radeon_dpm_late_enable(rdev);
1404                 mutex_unlock(&rdev->pm.mutex);
1405         }
1406         return ret;
1407 }
1408
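/* Tear down the old PM paths: restore default clocks, cancel the dynpm
 * idle work, remove the sysfs files and free the power state table.
 */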
1409 static void radeon_pm_fini_old(struct radeon_device *rdev)
1410 {
1411         if (rdev->pm.num_power_states > 1) {
1412                 mutex_lock(&rdev->pm.mutex);
1413                 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1414                         rdev->pm.profile = PM_PROFILE_DEFAULT;
1415                         radeon_pm_update_profile(rdev);
1416                         radeon_pm_set_clocks(rdev);
1417                 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1418                         /* reset default clocks */
1419                         rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1420                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1421                         radeon_pm_set_clocks(rdev);
1422                 }
1423                 mutex_unlock(&rdev->pm.mutex);
1424
1425                 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1426
1427                 device_remove_file(rdev->dev, &dev_attr_power_profile);
1428                 device_remove_file(rdev->dev, &dev_attr_power_method);
1429         }
1430
1431         radeon_hwmon_fini(rdev);
1432         kfree(rdev->pm.power_state);
1433 }
1434
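/* Tear down dpm: disable it under the PM mutex, remove the sysfs files
 * and free the dpm and power state data.
 */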
1435 static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1436 {
1437         if (rdev->pm.num_power_states > 1) {
1438                 mutex_lock(&rdev->pm.mutex);
1439                 radeon_dpm_disable(rdev);
1440                 mutex_unlock(&rdev->pm.mutex);
1441
1442                 device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
1443                 device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1444                 /* XXX backwards compat */
1445                 device_remove_file(rdev->dev, &dev_attr_power_profile);
1446                 device_remove_file(rdev->dev, &dev_attr_power_method);
1447         }
1448         radeon_dpm_fini(rdev);
1449
1450         radeon_hwmon_fini(rdev);
1451         kfree(rdev->pm.power_state);
1452 }
1453
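/* Dispatch to the dpm or old-style teardown depending on the PM method
 * chosen at init time.
 */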
1454 void radeon_pm_fini(struct radeon_device *rdev)
1455 {
1456         if (rdev->pm.pm_method == PM_METHOD_DPM)
1457                 radeon_pm_fini_dpm(rdev);
1458         else
1459                 radeon_pm_fini_old(rdev);
1460 }
1461
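/* Re-evaluate clocks for the profile/dynpm methods: recount the active
 * crtcs, then either reapply the current profile or adjust the dynpm
 * state (pause with more than one active crtc, drop to minimum clocks
 * with none).
 */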
1462 static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
1463 {
1464         struct drm_device *ddev = rdev->ddev;
1465         struct drm_crtc *crtc;
1466         struct radeon_crtc *radeon_crtc;
1467
1468         if (rdev->pm.num_power_states < 2)
1469                 return;
1470
1471         mutex_lock(&rdev->pm.mutex);
1472
1473         rdev->pm.active_crtcs = 0;
1474         rdev->pm.active_crtc_count = 0;
1475         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1476                 list_for_each_entry(crtc,
1477                                     &ddev->mode_config.crtc_list, head) {
1478                         radeon_crtc = to_radeon_crtc(crtc);
1479                         if (radeon_crtc->enabled) {
1480                                 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
1481                                 rdev->pm.active_crtc_count++;
1482                         }
1483                 }
1484         }
1485
1486         if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1487                 radeon_pm_update_profile(rdev);
1488                 radeon_pm_set_clocks(rdev);
1489         } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1490                 if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
1491                         if (rdev->pm.active_crtc_count > 1) {
1492                                 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1493                                         cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1494
1495                                         rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
1496                                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1497                                         radeon_pm_get_dynpm_state(rdev);
1498                                         radeon_pm_set_clocks(rdev);
1499
1500                                         DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
1501                                 }
1502                         } else if (rdev->pm.active_crtc_count == 1) {
1503                                 /* TODO: Increase clocks if needed for current mode */
1504
1505                                 if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
1506                                         rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1507                                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
1508                                         radeon_pm_get_dynpm_state(rdev);
1509                                         radeon_pm_set_clocks(rdev);
1510
1511                                         schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1512                                                               msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1513                                 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
1514                                         rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1515                                         schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1516                                                               msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1517                                         DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
1518                                 }
1519                         } else { /* count == 0 */
1520                                 if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
1521                                         cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1522
1523                                         rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
1524                                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
1525                                         radeon_pm_get_dynpm_state(rdev);
1526                                         radeon_pm_set_clocks(rdev);
1527                                 }
1528                         }
1529                 }
1530         }
1531
1532         mutex_unlock(&rdev->pm.mutex);
1533 }
1534
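/* Re-evaluate clocks for dpm: recount the active crtcs, refresh the
 * AC/battery status and let dpm pick a new power state.
 */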
1535 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1536 {
1537         struct drm_device *ddev = rdev->ddev;
1538         struct drm_crtc *crtc;
1539         struct radeon_crtc *radeon_crtc;
1540
1541         if (!rdev->pm.dpm_enabled)
1542                 return;
1543
1544         mutex_lock(&rdev->pm.mutex);
1545
1546         /* update active crtc counts */
1547         rdev->pm.dpm.new_active_crtcs = 0;
1548         rdev->pm.dpm.new_active_crtc_count = 0;
1549         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1550                 list_for_each_entry(crtc,
1551                                     &ddev->mode_config.crtc_list, head) {
1552                         radeon_crtc = to_radeon_crtc(crtc);
1553                         if (crtc->enabled) {
1554                                 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1555                                 rdev->pm.dpm.new_active_crtc_count++;
1556                         }
1557                 }
1558         }
1559
1560         /* update battery/ac status */
1561         if (power_supply_is_system_supplied() > 0)
1562                 rdev->pm.dpm.ac_power = true;
1563         else
1564                 rdev->pm.dpm.ac_power = false;
1565
1566         radeon_dpm_change_power_state_locked(rdev);
1567
1568         mutex_unlock(&rdev->pm.mutex);
1569
1570 }
1571
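/* Dispatch clock re-evaluation to the dpm or old-style handler
 * depending on the PM method in use.
 */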
1572 void radeon_pm_compute_clocks(struct radeon_device *rdev)
1573 {
1574         if (rdev->pm.pm_method == PM_METHOD_DPM)
1575                 radeon_pm_compute_clocks_dpm(rdev);
1576         else
1577                 radeon_pm_compute_clocks_old(rdev);
1578 }
1579
1580 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1581 {
1582         int crtc, vpos, hpos, vbl_status;
1583         bool in_vbl = true;
1584
1585         /* Iterate over all active crtc's. All crtc's must be in vblank,
1586          * otherwise return in_vbl == false.
1587          */
1588         for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1589                 if (rdev->pm.active_crtcs & (1 << crtc)) {
1590                         vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
1591                         if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1592                             !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
1593                                 in_vbl = false;
1594                 }
1595         }
1596
1597         return in_vbl;
1598 }
1599
1600 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
1601 {
1602         u32 stat_crtc = 0;
1603         bool in_vbl = radeon_pm_in_vbl(rdev);
1604
1605         if (!in_vbl)
1606                 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
1607                          finish ? "exit" : "entry");
1608         return in_vbl;
1609 }
1610
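/* Periodic dynpm worker: sample how many fences are still outstanding
 * on the rings, plan an upclock (three or more pending) or a downclock
 * (none pending) after RADEON_RECLOCK_DELAY_MS, apply it once the
 * timeout expires, and reschedule itself while dynpm stays active.
 */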
1611 static void radeon_dynpm_idle_work_handler(struct work_struct *work)
1612 {
1613         struct radeon_device *rdev;
1614         int resched;
1615         rdev = container_of(work, struct radeon_device,
1616                                 pm.dynpm_idle_work.work);
1617
1618         resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1619         mutex_lock(&rdev->pm.mutex);
1620         if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1621                 int not_processed = 0;
1622                 int i;
1623
1624                 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1625                         struct radeon_ring *ring = &rdev->ring[i];
1626
1627                         if (ring->ready) {
1628                                 not_processed += radeon_fence_count_emitted(rdev, i);
1629                                 if (not_processed >= 3)
1630                                         break;
1631                         }
1632                 }
1633
1634                 if (not_processed >= 3) { /* should upclock */
1635                         if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
1636                                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1637                         } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
1638                                    rdev->pm.dynpm_can_upclock) {
1639                                 rdev->pm.dynpm_planned_action =
1640                                         DYNPM_ACTION_UPCLOCK;
1641                                 rdev->pm.dynpm_action_timeout = jiffies +
1642                                         msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
1643                         }
1644                 } else if (not_processed == 0) { /* should downclock */
1645                         if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
1646                                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1647                         } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
1648                                    rdev->pm.dynpm_can_downclock) {
1649                                 rdev->pm.dynpm_planned_action =
1650                                         DYNPM_ACTION_DOWNCLOCK;
1651                                 rdev->pm.dynpm_action_timeout = jiffies +
1652                                         msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
1653                         }
1654                 }
1655
1656                 /* Note, radeon_pm_set_clocks is called with static_switch set
1657                  * to false since we want to wait for vbl to avoid flicker.
1658                  */
1659                 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
1660                     time_after(jiffies, rdev->pm.dynpm_action_timeout)) {
1661                         radeon_pm_get_dynpm_state(rdev);
1662                         radeon_pm_set_clocks(rdev);
1663                 }
1664
1665                 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1666                                       msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1667         }
1668         mutex_unlock(&rdev->pm.mutex);
1669         ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1670 }
1671
1672 /*
1673  * Debugfs info
1674  */
1675 #if defined(CONFIG_DEBUG_FS)
1676
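/* debugfs: report the runtime PM state - a powered-off PX asic, the
 * current dpm performance level, or the old-path clocks and voltage.
 */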
1677 static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1678 {
1679         struct drm_info_node *node = (struct drm_info_node *) m->private;
1680         struct drm_device *dev = node->minor->dev;
1681         struct radeon_device *rdev = dev->dev_private;
1682         struct drm_device *ddev = rdev->ddev;
1683
1684         if ((rdev->flags & RADEON_IS_PX) &&
1685              (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1686                 seq_printf(m, "PX asic powered off\n");
1687         } else if (rdev->pm.dpm_enabled) {
1688                 mutex_lock(&rdev->pm.mutex);
1689                 if (rdev->asic->dpm.debugfs_print_current_performance_level)
1690                         radeon_dpm_debugfs_print_current_performance_level(rdev, m);
1691                 else
1692                         seq_printf(m, "Debugfs support not implemented for this asic\n");
1693                 mutex_unlock(&rdev->pm.mutex);
1694         } else {
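                /* sclk/mclk are stored in units of 10 kHz, so a literal
                 * 0 is appended to print the values in kHz */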
1695                 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
1696                 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
1697                 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
1698                         seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
1699                 else
1700                         seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
1701                 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
1702                 if (rdev->asic->pm.get_memory_clock)
1703                         seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
1704                 if (rdev->pm.current_vddc)
1705                         seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
1706                 if (rdev->asic->pm.get_pcie_lanes)
1707                         seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
1708         }
1709
1710         return 0;
1711 }
1712
1713 static struct drm_info_list radeon_pm_info_list[] = {
1714         {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
1715 };
1716 #endif
1717
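/* Register the radeon_pm_info debugfs file; a no-op without
 * CONFIG_DEBUG_FS.
 */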
1718 static int radeon_debugfs_pm_init(struct radeon_device *rdev)
1719 {
1720 #if defined(CONFIG_DEBUG_FS)
1721         return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
1722 #else
1723         return 0;
1724 #endif
1725 }