drm/ttm: convert to unified vma offset manager
[dragonfly.git] / sys / dev / drm / radeon / radeon_pm.c
1 /*
2  * Permission is hereby granted, free of charge, to any person obtaining a
3  * copy of this software and associated documentation files (the "Software"),
4  * to deal in the Software without restriction, including without limitation
5  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6  * and/or sell copies of the Software, and to permit persons to whom the
7  * Software is furnished to do so, subject to the following conditions:
8  *
9  * The above copyright notice and this permission notice shall be included in
10  * all copies or substantial portions of the Software.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18  * OTHER DEALINGS IN THE SOFTWARE.
19  *
20  * Authors: Rafał Miłecki <zajec5@gmail.com>
21  *          Alex Deucher <alexdeucher@gmail.com>
22  *
23  * $FreeBSD: head/sys/dev/drm2/radeon/radeon_pm.c 254885 2013-08-25 19:37:15Z dumbbell $
24  */
25
26 #include <sys/power.h>
27 #include <drm/drmP.h>
28 #include <sys/sensors.h>
29 #include "radeon.h"
30 #include "avivod.h"
31 #include "atom.h"
32
33 #define RADEON_IDLE_LOOP_MS 100
34 #define RADEON_RECLOCK_DELAY_MS 200
35 #define RADEON_WAIT_VBLANK_TIMEOUT 200
36
/* Human-readable names for the user-visible radeon_pm_state_type values;
 * index 0 (the default/none type) intentionally prints as an empty string.
 * NOTE(review): the enum has additional internal values beyond these 5 —
 * callers must bound-check before indexing. */
static const char *radeon_pm_state_type_name[5] = {
        "",
        "Powersave",
        "Battery",
        "Balanced",
        "Performance",
};
44
#ifdef DUMBBELL_WIP
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
#endif /* DUMBBELL_WIP */
/* forward declarations for helpers referenced before their definitions */
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
53
54 int radeon_pm_get_type_index(struct radeon_device *rdev,
55                              enum radeon_pm_state_type ps_type,
56                              int instance)
57 {
58         int i;
59         int found_instance = -1;
60
61         for (i = 0; i < rdev->pm.num_power_states; i++) {
62                 if (rdev->pm.power_state[i].type == ps_type) {
63                         found_instance++;
64                         if (found_instance == instance)
65                                 return i;
66                 }
67         }
68         /* return default if no match */
69         return rdev->pm.default_power_state_index;
70 }
71
72 void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
73 {
74         if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
75                 lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
76                 if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE)
77                         rdev->pm.dpm.ac_power = true;
78                 else
79                         rdev->pm.dpm.ac_power = false;
80                 if (rdev->family == CHIP_ARUBA) {
81                         if (rdev->asic->dpm.enable_bapm)
82                                 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
83                 }
84                 lockmgr(&rdev->pm.mutex, LK_RELEASE);
85         } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
86                 if (rdev->pm.profile == PM_PROFILE_AUTO) {
87                         lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
88                         radeon_pm_update_profile(rdev);
89                         radeon_pm_set_clocks(rdev);
90                         lockmgr(&rdev->pm.mutex, LK_RELEASE);
91                 }
92         }
93 }
94
95 static void radeon_pm_update_profile(struct radeon_device *rdev)
96 {
97         switch (rdev->pm.profile) {
98         case PM_PROFILE_DEFAULT:
99                 rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
100                 break;
101         case PM_PROFILE_AUTO:
102                 if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE) {
103                         if (rdev->pm.active_crtc_count > 1)
104                                 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
105                         else
106                                 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
107                 } else {
108                         if (rdev->pm.active_crtc_count > 1)
109                                 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
110                         else
111                                 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
112                 }
113                 break;
114         case PM_PROFILE_LOW:
115                 if (rdev->pm.active_crtc_count > 1)
116                         rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
117                 else
118                         rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
119                 break;
120         case PM_PROFILE_MID:
121                 if (rdev->pm.active_crtc_count > 1)
122                         rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
123                 else
124                         rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
125                 break;
126         case PM_PROFILE_HIGH:
127                 if (rdev->pm.active_crtc_count > 1)
128                         rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
129                 else
130                         rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
131                 break;
132         }
133
134         if (rdev->pm.active_crtc_count == 0) {
135                 rdev->pm.requested_power_state_index =
136                         rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
137                 rdev->pm.requested_clock_mode_index =
138                         rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
139         } else {
140                 rdev->pm.requested_power_state_index =
141                         rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
142                 rdev->pm.requested_clock_mode_index =
143                         rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
144         }
145 }
146
147 static void radeon_unmap_vram_bos(struct radeon_device *rdev)
148 {
149         struct radeon_bo *bo, *n;
150
151         if (list_empty(&rdev->gem.objects))
152                 return;
153
154         list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
155                 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
156                         ttm_bo_unmap_virtual(&bo->tbo);
157         }
158 }
159
/*
 * radeon_sync_with_vblank - bounded wait for the next vblank interrupt.
 *
 * Arms pm.vblank_sync and (when the wait code is enabled) sleeps until the
 * vblank IRQ handler sets it or RADEON_WAIT_VBLANK_TIMEOUT ms elapse.  The
 * wait itself is still gated behind DUMBBELL_WIP on this port, so currently
 * the function only clears the flag when any CRTC is active.
 */
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
        if (rdev->pm.active_crtcs) {
                rdev->pm.vblank_sync = false;
#ifdef DUMBBELL_WIP
                wait_event_timeout(
                        rdev->irq.vblank_queue, rdev->pm.vblank_sync,
                        msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
#endif /* DUMBBELL_WIP */
        }
}
171
/*
 * radeon_set_power_state - program the requested power state into hardware.
 *
 * No-op when the requested state/clock-mode already matches the current
 * one, or when the GUI engine is not idle.  Otherwise clamps the target
 * engine/memory clocks to the defaults, orders the voltage change around
 * the clock change (upvolt before raising clocks, downvolt after lowering
 * them), and commits the new indices on success.  Called from
 * radeon_pm_set_clocks() with the reclock locks held.
 */
static void radeon_set_power_state(struct radeon_device *rdev)
{
        u32 sclk, mclk;
        bool misc_after = false;

        if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
            (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
                return;

        if (radeon_gui_idle(rdev)) {
                sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
                        clock_info[rdev->pm.requested_clock_mode_index].sclk;
                /* never clock above the default engine clock */
                if (sclk > rdev->pm.default_sclk)
                        sclk = rdev->pm.default_sclk;

                /* starting with BTC, there is one state that is used for both
                 * MH and SH.  Difference is that we always use the high clock index for
                 * mclk and vddci.
                 */
                if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
                    (rdev->family >= CHIP_BARTS) &&
                    rdev->pm.active_crtc_count &&
                    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
                     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
                        mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
                                clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
                else
                        mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
                                clock_info[rdev->pm.requested_clock_mode_index].mclk;

                /* never clock above the default memory clock */
                if (mclk > rdev->pm.default_mclk)
                        mclk = rdev->pm.default_mclk;

                /* upvolt before raising clocks, downvolt after lowering clocks */
                if (sclk < rdev->pm.current_sclk)
                        misc_after = true;

                radeon_sync_with_vblank(rdev);

                /* dynpm reclocks must land inside the vblank window; bail if
                 * we are not in it (indices stay unchanged, so we retry) */
                if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
                        if (!radeon_pm_in_vbl(rdev))
                                return;
                }

                radeon_pm_prepare(rdev);

                if (!misc_after)
                        /* voltage, pcie lanes, etc.*/
                        radeon_pm_misc(rdev);

                /* set engine clock */
                if (sclk != rdev->pm.current_sclk) {
                        radeon_pm_debug_check_in_vbl(rdev, false);
                        radeon_set_engine_clock(rdev, sclk);
                        radeon_pm_debug_check_in_vbl(rdev, true);
                        rdev->pm.current_sclk = sclk;
                        DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
                }

                /* set memory clock */
                if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
                        radeon_pm_debug_check_in_vbl(rdev, false);
                        radeon_set_memory_clock(rdev, mclk);
                        radeon_pm_debug_check_in_vbl(rdev, true);
                        rdev->pm.current_mclk = mclk;
                        DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
                }

                if (misc_after)
                        /* voltage, pcie lanes, etc.*/
                        radeon_pm_misc(rdev);

                radeon_pm_finish(rdev);

                rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
                rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
        } else
                DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
251
/*
 * radeon_pm_set_clocks - safely transition to the requested power state.
 *
 * Serializes against command submission and memory access: takes the DRM
 * lock, the mclk lock (as writer) and the ring lock, drains every ready
 * ring, unmaps VRAM BOs, and holds a vblank reference on each active CRTC
 * around the actual state switch.  Bails out early (with all locks
 * released) if a fence wait fails, since that needs a GPU reset.
 */
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
        int i, r;

        /* no need to take locks, etc. if nothing's going to change */
        if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
            (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
                return;

        DRM_LOCK(rdev->ddev);
        lockmgr(&rdev->pm.mclk_lock, LK_EXCLUSIVE); // down_write
        lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);

        /* wait for the rings to drain */
        for (i = 0; i < RADEON_NUM_RINGS; i++) {
                struct radeon_ring *ring = &rdev->ring[i];
                if (!ring->ready) {
                        continue;
                }
                r = radeon_fence_wait_empty(rdev, i);
                if (r) {
                        /* needs a GPU reset dont reset here */
                        lockmgr(&rdev->ring_lock, LK_RELEASE);
                        lockmgr(&rdev->pm.mclk_lock, LK_RELEASE); // up_write
                        DRM_UNLOCK(rdev->ddev);
                        return;
                }
        }

        radeon_unmap_vram_bos(rdev);

        /* hold a vblank reference on each active CRTC so vblank interrupts
         * keep firing for the sync in radeon_set_power_state() */
        if (rdev->irq.installed) {
                for (i = 0; i < rdev->num_crtc; i++) {
                        if (rdev->pm.active_crtcs & (1 << i)) {
                                rdev->pm.req_vblank |= (1 << i);
                                drm_vblank_get(rdev->ddev, i);
                        }
                }
        }

        radeon_set_power_state(rdev);

        /* drop the vblank references taken above */
        if (rdev->irq.installed) {
                for (i = 0; i < rdev->num_crtc; i++) {
                        if (rdev->pm.req_vblank & (1 << i)) {
                                rdev->pm.req_vblank &= ~(1 << i);
                                drm_vblank_put(rdev->ddev, i);
                        }
                }
        }

        /* update display watermarks based on new power state */
        radeon_update_bandwidth_info(rdev);
        if (rdev->pm.active_crtc_count)
                radeon_bandwidth_update(rdev);

        rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;

        lockmgr(&rdev->ring_lock, LK_RELEASE);
        lockmgr(&rdev->pm.mclk_lock, LK_RELEASE); // up_write
        DRM_UNLOCK(rdev->ddev);
}
314
315 static void radeon_pm_print_states(struct radeon_device *rdev)
316 {
317         int i, j;
318         struct radeon_power_state *power_state;
319         struct radeon_pm_clock_info *clock_info;
320
321         DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
322         for (i = 0; i < rdev->pm.num_power_states; i++) {
323                 power_state = &rdev->pm.power_state[i];
324                 DRM_DEBUG_DRIVER("State %d: %s\n", i,
325                         radeon_pm_state_type_name[power_state->type]);
326                 if (i == rdev->pm.default_power_state_index)
327                         DRM_DEBUG_DRIVER("\tDefault");
328                 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
329                         DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
330                 if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
331                         DRM_DEBUG_DRIVER("\tSingle display only\n");
332                 DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
333                 for (j = 0; j < power_state->num_clock_modes; j++) {
334                         clock_info = &(power_state->clock_info[j]);
335                         if (rdev->flags & RADEON_IS_IGP)
336                                 DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
337                                                  j,
338                                                  clock_info->sclk * 10);
339                         else
340                                 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
341                                                  j,
342                                                  clock_info->sclk * 10,
343                                                  clock_info->mclk * 10,
344                                                  clock_info->voltage.voltage);
345                 }
346         }
347 }
348
349 #ifdef DUMBBELL_WIP
350 static ssize_t radeon_get_pm_profile(struct device *dev,
351                                      struct device_attribute *attr,
352                                      char *buf)
353 {
354         struct drm_device *ddev = dev_get_drvdata(dev);
355         struct radeon_device *rdev = ddev->dev_private;
356         int cp = rdev->pm.profile;
357
358         return ksnprintf(buf, PAGE_SIZE, "%s\n",
359                         (cp == PM_PROFILE_AUTO) ? "auto" :
360                         (cp == PM_PROFILE_LOW) ? "low" :
361                         (cp == PM_PROFILE_MID) ? "mid" :
362                         (cp == PM_PROFILE_HIGH) ? "high" : "default");
363 }
364
365 static ssize_t radeon_set_pm_profile(struct device *dev,
366                                      struct device_attribute *attr,
367                                      const char *buf,
368                                      size_t count)
369 {
370         struct drm_device *ddev = dev_get_drvdata(dev);
371         struct radeon_device *rdev = ddev->dev_private;
372
373         /* Can't set profile when the card is off */
374         if  ((rdev->flags & RADEON_IS_PX) &&
375              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
376                 return -EINVAL;
377
378         lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
379         if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
380                 if (strncmp("default", buf, strlen("default")) == 0)
381                         rdev->pm.profile = PM_PROFILE_DEFAULT;
382                 else if (strncmp("auto", buf, strlen("auto")) == 0)
383                         rdev->pm.profile = PM_PROFILE_AUTO;
384                 else if (strncmp("low", buf, strlen("low")) == 0)
385                         rdev->pm.profile = PM_PROFILE_LOW;
386                 else if (strncmp("mid", buf, strlen("mid")) == 0)
387                         rdev->pm.profile = PM_PROFILE_MID;
388                 else if (strncmp("high", buf, strlen("high")) == 0)
389                         rdev->pm.profile = PM_PROFILE_HIGH;
390                 else {
391                         count = -EINVAL;
392                         goto fail;
393                 }
394                 radeon_pm_update_profile(rdev);
395                 radeon_pm_set_clocks(rdev);
396         } else
397                 count = -EINVAL;
398
399 fail:
400         lockmgr(&rdev->pm.mutex, LK_RELEASE);
401
402         return count;
403 }
404
405 static ssize_t radeon_get_pm_method(struct device *dev,
406                                     struct device_attribute *attr,
407                                     char *buf)
408 {
409         struct drm_device *ddev = dev_get_drvdata(dev);
410         struct radeon_device *rdev = ddev->dev_private;
411         int pm = rdev->pm.pm_method;
412
413         return ksnprintf(buf, PAGE_SIZE, "%s\n",
414                         (pm == PM_METHOD_DYNPM) ? "dynpm" :
415                         (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
416 }
417
418 static ssize_t radeon_set_pm_method(struct device *dev,
419                                     struct device_attribute *attr,
420                                     const char *buf,
421                                     size_t count)
422 {
423         struct drm_device *ddev = dev_get_drvdata(dev);
424         struct radeon_device *rdev = ddev->dev_private;
425
426         /* Can't set method when the card is off */
427         if  ((rdev->flags & RADEON_IS_PX) &&
428              (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
429                 count = -EINVAL;
430                 goto fail;
431         }
432
433         /* we don't support the legacy modes with dpm */
434         if (rdev->pm.pm_method == PM_METHOD_DPM) {
435                 count = -EINVAL;
436                 goto fail;
437         }
438
439         if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
440                 lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
441                 rdev->pm.pm_method = PM_METHOD_DYNPM;
442                 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
443                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
444                 lockmgr(&rdev->pm.mutex, LK_RELEASE);
445         } else if (strncmp("profile", buf, strlen("profile")) == 0) {
446                 lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
447                 /* disable dynpm */
448                 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
449                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
450                 rdev->pm.pm_method = PM_METHOD_PROFILE;
451                 lockmgr(&rdev->pm.mutex, LK_RELEASE);
452 #ifdef DUMBBELL_WIP
453                 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
454 #endif /* DUMBBELL_WIP */
455         } else {
456                 count = -EINVAL;
457                 goto fail;
458         }
459         radeon_pm_compute_clocks(rdev);
460 fail:
461         return count;
462 }
463
464 static ssize_t radeon_get_dpm_state(struct device *dev,
465                                     struct device_attribute *attr,
466                                     char *buf)
467 {
468         struct drm_device *ddev = dev_get_drvdata(dev);
469         struct radeon_device *rdev = ddev->dev_private;
470         enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
471
472         return snprintf(buf, PAGE_SIZE, "%s\n",
473                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
474                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
475 }
476
477 static ssize_t radeon_set_dpm_state(struct device *dev,
478                                     struct device_attribute *attr,
479                                     const char *buf,
480                                     size_t count)
481 {
482         struct drm_device *ddev = dev_get_drvdata(dev);
483         struct radeon_device *rdev = ddev->dev_private;
484
485         lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
486         if (strncmp("battery", buf, strlen("battery")) == 0)
487                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
488         else if (strncmp("balanced", buf, strlen("balanced")) == 0)
489                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
490         else if (strncmp("performance", buf, strlen("performance")) == 0)
491                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
492         else {
493                 lockmgr(&rdev->pm.mutex, LK_RELEASE);
494                 count = -EINVAL;
495                 goto fail;
496         }
497         lockmgr(&rdev->pm.mutex, LK_RELEASE);
498
499         /* Can't set dpm state when the card is off */
500         if (!(rdev->flags & RADEON_IS_PX) ||
501             (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
502                 radeon_pm_compute_clocks(rdev);
503
504 fail:
505         return count;
506 }
507
508 static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
509                                                        struct device_attribute *attr,
510                                                        char *buf)
511 {
512         struct drm_device *ddev = dev_get_drvdata(dev);
513         struct radeon_device *rdev = ddev->dev_private;
514         enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
515
516         if  ((rdev->flags & RADEON_IS_PX) &&
517              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
518                 return snprintf(buf, PAGE_SIZE, "off\n");
519
520         return snprintf(buf, PAGE_SIZE, "%s\n",
521                         (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
522                         (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
523 }
524
525 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
526                                                        struct device_attribute *attr,
527                                                        const char *buf,
528                                                        size_t count)
529 {
530         struct drm_device *ddev = dev_get_drvdata(dev);
531         struct radeon_device *rdev = ddev->dev_private;
532         enum radeon_dpm_forced_level level;
533         int ret = 0;
534
535         /* Can't force performance level when the card is off */
536         if  ((rdev->flags & RADEON_IS_PX) &&
537              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
538                 return -EINVAL;
539
540         spin_lock(&rdev->pm.mutex);
541         if (strncmp("low", buf, strlen("low")) == 0) {
542                 level = RADEON_DPM_FORCED_LEVEL_LOW;
543         } else if (strncmp("high", buf, strlen("high")) == 0) {
544                 level = RADEON_DPM_FORCED_LEVEL_HIGH;
545         } else if (strncmp("auto", buf, strlen("auto")) == 0) {
546                 level = RADEON_DPM_FORCED_LEVEL_AUTO;
547         } else {
548                 count = -EINVAL;
549                 goto fail;
550         }
551         if (rdev->asic->dpm.force_performance_level) {
552                 if (rdev->pm.dpm.thermal_active) {
553                         count = -EINVAL;
554                         goto fail;
555                 }
556                 ret = radeon_dpm_force_performance_level(rdev, level);
557                 if (ret)
558                         count = -EINVAL;
559         }
560 fail:
561         spin_unlock(&rdev->pm.mutex);
562
563         return count;
564 }
565
/* sysfs controls: legacy profile/method interface plus the DPM state and
 * forced-performance-level knobs defined above. */
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
                   radeon_get_dpm_forced_performance_level,
                   radeon_set_dpm_forced_performance_level);
572
573 static ssize_t radeon_hwmon_show_temp(struct device *dev,
574                                       struct device_attribute *attr,
575                                       char *buf)
576 {
577         struct radeon_device *rdev = dev_get_drvdata(dev);
578         struct drm_device *ddev = rdev->ddev;
579         int temp;
580
581         /* Can't get temperature when the card is off */
582         if  ((rdev->flags & RADEON_IS_PX) &&
583              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
584                 return -EINVAL;
585
586         if (rdev->asic->pm.get_temperature)
587                 temp = radeon_get_temperature(rdev);
588         else
589                 temp = 0;
590
591         return ksnprintf(buf, PAGE_SIZE, "%d\n", temp);
592 }
593
594 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
595                                              struct device_attribute *attr,
596                                              char *buf)
597 {
598         struct radeon_device *rdev = dev_get_drvdata(dev);
599         int hyst = to_sensor_dev_attr(attr)->index;
600         int temp;
601
602         if (hyst)
603                 temp = rdev->pm.dpm.thermal.min_temp;
604         else
605                 temp = rdev->pm.dpm.thermal.max_temp;
606
607         return ksnprintf(buf, PAGE_SIZE, "%d\n", temp);
608 }
609
/* hwmon temperature attributes: current temp plus the DPM critical
 * threshold and its hysteresis (index argument 1 selects min_temp in
 * radeon_hwmon_show_temp_thresh). */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);

static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
        NULL
};
620
621 static umode_t hwmon_attributes_visible(struct kobject *kobj,
622                                         struct attribute *attr, int index)
623 {
624         struct device *dev = container_of(kobj, struct device, kobj);
625         struct radeon_device *rdev = dev_get_drvdata(dev);
626
627         /* Skip limit attributes if DPM is not enabled */
628         if (rdev->pm.pm_method != PM_METHOD_DPM &&
629             (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
630              attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
631                 return 0;
632
633         return attr->mode;
634 }
635
/* hwmon attribute group; visibility filtering hides the limit attributes
 * when DPM is not the active method. */
static const struct attribute_group hwmon_attrgroup = {
        .attrs = hwmon_attributes,
        .is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
        &hwmon_attrgroup,
        NULL
};
645 #endif /* DUMBBELL_WIP */
646
647 static void
648 radeon_hwmon_refresh(void *arg)
649 {
650         struct radeon_device *rdev = (struct radeon_device *)arg;
651         struct drm_device *ddev = rdev->ddev;
652         struct ksensor *s = rdev->pm.int_sensor;
653         int temp;
654         enum sensor_status stat;
655
656         /* Can't get temperature when the card is off */
657         if  ((rdev->flags & RADEON_IS_PX) &&
658              (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
659                 sensor_set_unknown(s);
660                 s->status = SENSOR_S_OK;
661                 return;
662         }
663
664         if (rdev->asic->pm.get_temperature == NULL) {
665                 sensor_set_invalid(s);
666                 return;
667         }
668
669         temp = radeon_get_temperature(rdev);
670         if (temp >= rdev->pm.dpm.thermal.max_temp)
671                 stat = SENSOR_S_CRIT;
672         else if (temp >= rdev->pm.dpm.thermal.min_temp)
673                 stat = SENSOR_S_WARN;
674         else
675                 stat = SENSOR_S_OK;
676
677         sensor_set(s, temp * 1000 + 273150000, stat);
678 }
679
/*
 * radeon_hwmon_init - expose the on-die thermal sensor through the kernel
 * sensors framework.
 *
 * Only asic families with an internal thermal sensor (listed in the switch
 * below) and a working get_temperature hook get a sensor; the registered
 * task polls radeon_hwmon_refresh() with period argument 5 (presumably
 * seconds — verify against sensor_task_register()).  Always returns 0.
 */
static int radeon_hwmon_init(struct radeon_device *rdev)
{
        int err = 0;

        rdev->pm.int_sensor = NULL;
        rdev->pm.int_sensordev = NULL;

        switch (rdev->pm.int_thermal_type) {
        case THERMAL_TYPE_RV6XX:
        case THERMAL_TYPE_RV770:
        case THERMAL_TYPE_EVERGREEN:
        case THERMAL_TYPE_NI:
        case THERMAL_TYPE_SUMO:
        case THERMAL_TYPE_SI:
        case THERMAL_TYPE_CI:
        case THERMAL_TYPE_KV:
                /* no way to read the temperature -> nothing to expose */
                if (rdev->asic->pm.get_temperature == NULL)
                        return err;

                /* M_WAITOK allocations sleep rather than fail */
                rdev->pm.int_sensor = kmalloc(sizeof(*rdev->pm.int_sensor),
                    M_DRM, M_ZERO | M_WAITOK);
                rdev->pm.int_sensordev = kmalloc(
                    sizeof(*rdev->pm.int_sensordev), M_DRM,
                    M_ZERO | M_WAITOK);
                strlcpy(rdev->pm.int_sensordev->xname,
                    device_get_nameunit(rdev->dev->bsddev),
                    sizeof(rdev->pm.int_sensordev->xname));
                rdev->pm.int_sensor->type = SENSOR_TEMP;
                /* stays invalid until the first refresh fills it in */
                rdev->pm.int_sensor->flags |= SENSOR_FINVALID;
                sensor_attach(rdev->pm.int_sensordev, rdev->pm.int_sensor);
                sensor_task_register(rdev, radeon_hwmon_refresh, 5);
                sensordev_install(rdev->pm.int_sensordev);
                break;
        default:
                break;
        }

        return err;
}
719
720 static void radeon_hwmon_fini(struct radeon_device *rdev)
721 {
722         if (rdev->pm.int_sensor != NULL && rdev->pm.int_sensordev != NULL) {
723                 sensordev_deinstall(rdev->pm.int_sensordev);
724                 sensor_task_unregister(rdev);
725                 kfree(rdev->pm.int_sensor);
726                 kfree(rdev->pm.int_sensordev);
727                 rdev->pm.int_sensor = NULL;
728                 rdev->pm.int_sensordev = NULL;
729         }
730 }
731
732 static void radeon_dpm_thermal_work_handler(struct work_struct *work)
733 {
734         struct radeon_device *rdev =
735                 container_of(work, struct radeon_device,
736                              pm.dpm.thermal.work);
737         /* switch to the thermal state */
738         enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
739
740         if (!rdev->pm.dpm_enabled)
741                 return;
742
743         if (rdev->asic->pm.get_temperature) {
744                 int temp = radeon_get_temperature(rdev);
745
746                 if (temp < rdev->pm.dpm.thermal.min_temp)
747                         /* switch back the user state */
748                         dpm_state = rdev->pm.dpm.user_state;
749         } else {
750                 if (rdev->pm.dpm.thermal.high_to_low)
751                         /* switch back the user state */
752                         dpm_state = rdev->pm.dpm.user_state;
753         }
754         mutex_lock(&rdev->pm.mutex);
755         if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
756                 rdev->pm.dpm.thermal_active = true;
757         else
758                 rdev->pm.dpm.thermal_active = false;
759         rdev->pm.dpm.state = dpm_state;
760         mutex_unlock(&rdev->pm.mutex);
761
762         radeon_pm_compute_clocks(rdev);
763 }
764
/*
 * radeon_dpm_pick_power_state - select a power state matching a request
 * @rdev: radeon device
 * @dpm_state: requested power state type (user or internal)
 *
 * Scans rdev->pm.dpm.ps[] for a state matching @dpm_state.  States
 * flagged ATOM_PPLIB_SINGLE_DISPLAY_ONLY are only eligible when a single
 * display is active (and the vblank period is long enough to retrain
 * mclk).  If no state matches, @dpm_state is demoted along a fallback
 * chain (e.g. UVD SD -> UVD HD -> performance, thermal -> ACPI ->
 * battery) and the scan restarts.  Returns NULL only when even the
 * fallbacks find nothing.
 */
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
						     enum radeon_pm_state_type dpm_state)
{
	int i;
	struct radeon_ps *ps;
	u32 ui_class;
	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && rdev->asic->dpm.vblank_too_short) {
		if (radeon_dpm_vblank_too_short(rdev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		ps = &rdev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states: match on the UI classification bits */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states: match on specific classification flags */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (rdev->pm.dpm.uvd_ps)
				return rdev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			/* boot state is recorded at init time, no scan needed */
			return rdev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (rdev->pm.dpm.uvd_ps) {
			return rdev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
898
/*
 * radeon_dpm_change_power_state_locked - reprogram the dpm power state
 * @rdev: radeon device
 *
 * Resolves the effective dpm state (honoring thermal/uvd overrides),
 * picks a matching power state and, if it differs from the current one,
 * reprograms the hardware: update watermarks and display config, drain
 * all rings, set the new state, then reapply the forced performance
 * level.  Caller holds rdev->pm.mutex (hence "_locked"); this function
 * additionally takes struct_mutex, mclk_lock and ring_lock around the
 * hardware transition.
 */
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
{
	int i;
	struct radeon_ps *ps;
	enum radeon_pm_state_type dpm_state;
	int ret;

	/* if dpm init failed */
	if (!rdev->pm.dpm_enabled)
		return;

	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!rdev->pm.dpm.thermal_active) &&
		    (!rdev->pm.dpm.uvd_active))
			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
	}
	dpm_state = rdev->pm.dpm.state;

	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
	if (ps)
		rdev->pm.dpm.requested_ps = ps;
	else
		return;

	/* no need to reprogram if nothing changed unless we are on BTC+ */
	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
		/* vce just modifies an existing state so force a change */
		if (ps->vce_active != rdev->pm.dpm.vce_active)
			goto force;
		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
			 * all we need to do is update the display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
				/* update display watermarks based on new power state */
				radeon_bandwidth_update(rdev);
				/* update displays */
				radeon_dpm_display_configuration_changed(rdev);
				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
			}
			return;
		} else {
			/* for BTC+ if the num crtcs hasn't changed and state is the same,
			 * nothing to do, if the num crtcs is > 1 and state is the same,
			 * update display configuration.
			 */
			if (rdev->pm.dpm.new_active_crtcs ==
			    rdev->pm.dpm.current_active_crtcs) {
				return;
			} else {
				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
					/* update display watermarks based on new power state */
					radeon_bandwidth_update(rdev);
					/* update displays */
					radeon_dpm_display_configuration_changed(rdev);
					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
					return;
				}
				/* falls through to a full reprogram otherwise */
			}
		}
	}

force:
	if (radeon_dpm == 1) {
		printk("switching from power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
	}

	/* lock order: struct_mutex -> mclk_lock -> ring_lock */
	lockmgr(&rdev->ddev->struct_mutex, LK_EXCLUSIVE);
	lockmgr(&rdev->pm.mclk_lock, LK_EXCLUSIVE); // down_write
	lockmgr(&rdev->ring_lock, LK_EXCLUSIVE);

	/* update whether vce is active */
	ps->vce_active = rdev->pm.dpm.vce_active;

	ret = radeon_dpm_pre_set_power_state(rdev);
	if (ret)
		goto done;

	/* update display watermarks based on new power state */
	radeon_bandwidth_update(rdev);
	/* update displays */
	radeon_dpm_display_configuration_changed(rdev);

	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;

	/* wait for the rings to drain */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		struct radeon_ring *ring = &rdev->ring[i];
		if (ring->ready)
			radeon_fence_wait_empty(rdev, i);
	}

	/* program the new power state */
	radeon_dpm_set_power_state(rdev);

	/* update current power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;

	radeon_dpm_post_set_power_state(rdev);

	if (rdev->asic->dpm.force_performance_level) {
		if (rdev->pm.dpm.thermal_active) {
			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			rdev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
		}
	}

done:
	/* release in reverse acquisition order */
	lockmgr(&rdev->ring_lock, LK_RELEASE);
	lockmgr(&rdev->pm.mclk_lock, LK_RELEASE); // up_write
	lockmgr(&rdev->ddev->struct_mutex, LK_RELEASE);
}
1025
/*
 * radeon_dpm_enable_uvd - notify dpm of UVD (video decode) activity
 * @rdev: radeon device
 * @enable: true while UVD is in use
 *
 * On asics with UVD powergating this gates/ungates the UVD block
 * directly (streams that are active-but-paused keep it powered).  On
 * other asics it switches the dpm state to/from the dedicated UVD power
 * state and recomputes clocks.
 */
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
	enum radeon_pm_state_type dpm_state;

	if (rdev->asic->dpm.powergate_uvd) {
		mutex_lock(&rdev->pm.mutex);
		/* don't powergate anything if we
		   have active but pause streams */
		enable |= rdev->pm.dpm.sd > 0;
		enable |= rdev->pm.dpm.hd > 0;
		/* enable/disable UVD */
		radeon_dpm_powergate_uvd(rdev, !enable);
		mutex_unlock(&rdev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = true;
			/* disable this for now */
#if 0
			/* NOTE: with the block above disabled, the else branch
			 * below always runs and dpm_state is always set. */
			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
			else
#endif
				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
			rdev->pm.dpm.state = dpm_state;
			mutex_unlock(&rdev->pm.mutex);
		} else {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
		}

		radeon_pm_compute_clocks(rdev);
	}
}
1067
1068 void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
1069 {
1070         if (enable) {
1071                 mutex_lock(&rdev->pm.mutex);
1072                 rdev->pm.dpm.vce_active = true;
1073                 /* XXX select vce level based on ring/task */
1074                 rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
1075                 mutex_unlock(&rdev->pm.mutex);
1076         } else {
1077                 mutex_lock(&rdev->pm.mutex);
1078                 rdev->pm.dpm.vce_active = false;
1079                 mutex_unlock(&rdev->pm.mutex);
1080         }
1081
1082         radeon_pm_compute_clocks(rdev);
1083 }
1084
1085 static void radeon_pm_suspend_old(struct radeon_device *rdev)
1086 {
1087         lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
1088         if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1089                 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1090                         rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1091         }
1092         lockmgr(&rdev->pm.mutex, LK_RELEASE);
1093
1094 #ifdef DUMBBELL_WIP
1095         cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1096 #endif /* DUMBBELL_WIP */
1097 }
1098
1099 static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
1100 {
1101         lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
1102         /* disable dpm */
1103         radeon_dpm_disable(rdev);
1104         /* reset the power state */
1105         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1106         rdev->pm.dpm_enabled = false;
1107         lockmgr(&rdev->pm.mutex, LK_RELEASE);
1108 }
1109
1110 void radeon_pm_suspend(struct radeon_device *rdev)
1111 {
1112         if (rdev->pm.pm_method == PM_METHOD_DPM)
1113                 radeon_pm_suspend_dpm(rdev);
1114         else
1115                 radeon_pm_suspend_old(rdev);
1116 }
1117
/*
 * radeon_pm_resume_old - resume hook for the non-dpm pm paths
 * @rdev: radeon device
 *
 * Restores default voltages/clocks (BTC..Cayman with MC ucode only),
 * resets the tracked pm state to the defaults that asic init left in
 * place, reactivates dynamic pm if it was suspended, and recomputes
 * clocks.
 */
static void radeon_pm_resume_old(struct radeon_device *rdev)
{
	/* set up the default clocks if the MC ucode is loaded */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	/* asic init will reset the default power state */
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
	rdev->pm.current_clock_mode_index = 0;
	rdev->pm.current_sclk = rdev->pm.default_sclk;
	rdev->pm.current_mclk = rdev->pm.default_mclk;
	if (rdev->pm.power_state) {
		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
	}
	/* restart dynamic pm if it was parked by radeon_pm_suspend_old() */
	if (rdev->pm.pm_method == PM_METHOD_DYNPM
	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
#ifdef DUMBBELL_WIP
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
	}
	lockmgr(&rdev->pm.mutex, LK_RELEASE);
	radeon_pm_compute_clocks(rdev);
}
1156
/*
 * radeon_pm_resume_dpm - resume hook for the dpm path
 * @rdev: radeon device
 *
 * Re-enables dpm after asic init has reset the hardware to the boot
 * state.  If re-enable fails, dpm stays disabled and the default
 * voltages/clocks are programmed directly (BTC..Cayman with MC ucode
 * only) as a fallback.
 */
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
{
	int ret;

	/* asic init will reset to the boot state */
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	lockmgr(&rdev->pm.mutex, LK_RELEASE);
	if (ret)
		goto dpm_resume_fail;
	rdev->pm.dpm_enabled = true;
	return;

dpm_resume_fail:
	DRM_ERROR("radeon: dpm resume failed\n");
	/* fall back to programming the default clocks by hand */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
}
1189
1190 void radeon_pm_resume(struct radeon_device *rdev)
1191 {
1192         if (rdev->pm.pm_method == PM_METHOD_DPM)
1193                 radeon_pm_resume_dpm(rdev);
1194         else
1195                 radeon_pm_resume_old(rdev);
1196 }
1197
1198 static int radeon_pm_init_old(struct radeon_device *rdev)
1199 {
1200         int ret;
1201
1202         rdev->pm.profile = PM_PROFILE_DEFAULT;
1203         rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1204         rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1205         rdev->pm.dynpm_can_upclock = true;
1206         rdev->pm.dynpm_can_downclock = true;
1207         rdev->pm.default_sclk = rdev->clock.default_sclk;
1208         rdev->pm.default_mclk = rdev->clock.default_mclk;
1209         rdev->pm.current_sclk = rdev->clock.default_sclk;
1210         rdev->pm.current_mclk = rdev->clock.default_mclk;
1211         rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1212
1213         if (rdev->bios) {
1214                 if (rdev->is_atom_bios)
1215                         radeon_atombios_get_power_modes(rdev);
1216                 else
1217                         radeon_combios_get_power_modes(rdev);
1218                 radeon_pm_print_states(rdev);
1219                 radeon_pm_init_profile(rdev);
1220                 /* set up the default clocks if the MC ucode is loaded */
1221                 if ((rdev->family >= CHIP_BARTS) &&
1222                     (rdev->family <= CHIP_CAYMAN) &&
1223                     rdev->mc_fw) {
1224                         if (rdev->pm.default_vddc)
1225                                 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1226                                                         SET_VOLTAGE_TYPE_ASIC_VDDC);
1227                         if (rdev->pm.default_vddci)
1228                                 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1229                                                         SET_VOLTAGE_TYPE_ASIC_VDDCI);
1230                         if (rdev->pm.default_sclk)
1231                                 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1232                         if (rdev->pm.default_mclk)
1233                                 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1234                 }
1235         }
1236
1237         /* set up the internal thermal sensor if applicable */
1238         ret = radeon_hwmon_init(rdev);
1239         if (ret)
1240                 return ret;
1241
1242 #ifdef DUMBBELL_WIP
1243         INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
1244 #endif /* DUMBBELL_WIP */
1245
1246         if (rdev->pm.num_power_states > 1) {
1247                 /* where's the best place to put these? */
1248 #ifdef DUMBBELL_WIP
1249                 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1250 #endif /* DUMBBELL_WIP */
1251                 if (ret)
1252                         DRM_ERROR("failed to create device file for power profile\n");
1253 #ifdef DUMBBELL_WIP
1254                 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1255 #endif /* DUMBBELL_WIP */
1256                 if (ret)
1257                         DRM_ERROR("failed to create device file for power method\n");
1258
1259                 if (radeon_debugfs_pm_init(rdev)) {
1260                         DRM_ERROR("Failed to register debugfs file for PM!\n");
1261                 }
1262
1263                 DRM_INFO("radeon: power management initialized\n");
1264         }
1265
1266         return 0;
1267 }
1268
1269 static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1270 {
1271         int i;
1272
1273         for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1274                 printk("== power state %d ==\n", i);
1275                 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1276         }
1277 }
1278
/*
 * radeon_pm_init_dpm - initialize the dpm power-management path
 * @rdev: radeon device
 *
 * Sets dpm defaults (balanced state, auto performance level), parses
 * the ATOM power tables (dpm requires an ATOM bios), sets up the
 * thermal sensor and work handler, then initializes and enables dpm
 * under the pm mutex.  On enable failure, dpm is left disabled and the
 * default voltages/clocks are programmed directly (BTC..Cayman with MC
 * ucode only).  Returns 0 on success or a negative error code.
 */
static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
	int ret;

	/* default to balanced state */
	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
	rdev->pm.default_sclk = rdev->clock.default_sclk;
	rdev->pm.default_mclk = rdev->clock.default_mclk;
	rdev->pm.current_sclk = rdev->clock.default_sclk;
	rdev->pm.current_mclk = rdev->clock.default_mclk;
	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	/* dpm depends on the ATOM power tables */
	if (rdev->bios && rdev->is_atom_bios)
		radeon_atombios_get_power_modes(rdev);
	else
		return -EINVAL;

	/* set up the internal thermal sensor if applicable */
	ret = radeon_hwmon_init(rdev);
	if (ret)
		return ret;

	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
	mutex_lock(&rdev->pm.mutex);
	radeon_dpm_init(rdev);
	/* hardware starts out in the boot power state */
	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
	if (radeon_dpm == 1)
		radeon_dpm_print_power_states(rdev);
	radeon_dpm_setup_asic(rdev);
	ret = radeon_dpm_enable(rdev);
	mutex_unlock(&rdev->pm.mutex);
	if (ret)
		goto dpm_failed;
	rdev->pm.dpm_enabled = true;

#ifdef TODO_DEVICE_FILE
	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
	if (ret)
		DRM_ERROR("failed to create device file for dpm state\n");
	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret)
		DRM_ERROR("failed to create device file for dpm state\n");
	/* XXX: these are noops for dpm but are here for backwards compat */
	ret = device_create_file(rdev->dev, &dev_attr_power_profile);
	if (ret)
		DRM_ERROR("failed to create device file for power profile\n");
	ret = device_create_file(rdev->dev, &dev_attr_power_method);
	if (ret)
		DRM_ERROR("failed to create device file for power method\n");

	if (radeon_debugfs_pm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
	}
#endif

	DRM_INFO("radeon: dpm initialized\n");

	return 0;

dpm_failed:
	rdev->pm.dpm_enabled = false;
	/* fall back to programming the default clocks by hand */
	if ((rdev->family >= CHIP_BARTS) &&
	    (rdev->family <= CHIP_CAYMAN) &&
	    rdev->mc_fw) {
		if (rdev->pm.default_vddc)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
						SET_VOLTAGE_TYPE_ASIC_VDDC);
		if (rdev->pm.default_vddci)
			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
						SET_VOLTAGE_TYPE_ASIC_VDDCI);
		if (rdev->pm.default_sclk)
			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
		if (rdev->pm.default_mclk)
			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
	}
	DRM_ERROR("radeon: dpm initialization failed\n");
	return ret;
}
1359
1360 int radeon_pm_init(struct radeon_device *rdev)
1361 {
1362         /* enable dpm on rv6xx+ */
1363         switch (rdev->family) {
1364         case CHIP_RV610:
1365         case CHIP_RV630:
1366         case CHIP_RV620:
1367         case CHIP_RV635:
1368         case CHIP_RV670:
1369         case CHIP_RS780:
1370         case CHIP_RS880:
1371         case CHIP_RV770:
1372                 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1373                 if (!rdev->rlc_fw)
1374                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1375                 else if ((rdev->family >= CHIP_RV770) &&
1376                          (!(rdev->flags & RADEON_IS_IGP)) &&
1377                          (!rdev->smc_fw))
1378                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1379                 else if (radeon_dpm == 1)
1380                         rdev->pm.pm_method = PM_METHOD_DPM;
1381                 else
1382                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1383                 break;
1384         case CHIP_RV730:
1385         case CHIP_RV710:
1386         case CHIP_RV740:
1387         case CHIP_CEDAR:
1388         case CHIP_REDWOOD:
1389         case CHIP_JUNIPER:
1390         case CHIP_CYPRESS:
1391         case CHIP_HEMLOCK:
1392         case CHIP_PALM:
1393         case CHIP_SUMO:
1394         case CHIP_SUMO2:
1395         case CHIP_BARTS:
1396         case CHIP_TURKS:
1397         case CHIP_CAICOS:
1398         case CHIP_CAYMAN:
1399         case CHIP_ARUBA:
1400         case CHIP_TAHITI:
1401         case CHIP_PITCAIRN:
1402         case CHIP_VERDE:
1403         case CHIP_OLAND:
1404         case CHIP_HAINAN:
1405         case CHIP_BONAIRE:
1406         case CHIP_KABINI:
1407         case CHIP_KAVERI:
1408         case CHIP_HAWAII:
1409         case CHIP_MULLINS:
1410                 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1411                 if (!rdev->rlc_fw)
1412                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1413                 else if ((rdev->family >= CHIP_RV770) &&
1414                          (!(rdev->flags & RADEON_IS_IGP)) &&
1415                          (!rdev->smc_fw))
1416                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1417                 else if (radeon_dpm == 0)
1418                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1419                 else
1420                         rdev->pm.pm_method = PM_METHOD_DPM;
1421                 break;
1422         default:
1423                 /* default to profile method */
1424                 rdev->pm.pm_method = PM_METHOD_PROFILE;
1425                 break;
1426         }
1427
1428         if (rdev->pm.pm_method == PM_METHOD_DPM)
1429                 return radeon_pm_init_dpm(rdev);
1430         else
1431                 return radeon_pm_init_old(rdev);
1432 }
1433
1434 int radeon_pm_late_init(struct radeon_device *rdev)
1435 {
1436         int ret = 0;
1437
1438         if (rdev->pm.pm_method == PM_METHOD_DPM) {
1439                 lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
1440                 ret = radeon_dpm_late_enable(rdev);
1441                 lockmgr(&rdev->pm.mutex, LK_RELEASE);
1442         }
1443         return ret;
1444 }
1445
/*
 * Tear down the legacy (profile/dynpm) power management state.
 *
 * Restores default clocks before freeing the parsed power-state tables,
 * then shuts down hwmon.  Call order here is deliberate; do not reorder.
 */
static void radeon_pm_fini_old(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states > 1) {
		/* The pm mutex must be taken without the global DRM lock
		 * held, so drop and re-acquire it around the clock reset. */
		DRM_UNLOCK(rdev->ddev); /* Work around LOR. */
		lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
			/* return to the default profile before shutdown */
			rdev->pm.profile = PM_PROFILE_DEFAULT;
			radeon_pm_update_profile(rdev);
			radeon_pm_set_clocks(rdev);
		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
			/* reset default clocks */
			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
			radeon_pm_set_clocks(rdev);
		}
		lockmgr(&rdev->pm.mutex, LK_RELEASE);
		DRM_LOCK(rdev->ddev);

#ifdef DUMBBELL_WIP
		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);

		device_remove_file(rdev->dev, &dev_attr_power_profile);
		device_remove_file(rdev->dev, &dev_attr_power_method);
#endif /* DUMBBELL_WIP */
	}

	/* free each state's clock_info array, then the state table itself */
	if (rdev->pm.power_state) {
		int i;
		for (i = 0; i < rdev->pm.num_power_states; ++i) {
			kfree(rdev->pm.power_state[i].clock_info);
		}
		kfree(rdev->pm.power_state);
		rdev->pm.power_state = NULL;
		rdev->pm.num_power_states = 0;
	}

	radeon_hwmon_fini(rdev);
}
1484
1485 static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1486 {
1487         if (rdev->pm.num_power_states > 1) {
1488                 lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
1489                 radeon_dpm_disable(rdev);
1490                 lockmgr(&rdev->pm.mutex, LK_RELEASE);
1491
1492 #ifdef TODO_DEVICE_FILE
1493                 device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
1494                 device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1495                 /* XXX backwards compat */
1496                 device_remove_file(rdev->dev, &dev_attr_power_profile);
1497                 device_remove_file(rdev->dev, &dev_attr_power_method);
1498 #endif
1499         }
1500         radeon_dpm_fini(rdev);
1501
1502         /* prevents leaking 440 bytes on OLAND */
1503         if (rdev->pm.power_state) {
1504                 int i;
1505                 for (i = 0; i < rdev->pm.num_power_states; ++i) {
1506                         kfree(rdev->pm.power_state[i].clock_info);
1507                 }
1508                 kfree(rdev->pm.power_state);
1509                 rdev->pm.power_state = NULL;
1510                 rdev->pm.num_power_states = 0;
1511         }
1512
1513         radeon_hwmon_fini(rdev);
1514 }
1515
1516 void radeon_pm_fini(struct radeon_device *rdev)
1517 {
1518         if (rdev->pm.pm_method == PM_METHOD_DPM)
1519                 radeon_pm_fini_dpm(rdev);
1520         else
1521                 radeon_pm_fini_old(rdev);
1522 }
1523
/*
 * Re-evaluate clocks for the legacy (profile/dynpm) path after a display
 * configuration change.
 *
 * Recounts enabled crtcs, then either re-applies the current profile or
 * drives the dynpm state machine:
 *   >1 active crtc: pause dynpm and restore default clocks
 *   ==1 active crtc: (re)activate dynpm, plan an upclock
 *   ==0 active crtcs: drop to minimum clocks
 * The dynpm state/planned-action writes must precede the
 * radeon_pm_get_dynpm_state()/radeon_pm_set_clocks() calls; do not reorder.
 */
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;

	/* nothing to switch between with fewer than two power states */
	if (rdev->pm.num_power_states < 2)
		return;

	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);

	/* rebuild the active crtc mask and count */
	rdev->pm.active_crtcs = 0;
	rdev->pm.active_crtc_count = 0;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			radeon_crtc = to_radeon_crtc(crtc);
			if (radeon_crtc->enabled) {
				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
				rdev->pm.active_crtc_count++;
			}
		}
	}

	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
		radeon_pm_update_profile(rdev);
		radeon_pm_set_clocks(rdev);
	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
			if (rdev->pm.active_crtc_count > 1) {
				/* multi-head: pause dynpm and return to
				 * default clocks */
				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
#ifdef DUMBBELL_WIP
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */

					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
				}
			} else if (rdev->pm.active_crtc_count == 1) {
				/* TODO: Increase clocks if needed for current mode */

				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
					/* coming up from idle: upclock */
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);

#ifdef DUMBBELL_WIP
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
					/* resuming from multi-head pause:
					 * just reactivate, keep clocks */
					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
#ifdef DUMBBELL_WIP
					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
#endif /* DUMBBELL_WIP */
					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
				}
			} else { /* count == 0 */
				/* no displays: drop to minimum clocks */
				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
#ifdef DUMBBELL_WIP
					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
#endif /* DUMBBELL_WIP */

					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
					radeon_pm_get_dynpm_state(rdev);
					radeon_pm_set_clocks(rdev);
				}
			}
		}
	}

	lockmgr(&rdev->pm.mutex, LK_RELEASE);
}
1604
1605 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1606 {
1607         struct drm_device *ddev = rdev->ddev;
1608         struct drm_crtc *crtc;
1609         struct radeon_crtc *radeon_crtc;
1610
1611         if (!rdev->pm.dpm_enabled)
1612                 return;
1613
1614         lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
1615
1616         /* update active crtc counts */
1617         rdev->pm.dpm.new_active_crtcs = 0;
1618         rdev->pm.dpm.new_active_crtc_count = 0;
1619         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1620                 list_for_each_entry(crtc,
1621                                     &ddev->mode_config.crtc_list, head) {
1622                         radeon_crtc = to_radeon_crtc(crtc);
1623                         if (crtc->enabled) {
1624                                 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1625                                 rdev->pm.dpm.new_active_crtc_count++;
1626                         }
1627                 }
1628         }
1629
1630         /* update battery/ac status */
1631         if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE)
1632                 rdev->pm.dpm.ac_power = true;
1633         else
1634                 rdev->pm.dpm.ac_power = false;
1635
1636         radeon_dpm_change_power_state_locked(rdev);
1637
1638         lockmgr(&rdev->pm.mutex, LK_RELEASE);
1639
1640 }
1641
1642 void radeon_pm_compute_clocks(struct radeon_device *rdev)
1643 {
1644         if (rdev->pm.pm_method == PM_METHOD_DPM)
1645                 radeon_pm_compute_clocks_dpm(rdev);
1646         else
1647                 radeon_pm_compute_clocks_old(rdev);
1648 }
1649
1650 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1651 {
1652         int  crtc, vpos, hpos, vbl_status;
1653         bool in_vbl = true;
1654
1655         /* Iterate over all active crtc's. All crtc's must be in vblank,
1656          * otherwise return in_vbl == false.
1657          */
1658         for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1659                 if (rdev->pm.active_crtcs & (1 << crtc)) {
1660                         vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0,
1661                                                                 &vpos, &hpos, NULL, NULL,
1662                                                                 &rdev->mode_info.crtcs[crtc]->base.hwmode);
1663                         if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1664                             !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
1665                                 in_vbl = false;
1666                 }
1667         }
1668
1669         return in_vbl;
1670 }
1671
1672 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
1673 {
1674         u32 stat_crtc = 0;
1675         bool in_vbl = radeon_pm_in_vbl(rdev);
1676
1677         if (in_vbl == false)
1678                 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
1679                          finish ? "exit" : "entry");
1680         return in_vbl;
1681 }
1682
#ifdef DUMBBELL_WIP
/*
 * Periodic dynpm worker: samples how many fences are still outstanding
 * across the rings and plans an up- or downclock accordingly, applying
 * it once the reclock delay has elapsed.  Re-arms itself while dynpm
 * is in the ACTIVE state.  Currently compiled out (DUMBBELL_WIP).
 */
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev;
	int resched;
	rdev = container_of(work, struct radeon_device,
				pm.dynpm_idle_work.work);

	/* pause TTM's delayed workqueue while clocks may change */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	lockmgr(&rdev->pm.mutex, LK_EXCLUSIVE);
	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
		int not_processed = 0;
		int i;

		/* count emitted-but-unprocessed fences, capped at 3 */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			struct radeon_ring *ring = &rdev->ring[i];

			if (ring->ready) {
				not_processed += radeon_fence_count_emitted(rdev, i);
				if (not_processed >= 3)
					break;
			}
		}

		if (not_processed >= 3) { /* should upclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
				/* cancel the pending opposite action */
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_upclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_UPCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		} else if (not_processed == 0) { /* should downclock */
			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
				/* cancel the pending opposite action */
				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
				   rdev->pm.dynpm_can_downclock) {
				rdev->pm.dynpm_planned_action =
					DYNPM_ACTION_DOWNCLOCK;
				rdev->pm.dynpm_action_timeout = jiffies +
				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
			}
		}

		/* Note, radeon_pm_set_clocks is called with static_switch set
		 * to false since we want to wait for vbl to avoid flicker.
		 */
		/* NOTE(review): the plain '>' comparison on jiffies is not
		 * wrap-around safe; time_after() would be — confirm before
		 * enabling this code. */
		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
		    jiffies > rdev->pm.dynpm_action_timeout) {
			radeon_pm_get_dynpm_state(rdev);
			radeon_pm_set_clocks(rdev);
		}

		/* re-arm the worker while dynpm stays active */
		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
	}
	lockmgr(&rdev->pm.mutex, LK_RELEASE);
	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
#endif /* DUMBBELL_WIP */
1745
1746 /*
1747  * Debugfs info
1748  */
1749 #if defined(CONFIG_DEBUG_FS)
1750
/*
 * debugfs "radeon_pm_info": dump the current power management state.
 *
 * Reports "powered off" for a runtime-suspended PX asic, delegates to
 * the asic dpm debugfs hook when dpm is enabled, and otherwise prints
 * raw engine/memory clocks, voltage and PCIE lane count.
 */
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_device *ddev = rdev->ddev;

	if  ((rdev->flags & RADEON_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		/* don't touch registers of a powered-down PX GPU */
		seq_printf(m, "PX asic powered off\n");
	} else if (rdev->pm.dpm_enabled) {
		/* NOTE(review): every other pm.mutex user in this file goes
		 * through lockmgr(); spin_lock() here looks inconsistent —
		 * verify against the pm.mutex declaration. */
		spin_lock(&rdev->pm.mutex);
		if (rdev->asic->dpm.debugfs_print_current_performance_level)
			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		spin_unlock(&rdev->pm.mutex);
	} else {
		/* clocks are stored in 10 kHz units, hence the "%u0" trick */
		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
		else
			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
		if (rdev->asic->pm.get_memory_clock)
			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
		if (rdev->pm.current_vddc)
			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
		if (rdev->asic->pm.get_pcie_lanes)
			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
	}

	return 0;
}
1786
/* Debugfs entries registered by radeon_debugfs_pm_init(). */
static struct drm_info_list radeon_pm_info_list[] = {
	{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
1790 #endif
1791
/*
 * Register the "radeon_pm_info" debugfs file.  A no-op returning 0 when
 * debugfs support (CONFIG_DEBUG_FS) is compiled out.
 */
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
	return 0;
#endif
}