drm/radeon: Update to Linux 4.7.10
[dragonfly.git] sys/dev/drm/radeon/radeon_pm.c
1 /*
2  * Permission is hereby granted, free of charge, to any person obtaining a
3  * copy of this software and associated documentation files (the "Software"),
4  * to deal in the Software without restriction, including without limitation
5  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6  * and/or sell copies of the Software, and to permit persons to whom the
7  * Software is furnished to do so, subject to the following conditions:
8  *
9  * The above copyright notice and this permission notice shall be included in
10  * all copies or substantial portions of the Software.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
15  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18  * OTHER DEALINGS IN THE SOFTWARE.
19  *
20  * Authors: Rafał Miłecki <zajec5@gmail.com>
21  *          Alex Deucher <alexdeucher@gmail.com>
22  */
23 #include <drm/drmP.h>
24 #include "radeon.h"
25 #include "avivod.h"
26 #include "atom.h"
27 #include "r600_dpm.h"
28 #include <linux/power_supply.h>
29 #include <linux/hwmon.h>
30
31 #include <sys/power.h>
32 #include <sys/sensors.h>
33
34 #define RADEON_IDLE_LOOP_MS 100
35 #define RADEON_RECLOCK_DELAY_MS 200
36 #define RADEON_WAIT_VBLANK_TIMEOUT 200
37
38 static const char *radeon_pm_state_type_name[5] = {
39         "",
40         "Powersave",
41         "Battery",
42         "Balanced",
43         "Performance",
44 };
45
46 static void radeon_dynpm_idle_work_handler(struct work_struct *work);
47 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
48 static bool radeon_pm_in_vbl(struct radeon_device *rdev);
49 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
50 static void radeon_pm_update_profile(struct radeon_device *rdev);
51 static void radeon_pm_set_clocks(struct radeon_device *rdev);
52
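/*
 * Look up the index of the @instance'th power state of type @ps_type in
 * the power state table; fall back to the default power state index when
 * no matching state exists.
 */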
53 int radeon_pm_get_type_index(struct radeon_device *rdev,
54                              enum radeon_pm_state_type ps_type,
55                              int instance)
56 {
57         int i;
58         int found_instance = -1;
59
60         for (i = 0; i < rdev->pm.num_power_states; i++) {
61                 if (rdev->pm.power_state[i].type == ps_type) {
62                         found_instance++;
63                         if (found_instance == instance)
64                                 return i;
65                 }
66         }
67         /* return default if no match */
68         return rdev->pm.default_power_state_index;
69 }
70
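/*
 * Called on AC/battery power events.  For dpm, the performance power
 * profile is used as a proxy for AC power and BAPM is toggled accordingly
 * on ARUBA; for profile-based PM, the AUTO profile is re-evaluated.
 */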
71 void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
72 {
73         if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
74                 mutex_lock(&rdev->pm.mutex);
75                 if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE)
76                         rdev->pm.dpm.ac_power = true;
77                 else
78                         rdev->pm.dpm.ac_power = false;
79                 if (rdev->family == CHIP_ARUBA) {
80                         if (rdev->asic->dpm.enable_bapm)
81                                 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
82                 }
83                 mutex_unlock(&rdev->pm.mutex);
84         } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
85                 if (rdev->pm.profile == PM_PROFILE_AUTO) {
86                         mutex_lock(&rdev->pm.mutex);
87                         radeon_pm_update_profile(rdev);
88                         radeon_pm_set_clocks(rdev);
89                         mutex_unlock(&rdev->pm.mutex);
90                 }
91         }
92 }
93
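/*
 * Map the selected profile (and, for PM_PROFILE_AUTO, the AC/battery
 * state) together with the number of active crtcs to a profile index,
 * then derive the requested power state and clock mode from that profile
 * depending on whether any display is active (dpms on/off).
 */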
94 static void radeon_pm_update_profile(struct radeon_device *rdev)
95 {
96         switch (rdev->pm.profile) {
97         case PM_PROFILE_DEFAULT:
98                 rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
99                 break;
100         case PM_PROFILE_AUTO:
101                 if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE) {
102                         if (rdev->pm.active_crtc_count > 1)
103                                 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
104                         else
105                                 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
106                 } else {
107                         if (rdev->pm.active_crtc_count > 1)
108                                 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
109                         else
110                                 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
111                 }
112                 break;
113         case PM_PROFILE_LOW:
114                 if (rdev->pm.active_crtc_count > 1)
115                         rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
116                 else
117                         rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
118                 break;
119         case PM_PROFILE_MID:
120                 if (rdev->pm.active_crtc_count > 1)
121                         rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
122                 else
123                         rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
124                 break;
125         case PM_PROFILE_HIGH:
126                 if (rdev->pm.active_crtc_count > 1)
127                         rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
128                 else
129                         rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
130                 break;
131         }
132
133         if (rdev->pm.active_crtc_count == 0) {
134                 rdev->pm.requested_power_state_index =
135                         rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
136                 rdev->pm.requested_clock_mode_index =
137                         rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
138         } else {
139                 rdev->pm.requested_power_state_index =
140                         rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
141                 rdev->pm.requested_clock_mode_index =
142                         rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
143         }
144 }
145
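/*
 * Unmap the CPU virtual mappings of all buffer objects currently placed
 * in VRAM, so nothing accesses VRAM through the CPU while the clocks are
 * being reprogrammed.
 */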
146 static void radeon_unmap_vram_bos(struct radeon_device *rdev)
147 {
148         struct radeon_bo *bo, *n;
149
150         if (list_empty(&rdev->gem.objects))
151                 return;
152
153         list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
154                 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
155                         ttm_bo_unmap_virtual(&bo->tbo);
156         }
157 }
158
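/*
 * Wait (with a timeout) for the next vblank before reclocking; the actual
 * wait is still compiled out in this port.
 */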
159 static void radeon_sync_with_vblank(struct radeon_device *rdev)
160 {
161         if (rdev->pm.active_crtcs) {
162                 rdev->pm.vblank_sync = false;
163 #ifdef DUMBBELL_WIP
164                 wait_event_timeout(
165                         rdev->irq.vblank_queue, rdev->pm.vblank_sync,
166                         msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
167 #endif /* DUMBBELL_WIP */
168         }
169 }
170
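/*
 * Program the requested power state: clamp sclk/mclk to the defaults,
 * adjust voltage/pcie lanes before raising clocks (or after lowering
 * them), and update the engine and memory clocks if they changed.  Only
 * done while the GUI engine is idle.
 */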
171 static void radeon_set_power_state(struct radeon_device *rdev)
172 {
173         u32 sclk, mclk;
174         bool misc_after = false;
175
176         if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
177             (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
178                 return;
179
180         if (radeon_gui_idle(rdev)) {
181                 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
182                         clock_info[rdev->pm.requested_clock_mode_index].sclk;
183                 if (sclk > rdev->pm.default_sclk)
184                         sclk = rdev->pm.default_sclk;
185
186                 /* starting with BTC, there is one state that is used for both
187                  * MH and SH.  The difference is that we always use the high clock index for
188                  * mclk and vddci.
189                  */
190                 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
191                     (rdev->family >= CHIP_BARTS) &&
192                     rdev->pm.active_crtc_count &&
193                     ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
194                      (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
195                         mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
196                                 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
197                 else
198                         mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
199                                 clock_info[rdev->pm.requested_clock_mode_index].mclk;
200
201                 if (mclk > rdev->pm.default_mclk)
202                         mclk = rdev->pm.default_mclk;
203
204                 /* upvolt before raising clocks, downvolt after lowering clocks */
205                 if (sclk < rdev->pm.current_sclk)
206                         misc_after = true;
207
208                 radeon_sync_with_vblank(rdev);
209
210                 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
211                         if (!radeon_pm_in_vbl(rdev))
212                                 return;
213                 }
214
215                 radeon_pm_prepare(rdev);
216
217                 if (!misc_after)
218                         /* voltage, pcie lanes, etc.*/
219                         radeon_pm_misc(rdev);
220
221                 /* set engine clock */
222                 if (sclk != rdev->pm.current_sclk) {
223                         radeon_pm_debug_check_in_vbl(rdev, false);
224                         radeon_set_engine_clock(rdev, sclk);
225                         radeon_pm_debug_check_in_vbl(rdev, true);
226                         rdev->pm.current_sclk = sclk;
227                         DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
228                 }
229
230                 /* set memory clock */
231                 if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
232                         radeon_pm_debug_check_in_vbl(rdev, false);
233                         radeon_set_memory_clock(rdev, mclk);
234                         radeon_pm_debug_check_in_vbl(rdev, true);
235                         rdev->pm.current_mclk = mclk;
236                         DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
237                 }
238
239                 if (misc_after)
240                         /* voltage, pcie lanes, etc.*/
241                         radeon_pm_misc(rdev);
242
243                 radeon_pm_finish(rdev);
244
245                 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
246                 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
247         } else
248                 DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
249 }
250
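/*
 * Perform a full reclock: drain the active rings, unmap VRAM buffer
 * objects, take vblank references on the active crtcs, program the
 * requested power state and finally update the bandwidth/watermark
 * settings for the new clocks.
 */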
251 static void radeon_pm_set_clocks(struct radeon_device *rdev)
252 {
253         int i, r;
254
255         /* no need to take locks, etc. if nothing's going to change */
256         if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
257             (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
258                 return;
259
260         down_write(&rdev->pm.mclk_lock);
261         mutex_lock(&rdev->ring_lock);
262
263         /* wait for the rings to drain */
264         for (i = 0; i < RADEON_NUM_RINGS; i++) {
265                 struct radeon_ring *ring = &rdev->ring[i];
266                 if (!ring->ready) {
267                         continue;
268                 }
269                 r = radeon_fence_wait_empty(rdev, i);
270                 if (r) {
271                         /* needs a GPU reset, don't reset here */
272                         mutex_unlock(&rdev->ring_lock);
273                         up_write(&rdev->pm.mclk_lock);
274                         return;
275                 }
276         }
277
278         radeon_unmap_vram_bos(rdev);
279
280         if (rdev->irq.installed) {
281                 for (i = 0; i < rdev->num_crtc; i++) {
282                         if (rdev->pm.active_crtcs & (1 << i)) {
283                                 /* This can fail if a modeset is in progress */
284                                 if (drm_vblank_get(rdev->ddev, i) == 0)
285                                         rdev->pm.req_vblank |= (1 << i);
286                                 else
287                                         DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
288                                                          i);
289                         }
290                 }
291         }
292
293         radeon_set_power_state(rdev);
294
295         if (rdev->irq.installed) {
296                 for (i = 0; i < rdev->num_crtc; i++) {
297                         if (rdev->pm.req_vblank & (1 << i)) {
298                                 rdev->pm.req_vblank &= ~(1 << i);
299                                 drm_vblank_put(rdev->ddev, i);
300                         }
301                 }
302         }
303
304         /* update display watermarks based on new power state */
305         radeon_update_bandwidth_info(rdev);
306         if (rdev->pm.active_crtc_count)
307                 radeon_bandwidth_update(rdev);
308
309         rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
310
311         mutex_unlock(&rdev->ring_lock);
312         up_write(&rdev->pm.mclk_lock);
313 }
314
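/* Dump the power state table to the debug log. */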
315 static void radeon_pm_print_states(struct radeon_device *rdev)
316 {
317         int i, j;
318         struct radeon_power_state *power_state;
319         struct radeon_pm_clock_info *clock_info;
320
321         DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
322         for (i = 0; i < rdev->pm.num_power_states; i++) {
323                 power_state = &rdev->pm.power_state[i];
324                 DRM_DEBUG_DRIVER("State %d: %s\n", i,
325                         radeon_pm_state_type_name[power_state->type]);
326                 if (i == rdev->pm.default_power_state_index)
327                         DRM_DEBUG_DRIVER("\tDefault");
328                 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
329                         DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
330                 if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
331                         DRM_DEBUG_DRIVER("\tSingle display only\n");
332                 DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
333                 for (j = 0; j < power_state->num_clock_modes; j++) {
334                         clock_info = &(power_state->clock_info[j]);
335                         if (rdev->flags & RADEON_IS_IGP)
336                                 DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
337                                                  j,
338                                                  clock_info->sclk * 10);
339                         else
340                                 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
341                                                  j,
342                                                  clock_info->sclk * 10,
343                                                  clock_info->mclk * 10,
344                                                  clock_info->voltage.voltage);
345                 }
346         }
347 }
348
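/*
 * Linux sysfs/hwmon interface for the power management code; not wired up
 * in this port yet, hence compiled out under DUMBBELL_WIP.
 */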
349 #ifdef DUMBBELL_WIP
350 static ssize_t radeon_get_pm_profile(struct device *dev,
351                                      struct device_attribute *attr,
352                                      char *buf)
353 {
354         struct drm_device *ddev = dev_get_drvdata(dev);
355         struct radeon_device *rdev = ddev->dev_private;
356         int cp = rdev->pm.profile;
357
358         return ksnprintf(buf, PAGE_SIZE, "%s\n",
359                         (cp == PM_PROFILE_AUTO) ? "auto" :
360                         (cp == PM_PROFILE_LOW) ? "low" :
361                         (cp == PM_PROFILE_MID) ? "mid" :
362                         (cp == PM_PROFILE_HIGH) ? "high" : "default");
363 }
364
365 static ssize_t radeon_set_pm_profile(struct device *dev,
366                                      struct device_attribute *attr,
367                                      const char *buf,
368                                      size_t count)
369 {
370         struct drm_device *ddev = dev_get_drvdata(dev);
371         struct radeon_device *rdev = ddev->dev_private;
372
373         /* Can't set profile when the card is off */
374         if  ((rdev->flags & RADEON_IS_PX) &&
375              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
376                 return -EINVAL;
377
378         mutex_lock(&rdev->pm.mutex);
379         if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
380                 if (strncmp("default", buf, strlen("default")) == 0)
381                         rdev->pm.profile = PM_PROFILE_DEFAULT;
382                 else if (strncmp("auto", buf, strlen("auto")) == 0)
383                         rdev->pm.profile = PM_PROFILE_AUTO;
384                 else if (strncmp("low", buf, strlen("low")) == 0)
385                         rdev->pm.profile = PM_PROFILE_LOW;
386                 else if (strncmp("mid", buf, strlen("mid")) == 0)
387                         rdev->pm.profile = PM_PROFILE_MID;
388                 else if (strncmp("high", buf, strlen("high")) == 0)
389                         rdev->pm.profile = PM_PROFILE_HIGH;
390                 else {
391                         count = -EINVAL;
392                         goto fail;
393                 }
394                 radeon_pm_update_profile(rdev);
395                 radeon_pm_set_clocks(rdev);
396         } else
397                 count = -EINVAL;
398
399 fail:
400         mutex_unlock(&rdev->pm.mutex);
401
402         return count;
403 }
404
405 static ssize_t radeon_get_pm_method(struct device *dev,
406                                     struct device_attribute *attr,
407                                     char *buf)
408 {
409         struct drm_device *ddev = dev_get_drvdata(dev);
410         struct radeon_device *rdev = ddev->dev_private;
411         int pm = rdev->pm.pm_method;
412
413         return ksnprintf(buf, PAGE_SIZE, "%s\n",
414                         (pm == PM_METHOD_DYNPM) ? "dynpm" :
415                         (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
416 }
417
418 static ssize_t radeon_set_pm_method(struct device *dev,
419                                     struct device_attribute *attr,
420                                     const char *buf,
421                                     size_t count)
422 {
423         struct drm_device *ddev = dev_get_drvdata(dev);
424         struct radeon_device *rdev = ddev->dev_private;
425
426         /* Can't set method when the card is off */
427         if  ((rdev->flags & RADEON_IS_PX) &&
428              (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
429                 count = -EINVAL;
430                 goto fail;
431         }
432
433         /* we don't support the legacy modes with dpm */
434         if (rdev->pm.pm_method == PM_METHOD_DPM) {
435                 count = -EINVAL;
436                 goto fail;
437         }
438
439         if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
440                 mutex_lock(&rdev->pm.mutex);
441                 rdev->pm.pm_method = PM_METHOD_DYNPM;
442                 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
443                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
444                 mutex_unlock(&rdev->pm.mutex);
445         } else if (strncmp("profile", buf, strlen("profile")) == 0) {
446                 mutex_lock(&rdev->pm.mutex);
447                 /* disable dynpm */
448                 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
449                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
450                 rdev->pm.pm_method = PM_METHOD_PROFILE;
451                 mutex_unlock(&rdev->pm.mutex);
452 #ifdef DUMBBELL_WIP
453                 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
454 #endif /* DUMBBELL_WIP */
455         } else {
456                 count = -EINVAL;
457                 goto fail;
458         }
459         radeon_pm_compute_clocks(rdev);
460 fail:
461         return count;
462 }
463
464 static ssize_t radeon_get_dpm_state(struct device *dev,
465                                     struct device_attribute *attr,
466                                     char *buf)
467 {
468         struct drm_device *ddev = dev_get_drvdata(dev);
469         struct radeon_device *rdev = ddev->dev_private;
470         enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
471
472         return snprintf(buf, PAGE_SIZE, "%s\n",
473                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
474                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
475 }
476
477 static ssize_t radeon_set_dpm_state(struct device *dev,
478                                     struct device_attribute *attr,
479                                     const char *buf,
480                                     size_t count)
481 {
482         struct drm_device *ddev = dev_get_drvdata(dev);
483         struct radeon_device *rdev = ddev->dev_private;
484
485         mutex_lock(&rdev->pm.mutex);
486         if (strncmp("battery", buf, strlen("battery")) == 0)
487                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
488         else if (strncmp("balanced", buf, strlen("balanced")) == 0)
489                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
490         else if (strncmp("performance", buf, strlen("performance")) == 0)
491                 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
492         else {
493                 mutex_unlock(&rdev->pm.mutex);
494                 count = -EINVAL;
495                 goto fail;
496         }
497         mutex_unlock(&rdev->pm.mutex);
498
499         /* Can't set dpm state when the card is off */
500         if (!(rdev->flags & RADEON_IS_PX) ||
501             (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
502                 radeon_pm_compute_clocks(rdev);
503
504 fail:
505         return count;
506 }
507
508 static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
509                                                        struct device_attribute *attr,
510                                                        char *buf)
511 {
512         struct drm_device *ddev = dev_get_drvdata(dev);
513         struct radeon_device *rdev = ddev->dev_private;
514         enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
515
516         if  ((rdev->flags & RADEON_IS_PX) &&
517              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
518                 return ksnprintf(buf, PAGE_SIZE, "off\n");
519
520         return snprintf(buf, PAGE_SIZE, "%s\n",
521                         (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
522                         (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
523 }
524
525 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
526                                                        struct device_attribute *attr,
527                                                        const char *buf,
528                                                        size_t count)
529 {
530         struct drm_device *ddev = dev_get_drvdata(dev);
531         struct radeon_device *rdev = ddev->dev_private;
532         enum radeon_dpm_forced_level level;
533         int ret = 0;
534
535         /* Can't force performance level when the card is off */
536         if  ((rdev->flags & RADEON_IS_PX) &&
537              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
538                 return -EINVAL;
539
540         mutex_lock(&rdev->pm.mutex);
541         if (strncmp("low", buf, strlen("low")) == 0) {
542                 level = RADEON_DPM_FORCED_LEVEL_LOW;
543         } else if (strncmp("high", buf, strlen("high")) == 0) {
544                 level = RADEON_DPM_FORCED_LEVEL_HIGH;
545         } else if (strncmp("auto", buf, strlen("auto")) == 0) {
546                 level = RADEON_DPM_FORCED_LEVEL_AUTO;
547         } else {
548                 count = -EINVAL;
549                 goto fail;
550         }
551         if (rdev->asic->dpm.force_performance_level) {
552                 if (rdev->pm.dpm.thermal_active) {
553                         count = -EINVAL;
554                         goto fail;
555                 }
556                 ret = radeon_dpm_force_performance_level(rdev, level);
557                 if (ret)
558                         count = -EINVAL;
559         }
560 fail:
561         mutex_unlock(&rdev->pm.mutex);
562
563         return count;
564 }
565
566 static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev,
567                                             struct device_attribute *attr,
568                                             char *buf)
569 {
570         struct radeon_device *rdev = dev_get_drvdata(dev);
571         u32 pwm_mode = 0;
572
573         if (rdev->asic->dpm.fan_ctrl_get_mode)
574                 pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev);
575
576         /* never 0 (full-speed); the fan is always fuse- or smc-controlled */
577         return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
578 }
579
580 static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
581                                             struct device_attribute *attr,
582                                             const char *buf,
583                                             size_t count)
584 {
585         struct radeon_device *rdev = dev_get_drvdata(dev);
586         int err;
587         int value;
588
589         if (!rdev->asic->dpm.fan_ctrl_set_mode)
590                 return -EINVAL;
591
592         err = kstrtoint(buf, 10, &value);
593         if (err)
594                 return err;
595
596         switch (value) {
597         case 1: /* manual, percent-based */
598                 rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
599                 break;
600         default: /* disable */
601                 rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
602                 break;
603         }
604
605         return count;
606 }
607
608 static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
609                                          struct device_attribute *attr,
610                                          char *buf)
611 {
612         return sprintf(buf, "%i\n", 0);
613 }
614
615 static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
616                                          struct device_attribute *attr,
617                                          char *buf)
618 {
619         return sprintf(buf, "%i\n", 255);
620 }
621
622 static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
623                                      struct device_attribute *attr,
624                                      const char *buf, size_t count)
625 {
626         struct radeon_device *rdev = dev_get_drvdata(dev);
627         int err;
628         u32 value;
629
630         err = kstrtou32(buf, 10, &value);
631         if (err)
632                 return err;
633
634         value = (value * 100) / 255;
635
636         err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
637         if (err)
638                 return err;
639
640         return count;
641 }
642
643 static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
644                                      struct device_attribute *attr,
645                                      char *buf)
646 {
647         struct radeon_device *rdev = dev_get_drvdata(dev);
648         int err;
649         u32 speed;
650
651         err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
652         if (err)
653                 return err;
654
655         speed = (speed * 255) / 100;
656
657         return sprintf(buf, "%i\n", speed);
658 }
659
660 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
661 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
662 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
663 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
664                    radeon_get_dpm_forced_performance_level,
665                    radeon_set_dpm_forced_performance_level);
666
667 static ssize_t radeon_hwmon_show_temp(struct device *dev,
668                                       struct device_attribute *attr,
669                                       char *buf)
670 {
671         struct radeon_device *rdev = dev_get_drvdata(dev);
672         struct drm_device *ddev = rdev->ddev;
673         int temp;
674
675         /* Can't get temperature when the card is off */
676         if  ((rdev->flags & RADEON_IS_PX) &&
677              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
678                 return -EINVAL;
679
680         if (rdev->asic->pm.get_temperature)
681                 temp = radeon_get_temperature(rdev);
682         else
683                 temp = 0;
684
685         return ksnprintf(buf, PAGE_SIZE, "%d\n", temp);
686 }
687
688 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
689                                              struct device_attribute *attr,
690                                              char *buf)
691 {
692         struct radeon_device *rdev = dev_get_drvdata(dev);
693         int hyst = to_sensor_dev_attr(attr)->index;
694         int temp;
695
696         if (hyst)
697                 temp = rdev->pm.dpm.thermal.min_temp;
698         else
699                 temp = rdev->pm.dpm.thermal.max_temp;
700
701         return ksnprintf(buf, PAGE_SIZE, "%d\n", temp);
702 }
703
704 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
705 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
706 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
707 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
708 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
709 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
710 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);
711
712
713 static struct attribute *hwmon_attributes[] = {
714         &sensor_dev_attr_temp1_input.dev_attr.attr,
715         &sensor_dev_attr_temp1_crit.dev_attr.attr,
716         &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
717         &sensor_dev_attr_pwm1.dev_attr.attr,
718         &sensor_dev_attr_pwm1_enable.dev_attr.attr,
719         &sensor_dev_attr_pwm1_min.dev_attr.attr,
720         &sensor_dev_attr_pwm1_max.dev_attr.attr,
721         NULL
722 };
723
724 static umode_t hwmon_attributes_visible(struct kobject *kobj,
725                                         struct attribute *attr, int index)
726 {
727         struct device *dev = kobj_to_dev(kobj);
728         struct radeon_device *rdev = dev_get_drvdata(dev);
729         umode_t effective_mode = attr->mode;
730
731         /* Skip attributes if DPM is not enabled */
732         if (rdev->pm.pm_method != PM_METHOD_DPM &&
733             (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
734              attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
735              attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
736              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
737              attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
738              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
739                 return 0;
740
741         /* Skip fan attributes if fan is not present */
742         if (rdev->pm.no_fan &&
743             (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
744              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
745              attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
746              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
747                 return 0;
748
749         /* mask fan attributes if we have no bindings for this asic to expose */
750         if ((!rdev->asic->dpm.get_fan_speed_percent &&
751              attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
752             (!rdev->asic->dpm.fan_ctrl_get_mode &&
753              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
754                 effective_mode &= ~S_IRUGO;
755
756         if ((!rdev->asic->dpm.set_fan_speed_percent &&
757              attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
758             (!rdev->asic->dpm.fan_ctrl_set_mode &&
759              attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
760                 effective_mode &= ~S_IWUSR;
761
762         /* hide max/min values if we can't both query and manage the fan */
763         if ((!rdev->asic->dpm.set_fan_speed_percent &&
764              !rdev->asic->dpm.get_fan_speed_percent) &&
765             (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
766              attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
767                 return 0;
768
769         return effective_mode;
770 }
771
772 static const struct attribute_group hwmon_attrgroup = {
773         .attrs = hwmon_attributes,
774         .is_visible = hwmon_attributes_visible,
775 };
776
777 static const struct attribute_group *hwmon_groups[] = {
778         &hwmon_attrgroup,
779         NULL
780 };
781 #endif /* DUMBBELL_WIP */
782
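/*
 * Refresh callback for the kernel sensors framework: read the GPU
 * temperature (reported in millidegrees Celsius), rate it against the dpm
 * thermal limits and publish the value in microkelvins.
 */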
783 static void
784 radeon_hwmon_refresh(void *arg)
785 {
786         struct radeon_device *rdev = (struct radeon_device *)arg;
787         struct drm_device *ddev = rdev->ddev;
788         struct ksensor *s = rdev->pm.int_sensor;
789         int temp;
790         enum sensor_status stat;
791
792         /* Can't get temperature when the card is off */
793         if  ((rdev->flags & RADEON_IS_PX) &&
794              (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
795                 sensor_set_unknown(s);
796                 s->status = SENSOR_S_OK;
797                 return;
798         }
799
800         if (rdev->asic->pm.get_temperature == NULL) {
801                 sensor_set_invalid(s);
802                 return;
803         }
804
805         temp = radeon_get_temperature(rdev);
806         if (temp >= rdev->pm.dpm.thermal.max_temp)
807                 stat = SENSOR_S_CRIT;
808         else if (temp >= rdev->pm.dpm.thermal.min_temp)
809                 stat = SENSOR_S_WARN;
810         else
811                 stat = SENSOR_S_OK;
812
813         sensor_set(s, temp * 1000 + 273150000, stat);
814 }
815
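/*
 * Register a temperature sensor with the kernel sensors framework for
 * asics that have an internal thermal sensor and a get_temperature hook.
 */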
816 static int radeon_hwmon_init(struct radeon_device *rdev)
817 {
818         int err = 0;
819
820         rdev->pm.int_sensor = NULL;
821         rdev->pm.int_sensordev = NULL;
822
823         switch (rdev->pm.int_thermal_type) {
824         case THERMAL_TYPE_RV6XX:
825         case THERMAL_TYPE_RV770:
826         case THERMAL_TYPE_EVERGREEN:
827         case THERMAL_TYPE_NI:
828         case THERMAL_TYPE_SUMO:
829         case THERMAL_TYPE_SI:
830         case THERMAL_TYPE_CI:
831         case THERMAL_TYPE_KV:
832                 if (rdev->asic->pm.get_temperature == NULL)
833                         return err;
834
835                 rdev->pm.int_sensor = kmalloc(sizeof(*rdev->pm.int_sensor),
836                     M_DRM, M_ZERO | M_WAITOK);
837                 rdev->pm.int_sensordev = kmalloc(
838                     sizeof(*rdev->pm.int_sensordev), M_DRM,
839                     M_ZERO | M_WAITOK);
840                 strlcpy(rdev->pm.int_sensordev->xname,
841                     device_get_nameunit(rdev->dev->bsddev),
842                     sizeof(rdev->pm.int_sensordev->xname));
843                 rdev->pm.int_sensor->type = SENSOR_TEMP;
844                 rdev->pm.int_sensor->flags |= SENSOR_FINVALID;
845                 sensor_attach(rdev->pm.int_sensordev, rdev->pm.int_sensor);
846                 sensor_task_register(rdev, radeon_hwmon_refresh, 5);
847                 sensordev_install(rdev->pm.int_sensordev);
848                 break;
849         default:
850                 break;
851         }
852
853         return err;
854 }
855
856 static void radeon_hwmon_fini(struct radeon_device *rdev)
857 {
858         if (rdev->pm.int_sensor != NULL && rdev->pm.int_sensordev != NULL) {
859                 sensordev_deinstall(rdev->pm.int_sensordev);
860                 sensor_task_unregister(rdev);
861                 kfree(rdev->pm.int_sensor);
862                 kfree(rdev->pm.int_sensordev);
863                 rdev->pm.int_sensor = NULL;
864                 rdev->pm.int_sensordev = NULL;
865         }
866 }
867
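/*
 * Thermal interrupt work handler: switch to the internal thermal power
 * state while the GPU is above its temperature limit and back to the
 * user-selected state once it has cooled down again.
 */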
868 static void radeon_dpm_thermal_work_handler(struct work_struct *work)
869 {
870         struct radeon_device *rdev =
871                 container_of(work, struct radeon_device,
872                              pm.dpm.thermal.work);
873         /* switch to the thermal state */
874         enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
875
876         if (!rdev->pm.dpm_enabled)
877                 return;
878
879         if (rdev->asic->pm.get_temperature) {
880                 int temp = radeon_get_temperature(rdev);
881
882                 if (temp < rdev->pm.dpm.thermal.min_temp)
883                         /* switch back the user state */
884                         dpm_state = rdev->pm.dpm.user_state;
885         } else {
886                 if (rdev->pm.dpm.thermal.high_to_low)
887                         /* switch back the user state */
888                         dpm_state = rdev->pm.dpm.user_state;
889         }
890         mutex_lock(&rdev->pm.mutex);
891         if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
892                 rdev->pm.dpm.thermal_active = true;
893         else
894                 rdev->pm.dpm.thermal_active = false;
895         rdev->pm.dpm.state = dpm_state;
896         mutex_unlock(&rdev->pm.mutex);
897
898         radeon_pm_compute_clocks(rdev);
899 }
900
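/*
 * Returns true when at most one display is active and its vblank period
 * is long enough to safely reclock the memory clock.
 */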
901 static bool radeon_dpm_single_display(struct radeon_device *rdev)
902 {
903         bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
904                 true : false;
905
906         /* check if the vblank period is too short to adjust the mclk */
907         if (single_display && rdev->asic->dpm.vblank_too_short) {
908                 if (radeon_dpm_vblank_too_short(rdev))
909                         single_display = false;
910         }
911
912         return single_display;
913 }
914
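/*
 * Select the best matching dpm power state for the requested state type,
 * honoring single-display-only states and falling back through related
 * state types (e.g. UVD -> performance, thermal -> ACPI -> battery) when
 * no direct match exists.
 */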
915 static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
916                                                      enum radeon_pm_state_type dpm_state)
917 {
918         int i;
919         struct radeon_ps *ps;
920         u32 ui_class;
921         bool single_display = radeon_dpm_single_display(rdev);
922
923         /* 120hz displays tend to be problematic even if they are under the
924          * vblank limit.
925          */
926         if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
927                 single_display = false;
928
929         /* certain older asics have a separate 3D performance state,
930          * so try that first if the user selected performance
931          */
932         if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
933                 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
934         /* balanced states don't exist at the moment */
935         if (dpm_state == POWER_STATE_TYPE_BALANCED)
936                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
937
938 restart_search:
939         /* Pick the best power state based on current conditions */
940         for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
941                 ps = &rdev->pm.dpm.ps[i];
942                 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
943                 switch (dpm_state) {
944                 /* user states */
945                 case POWER_STATE_TYPE_BATTERY:
946                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
947                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
948                                         if (single_display)
949                                                 return ps;
950                                 } else
951                                         return ps;
952                         }
953                         break;
954                 case POWER_STATE_TYPE_BALANCED:
955                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
956                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
957                                         if (single_display)
958                                                 return ps;
959                                 } else
960                                         return ps;
961                         }
962                         break;
963                 case POWER_STATE_TYPE_PERFORMANCE:
964                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
965                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
966                                         if (single_display)
967                                                 return ps;
968                                 } else
969                                         return ps;
970                         }
971                         break;
972                 /* internal states */
973                 case POWER_STATE_TYPE_INTERNAL_UVD:
974                         if (rdev->pm.dpm.uvd_ps)
975                                 return rdev->pm.dpm.uvd_ps;
976                         else
977                                 break;
978                 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
979                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
980                                 return ps;
981                         break;
982                 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
983                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
984                                 return ps;
985                         break;
986                 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
987                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
988                                 return ps;
989                         break;
990                 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
991                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
992                                 return ps;
993                         break;
994                 case POWER_STATE_TYPE_INTERNAL_BOOT:
995                         return rdev->pm.dpm.boot_ps;
996                 case POWER_STATE_TYPE_INTERNAL_THERMAL:
997                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
998                                 return ps;
999                         break;
1000                 case POWER_STATE_TYPE_INTERNAL_ACPI:
1001                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
1002                                 return ps;
1003                         break;
1004                 case POWER_STATE_TYPE_INTERNAL_ULV:
1005                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
1006                                 return ps;
1007                         break;
1008                 case POWER_STATE_TYPE_INTERNAL_3DPERF:
1009                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
1010                                 return ps;
1011                         break;
1012                 default:
1013                         break;
1014                 }
1015         }
1016         /* use a fallback state if we didn't match */
1017         switch (dpm_state) {
1018         case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1019                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1020                 goto restart_search;
1021         case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1022         case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1023         case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1024                 if (rdev->pm.dpm.uvd_ps) {
1025                         return rdev->pm.dpm.uvd_ps;
1026                 } else {
1027                         dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1028                         goto restart_search;
1029                 }
1030         case POWER_STATE_TYPE_INTERNAL_THERMAL:
1031                 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
1032                 goto restart_search;
1033         case POWER_STATE_TYPE_INTERNAL_ACPI:
1034                 dpm_state = POWER_STATE_TYPE_BATTERY;
1035                 goto restart_search;
1036         case POWER_STATE_TYPE_BATTERY:
1037         case POWER_STATE_TYPE_BALANCED:
1038         case POWER_STATE_TYPE_INTERNAL_3DPERF:
1039                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1040                 goto restart_search;
1041         default:
1042                 break;
1043         }
1044
1045         return NULL;
1046 }
1047
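/*
 * Core dpm state switch, called with pm.mutex held: pick a power state
 * for the current conditions and, if it differs from the current one (or
 * a forced change is needed), drain the rings, program it and update the
 * display configuration and any forced performance level.
 */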
1048 static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
1049 {
1050         int i;
1051         struct radeon_ps *ps;
1052         enum radeon_pm_state_type dpm_state;
1053         int ret;
1054         bool single_display = radeon_dpm_single_display(rdev);
1055
1056         /* if dpm init failed */
1057         if (!rdev->pm.dpm_enabled)
1058                 return;
1059
1060         if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
1061                 /* add other state override checks here */
1062                 if ((!rdev->pm.dpm.thermal_active) &&
1063                     (!rdev->pm.dpm.uvd_active))
1064                         rdev->pm.dpm.state = rdev->pm.dpm.user_state;
1065         }
1066         dpm_state = rdev->pm.dpm.state;
1067
1068         ps = radeon_dpm_pick_power_state(rdev, dpm_state);
1069         if (ps)
1070                 rdev->pm.dpm.requested_ps = ps;
1071         else
1072                 return;
1073
1074         /* no need to reprogram if nothing changed unless we are on BTC+ */
1075         if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
1076                 /* vce just modifies an existing state so force a change */
1077                 if (ps->vce_active != rdev->pm.dpm.vce_active)
1078                         goto force;
1079                 /* user has made a display change (such as timing) */
1080                 if (rdev->pm.dpm.single_display != single_display)
1081                         goto force;
1082                 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
1083                         /* for pre-BTC and APUs if the num crtcs changed but state is the same,
1084                          * all we need to do is update the display configuration.
1085                          */
1086                         if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
1087                                 /* update display watermarks based on new power state */
1088                                 radeon_bandwidth_update(rdev);
1089                                 /* update displays */
1090                                 radeon_dpm_display_configuration_changed(rdev);
1091                                 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1092                                 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1093                         }
1094                         return;
1095                 } else {
1096                         /* for BTC+ if the num crtcs hasn't changed and state is the same,
1097                         /* for BTC+, if the num crtcs hasn't changed and the state is the
1098                          * same, there is nothing to do; if the num crtcs is > 1 and the
1099                          * state is the same, just update the display configuration.
1100                         if (rdev->pm.dpm.new_active_crtcs ==
1101                             rdev->pm.dpm.current_active_crtcs) {
1102                                 return;
1103                         } else {
1104                                 if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
1105                                     (rdev->pm.dpm.new_active_crtc_count > 1)) {
1106                                         /* update display watermarks based on new power state */
1107                                         radeon_bandwidth_update(rdev);
1108                                         /* update displays */
1109                                         radeon_dpm_display_configuration_changed(rdev);
1110                                         rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1111                                         rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1112                                         return;
1113                                 }
1114                         }
1115                 }
1116         }
1117
1118 force:
1119         if (radeon_dpm == 1) {
1120                 printk("switching from power state:\n");
1121                 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
1122                 printk("switching to power state:\n");
1123                 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
1124         }
1125
1126         down_write(&rdev->pm.mclk_lock);
1127         mutex_lock(&rdev->ring_lock);
1128
1129         /* update whether vce is active */
1130         ps->vce_active = rdev->pm.dpm.vce_active;
1131
1132         ret = radeon_dpm_pre_set_power_state(rdev);
1133         if (ret)
1134                 goto done;
1135
1136         /* update display watermarks based on new power state */
1137         radeon_bandwidth_update(rdev);
1138
1139         /* wait for the rings to drain */
1140         for (i = 0; i < RADEON_NUM_RINGS; i++) {
1141                 struct radeon_ring *ring = &rdev->ring[i];
1142                 if (ring->ready)
1143                         radeon_fence_wait_empty(rdev, i);
1144         }
1145
1146         /* program the new power state */
1147         radeon_dpm_set_power_state(rdev);
1148
1149         /* update current power state */
1150         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
1151
1152         radeon_dpm_post_set_power_state(rdev);
1153
1154         rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1155         rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1156         rdev->pm.dpm.single_display = single_display;
1157
1158         /* update displays */
1159         radeon_dpm_display_configuration_changed(rdev);
1160
1161         if (rdev->asic->dpm.force_performance_level) {
1162                 if (rdev->pm.dpm.thermal_active) {
1163                         enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
1164                         /* force low perf level for thermal */
1165                         radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
1166                         /* save the user's level */
1167                         rdev->pm.dpm.forced_level = level;
1168                 } else {
1169                         /* otherwise, user selected level */
1170                         radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
1171                 }
1172         }
1173
1174 done:
1175         mutex_unlock(&rdev->ring_lock);
1176         up_write(&rdev->pm.mclk_lock);
1177 }
1178
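/*
 * Enable or disable UVD power management: either powergate the UVD block
 * directly (when the asic supports it) or switch to/from a UVD dpm state
 * and recompute the clocks.
 */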
1179 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
1180 {
1181         enum radeon_pm_state_type dpm_state;
1182
1183         if (rdev->asic->dpm.powergate_uvd) {
1184                 mutex_lock(&rdev->pm.mutex);
1185                 /* don't powergate anything if we
1186                    have active but paused streams */
1187                 enable |= rdev->pm.dpm.sd > 0;
1188                 enable |= rdev->pm.dpm.hd > 0;
1189                 /* enable/disable UVD */
1190                 radeon_dpm_powergate_uvd(rdev, !enable);
1191                 mutex_unlock(&rdev->pm.mutex);
1192         } else {
1193                 if (enable) {
1194                         mutex_lock(&rdev->pm.mutex);
1195                         rdev->pm.dpm.uvd_active = true;
1196                         /* disable this for now */
1197 #if 0
1198                         if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
1199                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
1200                         else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
1201                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1202                         else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
1203                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1204                         else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
1205                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
1206                         else
1207 #endif
1208                                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
1209                         rdev->pm.dpm.state = dpm_state;
1210                         mutex_unlock(&rdev->pm.mutex);
1211                 } else {
1212                         mutex_lock(&rdev->pm.mutex);
1213                         rdev->pm.dpm.uvd_active = false;
1214                         mutex_unlock(&rdev->pm.mutex);
1215                 }
1216
1217                 radeon_pm_compute_clocks(rdev);
1218         }
1219 }
1220
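/* VCE counterpart of the above: track VCE activity and recompute clocks. */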
1221 void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
1222 {
1223         if (enable) {
1224                 mutex_lock(&rdev->pm.mutex);
1225                 rdev->pm.dpm.vce_active = true;
1226                 /* XXX select vce level based on ring/task */
1227                 rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
1228                 mutex_unlock(&rdev->pm.mutex);
1229         } else {
1230                 mutex_lock(&rdev->pm.mutex);
1231                 rdev->pm.dpm.vce_active = false;
1232                 mutex_unlock(&rdev->pm.mutex);
1233         }
1234
1235         radeon_pm_compute_clocks(rdev);
1236 }
1237
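/*
 * Suspend handlers: the legacy path just pauses dynpm, while the dpm path
 * disables dpm entirely and falls back to the boot power state.
 */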
1238 static void radeon_pm_suspend_old(struct radeon_device *rdev)
1239 {
1240         mutex_lock(&rdev->pm.mutex);
1241         if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1242                 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
1243                         rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
1244         }
1245         mutex_unlock(&rdev->pm.mutex);
1246
1247 #ifdef DUMBBELL_WIP
1248         cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1249 #endif /* DUMBBELL_WIP */
1250 }
1251
1252 static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
1253 {
1254         mutex_lock(&rdev->pm.mutex);
1255         /* disable dpm */
1256         radeon_dpm_disable(rdev);
1257         /* reset the power state */
1258         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1259         rdev->pm.dpm_enabled = false;
1260         mutex_unlock(&rdev->pm.mutex);
1261 }
1262
1263 void radeon_pm_suspend(struct radeon_device *rdev)
1264 {
1265         if (rdev->pm.pm_method == PM_METHOD_DPM)
1266                 radeon_pm_suspend_dpm(rdev);
1267         else
1268                 radeon_pm_suspend_old(rdev);
1269 }
1270
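/*
 * Resume handlers: asic init during resume resets the power state, so
 * restore the default (or boot) state and re-enable dynpm or dpm as
 * appropriate.
 */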
1271 static void radeon_pm_resume_old(struct radeon_device *rdev)
1272 {
1273         /* set up the default clocks if the MC ucode is loaded */
1274         if ((rdev->family >= CHIP_BARTS) &&
1275             (rdev->family <= CHIP_CAYMAN) &&
1276             rdev->mc_fw) {
1277                 if (rdev->pm.default_vddc)
1278                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1279                                                 SET_VOLTAGE_TYPE_ASIC_VDDC);
1280                 if (rdev->pm.default_vddci)
1281                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1282                                                 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1283                 if (rdev->pm.default_sclk)
1284                         radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1285                 if (rdev->pm.default_mclk)
1286                         radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1287         }
1288         /* asic init will reset the default power state */
1289         mutex_lock(&rdev->pm.mutex);
1290         rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
1291         rdev->pm.current_clock_mode_index = 0;
1292         rdev->pm.current_sclk = rdev->pm.default_sclk;
1293         rdev->pm.current_mclk = rdev->pm.default_mclk;
1294         if (rdev->pm.power_state) {
1295                 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
1296                 rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
1297         }
1298         if (rdev->pm.pm_method == PM_METHOD_DYNPM
1299             && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
1300                 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1301 #ifdef DUMBBELL_WIP
1302                 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1303                                       msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1304 #endif /* DUMBBELL_WIP */
1305         }
1306         mutex_unlock(&rdev->pm.mutex);
1307         radeon_pm_compute_clocks(rdev);
1308 }
1309
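/*
 * Resume path for DPM: re-enable dpm starting from the boot state; on
 * failure fall back to the default clocks and voltages where possible.
 */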
1310 static void radeon_pm_resume_dpm(struct radeon_device *rdev)
1311 {
1312         int ret;
1313
1314         /* asic init will reset to the boot state */
1315         mutex_lock(&rdev->pm.mutex);
1316         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1317         radeon_dpm_setup_asic(rdev);
1318         ret = radeon_dpm_enable(rdev);
1319         mutex_unlock(&rdev->pm.mutex);
1320         if (ret)
1321                 goto dpm_resume_fail;
1322         rdev->pm.dpm_enabled = true;
1323         return;
1324
1325 dpm_resume_fail:
1326         DRM_ERROR("radeon: dpm resume failed\n");
1327         if ((rdev->family >= CHIP_BARTS) &&
1328             (rdev->family <= CHIP_CAYMAN) &&
1329             rdev->mc_fw) {
1330                 if (rdev->pm.default_vddc)
1331                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1332                                                 SET_VOLTAGE_TYPE_ASIC_VDDC);
1333                 if (rdev->pm.default_vddci)
1334                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1335                                                 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1336                 if (rdev->pm.default_sclk)
1337                         radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1338                 if (rdev->pm.default_mclk)
1339                         radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1340         }
1341 }
1342
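/* Dispatch resume to the DPM or old PM path depending on pm_method. */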
1343 void radeon_pm_resume(struct radeon_device *rdev)
1344 {
1345         if (rdev->pm.pm_method == PM_METHOD_DPM)
1346                 radeon_pm_resume_dpm(rdev);
1347         else
1348                 radeon_pm_resume_old(rdev);
1349 }
1350
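/*
 * Initialize the old (profile/dynpm) PM code: parse the power states
 * from the VBIOS, program the default clocks/voltages where possible,
 * and set up the thermal sensor, the dynpm idle worker and debugfs.
 */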
1351 static int radeon_pm_init_old(struct radeon_device *rdev)
1352 {
1353         int ret;
1354
1355         rdev->pm.profile = PM_PROFILE_DEFAULT;
1356         rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1357         rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1358         rdev->pm.dynpm_can_upclock = true;
1359         rdev->pm.dynpm_can_downclock = true;
1360         rdev->pm.default_sclk = rdev->clock.default_sclk;
1361         rdev->pm.default_mclk = rdev->clock.default_mclk;
1362         rdev->pm.current_sclk = rdev->clock.default_sclk;
1363         rdev->pm.current_mclk = rdev->clock.default_mclk;
1364         rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1365
1366         if (rdev->bios) {
1367                 if (rdev->is_atom_bios)
1368                         radeon_atombios_get_power_modes(rdev);
1369                 else
1370                         radeon_combios_get_power_modes(rdev);
1371                 radeon_pm_print_states(rdev);
1372                 radeon_pm_init_profile(rdev);
1373                 /* set up the default clocks if the MC ucode is loaded */
1374                 if ((rdev->family >= CHIP_BARTS) &&
1375                     (rdev->family <= CHIP_CAYMAN) &&
1376                     rdev->mc_fw) {
1377                         if (rdev->pm.default_vddc)
1378                                 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1379                                                         SET_VOLTAGE_TYPE_ASIC_VDDC);
1380                         if (rdev->pm.default_vddci)
1381                                 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1382                                                         SET_VOLTAGE_TYPE_ASIC_VDDCI);
1383                         if (rdev->pm.default_sclk)
1384                                 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1385                         if (rdev->pm.default_mclk)
1386                                 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1387                 }
1388         }
1389
1390         /* set up the internal thermal sensor if applicable */
1391         ret = radeon_hwmon_init(rdev);
1392         if (ret)
1393                 return ret;
1394
1395         INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
1396
1397         if (rdev->pm.num_power_states > 1) {
1398                 if (radeon_debugfs_pm_init(rdev)) {
1399                         DRM_ERROR("Failed to register debugfs file for PM!\n");
1400                 }
1401
1402                 DRM_INFO("radeon: power management initialized\n");
1403         }
1404
1405         return 0;
1406 }
1407
1408 static void radeon_dpm_print_power_states(struct radeon_device *rdev)
1409 {
1410         int i;
1411
1412         for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
1413                 printk("== power state %d ==\n", i);
1414                 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
1415         }
1416 }
1417
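/*
 * Initialize DPM: default to the balanced state, parse the power states
 * from the ATOM VBIOS, set up the thermal sensor and worker, then enable
 * dpm.  Falls back to the default clocks/voltages on failure.
 */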
1418 static int radeon_pm_init_dpm(struct radeon_device *rdev)
1419 {
1420         int ret;
1421
1422         /* default to balanced state */
1423         rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
1424         rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
1425         rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
1426         rdev->pm.default_sclk = rdev->clock.default_sclk;
1427         rdev->pm.default_mclk = rdev->clock.default_mclk;
1428         rdev->pm.current_sclk = rdev->clock.default_sclk;
1429         rdev->pm.current_mclk = rdev->clock.default_mclk;
1430         rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
1431
1432         if (rdev->bios && rdev->is_atom_bios)
1433                 radeon_atombios_get_power_modes(rdev);
1434         else
1435                 return -EINVAL;
1436
1437         /* set up the internal thermal sensor if applicable */
1438         ret = radeon_hwmon_init(rdev);
1439         if (ret)
1440                 return ret;
1441
1442         INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
1443         mutex_lock(&rdev->pm.mutex);
1444         radeon_dpm_init(rdev);
1445         rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
1446         if (radeon_dpm == 1)
1447                 radeon_dpm_print_power_states(rdev);
1448         radeon_dpm_setup_asic(rdev);
1449         ret = radeon_dpm_enable(rdev);
1450         mutex_unlock(&rdev->pm.mutex);
1451         if (ret)
1452                 goto dpm_failed;
1453         rdev->pm.dpm_enabled = true;
1454
1455 #ifdef TODO_DEVICE_FILE
1456         if (radeon_debugfs_pm_init(rdev)) {
1457                 DRM_ERROR("Failed to register debugfs file for dpm!\n");
1458         }
1459 #endif
1460
1461         DRM_INFO("radeon: dpm initialized\n");
1462
1463         return 0;
1464
1465 dpm_failed:
1466         rdev->pm.dpm_enabled = false;
1467         if ((rdev->family >= CHIP_BARTS) &&
1468             (rdev->family <= CHIP_CAYMAN) &&
1469             rdev->mc_fw) {
1470                 if (rdev->pm.default_vddc)
1471                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
1472                                                 SET_VOLTAGE_TYPE_ASIC_VDDC);
1473                 if (rdev->pm.default_vddci)
1474                         radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
1475                                                 SET_VOLTAGE_TYPE_ASIC_VDDCI);
1476                 if (rdev->pm.default_sclk)
1477                         radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
1478                 if (rdev->pm.default_mclk)
1479                         radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
1480         }
1481         DRM_ERROR("radeon: dpm initialization failed\n");
1482         return ret;
1483 }
1484
1485 struct radeon_dpm_quirk {
1486         u32 chip_vendor;
1487         u32 chip_device;
1488         u32 subsys_vendor;
1489         u32 subsys_device;
1490 };
1491
1492 /* cards with dpm stability problems */
1493 static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
1494         /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
1495         { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
1496         /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
1497         { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
1498         { 0, 0, 0, 0 },
1499 };
1500
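/*
 * Top-level PM init: choose between DPM and the old profile code based
 * on the chip family, the available RLC/SMC firmware, the quirk list
 * above and the radeon_dpm module parameter.
 */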
1501 int radeon_pm_init(struct radeon_device *rdev)
1502 {
1503         struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
1504         bool disable_dpm = false;
1505
1506         /* Apply dpm quirks */
1507         while (p && p->chip_device != 0) {
1508                 if (rdev->pdev->vendor == p->chip_vendor &&
1509                     rdev->pdev->device == p->chip_device &&
1510                     rdev->pdev->subsystem_vendor == p->subsys_vendor &&
1511                     rdev->pdev->subsystem_device == p->subsys_device) {
1512                         disable_dpm = true;
1513                         break;
1514                 }
1515                 ++p;
1516         }
1517
1518         /* enable dpm on rv6xx+ */
1519         switch (rdev->family) {
1520         case CHIP_RV610:
1521         case CHIP_RV630:
1522         case CHIP_RV620:
1523         case CHIP_RV635:
1524         case CHIP_RV670:
1525         case CHIP_RS780:
1526         case CHIP_RS880:
1527         case CHIP_RV770:
1528                 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1529                 if (!rdev->rlc_fw)
1530                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1531                 else if ((rdev->family >= CHIP_RV770) &&
1532                          (!(rdev->flags & RADEON_IS_IGP)) &&
1533                          (!rdev->smc_fw))
1534                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1535                 else if (radeon_dpm == 1)
1536                         rdev->pm.pm_method = PM_METHOD_DPM;
1537                 else
1538                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1539                 break;
1540         case CHIP_RV730:
1541         case CHIP_RV710:
1542         case CHIP_RV740:
1543         case CHIP_CEDAR:
1544         case CHIP_REDWOOD:
1545         case CHIP_JUNIPER:
1546         case CHIP_CYPRESS:
1547         case CHIP_HEMLOCK:
1548         case CHIP_PALM:
1549         case CHIP_SUMO:
1550         case CHIP_SUMO2:
1551         case CHIP_BARTS:
1552         case CHIP_TURKS:
1553         case CHIP_CAICOS:
1554         case CHIP_CAYMAN:
1555         case CHIP_ARUBA:
1556         case CHIP_TAHITI:
1557         case CHIP_PITCAIRN:
1558         case CHIP_VERDE:
1559         case CHIP_OLAND:
1560         case CHIP_HAINAN:
1561         case CHIP_BONAIRE:
1562         case CHIP_KABINI:
1563         case CHIP_KAVERI:
1564         case CHIP_HAWAII:
1565         case CHIP_MULLINS:
1566                 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1567                 if (!rdev->rlc_fw)
1568                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1569                 else if ((rdev->family >= CHIP_RV770) &&
1570                          (!(rdev->flags & RADEON_IS_IGP)) &&
1571                          (!rdev->smc_fw))
1572                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1573                 else if (disable_dpm && (radeon_dpm == -1))
1574                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1575                 else if (radeon_dpm == 0)
1576                         rdev->pm.pm_method = PM_METHOD_PROFILE;
1577                 else
1578                         rdev->pm.pm_method = PM_METHOD_DPM;
1579                 break;
1580         default:
1581                 /* default to profile method */
1582                 rdev->pm.pm_method = PM_METHOD_PROFILE;
1583                 break;
1584         }
1585
1586         if (rdev->pm.pm_method == PM_METHOD_DPM)
1587                 return radeon_pm_init_dpm(rdev);
1588         else
1589                 return radeon_pm_init_old(rdev);
1590 }
1591
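/*
 * Late PM init: sysfs file creation is currently compiled out; for DPM
 * this runs the late enable followed by an initial clock recomputation.
 */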
1592 int radeon_pm_late_init(struct radeon_device *rdev)
1593 {
1594         int ret = 0;
1595
1596         if (rdev->pm.pm_method == PM_METHOD_DPM) {
1597                 if (rdev->pm.dpm_enabled) {
1598                         if (!rdev->pm.sysfs_initialized) {
1599 #if 0
1600                                 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1601                                 if (ret)
1602                                         DRM_ERROR("failed to create device file for dpm state\n");
1603                                 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1604                                 if (ret)
1605                                         DRM_ERROR("failed to create device file for dpm force performance level\n");
1606                                 /* XXX: these are noops for dpm but are here for backwards compat */
1607                                 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1608                                 if (ret)
1609                                         DRM_ERROR("failed to create device file for power profile\n");
1610                                 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1611                                 if (ret)
1612                                         DRM_ERROR("failed to create device file for power method\n");
1613 #endif
1614                                 rdev->pm.sysfs_initialized = true;
1615                         }
1616
1617                         mutex_lock(&rdev->pm.mutex);
1618                         ret = radeon_dpm_late_enable(rdev);
1619                         mutex_unlock(&rdev->pm.mutex);
1620                         if (ret) {
1621                                 rdev->pm.dpm_enabled = false;
1622                                 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1623                         } else {
1624                                 /* set the dpm state for PX since there won't be
1625                                  * a modeset to call this.
1626                                  */
1627                                 radeon_pm_compute_clocks(rdev);
1628                         }
1629                 }
1630         } else {
1631                 if ((rdev->pm.num_power_states > 1) &&
1632                     (!rdev->pm.sysfs_initialized)) {
1633                         /* where's the best place to put these? */
1634 #if 0
1635                         ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1636                         if (ret)
1637                                 DRM_ERROR("failed to create device file for power profile\n");
1638                         ret = device_create_file(rdev->dev, &dev_attr_power_method);
1639                         if (ret)
1640                                 DRM_ERROR("failed to create device file for power method\n");
1641                         if (!ret)
1642                                 rdev->pm.sysfs_initialized = true;
1643 #endif
1644                 }
1645         }
1646         return ret;
1647 }
1648
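/*
 * Tear down the old PM code: restore the default profile/clocks, then
 * free the parsed power states and the internal thermal sensor.
 */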
1649 static void radeon_pm_fini_old(struct radeon_device *rdev)
1650 {
1651         if (rdev->pm.num_power_states > 1) {
1652                 mutex_lock(&rdev->pm.mutex);
1653                 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1654                         rdev->pm.profile = PM_PROFILE_DEFAULT;
1655                         radeon_pm_update_profile(rdev);
1656                         radeon_pm_set_clocks(rdev);
1657                 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1658                         /* reset default clocks */
1659                         rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
1660                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1661                         radeon_pm_set_clocks(rdev);
1662                 }
1663                 mutex_unlock(&rdev->pm.mutex);
1664
1665 #ifdef DUMBBELL_WIP
1666                 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
1667
1668                 device_remove_file(rdev->dev, &dev_attr_power_profile);
1669                 device_remove_file(rdev->dev, &dev_attr_power_method);
1670 #endif /* DUMBBELL_WIP */
1671         }
1672
1673         if (rdev->pm.power_state) {
1674                 int i;
1675                 for (i = 0; i < rdev->pm.num_power_states; ++i) {
1676                         kfree(rdev->pm.power_state[i].clock_info);
1677                 }
1678                 kfree(rdev->pm.power_state);
1679                 rdev->pm.power_state = NULL;
1680                 rdev->pm.num_power_states = 0;
1681         }
1682
1683         radeon_hwmon_fini(rdev);
1684 }
1685
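/*
 * Tear down DPM: disable it, free the dpm structures and the parsed
 * power states, then release the internal thermal sensor.
 */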
1686 static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1687 {
1688         if (rdev->pm.num_power_states > 1) {
1689                 mutex_lock(&rdev->pm.mutex);
1690                 radeon_dpm_disable(rdev);
1691                 mutex_unlock(&rdev->pm.mutex);
1692
1693 #ifdef TODO_DEVICE_FILE
1694                 device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
1695                 device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1696                 /* XXX backwards compat */
1697                 device_remove_file(rdev->dev, &dev_attr_power_profile);
1698                 device_remove_file(rdev->dev, &dev_attr_power_method);
1699 #endif
1700         }
1701         radeon_dpm_fini(rdev);
1702
1703         /* prevents leaking 440 bytes on OLAND */
1704         if (rdev->pm.power_state) {
1705                 int i;
1706                 for (i = 0; i < rdev->pm.num_power_states; ++i) {
1707                         kfree(rdev->pm.power_state[i].clock_info);
1708                 }
1709                 kfree(rdev->pm.power_state);
1710                 rdev->pm.power_state = NULL;
1711                 rdev->pm.num_power_states = 0;
1712         }
1713
1714         radeon_hwmon_fini(rdev);
1715 }
1716
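/* Dispatch teardown to the DPM or old PM path depending on pm_method. */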
1717 void radeon_pm_fini(struct radeon_device *rdev)
1718 {
1719         if (rdev->pm.pm_method == PM_METHOD_DPM)
1720                 radeon_pm_fini_dpm(rdev);
1721         else
1722                 radeon_pm_fini_old(rdev);
1723 }
1724
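/*
 * Re-evaluate clocks for the old PM code: recount the active CRTCs and
 * either reapply the current profile or, for dynpm, pause the idle
 * worker with more than one CRTC, resume it with exactly one and drop
 * to the minimum state with none.
 */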
1725 static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
1726 {
1727         struct drm_device *ddev = rdev->ddev;
1728         struct drm_crtc *crtc;
1729         struct radeon_crtc *radeon_crtc;
1730
1731         if (rdev->pm.num_power_states < 2)
1732                 return;
1733
1734         mutex_lock(&rdev->pm.mutex);
1735
1736         rdev->pm.active_crtcs = 0;
1737         rdev->pm.active_crtc_count = 0;
1738         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1739                 list_for_each_entry(crtc,
1740                                     &ddev->mode_config.crtc_list, head) {
1741                         radeon_crtc = to_radeon_crtc(crtc);
1742                         if (radeon_crtc->enabled) {
1743                                 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
1744                                 rdev->pm.active_crtc_count++;
1745                         }
1746                 }
1747         }
1748
1749         if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1750                 radeon_pm_update_profile(rdev);
1751                 radeon_pm_set_clocks(rdev);
1752         } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
1753                 if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
1754                         if (rdev->pm.active_crtc_count > 1) {
1755                                 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1756 #ifdef DUMBBELL_WIP
1757                                         cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1758 #endif /* DUMBBELL_WIP */
1759
1760                                         rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
1761                                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
1762                                         radeon_pm_get_dynpm_state(rdev);
1763                                         radeon_pm_set_clocks(rdev);
1764
1765                                         DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
1766                                 }
1767                         } else if (rdev->pm.active_crtc_count == 1) {
1768                                 /* TODO: Increase clocks if needed for current mode */
1769
1770                                 if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
1771                                         rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1772                                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
1773                                         radeon_pm_get_dynpm_state(rdev);
1774                                         radeon_pm_set_clocks(rdev);
1775
1776 #ifdef DUMBBELL_WIP
1777                                         schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1778                                                               msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1779 #endif /* DUMBBELL_WIP */
1780                                 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
1781                                         rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1782 #ifdef DUMBBELL_WIP
1783                                         schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1784                                                               msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1785 #endif /* DUMBBELL_WIP */
1786                                         DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
1787                                 }
1788                         } else { /* count == 0 */
1789                                 if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
1790 #ifdef DUMBBELL_WIP
1791                                         cancel_delayed_work(&rdev->pm.dynpm_idle_work);
1792 #endif /* DUMBBELL_WIP */
1793
1794                                         rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
1795                                         rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
1796                                         radeon_pm_get_dynpm_state(rdev);
1797                                         radeon_pm_set_clocks(rdev);
1798                                 }
1799                         }
1800                 }
1801         }
1802
1803         mutex_unlock(&rdev->pm.mutex);
1804 }
1805
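/*
 * Re-evaluate clocks for DPM: recount the active CRTCs, refresh the
 * AC/battery status and let the dpm code pick a matching power state.
 */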
1806 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1807 {
1808         struct drm_device *ddev = rdev->ddev;
1809         struct drm_crtc *crtc;
1810         struct radeon_crtc *radeon_crtc;
1811
1812         if (!rdev->pm.dpm_enabled)
1813                 return;
1814
1815         mutex_lock(&rdev->pm.mutex);
1816
1817         /* update active crtc counts */
1818         rdev->pm.dpm.new_active_crtcs = 0;
1819         rdev->pm.dpm.new_active_crtc_count = 0;
1820         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1821                 list_for_each_entry(crtc,
1822                                     &ddev->mode_config.crtc_list, head) {
1823                         radeon_crtc = to_radeon_crtc(crtc);
1824                         if (crtc->enabled) {
1825                                 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1826                                 rdev->pm.dpm.new_active_crtc_count++;
1827                         }
1828                 }
1829         }
1830
1831         /* update battery/ac status */
1832         if (power_profile_get_state() == POWER_PROFILE_PERFORMANCE)
1833                 rdev->pm.dpm.ac_power = true;
1834         else
1835                 rdev->pm.dpm.ac_power = false;
1836
1837         radeon_dpm_change_power_state_locked(rdev);
1838
1839         mutex_unlock(&rdev->pm.mutex);
1840
1841 }
1842
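/* Dispatch clock re-evaluation to the DPM or old PM path. */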
1843 void radeon_pm_compute_clocks(struct radeon_device *rdev)
1844 {
1845         if (rdev->pm.pm_method == PM_METHOD_DPM)
1846                 radeon_pm_compute_clocks_dpm(rdev);
1847         else
1848                 radeon_pm_compute_clocks_old(rdev);
1849 }
1850
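/* Check whether all active CRTCs are currently inside vblank. */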
1851 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1852 {
1853         int  crtc, vpos, hpos, vbl_status;
1854         bool in_vbl = true;
1855
1856         /* Iterate over all active CRTCs. All CRTCs must be in vblank,
1857          * otherwise return in_vbl == false.
1858          */
1859         for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1860                 if (rdev->pm.active_crtcs & (1 << crtc)) {
1861                         vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
1862                                                                 crtc,
1863                                                                 USE_REAL_VBLANKSTART,
1864                                                                 &vpos, &hpos, NULL, NULL,
1865                                                                 &rdev->mode_info.crtcs[crtc]->base.hwmode);
1866                         if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
1867                             !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
1868                                 in_vbl = false;
1869                 }
1870         }
1871
1872         return in_vbl;
1873 }
1874
1875 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
1876 {
1877         u32 stat_crtc = 0;
1878         bool in_vbl = radeon_pm_in_vbl(rdev);
1879
1880         if (in_vbl == false)
1881                 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
1882                          finish ? "exit" : "entry");
1883         return in_vbl;
1884 }
1885
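/*
 * Periodic dynpm worker: sample the number of fences still outstanding
 * on the rings and plan an up- or downclock; the change is applied once
 * the reclock delay has expired, waiting for vblank to avoid flicker.
 */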
1886 static void radeon_dynpm_idle_work_handler(struct work_struct *work)
1887 {
1888         struct radeon_device *rdev;
1889         int resched;
1890         rdev = container_of(work, struct radeon_device,
1891                                 pm.dynpm_idle_work.work);
1892
1893         resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1894         mutex_lock(&rdev->pm.mutex);
1895         if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
1896                 int not_processed = 0;
1897                 int i;
1898
1899                 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1900                         struct radeon_ring *ring = &rdev->ring[i];
1901
1902                         if (ring->ready) {
1903                                 not_processed += radeon_fence_count_emitted(rdev, i);
1904                                 if (not_processed >= 3)
1905                                         break;
1906                         }
1907                 }
1908
1909                 if (not_processed >= 3) { /* should upclock */
1910                         if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
1911                                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1912                         } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
1913                                    rdev->pm.dynpm_can_upclock) {
1914                                 rdev->pm.dynpm_planned_action =
1915                                         DYNPM_ACTION_UPCLOCK;
1916                                 rdev->pm.dynpm_action_timeout = jiffies +
1917                                         msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
1918                         }
1919                 } else if (not_processed == 0) { /* should downclock */
1920                         if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
1921                                 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1922                         } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
1923                                    rdev->pm.dynpm_can_downclock) {
1924                                 rdev->pm.dynpm_planned_action =
1925                                         DYNPM_ACTION_DOWNCLOCK;
1926                                 rdev->pm.dynpm_action_timeout = jiffies +
1927                                         msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
1928                         }
1929                 }
1930
1931                 /* Note, radeon_pm_set_clocks is called with static_switch set
1932                  * to false since we want to wait for vbl to avoid flicker.
1933                  */
1934                 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
1935                     jiffies > rdev->pm.dynpm_action_timeout) {
1936                         radeon_pm_get_dynpm_state(rdev);
1937                         radeon_pm_set_clocks(rdev);
1938                 }
1939
1940                 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
1941                                       msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
1942         }
1943         mutex_unlock(&rdev->pm.mutex);
1944         ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1945 }
1946
1947 /*
1948  * Debugfs info
1949  */
1950 #if defined(CONFIG_DEBUG_FS)
1951
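/*
 * debugfs dump of the current PM state.  Clock values are kept in
 * 10 kHz units, hence the "%u0" format specifiers below.
 */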
1952 static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1953 {
1954         struct drm_info_node *node = (struct drm_info_node *) m->private;
1955         struct drm_device *dev = node->minor->dev;
1956         struct radeon_device *rdev = dev->dev_private;
1957         struct drm_device *ddev = rdev->ddev;
1958
1959         if  ((rdev->flags & RADEON_IS_PX) &&
1960              (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1961                 seq_printf(m, "PX asic powered off\n");
1962         } else if (rdev->pm.dpm_enabled) {
1963                 mutex_lock(&rdev->pm.mutex);
1964                 if (rdev->asic->dpm.debugfs_print_current_performance_level)
1965                         radeon_dpm_debugfs_print_current_performance_level(rdev, m);
1966                 else
1967                         seq_printf(m, "Debugfs support not implemented for this asic\n");
1968                 mutex_unlock(&rdev->pm.mutex);
1969         } else {
1970                 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
1971                 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
1972                 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
1973                         seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
1974                 else
1975                         seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
1976                 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
1977                 if (rdev->asic->pm.get_memory_clock)
1978                         seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
1979                 if (rdev->pm.current_vddc)
1980                         seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
1981                 if (rdev->asic->pm.get_pcie_lanes)
1982                         seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
1983         }
1984
1985         return 0;
1986 }
1987
1988 static struct drm_info_list radeon_pm_info_list[] = {
1989         {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
1990 };
1991 #endif
1992
1993 static int radeon_debugfs_pm_init(struct radeon_device *rdev)
1994 {
1995 #if defined(CONFIG_DEBUG_FS)
1996         return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
1997 #else
1998         return 0;
1999 #endif
2000 }