| author | Alex Deucher <alexander.deucher@amd.com> | 2013-04-12 13:55:22 -0400 |
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2013-06-27 10:49:20 -0400 |
| commit | da321c8a6a2a947710499273aaad733974af1689 (patch) | |
| tree | 42804d32464171deb561326ead8a12ba07e2a4cf | |
| parent | ca361b6538bd91c33af7cb0bed6accc292b10253 (diff) | |
drm/radeon/kms: add common dpm infrastructure
This adds the common dpm (dynamic power management)
infrastructure:
- dpm callbacks
- dpm init/fini/suspend/resume
- dpm power state selection
No device specific code is enabled yet.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
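
For orientation before the diffstat: the core of this patch is the new per-ASIC dpm callback table added to struct radeon_asic. The sketch below shows how a later, device-specific backend might fill it in; the rv6xx_dpm_* handler names are hypothetical placeholders, since this commit deliberately enables no device-specific code.

    /*
     * Hypothetical sketch only: how a future ASIC backend might populate the
     * .dpm callback table added to struct radeon_asic by this patch.  None of
     * the rv6xx_dpm_* handlers exist in this commit.
     */
    static struct radeon_asic rv6xx_asic = {
            /* ... existing callbacks elided ... */
            .dpm = {
                    .init = &rv6xx_dpm_init,
                    .setup_asic = &rv6xx_dpm_setup_asic,
                    .enable = &rv6xx_dpm_enable,
                    .disable = &rv6xx_dpm_disable,
                    .set_power_state = &rv6xx_dpm_set_power_state,
                    .display_configuration_changed = &rv6xx_dpm_display_configuration_changed,
                    .fini = &rv6xx_dpm_fini,
                    .get_sclk = &rv6xx_dpm_get_sclk,
                    .get_mclk = &rv6xx_dpm_get_mclk,
                    .print_power_state = &rv6xx_dpm_print_power_state,
            },
    };

The radeon_dpm_* wrapper macros added to radeon.h then dispatch through this table.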
| -rw-r--r-- | drivers/gpu/drm/radeon/radeon.h | 100 |
| -rw-r--r-- | drivers/gpu/drm/radeon/radeon_drv.c | 4 |
| -rw-r--r-- | drivers/gpu/drm/radeon/radeon_pm.c | 496 |

3 files changed, 591 insertions, 9 deletions
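
Two behavioural notes on the diff that follows. First, the thermal path: the patch adds a work handler (radeon_dpm_thermal_work_handler) and a radeon_dpm_thermal struct, but no interrupt source is hooked up yet. A device-specific thermal interrupt handler, sketched hypothetically below using only the fields this patch introduces, would be expected to record the crossing direction and kick the work item:

    /*
     * Hypothetical sketch: a per-ASIC thermal interrupt handler (not part of
     * this patch) feeding the new dpm thermal work item.
     */
    static void example_dpm_thermal_irq(struct radeon_device *rdev, bool high_to_low)
    {
            if (rdev->pm.pm_method != PM_METHOD_DPM)
                    return;
            /* remember which way the temperature threshold was crossed */
            rdev->pm.dpm.thermal.high_to_low = high_to_low;
            /* the work handler picks the thermal or user state and reprograms clocks */
            schedule_work(&rdev->pm.dpm.thermal.work);
    }

Second, user control: the dpm path exposes a new sysfs file, power_dpm_state, accepting "battery", "balanced" or "performance", while the legacy power_profile and power_method files are kept only for backwards compatibility.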
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index aeec346c8d3..41d79bb6866 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_pcie_gen2;
 extern int radeon_msi;
 extern int radeon_lockup_timeout;
 extern int radeon_fastfb;
+extern int radeon_dpm;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -1043,6 +1044,7 @@ struct radeon_wb {
 enum radeon_pm_method {
 	PM_METHOD_PROFILE,
 	PM_METHOD_DYNPM,
+	PM_METHOD_DPM,
 };
 
 enum radeon_dynpm_state {
@@ -1068,11 +1070,23 @@ enum radeon_voltage_type {
 };
 
 enum radeon_pm_state_type {
+	/* not used for dpm */
 	POWER_STATE_TYPE_DEFAULT,
 	POWER_STATE_TYPE_POWERSAVE,
+	/* user selectable states */
 	POWER_STATE_TYPE_BATTERY,
 	POWER_STATE_TYPE_BALANCED,
 	POWER_STATE_TYPE_PERFORMANCE,
+	/* internal states */
+	POWER_STATE_TYPE_INTERNAL_UVD,
+	POWER_STATE_TYPE_INTERNAL_UVD_SD,
+	POWER_STATE_TYPE_INTERNAL_UVD_HD,
+	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
+	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
+	POWER_STATE_TYPE_INTERNAL_BOOT,
+	POWER_STATE_TYPE_INTERNAL_THERMAL,
+	POWER_STATE_TYPE_INTERNAL_ACPI,
+	POWER_STATE_TYPE_INTERNAL_ULV,
 };
 
 enum radeon_pm_profile_type {
@@ -1101,12 +1115,16 @@ struct radeon_pm_profile {
 
 enum radeon_int_thermal_type {
 	THERMAL_TYPE_NONE,
+	THERMAL_TYPE_EXTERNAL,
+	THERMAL_TYPE_EXTERNAL_GPIO,
 	THERMAL_TYPE_RV6XX,
 	THERMAL_TYPE_RV770,
+	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
 	THERMAL_TYPE_EVERGREEN,
 	THERMAL_TYPE_SUMO,
 	THERMAL_TYPE_NI,
 	THERMAL_TYPE_SI,
+	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
 	THERMAL_TYPE_CI,
 };
 
@@ -1161,6 +1179,60 @@ struct radeon_power_state {
  */
 #define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
 
+struct radeon_ps {
+	u32 caps; /* vbios flags */
+	u32 class; /* vbios flags */
+	u32 class2; /* vbios flags */
+	/* UVD clocks */
+	u32 vclk;
+	u32 dclk;
+	/* asic priv */
+	void *ps_priv;
+};
+
+struct radeon_dpm_thermal {
+	/* thermal interrupt work */
+	struct work_struct work;
+	/* low temperature threshold */
+	int min_temp;
+	/* high temperature threshold */
+	int max_temp;
+	/* was interrupt low to high or high to low */
+	bool high_to_low;
+};
+
+struct radeon_dpm {
+	struct radeon_ps *ps;
+	/* number of valid power states */
+	int num_ps;
+	/* current power state that is active */
+	struct radeon_ps *current_ps;
+	/* requested power state */
+	struct radeon_ps *requested_ps;
+	/* boot up power state */
+	struct radeon_ps *boot_ps;
+	/* default uvd power state */
+	struct radeon_ps *uvd_ps;
+	enum radeon_pm_state_type state;
+	enum radeon_pm_state_type user_state;
+	u32 platform_caps;
+	u32 voltage_response_time;
+	u32 backbias_response_time;
+	void *priv;
+	u32 new_active_crtcs;
+	int new_active_crtc_count;
+	u32 current_active_crtcs;
+	int current_active_crtc_count;
+	/* special states active */
+	bool thermal_active;
+	/* thermal handling */
+	struct radeon_dpm_thermal thermal;
+};
+
+void radeon_dpm_enable_power_state(struct radeon_device *rdev,
+				   enum radeon_pm_state_type dpm_state);
+
+
 struct radeon_pm {
 	struct mutex mutex;
 	/* write locked while reprogramming mclk */
@@ -1214,6 +1286,9 @@ struct radeon_pm {
 	/* internal thermal controller on rv6xx+ */
 	enum radeon_int_thermal_type int_thermal_type;
 	struct device *int_hwmon_dev;
+	/* dpm */
+	bool dpm_enabled;
+	struct radeon_dpm dpm;
 };
 
 int radeon_pm_get_type_index(struct radeon_device *rdev,
@@ -1415,7 +1490,7 @@ struct radeon_asic {
 		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 	} hpd;
-	/* power management */
+	/* static power management */
 	struct {
 		void (*misc)(struct radeon_device *rdev);
 		void (*prepare)(struct radeon_device *rdev);
@@ -1432,6 +1507,19 @@ struct radeon_asic {
 		int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
 		int (*get_temperature)(struct radeon_device *rdev);
 	} pm;
+	/* dynamic power management */
+	struct {
+		int (*init)(struct radeon_device *rdev);
+		void (*setup_asic)(struct radeon_device *rdev);
+		int (*enable)(struct radeon_device *rdev);
+		void (*disable)(struct radeon_device *rdev);
+		int (*set_power_state)(struct radeon_device *rdev);
+		void (*display_configuration_changed)(struct radeon_device *rdev);
+		void (*fini)(struct radeon_device *rdev);
+		u32 (*get_sclk)(struct radeon_device *rdev, bool low);
+		u32 (*get_mclk)(struct radeon_device *rdev, bool low);
+		void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
+	} dpm;
 	/* pageflipping */
 	struct {
 		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
@@ -2124,6 +2212,16 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
 #define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
 #define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
+#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
+#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
+#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
+#define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
+#define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
+#define radeon_dpm_display_configuration_changed(rdev) rdev->asic->dpm.display_configuration_changed((rdev))
+#define radeon_dpm_fini(rdev) rdev->asic->dpm.fini((rdev))
+#define radeon_dpm_get_sclk(rdev, l) rdev->asic->dpm.get_sclk((rdev), (l))
+#define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l))
+#define radeon_dpm_print_power_state(rdev, ps) rdev->asic->dpm.print_power_state((rdev), (ps))
 
 /* Common functions */
 /* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 02709e4ebe6..00cc52e601f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -165,6 +165,7 @@ int radeon_pcie_gen2 = -1;
 int radeon_msi = -1;
 int radeon_lockup_timeout = 10000;
 int radeon_fastfb = 0;
+int radeon_dpm = -1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -220,6 +221,9 @@ module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
 MODULE_PARM_DESC(fastfb, "Direct FB access for IGP chips (0 = disable, 1 = enable)");
 module_param_named(fastfb, radeon_fastfb, int, 0444);
 
+MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(dpm, radeon_dpm, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
 	radeon_PCI_IDS
 };
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index e8c1bea9b57..4f5422e6ccb 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -388,7 +388,8 @@ static ssize_t radeon_get_pm_method(struct device *dev,
 	int pm = rdev->pm.pm_method;
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
-			(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
+			(pm == PM_METHOD_DYNPM) ? "dynpm" :
+			(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
 }
 
 static ssize_t radeon_set_pm_method(struct device *dev,
@@ -399,6 +400,11 @@ static ssize_t radeon_set_pm_method(struct device *dev,
 	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
 	struct radeon_device *rdev = ddev->dev_private;
 
+	/* we don't support the legacy modes with dpm */
+	if (rdev->pm.pm_method == PM_METHOD_DPM) {
+		count = -EINVAL;
+		goto fail;
+	}
 
 	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
 		mutex_lock(&rdev->pm.mutex);
@@ -423,8 +429,48 @@ fail:
 	return count;
 }
 
+static ssize_t radeon_get_dpm_state(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
+}
+
+static ssize_t radeon_set_dpm_state(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t count)
+{
+	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+	struct radeon_device *rdev = ddev->dev_private;
+
+	mutex_lock(&rdev->pm.mutex);
+	if (strncmp("battery", buf, strlen("battery")) == 0)
+		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
+	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
+		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
+	else if (strncmp("performance", buf, strlen("performance")) == 0)
+		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
+	else {
+		mutex_unlock(&rdev->pm.mutex);
+		count = -EINVAL;
+		goto fail;
+	}
+	mutex_unlock(&rdev->pm.mutex);
+	radeon_pm_compute_clocks(rdev);
+fail:
+	return count;
+}
+
 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
+static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
 
 static ssize_t radeon_hwmon_show_temp(struct device *dev,
 				      struct device_attribute *attr,
@@ -508,7 +554,228 @@ static void radeon_hwmon_fini(struct radeon_device *rdev)
 	}
 }
 
-void radeon_pm_suspend(struct radeon_device *rdev)
+static void radeon_dpm_thermal_work_handler(struct work_struct *work)
+{
+	struct radeon_device *rdev =
+		container_of(work, struct radeon_device,
+			     pm.dpm.thermal.work);
+	/* switch to the thermal state */
+	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+
+	if (!rdev->pm.dpm_enabled)
+		return;
+
+	if (rdev->asic->pm.get_temperature) {
+		int temp = radeon_get_temperature(rdev);
+
+		if (temp < rdev->pm.dpm.thermal.min_temp)
+			/* switch back the user state */
+			dpm_state = rdev->pm.dpm.user_state;
+	} else {
+		if (rdev->pm.dpm.thermal.high_to_low)
+			/* switch back the user state */
+			dpm_state = rdev->pm.dpm.user_state;
+	}
+	radeon_dpm_enable_power_state(rdev, dpm_state);
+}
+
+static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+						     enum radeon_pm_state_type dpm_state)
+{
+	int i;
+	struct radeon_ps *ps;
+	u32 ui_class;
+
+restart_search:
+	/* balanced states don't exist at the moment */
+	if (dpm_state == POWER_STATE_TYPE_BALANCED)
+		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+
+	/* Pick the best power state based on current conditions */
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		ps = &rdev->pm.dpm.ps[i];
+		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
+		switch (dpm_state) {
+		/* user states */
+		case POWER_STATE_TYPE_BATTERY:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (rdev->pm.dpm.new_active_crtc_count < 2)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		case POWER_STATE_TYPE_BALANCED:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (rdev->pm.dpm.new_active_crtc_count < 2)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		case POWER_STATE_TYPE_PERFORMANCE:
+			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+					if (rdev->pm.dpm.new_active_crtc_count < 2)
+						return ps;
+				} else
+					return ps;
+			}
+			break;
+		/* internal states */
+		case POWER_STATE_TYPE_INTERNAL_UVD:
+			return rdev->pm.dpm.uvd_ps;
+		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_BOOT:
+			return rdev->pm.dpm.boot_ps;
+		case POWER_STATE_TYPE_INTERNAL_THERMAL:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_ACPI:
+			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+				return ps;
+			break;
+		case POWER_STATE_TYPE_INTERNAL_ULV:
+			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+				return ps;
+			break;
+		default:
+			break;
+		}
+	}
+	/* use a fallback state if we didn't match */
+	switch (dpm_state) {
+	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+		return rdev->pm.dpm.uvd_ps;
+	case POWER_STATE_TYPE_INTERNAL_THERMAL:
+		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
+		goto restart_search;
+	case POWER_STATE_TYPE_INTERNAL_ACPI:
+		dpm_state = POWER_STATE_TYPE_BATTERY;
+		goto restart_search;
+	case POWER_STATE_TYPE_BATTERY:
+		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+		goto restart_search;
+	default:
+		break;
+	}
+
+	return NULL;
+}
+
+static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
+{
+	int i;
+	struct radeon_ps *ps;
+	enum radeon_pm_state_type dpm_state;
+
+	/* if dpm init failed */
+	if (!rdev->pm.dpm_enabled)
+		return;
+
+	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
+		/* add other state override checks here */
+		if (!rdev->pm.dpm.thermal_active)
+			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
+	}
+	dpm_state = rdev->pm.dpm.state;
+
+	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
+	if (ps)
+		rdev->pm.dpm.requested_ps = ps;
+	else
+		return;
+
+	/* no need to reprogram if nothing changed */
+	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
+		/* update display watermarks based on new power state */
+		if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
+			radeon_bandwidth_update(rdev);
+			/* update displays */
+			radeon_dpm_display_configuration_changed(rdev);
+			rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+			rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+		}
+		return;
+	}
+
+	printk("switching from power state:\n");
+	radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
+	printk("switching to power state:\n");
+	radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
+
+	mutex_lock(&rdev->ddev->struct_mutex);
+	down_write(&rdev->pm.mclk_lock);
+	mutex_lock(&rdev->ring_lock);
+
+	/* update display watermarks based on new power state */
+	radeon_bandwidth_update(rdev);
+	/* update displays */
+	radeon_dpm_display_configuration_changed(rdev);
+
+	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+
+	/* wait for the rings to drain */
+	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+		struct radeon_ring *ring = &rdev->ring[i];
+		if (ring->ready)
+			radeon_fence_wait_empty_locked(rdev, i);
+	}
+
+	/* program the new power state */
+	radeon_dpm_set_power_state(rdev);
+
+	/* update current power state */
+	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
+
+	mutex_unlock(&rdev->ring_lock);
+	up_write(&rdev->pm.mclk_lock);
+	mutex_unlock(&rdev->ddev->struct_mutex);
+}
+
+void radeon_dpm_enable_power_state(struct radeon_device *rdev,
+				   enum radeon_pm_state_type dpm_state)
+{
+	if (!rdev->pm.dpm_enabled)
+		return;
+
+	mutex_lock(&rdev->pm.mutex);
+	switch (dpm_state) {
+	case POWER_STATE_TYPE_INTERNAL_THERMAL:
+		rdev->pm.dpm.thermal_active = true;
+		break;
+	default:
+		rdev->pm.dpm.thermal_active = false;
+		break;
+	}
+	rdev->pm.dpm.state = dpm_state;
+	mutex_unlock(&rdev->pm.mutex);
+	radeon_pm_compute_clocks(rdev);
+}
+
+static void radeon_pm_suspend_old(struct radeon_device *rdev)
 {
 	mutex_lock(&rdev->pm.mutex);
 	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
@@ -520,7 +787,26 @@ void radeon_pm_suspend(struct radeon_device *rdev)
 	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
 }
 
-void radeon_pm_resume(struct radeon_device *rdev)
+static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
+{
+	mutex_lock(&rdev->pm.mutex);
+	/* disable dpm */
+	radeon_dpm_disable(rdev);
+	/* reset the power state */
+	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
+	rdev->pm.dpm_enabled = false;
+	mutex_unlock(&rdev->pm.mutex);
+}
+
+void radeon_pm_suspend(struct radeon_device *rdev)
+{
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_suspend_dpm(rdev);
+	else
+		radeon_pm_suspend_old(rdev);
+}
+
+static void radeon_pm_resume_old(struct radeon_device *rdev)
 {
 	/* set up the default clocks if the MC ucode is loaded */
 	if ((rdev->family >= CHIP_BARTS) &&
@@ -555,12 +841,50 @@ void radeon_pm_resume(struct radeon_device *rdev)
 	radeon_pm_compute_clocks(rdev);
 }
 
-int radeon_pm_init(struct radeon_device *rdev)
+static void radeon_pm_resume_dpm(struct radeon_device *rdev)
+{
+	int ret;
+
+	/* asic init will reset to the boot state */
+	mutex_lock(&rdev->pm.mutex);
+	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
+	radeon_dpm_setup_asic(rdev);
+	ret = radeon_dpm_enable(rdev);
+	mutex_unlock(&rdev->pm.mutex);
+	if (ret) {
+		DRM_ERROR("radeon: dpm resume failed\n");
+		if ((rdev->family >= CHIP_BARTS) &&
+		    (rdev->family <= CHIP_CAYMAN) &&
+		    rdev->mc_fw) {
+			if (rdev->pm.default_vddc)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+							SET_VOLTAGE_TYPE_ASIC_VDDC);
+			if (rdev->pm.default_vddci)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+							SET_VOLTAGE_TYPE_ASIC_VDDCI);
+			if (rdev->pm.default_sclk)
+				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+			if (rdev->pm.default_mclk)
+				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+		}
+	} else {
+		rdev->pm.dpm_enabled = true;
+		radeon_pm_compute_clocks(rdev);
+	}
+}
+
+void radeon_pm_resume(struct radeon_device *rdev)
+{
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_resume_dpm(rdev);
+	else
+		radeon_pm_resume_old(rdev);
+}
+
+static int radeon_pm_init_old(struct radeon_device *rdev)
 {
 	int ret;
 
-	/* default to profile method */
-	rdev->pm.pm_method = PM_METHOD_PROFILE;
 	rdev->pm.profile = PM_PROFILE_DEFAULT;
 	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
 	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
@@ -622,7 +946,103 @@ int radeon_pm_init(struct radeon_device *rdev)
 	return 0;
 }
 
-void radeon_pm_fini(struct radeon_device *rdev)
+static void radeon_dpm_print_power_states(struct radeon_device *rdev)
+{
+	int i;
+
+	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+		printk("== power state %d ==\n", i);
+		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
+	}
+}
+
+static int radeon_pm_init_dpm(struct radeon_device *rdev)
+{
+	int ret;
+
+	/* default to performance state */
+	rdev->pm.dpm.state = POWER_STATE_TYPE_PERFORMANCE;
+	rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
+	rdev->pm.default_sclk = rdev->clock.default_sclk;
+	rdev->pm.default_mclk = rdev->clock.default_mclk;
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
+
+	if (rdev->bios && rdev->is_atom_bios)
+		radeon_atombios_get_power_modes(rdev);
+	else
+		return -EINVAL;
+
+	/* set up the internal thermal sensor if applicable */
+	ret = radeon_hwmon_init(rdev);
+	if (ret)
+		return ret;
+
+	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
+	mutex_lock(&rdev->pm.mutex);
+	radeon_dpm_init(rdev);
+	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
+	radeon_dpm_print_power_states(rdev);
+	radeon_dpm_setup_asic(rdev);
+	ret = radeon_dpm_enable(rdev);
+	mutex_unlock(&rdev->pm.mutex);
+	if (ret) {
+		rdev->pm.dpm_enabled = false;
+		if ((rdev->family >= CHIP_BARTS) &&
+		    (rdev->family <= CHIP_CAYMAN) &&
+		    rdev->mc_fw) {
+			if (rdev->pm.default_vddc)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+							SET_VOLTAGE_TYPE_ASIC_VDDC);
+			if (rdev->pm.default_vddci)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+							SET_VOLTAGE_TYPE_ASIC_VDDCI);
+			if (rdev->pm.default_sclk)
+				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+			if (rdev->pm.default_mclk)
+				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+		}
+		DRM_ERROR("radeon: dpm initialization failed\n");
+		return ret;
+	}
+	rdev->pm.dpm_enabled = true;
+	radeon_pm_compute_clocks(rdev);
+
+	if (rdev->pm.num_power_states > 1) {
+		ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+		if (ret)
+			DRM_ERROR("failed to create device file for dpm state\n");
+		/* XXX: these are noops for dpm but are here for backwards compat */
+		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+		if (ret)
+			DRM_ERROR("failed to create device file for power profile\n");
+		ret = device_create_file(rdev->dev, &dev_attr_power_method);
+		if (ret)
+			DRM_ERROR("failed to create device file for power method\n");
+		DRM_INFO("radeon: dpm initialized\n");
+	}
+
+	return 0;
+}
+
+int radeon_pm_init(struct radeon_device *rdev)
+{
+	/* enable dpm on rv6xx+ */
+	switch (rdev->family) {
+	default:
+		/* default to profile method */
+		rdev->pm.pm_method = PM_METHOD_PROFILE;
+		break;
+	}
+
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		return radeon_pm_init_dpm(rdev);
+	else
+		return radeon_pm_init_old(rdev);
+}
+
+static void radeon_pm_fini_old(struct radeon_device *rdev)
 {
 	if (rdev->pm.num_power_states > 1) {
 		mutex_lock(&rdev->pm.mutex);
@@ -650,7 +1070,35 @@ void radeon_pm_fini(struct radeon_device *rdev)
 	radeon_hwmon_fini(rdev);
 }
 
-void radeon_pm_compute_clocks(struct radeon_device *rdev)
+static void radeon_pm_fini_dpm(struct radeon_device *rdev)
+{
+	if (rdev->pm.num_power_states > 1) {
+		mutex_lock(&rdev->pm.mutex);
+		radeon_dpm_disable(rdev);
+		mutex_unlock(&rdev->pm.mutex);
+
+		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
+		/* XXX backwards compat */
+		device_remove_file(rdev->dev, &dev_attr_power_profile);
+		device_remove_file(rdev->dev, &dev_attr_power_method);
+	}
+	radeon_dpm_fini(rdev);
+
+	if (rdev->pm.power_state)
+		kfree(rdev->pm.power_state);
+
+	radeon_hwmon_fini(rdev);
+}
+
+void radeon_pm_fini(struct radeon_device *rdev)
+{
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_fini_dpm(rdev);
+	else
+		radeon_pm_fini_old(rdev);
+}
+
+static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
 {
 	struct drm_device *ddev = rdev->ddev;
 	struct drm_crtc *crtc;
@@ -721,6 +1169,38 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
 	mutex_unlock(&rdev->pm.mutex);
 }
 
+static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+
+	mutex_lock(&rdev->pm.mutex);
+
+	rdev->pm.dpm.new_active_crtcs = 0;
+	rdev->pm.dpm.new_active_crtc_count = 0;
+	list_for_each_entry(crtc,
+			    &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (crtc->enabled) {
+			rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
+			rdev->pm.dpm.new_active_crtc_count++;
+		}
+	}
+
+	radeon_dpm_change_power_state_locked(rdev);
+
+	mutex_unlock(&rdev->pm.mutex);
+}
+
+void radeon_pm_compute_clocks(struct radeon_device *rdev)
+{
+	if (rdev->pm.pm_method == PM_METHOD_DPM)
+		radeon_pm_compute_clocks_dpm(rdev);
+	else
+		radeon_pm_compute_clocks_old(rdev);
+}
+
 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
 {
 	int crtc, vpos, hpos, vbl_status;
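
Note that in this commit the family switch in radeon_pm_init() has only a default case, so every ASIC still ends up on PM_METHOD_PROFILE and the new dpm code stays dormant. Below is a rough, hypothetical sketch of how the new radeon_dpm module parameter ("1 = enable, 0 = disable, -1 = auto", settable e.g. as radeon.dpm=1 on the kernel command line) would presumably be consulted once per-ASIC dpm backends are added; the family listed is purely illustrative:

    /*
     * Hypothetical sketch, not what this commit does: once a dpm backend for
     * some family exists, radeon_pm_init() would likely grow a case like this,
     * honouring the new radeon_dpm module parameter.
     */
    int radeon_pm_init(struct radeon_device *rdev)
    {
            switch (rdev->family) {
            case CHIP_RV635:        /* illustrative dpm-capable ASIC */
                    if (radeon_dpm == 0)
                            rdev->pm.pm_method = PM_METHOD_PROFILE;
                    else
                            rdev->pm.pm_method = PM_METHOD_DPM;
                    break;
            default:
                    /* default to the legacy profile method */
                    rdev->pm.pm_method = PM_METHOD_PROFILE;
                    break;
            }

            if (rdev->pm.pm_method == PM_METHOD_DPM)
                    return radeon_pm_init_dpm(rdev);
            else
                    return radeon_pm_init_old(rdev);
    }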