Diffstat (limited to 'drivers/gpu/drm/radeon/r100.c')
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 731
1 file changed, 518 insertions(+), 213 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d7388fdb6d0..cc004b05d63 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -37,6 +37,7 @@
 #include "rs100d.h"
 #include "rv200d.h"
 #include "rv250d.h"
+#include "atom.h"
 
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
@@ -67,6 +68,264 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
 
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+    int i;
+    rdev->pm.dynpm_can_upclock = true;
+    rdev->pm.dynpm_can_downclock = true;
+
+    switch (rdev->pm.dynpm_planned_action) {
+    case DYNPM_ACTION_MINIMUM:
+        rdev->pm.requested_power_state_index = 0;
+        rdev->pm.dynpm_can_downclock = false;
+        break;
+    case DYNPM_ACTION_DOWNCLOCK:
+        if (rdev->pm.current_power_state_index == 0) {
+            rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+            rdev->pm.dynpm_can_downclock = false;
+        } else {
+            if (rdev->pm.active_crtc_count > 1) {
+                for (i = 0; i < rdev->pm.num_power_states; i++) {
+                    if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+                        continue;
+                    else if (i >= rdev->pm.current_power_state_index) {
+                        rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+                        break;
+                    } else {
+                        rdev->pm.requested_power_state_index = i;
+                        break;
+                    }
+                }
+            } else
+                rdev->pm.requested_power_state_index =
+                    rdev->pm.current_power_state_index - 1;
+        }
+        /* don't use the power state if crtcs are active and no display flag is set */
+        if ((rdev->pm.active_crtc_count > 0) &&
+            (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+             RADEON_PM_MODE_NO_DISPLAY)) {
+            rdev->pm.requested_power_state_index++;
+        }
+        break;
+    case DYNPM_ACTION_UPCLOCK:
+        if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+            rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+            rdev->pm.dynpm_can_upclock = false;
+        } else {
+            if (rdev->pm.active_crtc_count > 1) {
+                for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+                    if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+                        continue;
+                    else if (i <= rdev->pm.current_power_state_index) {
+                        rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+                        break;
+                    } else {
+                        rdev->pm.requested_power_state_index = i;
+                        break;
+                    }
+                }
+            } else
+                rdev->pm.requested_power_state_index =
+                    rdev->pm.current_power_state_index + 1;
+        }
+        break;
+    case DYNPM_ACTION_DEFAULT:
+        rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+        rdev->pm.dynpm_can_upclock = false;
+        break;
+    case DYNPM_ACTION_NONE:
+    default:
+        DRM_ERROR("Requested mode for undefined action\n");
+        return;
+    }
+    /* only one clock mode per power state */
+    rdev->pm.requested_clock_mode_index = 0;
+
+    DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+              rdev->pm.power_state[rdev->pm.requested_power_state_index].
+              clock_info[rdev->pm.requested_clock_mode_index].sclk,
+              rdev->pm.power_state[rdev->pm.requested_power_state_index].
+              clock_info[rdev->pm.requested_clock_mode_index].mclk,
+              rdev->pm.power_state[rdev->pm.requested_power_state_index].
+              pcie_lanes);
+}
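Note: r100_pm_get_dynpm_state() only computes pm.requested_power_state_index from the planned action; it never touches the hardware, and it assumes the power-state table is ordered from lowest to highest clocks. A minimal sketch of a caller, where dynpm_tick() and radeon_apply_state() are hypothetical stand-ins for the reclock path, not radeon functions:

    /* hypothetical caller: pick an action from load, then reclock */
    static void dynpm_tick(struct radeon_device *rdev, bool gpu_busy)
    {
        rdev->pm.dynpm_planned_action = gpu_busy ?
            DYNPM_ACTION_UPCLOCK : DYNPM_ACTION_DOWNCLOCK;
        r100_pm_get_dynpm_state(rdev);  /* fills requested_power_state_index */
        if (rdev->pm.requested_power_state_index !=
            rdev->pm.current_power_state_index)
            radeon_apply_state(rdev);   /* hypothetical reclock helper */
    }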
+
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+    /* default */
+    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+    rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+    /* low sh */
+    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+    rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+    /* high sh */
+    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+    rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+    /* low mh */
+    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+    rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+    /* high mh */
+    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+    rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
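Note: the profile table maps a (profile, dpms on/off) combination to power-state and clock-mode indices; "sh"/"mh" abbreviate single-head/multi-head. A sketch of how the table is meant to be consumed, assuming only the radeon_pm_profile fields used in the assignments above (the helper itself is illustrative, not driver code):

    /* illustrative consumer of the profile table */
    static void profile_pick_state(struct radeon_device *rdev, int profile_idx,
                                   bool dpms_on)
    {
        struct radeon_pm_profile *p = &rdev->pm.profiles[profile_idx];

        rdev->pm.requested_power_state_index =
            dpms_on ? p->dpms_on_ps_idx : p->dpms_off_ps_idx;
        rdev->pm.requested_clock_mode_index =
            dpms_on ? p->dpms_on_cm_idx : p->dpms_off_cm_idx;
    }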
+
+void r100_pm_misc(struct radeon_device *rdev)
+{
+    int requested_index = rdev->pm.requested_power_state_index;
+    struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+    struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+    u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+    if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+        if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+            tmp = RREG32(voltage->gpio.reg);
+            if (voltage->active_high)
+                tmp |= voltage->gpio.mask;
+            else
+                tmp &= ~(voltage->gpio.mask);
+            WREG32(voltage->gpio.reg, tmp);
+            if (voltage->delay)
+                udelay(voltage->delay);
+        } else {
+            tmp = RREG32(voltage->gpio.reg);
+            if (voltage->active_high)
+                tmp &= ~voltage->gpio.mask;
+            else
+                tmp |= voltage->gpio.mask;
+            WREG32(voltage->gpio.reg, tmp);
+            if (voltage->delay)
+                udelay(voltage->delay);
+        }
+    }
+
+    sclk_cntl = RREG32_PLL(SCLK_CNTL);
+    sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+    sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+    sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+    sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+    if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+        sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+        if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+            sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+        else
+            sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+        if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+            sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+        else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+            sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+    } else
+        sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+    if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+        sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+        if (voltage->delay) {
+            sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+            switch (voltage->delay) {
+            case 33:
+                sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+                break;
+            case 66:
+                sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+                break;
+            case 99:
+                sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+                break;
+            case 132:
+                sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+                break;
+            }
+        } else
+            sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+    } else
+        sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+    if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+        sclk_cntl &= ~FORCE_HDP;
+    else
+        sclk_cntl |= FORCE_HDP;
+
+    WREG32_PLL(SCLK_CNTL, sclk_cntl);
+    WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+    WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+    /* set pcie lanes */
+    if ((rdev->flags & RADEON_IS_PCIE) &&
+        !(rdev->flags & RADEON_IS_IGP) &&
+        rdev->asic->set_pcie_lanes &&
+        (ps->pcie_lanes !=
+         rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+        radeon_set_pcie_lanes(rdev,
+                              ps->pcie_lanes);
+        DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+    }
+}
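Note: the two GPIO branches in r100_pm_misc() differ only in polarity; the whole voltage toggle reduces to one read-modify-write. Distilled sketch (set_voltage_gpio() is a hypothetical helper, not part of the driver):

    static void set_voltage_gpio(struct radeon_device *rdev,
                                 struct radeon_voltage *voltage, bool drop)
    {
        u32 tmp = RREG32(voltage->gpio.reg);

        /* with active_high polarity, setting the mask bits asserts the drop */
        if (drop == voltage->active_high)
            tmp |= voltage->gpio.mask;
        else
            tmp &= ~voltage->gpio.mask;
        WREG32(voltage->gpio.reg, tmp);
        if (voltage->delay)
            udelay(voltage->delay); /* let the regulator settle */
    }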
+
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+    struct drm_device *ddev = rdev->ddev;
+    struct drm_crtc *crtc;
+    struct radeon_crtc *radeon_crtc;
+    u32 tmp;
+
+    /* disable any active CRTCs */
+    list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+        radeon_crtc = to_radeon_crtc(crtc);
+        if (radeon_crtc->enabled) {
+            if (radeon_crtc->crtc_id) {
+                tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+                tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+                WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+            } else {
+                tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+                tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+                WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+            }
+        }
+    }
+}
+
+void r100_pm_finish(struct radeon_device *rdev)
+{
+    struct drm_device *ddev = rdev->ddev;
+    struct drm_crtc *crtc;
+    struct radeon_crtc *radeon_crtc;
+    u32 tmp;
+
+    /* enable any active CRTCs */
+    list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+        radeon_crtc = to_radeon_crtc(crtc);
+        if (radeon_crtc->enabled) {
+            if (radeon_crtc->crtc_id) {
+                tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+                tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+                WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+            } else {
+                tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+                tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+                WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+            }
+        }
+    }
+}
+
+bool r100_gui_idle(struct radeon_device *rdev)
+{
+    if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
+        return false;
+    else
+        return true;
+}
+
 /* hpd for digital panel detect/disconnect */
 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 {
@@ -254,6 +513,9 @@ int r100_irq_set(struct radeon_device *rdev)
     if (rdev->irq.sw_int) {
         tmp |= RADEON_SW_INT_ENABLE;
     }
+    if (rdev->irq.gui_idle) {
+        tmp |= RADEON_GUI_IDLE_MASK;
+    }
     if (rdev->irq.crtc_vblank_int[0]) {
         tmp |= RADEON_CRTC_VBLANK_MASK;
     }
@@ -288,6 +550,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
         RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
         RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
+    /* the interrupt works, but the status bit is permanently asserted */
+    if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+        if (!rdev->irq.gui_idle_acked)
+            irq_mask |= RADEON_GUI_IDLE_STAT;
+    }
+
     if (irqs) {
         WREG32(RADEON_GEN_INT_STATUS, irqs);
     }
@@ -299,6 +567,9 @@ int r100_irq_process(struct radeon_device *rdev)
     uint32_t status, msi_rearm;
     bool queue_hotplug = false;
 
+    /* reset gui idle ack. the status bit is broken */
+    rdev->irq.gui_idle_acked = false;
+
     status = r100_irq_ack(rdev);
     if (!status) {
         return IRQ_NONE;
@@ -311,6 +582,12 @@ int r100_irq_process(struct radeon_device *rdev)
     if (status & RADEON_SW_INT_TEST) {
         radeon_fence_process(rdev);
     }
+    /* gui idle interrupt */
+    if (status & RADEON_GUI_IDLE_STAT) {
+        rdev->irq.gui_idle_acked = true;
+        rdev->pm.gui_idle = true;
+        wake_up(&rdev->irq.idle_queue);
+    }
     /* Vertical blank interrupts */
     if (status & RADEON_CRTC_VBLANK_STAT) {
         drm_handle_vblank(rdev->ddev, 0);
@@ -332,6 +609,8 @@ int r100_irq_process(struct radeon_device *rdev)
         }
         status = r100_irq_ack(rdev);
     }
+    /* reset gui idle ack. the status bit is broken */
+    rdev->irq.gui_idle_acked = false;
     if (queue_hotplug)
         queue_work(rdev->wq, &rdev->hotplug_work);
     if (rdev->msi_enabled) {
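Note: the gui_idle interrupt plumbing above exists so that PM code can sleep until the engine drains instead of polling RBBM_STATUS. Sketch of the intended wait pattern, using the irq.idle_queue and pm.gui_idle fields introduced here (the helper and the 500 ms timeout are illustrative):

    static void radeon_wait_for_gui_idle(struct radeon_device *rdev)
    {
        rdev->pm.gui_idle = false;
        rdev->irq.gui_idle = true;
        radeon_irq_set(rdev);       /* programs RADEON_GUI_IDLE_MASK */
        wait_event_timeout(rdev->irq.idle_queue, rdev->pm.gui_idle,
                           msecs_to_jiffies(500));
        rdev->irq.gui_idle = false;
        radeon_irq_set(rdev);
    }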
@@ -663,26 +942,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
     if (r100_debugfs_cp_init(rdev)) {
         DRM_ERROR("Failed to register debugfs file for CP !\n");
     }
-    /* Reset CP */
-    tmp = RREG32(RADEON_CP_CSQ_STAT);
-    if ((tmp & (1 << 31))) {
-        DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
-        WREG32(RADEON_CP_CSQ_MODE, 0);
-        WREG32(RADEON_CP_CSQ_CNTL, 0);
-        WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
-        tmp = RREG32(RADEON_RBBM_SOFT_RESET);
-        mdelay(2);
-        WREG32(RADEON_RBBM_SOFT_RESET, 0);
-        tmp = RREG32(RADEON_RBBM_SOFT_RESET);
-        mdelay(2);
-        tmp = RREG32(RADEON_CP_CSQ_STAT);
-        if ((tmp & (1 << 31))) {
-            DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
-        }
-    } else {
-        DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
-    }
-
     if (!rdev->me_fw) {
         r = r100_cp_init_microcode(rdev);
         if (r) {
@@ -787,39 +1046,6 @@ void r100_cp_disable(struct radeon_device *rdev)
     }
 }
 
-int r100_cp_reset(struct radeon_device *rdev)
-{
-    uint32_t tmp;
-    bool reinit_cp;
-    int i;
-
-    reinit_cp = rdev->cp.ready;
-    rdev->cp.ready = false;
-    WREG32(RADEON_CP_CSQ_MODE, 0);
-    WREG32(RADEON_CP_CSQ_CNTL, 0);
-    WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
-    (void)RREG32(RADEON_RBBM_SOFT_RESET);
-    udelay(200);
-    WREG32(RADEON_RBBM_SOFT_RESET, 0);
-    /* Wait to prevent race in RBBM_STATUS */
-    mdelay(1);
-    for (i = 0; i < rdev->usec_timeout; i++) {
-        tmp = RREG32(RADEON_RBBM_STATUS);
-        if (!(tmp & (1 << 16))) {
-            DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
-                     tmp);
-            if (reinit_cp) {
-                return r100_cp_init(rdev, rdev->cp.ring_size);
-            }
-            return 0;
-        }
-        DRM_UDELAY(1);
-    }
-    tmp = RREG32(RADEON_RBBM_STATUS);
-    DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
-    return -1;
-}
-
 void r100_cp_commit(struct radeon_device *rdev)
 {
     WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -1733,76 +1959,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
     return -1;
 }
 
-void r100_gpu_init(struct radeon_device *rdev)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
 {
-    /* TODO: anythings to do here ? pipes ? */
-    r100_hdp_reset(rdev);
+    lockup->last_cp_rptr = cp->rptr;
+    lockup->last_jiffies = jiffies;
+}
+
+/**
+ * r100_gpu_cp_is_lockup() - check whether the CP is locked up
+ * @rdev: radeon device structure
+ * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
+ * @cp: radeon_cp structure holding CP information
+ *
+ * The lockup tracking information does not need explicit initialization:
+ * either the CP rptr will differ from the recorded one, or a jiffies wrap
+ * around will be detected; both cases re-initialize the tracking data.
+ *
+ * A false positive is possible if we are called again after a long delay
+ * and last_cp_rptr happens to equal the current CP rptr; unlikely, but it
+ * can happen. To avoid it, if more than 3 seconds have elapsed since the
+ * last call we return false and update the tracking information. As a
+ * consequence the caller must invoke r100_gpu_cp_is_lockup() several times
+ * within 3 seconds for a lockup to be reported; the fencing code should be
+ * aware of that.
+ *
+ * The caller should also write to the ring to force CP activity, so that
+ * we don't report a false positive when the CP simply has nothing to do.
+ **/
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+{
+    unsigned long cjiffies, elapsed;
+
+    cjiffies = jiffies;
+    if (!time_after(cjiffies, lockup->last_jiffies)) {
+        /* likely a wrap around */
+        lockup->last_cp_rptr = cp->rptr;
+        lockup->last_jiffies = jiffies;
+        return false;
+    }
+    if (cp->rptr != lockup->last_cp_rptr) {
+        /* CP is still working, no lockup */
+        lockup->last_cp_rptr = cp->rptr;
+        lockup->last_jiffies = jiffies;
+        return false;
+    }
+    elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
+    if (elapsed >= 3000) {
+        /* very likely the improbable case where the current rptr just
+         * happens to equal the rptr recorded a while ago; treat it as
+         * a false positive and update the tracking information so we
+         * get called again later
+         */
+        lockup->last_cp_rptr = cp->rptr;
+        lockup->last_jiffies = jiffies;
+        return false;
+    }
+    if (elapsed >= 1000) {
+        dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
+        return true;
+    }
+    /* give a chance to the GPU ... */
+    return false;
 }
 
-void r100_hdp_reset(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev)
 {
-    uint32_t tmp;
+    u32 rbbm_status;
+    int r;
 
-    tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
-    tmp |= (7 << 28);
-    WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
-    (void)RREG32(RADEON_HOST_PATH_CNTL);
-    udelay(200);
-    WREG32(RADEON_RBBM_SOFT_RESET, 0);
-    WREG32(RADEON_HOST_PATH_CNTL, tmp);
-    (void)RREG32(RADEON_HOST_PATH_CNTL);
+    rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+    if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+        r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+        return false;
+    }
+    /* force CP activities */
+    r = radeon_ring_lock(rdev, 2);
+    if (!r) {
+        /* PACKET2 NOP */
+        radeon_ring_write(rdev, 0x80000000);
+        radeon_ring_write(rdev, 0x80000000);
+        radeon_ring_unlock_commit(rdev);
+    }
+    rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+    return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
 }
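Note: per the kernel-doc above, the detector has to be polled; a single call can never report a lockup. Illustrative caller on a fence-wait timeout path (not the actual radeon_fence.c code; radeon_gpu_is_lockup() is assumed to be the asic-level wrapper around r100_gpu_is_lockup()):

    static bool fence_timeout_check(struct radeon_device *rdev)
    {
        /* r100_gpu_is_lockup() queues NOPs itself to force CP activity */
        if (radeon_gpu_is_lockup(rdev)) {
            dev_warn(rdev->dev, "GPU lockup, trying reset\n");
            return radeon_asic_reset(rdev) == 0;  /* true if reset worked */
        }
        return false;  /* keep waiting */
    }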
-int r100_rb2d_reset(struct radeon_device *rdev)
+void r100_bm_disable(struct radeon_device *rdev)
 {
-    uint32_t tmp;
-    int i;
+    u32 tmp;
 
-    WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
-    (void)RREG32(RADEON_RBBM_SOFT_RESET);
-    udelay(200);
-    WREG32(RADEON_RBBM_SOFT_RESET, 0);
-    /* Wait to prevent race in RBBM_STATUS */
+    /* disable bus mastering */
+    tmp = RREG32(R_000030_BUS_CNTL);
+    WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
+    mdelay(1);
+    WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
+    mdelay(1);
+    WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+    tmp = RREG32(RADEON_BUS_CNTL);
+    mdelay(1);
+    pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+    pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
     mdelay(1);
-    for (i = 0; i < rdev->usec_timeout; i++) {
-        tmp = RREG32(RADEON_RBBM_STATUS);
-        if (!(tmp & (1 << 26))) {
-            DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
-                     tmp);
-            return 0;
-        }
-        DRM_UDELAY(1);
-    }
-    tmp = RREG32(RADEON_RBBM_STATUS);
-    DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
-    return -1;
 }
 
-int r100_gpu_reset(struct radeon_device *rdev)
+int r100_asic_reset(struct radeon_device *rdev)
 {
-    uint32_t status;
+    struct r100_mc_save save;
+    u32 status, tmp;
 
-    /* reset order likely matter */
-    status = RREG32(RADEON_RBBM_STATUS);
-    /* reset HDP */
-    r100_hdp_reset(rdev);
-    /* reset rb2d */
-    if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
-        r100_rb2d_reset(rdev);
+    r100_mc_stop(rdev, &save);
+    status = RREG32(R_000E40_RBBM_STATUS);
+    if (!G_000E40_GUI_ACTIVE(status)) {
+        return 0;
     }
-    /* TODO: reset 3D engine */
+    status = RREG32(R_000E40_RBBM_STATUS);
+    dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+    /* stop CP */
+    WREG32(RADEON_CP_CSQ_CNTL, 0);
+    tmp = RREG32(RADEON_CP_RB_CNTL);
+    WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+    WREG32(RADEON_CP_RB_RPTR_WR, 0);
+    WREG32(RADEON_CP_RB_WPTR, 0);
+    WREG32(RADEON_CP_RB_CNTL, tmp);
+    /* save PCI state */
+    pci_save_state(rdev->pdev);
+    /* disable bus mastering */
+    r100_bm_disable(rdev);
+    WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
+                                     S_0000F0_SOFT_RESET_RE(1) |
+                                     S_0000F0_SOFT_RESET_PP(1) |
+                                     S_0000F0_SOFT_RESET_RB(1));
+    RREG32(R_0000F0_RBBM_SOFT_RESET);
+    mdelay(500);
+    WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+    mdelay(1);
+    status = RREG32(R_000E40_RBBM_STATUS);
+    dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
     /* reset CP */
-    status = RREG32(RADEON_RBBM_STATUS);
-    if (status & (1 << 16)) {
-        r100_cp_reset(rdev);
-    }
+    WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+    RREG32(R_0000F0_RBBM_SOFT_RESET);
+    mdelay(500);
+    WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+    mdelay(1);
+    status = RREG32(R_000E40_RBBM_STATUS);
+    dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+    /* restore PCI & busmastering */
+    pci_restore_state(rdev->pdev);
+    r100_enable_bm(rdev);
     /* Check if GPU is idle */
-    status = RREG32(RADEON_RBBM_STATUS);
-    if (status & RADEON_RBBM_ACTIVE) {
-        DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
+    if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
+        G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
+        dev_err(rdev->dev, "failed to reset GPU\n");
+        rdev->gpu_lockup = true;
         return -1;
     }
-    DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
+    r100_mc_resume(rdev, &save);
+    dev_info(rdev->dev, "GPU reset succeeded\n");
     return 0;
 }
 
@@ -2002,11 +2315,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
         else
             rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
     }
-    /* FIXME remove this once we support unmappable VRAM */
-    if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-        rdev->mc.mc_vram_size = rdev->mc.aper_size;
-        rdev->mc.real_vram_size = rdev->mc.aper_size;
-    }
 }
 
 void r100_vga_set_state(struct radeon_device *rdev, bool state)
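Note: bus mastering is disabled before the soft reset so that in-flight DMA cannot scribble over memory while the engine blocks are in reset, then restored afterwards. The open-coded config-space access above (offset 0x4 is PCI_COMMAND, and masking with 0xFFFB clears PCI_COMMAND_MASTER, bit 2) is equivalent to the generic PCI helpers; sketch of the same sequence with them (the function is illustrative):

    #include <linux/pci.h>

    static void bm_around_reset(struct pci_dev *pdev)
    {
        pci_clear_master(pdev); /* clears PCI_COMMAND_MASTER (bit 2 at offset 0x4) */
        /* ... soft-reset the engine blocks here ... */
        pci_set_master(pdev);   /* restore bus mastering */
    }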
@@ -2335,53 +2643,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
     fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
     uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
     fixed20_12 memtcas_ff[8] = {
-        fixed_init(1),
-        fixed_init(2),
-        fixed_init(3),
-        fixed_init(0),
-        fixed_init_half(1),
-        fixed_init_half(2),
-        fixed_init(0),
+        dfixed_init(1),
+        dfixed_init(2),
+        dfixed_init(3),
+        dfixed_init(0),
+        dfixed_init_half(1),
+        dfixed_init_half(2),
+        dfixed_init(0),
     };
     fixed20_12 memtcas_rs480_ff[8] = {
-        fixed_init(0),
-        fixed_init(1),
-        fixed_init(2),
-        fixed_init(3),
-        fixed_init(0),
-        fixed_init_half(1),
-        fixed_init_half(2),
-        fixed_init_half(3),
+        dfixed_init(0),
+        dfixed_init(1),
+        dfixed_init(2),
+        dfixed_init(3),
+        dfixed_init(0),
+        dfixed_init_half(1),
+        dfixed_init_half(2),
+        dfixed_init_half(3),
     };
     fixed20_12 memtcas2_ff[8] = {
-        fixed_init(0),
-        fixed_init(1),
-        fixed_init(2),
-        fixed_init(3),
-        fixed_init(4),
-        fixed_init(5),
-        fixed_init(6),
-        fixed_init(7),
+        dfixed_init(0),
+        dfixed_init(1),
+        dfixed_init(2),
+        dfixed_init(3),
+        dfixed_init(4),
+        dfixed_init(5),
+        dfixed_init(6),
+        dfixed_init(7),
     };
     fixed20_12 memtrbs[8] = {
-        fixed_init(1),
-        fixed_init_half(1),
-        fixed_init(2),
-        fixed_init_half(2),
-        fixed_init(3),
-        fixed_init_half(3),
-        fixed_init(4),
-        fixed_init_half(4)
+        dfixed_init(1),
+        dfixed_init_half(1),
+        dfixed_init(2),
+        dfixed_init_half(2),
+        dfixed_init(3),
+        dfixed_init_half(3),
+        dfixed_init(4),
+        dfixed_init_half(4)
     };
     fixed20_12 memtrbs_r4xx[8] = {
-        fixed_init(4),
-        fixed_init(5),
-        fixed_init(6),
-        fixed_init(7),
-        fixed_init(8),
-        fixed_init(9),
-        fixed_init(10),
-        fixed_init(11)
+        dfixed_init(4),
+        dfixed_init(5),
+        dfixed_init(6),
+        dfixed_init(7),
+        dfixed_init(8),
+        dfixed_init(9),
+        dfixed_init(10),
+        dfixed_init(11)
     };
     fixed20_12 min_mem_eff;
     fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@@ -2412,7 +2720,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
         }
     }
 
-    min_mem_eff.full = rfixed_const_8(0);
+    min_mem_eff.full = dfixed_const_8(0);
     /* get modes */
     if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
         uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@@ -2433,28 +2741,28 @@ void r100_bandwidth_update(struct radeon_device *rdev)
     mclk_ff = rdev->pm.mclk;
 
     temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
-    temp_ff.full = rfixed_const(temp);
-    mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
+    temp_ff.full = dfixed_const(temp);
+    mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
 
     pix_clk.full = 0;
     pix_clk2.full = 0;
     peak_disp_bw.full = 0;
     if (mode1) {
-        temp_ff.full = rfixed_const(1000);
-        pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
-        pix_clk.full = rfixed_div(pix_clk, temp_ff);
-        temp_ff.full = rfixed_const(pixel_bytes1);
-        peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
+        temp_ff.full = dfixed_const(1000);
+        pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
+        pix_clk.full = dfixed_div(pix_clk, temp_ff);
+        temp_ff.full = dfixed_const(pixel_bytes1);
+        peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
     }
     if (mode2) {
-        temp_ff.full = rfixed_const(1000);
-        pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
-        pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
-        temp_ff.full = rfixed_const(pixel_bytes2);
-        peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
+        temp_ff.full = dfixed_const(1000);
+        pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
+        pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
+        temp_ff.full = dfixed_const(pixel_bytes2);
+        peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
     }
 
-    mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
+    mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
     if (peak_disp_bw.full >= mem_bw.full) {
         DRM_ERROR("You may not have enough display bandwidth for current mode\n"
                   "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
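Note: the rfixed_* to dfixed_* rename tracks the move of radeon's fixed-point helpers into the shared include/drm/drm_fixed.h; the arithmetic is unchanged. fixed20_12 is 20.12 fixed point, so dfixed_const(A) stores A << 12 (A scaled by 4096), and dfixed_const_8(0) above encodes 0.8, i.e. an assumed 80% minimum memory efficiency. A minimal sketch of the idioms this function uses throughout (the helper is illustrative):

    #include <drm/drm_fixed.h>

    /* hypothetical helper: kHz pixel clock to MHz, in 20.12 fixed point */
    static u32 pix_clk_mhz(u32 clock_khz)
    {
        fixed20_12 clk, thousand;

        clk.full = dfixed_const(clock_khz);     /* clock_khz << 12 */
        thousand.full = dfixed_const(1000);
        clk.full = dfixed_div(clk, thousand);   /* keeps 12 fraction bits */
        return dfixed_trunc(clk);               /* drop the fraction */
    }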
@@ -2496,9 +2804,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
         mem_tras = ((temp >> 12) & 0xf) + 4;
     }
     /* convert to FF */
-    trcd_ff.full = rfixed_const(mem_trcd);
-    trp_ff.full = rfixed_const(mem_trp);
-    tras_ff.full = rfixed_const(mem_tras);
+    trcd_ff.full = dfixed_const(mem_trcd);
+    trp_ff.full = dfixed_const(mem_trp);
+    tras_ff.full = dfixed_const(mem_tras);
 
     /* Get values from the MEM_SDRAM_MODE_REG register...converting its */
     temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@@ -2516,7 +2824,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
         /* extra cas latency stored in bits 23-25 0-4 clocks */
         data = (temp >> 23) & 0x7;
         if (data < 5)
-            tcas_ff.full += rfixed_const(data);
+            tcas_ff.full += dfixed_const(data);
     }
 
     if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@@ -2553,72 +2861,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)
     if (rdev->flags & RADEON_IS_AGP) {
         fixed20_12 agpmode_ff;
-        agpmode_ff.full = rfixed_const(radeon_agpmode);
-        temp_ff.full = rfixed_const_666(16);
-        sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
+        agpmode_ff.full = dfixed_const(radeon_agpmode);
+        temp_ff.full = dfixed_const_666(16);
+        sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
     }
 
     /* TODO PCIE lanes may affect this - agpmode == 16?? */
 
     if (ASIC_IS_R300(rdev)) {
-        sclk_delay_ff.full = rfixed_const(250);
+        sclk_delay_ff.full = dfixed_const(250);
     } else {
         if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
             if (rdev->mc.vram_is_ddr)
-                sclk_delay_ff.full = rfixed_const(41);
+                sclk_delay_ff.full = dfixed_const(41);
             else
-                sclk_delay_ff.full = rfixed_const(33);
+                sclk_delay_ff.full = dfixed_const(33);
         } else {
             if (rdev->mc.vram_width == 128)
-                sclk_delay_ff.full = rfixed_const(57);
+                sclk_delay_ff.full = dfixed_const(57);
            else
-                sclk_delay_ff.full = rfixed_const(41);
+                sclk_delay_ff.full = dfixed_const(41);
         }
     }
 
-    mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
+    mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
 
     if (rdev->mc.vram_is_ddr) {
         if (rdev->mc.vram_width == 32) {
-            k1.full = rfixed_const(40);
+            k1.full = dfixed_const(40);
             c = 3;
         } else {
-            k1.full = rfixed_const(20);
+            k1.full = dfixed_const(20);
             c = 1;
         }
     } else {
-        k1.full = rfixed_const(40);
+        k1.full = dfixed_const(40);
         c = 3;
     }
 
-    temp_ff.full = rfixed_const(2);
-    mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
-    temp_ff.full = rfixed_const(c);
-    mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
-    temp_ff.full = rfixed_const(4);
-    mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
-    mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
+    temp_ff.full = dfixed_const(2);
+    mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
+    temp_ff.full = dfixed_const(c);
+    mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
+    temp_ff.full = dfixed_const(4);
+    mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
+    mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
     mc_latency_mclk.full += k1.full;
 
-    mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
-    mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
+    mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
+    mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
 
     /* HW cursor time assuming worst case of full size colour cursor. */
-    temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
+    temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
     temp_ff.full += trcd_ff.full;
     if (temp_ff.full < tras_ff.full)
         temp_ff.full = tras_ff.full;
-    cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
+    cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
 
-    temp_ff.full = rfixed_const(cur_size);
-    cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
+    temp_ff.full = dfixed_const(cur_size);
+    cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
 
     /* Find the total latency for the display data. */
-    disp_latency_overhead.full = rfixed_const(8);
-    disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
+    disp_latency_overhead.full = dfixed_const(8);
+    disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
     mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
     mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
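Note: with the renames applied, the accumulation above computes the memory-controller latency, in clocks:

    mc_latency_mclk = (2*tRCD + c*tCAS + 4*(tRAS + tRP) + k1) / mclk + 4 / sclk_eff

where c and k1 are the memory-type constants chosen just before (DDR with a bus wider than 32 bits: c = 1, k1 = 20; otherwise c = 3, k1 = 40). temp_ff still holds 4 when the final sclk term is added, which is why the same register is reused for the last dfixed_div().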
@@ -2646,16 +2954,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
         /* Find the drain rate of the display buffer. */
-        temp_ff.full = rfixed_const((16/pixel_bytes1));
-        disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
+        temp_ff.full = dfixed_const((16/pixel_bytes1));
+        disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
 
         /* Find the critical point of the display buffer. */
-        crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
-        crit_point_ff.full += rfixed_const_half(0);
+        crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
+        crit_point_ff.full += dfixed_const_half(0);
 
-        critical_point = rfixed_trunc(crit_point_ff);
+        critical_point = dfixed_trunc(crit_point_ff);
 
         if (rdev->disp_priority == 2) {
             critical_point = 0;
@@ -2726,8 +3034,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
         /* Find the drain rate of the display buffer. */
-        temp_ff.full = rfixed_const((16/pixel_bytes2));
-        disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
+        temp_ff.full = dfixed_const((16/pixel_bytes2));
+        disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
 
         grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
         grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@@ -2748,8 +3056,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
             critical_point2 = 0;
         else {
             temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
-            temp_ff.full = rfixed_const(temp);
-            temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
+            temp_ff.full = dfixed_const(temp);
+            temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
             if (sclk_ff.full < temp_ff.full)
                 temp_ff.full = sclk_ff.full;
 
@@ -2757,15 +3065,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)
 
             if (mode1) {
                 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
-                time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
+                time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
             } else {
                 time_disp1_drop_priority.full = 0;
             }
             crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
-            crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
-            crit_point_ff.full += rfixed_const_half(0);
+            crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
+            crit_point_ff.full += dfixed_const_half(0);
 
-            critical_point2 = rfixed_trunc(crit_point_ff);
+            critical_point2 = dfixed_trunc(crit_point_ff);
 
             if (rdev->disp_priority == 2) {
                 critical_point2 = 0;
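Note: adding dfixed_const_half(0) (2048, i.e. 0.5 in 20.12 format) before dfixed_trunc() is the usual round-to-nearest idiom; that is all the critical-point lines above are doing. As a standalone sketch:

    /* hypothetical helper: round 20.12 fixed point to nearest integer */
    static u32 dfixed_round(fixed20_12 x)
    {
        x.full += dfixed_const_half(0); /* add 0.5 */
        return dfixed_trunc(x);         /* shift away the 12 fraction bits */
    }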
@@ -2975,7 +3283,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
     for (i = 0; i < track->num_cb; i++) {
         if (track->cb[i].robj == NULL) {
-            if (!(track->fastfill || track->color_channel_mask ||
+            if (!(track->zb_cb_clear || track->color_channel_mask ||
                   track->blend_read_enable)) {
                 continue;
             }
@@ -3399,7 +3707,7 @@ static int r100_startup(struct radeon_device *rdev)
     /* Resume clock */
     r100_clock_startup(rdev);
     /* Initialize GPU configuration (# pipes, ...) */
-    r100_gpu_init(rdev);
+//    r100_gpu_init(rdev);
     /* Initialize GART (initialize after TTM so we can allocate
      * memory through TTM but finalize after TTM) */
     r100_enable_bm(rdev);
@@ -3436,7 +3744,7 @@ int r100_resume(struct radeon_device *rdev)
     /* Resume clock before doing reset */
     r100_clock_startup(rdev);
     /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-    if (radeon_gpu_reset(rdev)) {
+    if (radeon_asic_reset(rdev)) {
         dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
             RREG32(R_000E40_RBBM_STATUS),
             RREG32(R_0007C0_CP_STAT));
@@ -3462,7 +3770,6 @@ int r100_suspend(struct radeon_device *rdev)
 
 void r100_fini(struct radeon_device *rdev)
 {
-    radeon_pm_fini(rdev);
     r100_cp_fini(rdev);
     r100_wb_fini(rdev);
     r100_ib_fini(rdev);
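Note: the radeon_gpu_reset() callers are switched to radeon_asic_reset(), the per-ASIC reset hook this change introduces. The dispatch is a plain function-pointer call; roughly (sketch, assuming the usual radeon_asic wrapper style):

    /* sketch of the asic-table dispatch; r1xx wires this to r100_asic_reset() */
    #define radeon_asic_reset(rdev) ((rdev)->asic->asic_reset((rdev)))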
@@ -3505,7 +3812,7 @@ int r100_init(struct radeon_device *rdev)
         return r;
     }
     /* Reset gpu before posting otherwise ATOM will enter infinite loop */
-    if (radeon_gpu_reset(rdev)) {
+    if (radeon_asic_reset(rdev)) {
         dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
             RREG32(R_000E40_RBBM_STATUS),
@@ -3518,8 +3825,6 @@ int r100_init(struct radeon_device *rdev)
     r100_errata(rdev);
     /* Initialize clocks */
     radeon_get_clock_info(rdev->ddev);
-    /* Initialize power management */
-    radeon_pm_init(rdev);
     /* initialize AGP */
     if (rdev->flags & RADEON_IS_AGP) {
         r = radeon_agp_init(rdev);