Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  259
1 file changed, 174 insertions, 85 deletions
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d603a3335d..f58254a3fb0 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -40,6 +40,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+				     int ring, u32 cp_int_cntl);
 
 void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 {
@@ -82,6 +84,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+	int i;
 
 	/* Lock the graphics update lock */
 	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +102,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 	       (u32)crtc_base);
 
 	/* Wait for update_pending to go high. */
-	while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+			break;
+		udelay(1);
+	}
 	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
 	/* Unlock the lock, so double-buffering can take place inside vblank */
@@ -1306,18 +1313,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
+	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
 	/* set to DX10/11 mode */
-	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
-	radeon_ring_write(rdev, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
 	/* FIXME: implement */
-	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev,
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
 			  (2 << 0) |
 #endif
 			  (ib->gpu_addr & 0xFFFFFFFC));
-	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
-	radeon_ring_write(rdev, ib->length_dw);
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw);
 }
 
 
@@ -1355,71 +1364,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 
 static int evergreen_cp_start(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 	uint32_t cp_me;
 
-	r = radeon_ring_lock(rdev, 7);
+	r = radeon_ring_lock(rdev, ring, 7);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
-	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-	radeon_ring_write(rdev, 0x1);
-	radeon_ring_write(rdev, 0x0);
-	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
-	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
 
 	cp_me = 0xff;
 	WREG32(CP_ME_CNTL, cp_me);
 
-	r = radeon_ring_lock(rdev, evergreen_default_size + 19);
+	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
 
 	/* setup clear context state */
-	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
 	for (i = 0; i < evergreen_default_size; i++)
-		radeon_ring_write(rdev, evergreen_default_state[i]);
+		radeon_ring_write(ring, evergreen_default_state[i]);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
 	/* set clear context state */
-	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
 
 	/* SQ_VTX_BASE_VTX_LOC */
-	radeon_ring_write(rdev, 0xc0026f00);
-	radeon_ring_write(rdev, 0x00000000);
-	radeon_ring_write(rdev, 0x00000000);
-	radeon_ring_write(rdev, 0x00000000);
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
 
 	/* Clear consts */
-	radeon_ring_write(rdev, 0xc0036f00);
-	radeon_ring_write(rdev, 0x00000bc4);
-	radeon_ring_write(rdev, 0xffffffff);
-	radeon_ring_write(rdev, 0xffffffff);
-	radeon_ring_write(rdev, 0xffffffff);
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
 
-	radeon_ring_write(rdev, 0xc0026900);
-	radeon_ring_write(rdev, 0x00000316);
-	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-	radeon_ring_write(rdev, 0x00000010); /*  */
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /*  */
 
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_unlock_commit(rdev, ring);
 
 	return 0;
 }
 
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -1437,13 +1448,14 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	RREG32(GRBM_SOFT_RESET);
 
 	/* Set ring buffer size */
-	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
 #endif
 	WREG32(CP_RB_CNTL, tmp);
-	WREG32(CP_SEM_WAIT_TIMER, 0x4);
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
 
 	/* Set the write pointer delay */
 	WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1451,8 +1463,8 @@
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
-	rdev->cp.wptr = 0;
-	WREG32(CP_RB_WPTR, rdev->cp.wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB_WPTR, ring->wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
@@ -1470,16 +1482,16 @@
 	mdelay(1);
 	WREG32(CP_RB_CNTL, tmp);
 
-	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
-	rdev->cp.rptr = RREG32(CP_RB_RPTR);
+	ring->rptr = RREG32(CP_RB_RPTR);
 
 	evergreen_cp_start(rdev);
-	rdev->cp.ready = true;
-	r = radeon_ring_test(rdev);
+	ring->ready = true;
+	r = radeon_ring_test(rdev, ring);
 	if (r) {
-		rdev->cp.ready = false;
+		ring->ready = false;
 		return r;
 	}
 	return 0;
@@ -2348,7 +2360,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
 	return 0;
 }
 
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	u32 srbm_status;
 	u32 grbm_status;
@@ -2361,19 +2373,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
 	if (!(grbm_status & GUI_ACTIVE)) {
-		r100_gpu_lockup_update(lockup, &rdev->cp);
+		r100_gpu_lockup_update(lockup, ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, 2);
+	r = radeon_ring_lock(rdev, ring, 2);
 	if (!r) {
 		/* PACKET2 NOP */
-		radeon_ring_write(rdev, 0x80000000);
-		radeon_ring_write(rdev, 0x80000000);
-		radeon_ring_unlock_commit(rdev);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_unlock_commit(rdev, ring);
 	}
-	rdev->cp.rptr = RREG32(CP_RB_RPTR);
-	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+	ring->rptr = RREG32(CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2465,7 +2477,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 {
 	u32 tmp;
 
-	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	if (rdev->family >= CHIP_CAYMAN) {
+		cayman_cp_int_cntl_setup(rdev, 0,
+					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+		cayman_cp_int_cntl_setup(rdev, 1, 0);
+		cayman_cp_int_cntl_setup(rdev, 2, 0);
+	} else
+		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2510,6 +2528,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 int evergreen_irq_set(struct radeon_device *rdev)
 {
 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 	u32 grbm_int_cntl = 0;
@@ -2534,11 +2553,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
 	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
-	if (rdev->irq.sw_int) {
-		DRM_DEBUG("evergreen_irq_set: sw int\n");
-		cp_int_cntl |= RB_INT_ENABLE;
-		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+	if (rdev->family >= CHIP_CAYMAN) {
+		/* enable CP interrupts on all rings */
+		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+		}
+		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+		}
+		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+		}
+	} else {
+		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+			cp_int_cntl |= RB_INT_ENABLE;
+			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+		}
 	}
+
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    rdev->irq.pflip[0]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2598,7 +2634,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
 	}
 
-	WREG32(CP_INT_CNTL, cp_int_cntl);
+	if (rdev->family >= CHIP_CAYMAN) {
+		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+	} else
+		WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3013,11 +3054,24 @@ restart_ih:
 		case 177: /* CP_INT in IB1 */
 		case 178: /* CP_INT in IB2 */
 			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
-			radeon_fence_process(rdev);
+			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 			break;
 		case 181: /* CP EOP event */
 			DRM_DEBUG("IH: CP EOP\n");
-			radeon_fence_process(rdev);
+			if (rdev->family >= CHIP_CAYMAN) {
+				switch (src_data) {
+				case 0:
+					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+					break;
+				case 1:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+					break;
+				case 2:
+					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+					break;
+				}
+			} else
+				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
@@ -3047,6 +3101,7 @@ restart_ih:
 
 static int evergreen_startup(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -3101,6 +3156,12 @@ static int evergreen_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -3110,7 +3171,9 @@
 	}
 	evergreen_irq_set(rdev);
 
-	r = radeon_ring_init(rdev, rdev->cp.ring_size);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
 	if (r)
 		return r;
 	r = evergreen_cp_load_microcode(rdev);
@@ -3120,6 +3183,23 @@
 	if (r)
 		return r;
 
+	r = radeon_ib_pool_start(rdev);
+	if (r)
+		return r;
+
+	r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+		rdev->accel_working = false;
+		return r;
+	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio init failed\n");
+		return r;
+	}
+
 	return 0;
 }
 
@@ -3139,15 +3219,11 @@ int evergreen_resume(struct radeon_device *rdev)
 	/* post card */
 	atom_asic_init(rdev->mode_info.atom_context);
 
+	rdev->accel_working = true;
 	r = evergreen_startup(rdev);
 	if (r) {
 		DRM_ERROR("evergreen startup failed on resume\n");
-		return r;
-	}
-
-	r = r600_ib_test(rdev);
-	if (r) {
-		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+		rdev->accel_working = false;
 		return r;
 	}
 
@@ -3157,13 +3233,17 @@
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
+	radeon_ib_pool_suspend(rdev);
+	r600_blit_suspend(rdev);
 	r700_cp_stop(rdev);
-	rdev->cp.ready = false;
+	ring->ready = false;
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	evergreen_pcie_gart_disable(rdev);
-	r600_blit_suspend(rdev);
 
 	return 0;
 }
@@ -3238,8 +3318,8 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, 1024 * 1024);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -3248,43 +3328,52 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		rdev->accel_working = false;
+	}
+
 	r = evergreen_startup(rdev);
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r700_cp_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
+		r100_ib_fini(rdev);
 		radeon_irq_kms_fini(rdev);
 		evergreen_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
-	if (rdev->accel_working) {
-		r = radeon_ib_pool_init(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-			rdev->accel_working = false;
-		}
-		r = r600_ib_test(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-			rdev->accel_working = false;
+
+	/* Don't start up if the MC ucode is missing on BTC parts.
+	 * The default clocks and voltages before the MC ucode
+	 * is loaded are not suffient for advanced operations.
+	 */
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+			DRM_ERROR("radeon: MC ucode required for NI+.\n");
+			return -EINVAL;
 		}
 	}
+
	return 0;
 }
 
 void evergreen_fini(struct radeon_device *rdev)
 {
+	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
 	r700_cp_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
-	radeon_ib_pool_fini(rdev);
+	r100_ib_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	evergreen_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
+	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
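
One behavior-visible fix worth calling out, apart from the mechanical rdev->cp to radeon_ring conversion, is in evergreen_page_flip(): the unbounded busy-wait on EVERGREEN_GRPH_SURFACE_UPDATE_PENDING becomes a poll capped at rdev->usec_timeout iterations, so a wedged GPU can no longer hang the CPU. Below is a minimal, self-contained userspace sketch of that bounded-poll pattern. The names read_grph_update(), delay_1us() and UPDATE_PENDING are hypothetical stubs standing in for RREG32() and udelay(); they are not kernel APIs.

#include <stdbool.h>
#include <stdint.h>

#define UPDATE_PENDING (1u << 2)	/* stand-in for the real status bit */

static uint32_t read_grph_update(void)
{
	static int polls;
	/* pretend the hardware raises the bit on the fifth poll */
	return (++polls >= 5) ? UPDATE_PENDING : 0;
}

static void delay_1us(void)
{
	/* stand-in for udelay(1); keeps the poll from hammering the bus */
}

/* Poll for the pending bit, but give up after usec_timeout iterations. */
static bool wait_update_pending(int usec_timeout)
{
	int i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_grph_update() & UPDATE_PENDING)
			return true;	/* bit went high within the budget */
		delay_1us();
	}
	return false;	/* timed out; the caller decides how to recover */
}

int main(void)
{
	/* 100000 mirrors a typical rdev->usec_timeout budget */
	return wait_update_pending(100000) ? 0 : 1;
}

The key design point the patch adopts is that the timeout path simply falls through: the flip continues after logging nothing, trading a possible visual glitch for guaranteed forward progress.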