Diffstat (limited to 'drivers/gpu/drm/radeon')
31 files changed, 741 insertions, 207 deletions
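The bulk of this series reworks the soft-reset paths shared by r600, evergreen, and cayman: the monolithic per-asic soft reset becomes a dispatcher that takes a mask of RADEON_RESET_GFX, RADEON_RESET_COMPUTE, and RADEON_RESET_DMA flags, clears the bits for blocks that are already idle, and only then stops the memory controller and resets whatever remains. The sketch below is a minimal compilable illustration of that shape, not the driver code itself; the helpers (gfx_idle, mc_stop, reset_gfx_block, and so on) are hypothetical stand-ins for the real MMIO status checks and MC quiesce calls.

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t u32;

#define RADEON_RESET_GFX     (1 << 0)
#define RADEON_RESET_COMPUTE (1 << 1)
#define RADEON_RESET_DMA     (1 << 2)

struct radeon_device { bool gfx_busy; bool dma_busy; };

/* Stubs standing in for the real register reads and MC quiesce calls. */
static bool gfx_idle(struct radeon_device *rdev) { return !rdev->gfx_busy; }
static bool dma_idle(struct radeon_device *rdev) { return !rdev->dma_busy; }
static void mc_stop(struct radeon_device *rdev) { (void)rdev; }
static void mc_resume(struct radeon_device *rdev) { (void)rdev; }
static void reset_gfx_block(struct radeon_device *rdev) { (void)rdev; }
static void reset_dma_block(struct radeon_device *rdev) { (void)rdev; }

int gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
    /* Skip blocks that are already idle. */
    if (gfx_idle(rdev))
        reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
    if (dma_idle(rdev))
        reset_mask &= ~RADEON_RESET_DMA;
    if (reset_mask == 0)
        return 0;

    /* Quiesce the memory controller once, then reset only what hung. */
    mc_stop(rdev);
    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
        reset_gfx_block(rdev);
    if (reset_mask & RADEON_RESET_DMA)
        reset_dma_block(rdev);
    mc_resume(rdev);
    return 0;
}

The payoff shows up in the hunks that follow: evergreen_asic_reset(), cayman_asic_reset(), and r600_asic_reset() now all pass the full three-flag mask, while the idle checks let a hung DMA engine be reset without disturbing an otherwise healthy GFX block.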
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f95d7fc1f5e..a2d478e8692 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { radeon_wait_for_vblank(rdev, i); tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } } else { tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { radeon_wait_for_vblank(rdev, i); tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } } /* wait for the next frame */ @@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav blackout &= ~BLACKOUT_MODE_MASK; WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); } + /* wait for the MC to settle */ + udelay(100); } void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) @@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s if (ASIC_IS_DCE6(rdev)) { tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } else { tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } /* wait for the next frame */ frame_count = radeon_get_vblank_counter(rdev, i); @@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(HDP_ADDR_CONFIG, gb_addr_config); WREG32(DMA_TILING_CONFIG, gb_addr_config); - tmp = gb_addr_config & NUM_PIPES_MASK; - tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, - EVERGREEN_MAX_BACKENDS, disabled_rb_mask); + if ((rdev->config.evergreen.max_backends == 1) && + (rdev->flags & RADEON_IS_IGP)) { + if ((disabled_rb_mask & 3) == 1) { + /* RB0 disabled, RB1 enabled */ + tmp = 0x11111111; + } else { + /* RB1 disabled, RB0 enabled */ + tmp = 0x00000000; + } + } else { + tmp = gb_addr_config & NUM_PIPES_MASK; + tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, + EVERGREEN_MAX_BACKENDS, disabled_rb_mask); + } WREG32(GB_BACKEND_MAP, tmp); WREG32(CGTS_SYS_TCC_DISABLE, 0); @@ -2306,22 +2327,20 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin return radeon_ring_test_lockup(rdev, ring); } -static int evergreen_gpu_soft_reset(struct radeon_device *rdev) +static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev) { - struct evergreen_mc_save save; u32 grbm_reset = 0; if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) - return 0; + return; - dev_info(rdev->dev, "GPU softreset \n"); - dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n", RREG32(GRBM_STATUS)); - dev_info(rdev->dev, " 
GRBM_STATUS_SE0=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n", RREG32(GRBM_STATUS_SE0)); - dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n", RREG32(GRBM_STATUS_SE1)); - dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", + dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n", RREG32(SRBM_STATUS)); dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", RREG32(CP_STALLED_STAT1)); @@ -2331,10 +2350,7 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) RREG32(CP_BUSY_STAT)); dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", RREG32(CP_STAT)); - evergreen_mc_stop(rdev, &save); - if (evergreen_mc_wait_for_idle(rdev)) { - dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); - } + /* Disable CP parsing/prefetching */ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); @@ -2358,15 +2374,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) udelay(50); WREG32(GRBM_SOFT_RESET, 0); (void)RREG32(GRBM_SOFT_RESET); - /* Wait a little for things to settle down */ - udelay(50); - dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", + + dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n", RREG32(GRBM_STATUS)); - dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n", RREG32(GRBM_STATUS_SE0)); - dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n", RREG32(GRBM_STATUS_SE1)); - dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", + dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n", RREG32(SRBM_STATUS)); dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", RREG32(CP_STALLED_STAT1)); @@ -2376,13 +2391,71 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) RREG32(CP_BUSY_STAT)); dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", RREG32(CP_STAT)); +} + +static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev) +{ + u32 tmp; + + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) + return; + + dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", + RREG32(DMA_STATUS_REG)); + + /* Disable DMA */ + tmp = RREG32(DMA_RB_CNTL); + tmp &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL, tmp); + + /* Reset dma */ + WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); + RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + + dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", + RREG32(DMA_STATUS_REG)); +} + +static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) +{ + struct evergreen_mc_save save; + + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) + reset_mask &= ~RADEON_RESET_DMA; + + if (reset_mask == 0) + return 0; + + dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); + + evergreen_mc_stop(rdev, &save); + if (evergreen_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + } + + if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) + evergreen_gpu_soft_reset_gfx(rdev); + + if (reset_mask & RADEON_RESET_DMA) + evergreen_gpu_soft_reset_dma(rdev); + + /* Wait a little for things to settle down */ + udelay(50); + evergreen_mc_resume(rdev, &save); return 0; } int evergreen_asic_reset(struct radeon_device *rdev) { - return evergreen_gpu_soft_reset(rdev); + return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX | + RADEON_RESET_COMPUTE | + RADEON_RESET_DMA)); } /* Interrupts */ @@ -3215,7 +3288,7 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, 
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); /* flush HDP */ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); - radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL); + radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); radeon_ring_write(ring, 1); } diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 7a445666e71..ee4cff534f1 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } if (tiled) { - dst_offset = ib[idx+1]; + dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); p->idx += count + 7; } else { - dst_offset = ib[idx+1]; - dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; + dst_offset = radeon_get_ib_value(p, idx+1); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; @@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); return -EINVAL; } - dst_offset = ib[idx+1]; + dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; - dst2_offset = ib[idx+2]; + dst2_offset = radeon_get_ib_value(p, idx+2); dst2_offset <<= 8; - src_offset = ib[idx+8]; - src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; + src_offset = radeon_get_ib_value(p, idx+8); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); @@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } - dst_offset = ib[idx+1]; + dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; - dst2_offset = ib[idx+2]; + dst2_offset = radeon_get_ib_value(p, idx+2); dst2_offset <<= 8; - src_offset = ib[idx+8]; - src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; + src_offset = radeon_get_ib_value(p, idx+8); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); @@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) /* detile bit */ if (idx_value & (1 << 31)) { /* tiled src, linear dst */ - src_offset = ib[idx+1]; + src_offset = radeon_get_ib_value(p, idx+1); src_offset <<= 8; ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); - dst_offset = ib[idx+7]; - dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; + dst_offset = radeon_get_ib_value(p, idx+7); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; } else { /* linear src, tiled dst */ - src_offset = ib[idx+7]; - src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; + src_offset = radeon_get_ib_value(p, idx+7); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 
0xff; - dst_offset = ib[idx+1]; + dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); } @@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } - dst_offset = ib[idx+1]; + dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; - dst2_offset = ib[idx+2]; + dst2_offset = radeon_get_ib_value(p, idx+2); dst2_offset <<= 8; - src_offset = ib[idx+8]; - src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32; + src_offset = radeon_get_ib_value(p, idx+8); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); @@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) /* detile bit */ if (idx_value & (1 << 31)) { /* tiled src, linear dst */ - src_offset = ib[idx+1]; + src_offset = radeon_get_ib_value(p, idx+1); src_offset <<= 8; ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); - dst_offset = ib[idx+7]; - dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; + dst_offset = radeon_get_ib_value(p, idx+7); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; } else { /* linear src, tiled dst */ - src_offset = ib[idx+7]; - src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32; + src_offset = radeon_get_ib_value(p, idx+7); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32; ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; - dst_offset = ib[idx+1]; + dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); } @@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) switch (misc) { case 0: /* L2L, byte */ - src_offset = ib[idx+2]; - src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; - dst_offset = ib[idx+1]; - dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; + src_offset = radeon_get_ib_value(p, idx+2); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; + dst_offset = radeon_get_ib_value(p, idx+1); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", src_offset + count, radeon_bo_size(src_reloc->robj)); @@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } - dst_offset = ib[idx+1]; - dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; - dst2_offset = ib[idx+2]; - dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32; - src_offset = ib[idx+3]; - src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; + dst_offset = radeon_get_ib_value(p, idx+1); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; + dst2_offset = radeon_get_ib_value(p, idx+2); + dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32; + src_offset = radeon_get_ib_value(p, idx+3); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", src_offset + (count * 
4), radeon_bo_size(src_reloc->robj)); @@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) } } else { /* L2L, dw */ - src_offset = ib[idx+2]; - src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; - dst_offset = ib[idx+1]; - dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; + src_offset = radeon_get_ib_value(p, idx+2); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; + dst_offset = radeon_get_ib_value(p, idx+1); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); @@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); return -EINVAL; } - dst_offset = ib[idx+1]; - dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; + dst_offset = radeon_get_ib_value(p, idx+1); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", dst_offset, radeon_bo_size(dst_reloc->robj)); diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index cb9baaac9e8..0bfd0e9e469 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -742,8 +742,9 @@ #define SOFT_RESET_ROM (1 << 14) #define SOFT_RESET_SEM (1 << 15) #define SOFT_RESET_VMC (1 << 17) +#define SOFT_RESET_DMA (1 << 20) #define SOFT_RESET_TST (1 << 21) -#define SOFT_RESET_REGBB (1 << 22) +#define SOFT_RESET_REGBB (1 << 22) #define SOFT_RESET_ORB (1 << 23) /* display watermarks */ @@ -2027,4 +2028,15 @@ /* cayman packet3 addition */ #define CAYMAN_PACKET3_DEALLOC_STATE 0x14 +/* DMA regs common on r6xx/r7xx/evergreen/ni */ +#define DMA_RB_CNTL 0xd000 +# define DMA_RB_ENABLE (1 << 0) +# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ +# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ +# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) +# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ +# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ +#define DMA_STATUS_REG 0xd034 +# define DMA_IDLE (1 << 0) + #endif diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 7bdbcb00aaf..835992d8d06 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1216,7 +1216,7 @@ void cayman_dma_stop(struct radeon_device *rdev) int cayman_dma_resume(struct radeon_device *rdev) { struct radeon_ring *ring; - u32 rb_cntl, dma_cntl; + u32 rb_cntl, dma_cntl, ib_cntl; u32 rb_bufsz; u32 reg_offset, wb_offset; int i, r; @@ -1265,7 +1265,11 @@ int cayman_dma_resume(struct radeon_device *rdev) WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8); /* enable DMA IBs */ - WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE); + ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE; +#ifdef __BIG_ENDIAN + ib_cntl |= DMA_IB_SWAP_ENABLE; +#endif + WREG32(DMA_IB_CNTL + reg_offset, ib_cntl); dma_cntl = RREG32(DMA_CNTL + reg_offset); dma_cntl &= ~CTXEMPTY_INT_ENABLE; @@ -1306,22 +1310,20 @@ void cayman_dma_fini(struct radeon_device *rdev) radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]); } -static int cayman_gpu_soft_reset(struct radeon_device *rdev) +static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev) { - struct evergreen_mc_save save; u32 grbm_reset = 0; if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) - 
return 0; + return; - dev_info(rdev->dev, "GPU softreset \n"); - dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n", RREG32(GRBM_STATUS)); - dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n", RREG32(GRBM_STATUS_SE0)); - dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n", RREG32(GRBM_STATUS_SE1)); - dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", + dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n", RREG32(SRBM_STATUS)); dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", RREG32(CP_STALLED_STAT1)); @@ -1331,19 +1333,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev) RREG32(CP_BUSY_STAT)); dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", RREG32(CP_STAT)); - dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(0x14F8)); - dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(0x14D8)); - dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", - RREG32(0x14FC)); - dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", - RREG32(0x14DC)); - evergreen_mc_stop(rdev, &save); - if (evergreen_mc_wait_for_idle(rdev)) { - dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); - } /* Disable CP parsing/prefetching */ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); @@ -1368,16 +1358,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev) udelay(50); WREG32(GRBM_SOFT_RESET, 0); (void)RREG32(GRBM_SOFT_RESET); - /* Wait a little for things to settle down */ - udelay(50); - dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n", RREG32(GRBM_STATUS)); - dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n", RREG32(GRBM_STATUS_SE0)); - dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", + dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n", RREG32(GRBM_STATUS_SE1)); - dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", + dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n", RREG32(SRBM_STATUS)); dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", RREG32(CP_STALLED_STAT1)); @@ -1387,13 +1375,87 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev) RREG32(CP_BUSY_STAT)); dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", RREG32(CP_STAT)); + +} + +static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev) +{ + u32 tmp; + + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) + return; + + dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", + RREG32(DMA_STATUS_REG)); + + /* dma0 */ + tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); + tmp &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); + + /* dma1 */ + tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); + tmp &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); + + /* Reset dma */ + WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); + RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + + dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", + RREG32(DMA_STATUS_REG)); + +} + +static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) +{ + struct evergreen_mc_save save; + + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) + reset_mask &= ~RADEON_RESET_DMA; + + if (reset_mask == 0) + return 0; + + dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); + + 
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n", + RREG32(0x14F8)); + dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n", + RREG32(0x14D8)); + dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", + RREG32(0x14FC)); + dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", + RREG32(0x14DC)); + + evergreen_mc_stop(rdev, &save); + if (evergreen_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + } + + if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) + cayman_gpu_soft_reset_gfx(rdev); + + if (reset_mask & RADEON_RESET_DMA) + cayman_gpu_soft_reset_dma(rdev); + + /* Wait a little for things to settle down */ + udelay(50); + evergreen_mc_resume(rdev, &save); return 0; } int cayman_asic_reset(struct radeon_device *rdev) { - return cayman_gpu_soft_reset(rdev); + return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX | + RADEON_RESET_COMPUTE | + RADEON_RESET_DMA)); } /** diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index b93186b8ee4..48e5022ee92 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -65,7 +65,7 @@ #define SOFT_RESET_VMC (1 << 17) #define SOFT_RESET_DMA (1 << 20) #define SOFT_RESET_TST (1 << 21) -#define SOFT_RESET_REGBB (1 << 22) +#define SOFT_RESET_REGBB (1 << 22) #define SOFT_RESET_ORB (1 << 23) #define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 @@ -675,4 +675,3 @@ #define DMA_PACKET_NOP 0xf #endif - diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 2aaf147969b..becb03e8b32 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1258,9 +1258,8 @@ void r600_vram_scratch_fini(struct radeon_device *rdev) * reset, it's up to the caller to determine if the GPU needs one. We * might add an helper function to check that. 
*/ -static int r600_gpu_soft_reset(struct radeon_device *rdev) +static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev) { - struct rv515_mc_save save; u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | @@ -1280,14 +1279,13 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev) u32 tmp; if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) - return 0; + return; - dev_info(rdev->dev, "GPU softreset \n"); - dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", + dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n", RREG32(R_008010_GRBM_STATUS)); - dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", + dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n", RREG32(R_008014_GRBM_STATUS2)); - dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", + dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n", RREG32(R_000E50_SRBM_STATUS)); dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", RREG32(CP_STALLED_STAT1)); @@ -1297,12 +1295,10 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev) RREG32(CP_BUSY_STAT)); dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", RREG32(CP_STAT)); - rv515_mc_stop(rdev, &save); - if (r600_mc_wait_for_idle(rdev)) { - dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); - } + /* Disable CP parsing/prefetching */ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); + /* Check if any of the rendering block is busy and reset it */ if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { @@ -1332,13 +1328,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev) RREG32(R_008020_GRBM_SOFT_RESET); mdelay(15); WREG32(R_008020_GRBM_SOFT_RESET, 0); - /* Wait a little for things to settle down */ - mdelay(1); - dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", + + dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n", RREG32(R_008010_GRBM_STATUS)); - dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", + dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n", RREG32(R_008014_GRBM_STATUS2)); - dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", + dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n", RREG32(R_000E50_SRBM_STATUS)); dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", RREG32(CP_STALLED_STAT1)); @@ -1348,6 +1343,66 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev) RREG32(CP_BUSY_STAT)); dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", RREG32(CP_STAT)); + +} + +static void r600_gpu_soft_reset_dma(struct radeon_device *rdev) +{ + u32 tmp; + + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) + return; + + dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", + RREG32(DMA_STATUS_REG)); + + /* Disable DMA */ + tmp = RREG32(DMA_RB_CNTL); + tmp &= ~DMA_RB_ENABLE; + WREG32(DMA_RB_CNTL, tmp); + + /* Reset dma */ + if (rdev->family >= CHIP_RV770) + WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); + else + WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); + RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + + dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", + RREG32(DMA_STATUS_REG)); +} + +static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) +{ + struct rv515_mc_save save; + + if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) + reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); + + if (RREG32(DMA_STATUS_REG) & DMA_IDLE) + reset_mask &= ~RADEON_RESET_DMA; + + if (reset_mask == 0) + return 0; + + dev_info(rdev->dev, "GPU 
softreset: 0x%08X\n", reset_mask); + + rv515_mc_stop(rdev, &save); + if (r600_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + } + + if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) + r600_gpu_soft_reset_gfx(rdev); + + if (reset_mask & RADEON_RESET_DMA) + r600_gpu_soft_reset_dma(rdev); + + /* Wait a little for things to settle down */ + mdelay(1); + rv515_mc_resume(rdev, &save); return 0; } @@ -1395,7 +1450,9 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) int r600_asic_reset(struct radeon_device *rdev) { - return r600_gpu_soft_reset(rdev); + return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX | + RADEON_RESET_COMPUTE | + RADEON_RESET_DMA)); } u32 r6xx_remap_render_backend(struct radeon_device *rdev, @@ -1405,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev, u32 disabled_rb_mask) { u32 rendering_pipe_num, rb_num_width, req_rb_num; - u32 pipe_rb_ratio, pipe_rb_remain; + u32 pipe_rb_ratio, pipe_rb_remain, tmp; u32 data = 0, mask = 1 << (max_rb_num - 1); unsigned i, j; /* mask out the RBs that don't exist on that asic */ - disabled_rb_mask |= (0xff << max_rb_num) & 0xff; + tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); + /* make sure at least one RB is available */ + if ((tmp & 0xff) != 0xff) + disabled_rb_mask = tmp; rendering_pipe_num = 1 << tiling_pipe_num; req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); @@ -2256,7 +2316,7 @@ void r600_dma_stop(struct radeon_device *rdev) int r600_dma_resume(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; - u32 rb_cntl, dma_cntl; + u32 rb_cntl, dma_cntl, ib_cntl; u32 rb_bufsz; int r; @@ -2296,7 +2356,11 @@ int r600_dma_resume(struct radeon_device *rdev) WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); /* enable DMA IBs */ - WREG32(DMA_IB_CNTL, DMA_IB_ENABLE); + ib_cntl = DMA_IB_ENABLE; +#ifdef __BIG_ENDIAN + ib_cntl |= DMA_IB_SWAP_ENABLE; +#endif + WREG32(DMA_IB_CNTL, ib_cntl); dma_cntl = RREG32(DMA_CNTL); dma_cntl &= ~CTXEMPTY_INT_ENABLE; @@ -2595,7 +2659,7 @@ int r600_copy_blit(struct radeon_device *rdev, * @num_gpu_pages: number of GPU pages to xfer * @fence: radeon fence object * - * Copy GPU paging using the DMA engine (r6xx-r7xx). + * Copy GPU paging using the DMA engine (r6xx). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
*/ @@ -2618,8 +2682,8 @@ int r600_copy_dma(struct radeon_device *rdev, } size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; - num_loops = DIV_ROUND_UP(size_in_dw, 0xffff); - r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8); + num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); + r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); @@ -2636,14 +2700,14 @@ int r600_copy_dma(struct radeon_device *rdev, for (i = 0; i < num_loops; i++) { cur_size_in_dw = size_in_dw; - if (cur_size_in_dw > 0xFFFF) - cur_size_in_dw = 0xFFFF; + if (cur_size_in_dw > 0xFFFE) + cur_size_in_dw = 0xFFFE; size_in_dw -= cur_size_in_dw; radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); radeon_ring_write(ring, dst_offset & 0xfffffffc); radeon_ring_write(ring, src_offset & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); - radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); + radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) | + (upper_32_bits(src_offset) & 0xff))); src_offset += cur_size_in_dw * 4; dst_offset += cur_size_in_dw * 4; } diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 9ea13d07cc5..9b2512bf1a4 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -2476,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error) kfree(parser->relocs); for (i = 0; i < parser->nchunks; i++) { kfree(parser->chunks[i].kdata); - kfree(parser->chunks[i].kpage[0]); - kfree(parser->chunks[i].kpage[1]); + if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) { + kfree(parser->chunks[i].kpage[0]); + kfree(parser->chunks[i].kpage[1]); + } } kfree(parser->chunks); kfree(parser->chunks_array); @@ -2561,16 +2563,16 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, struct radeon_cs_chunk *relocs_chunk; unsigned idx; + *cs_reloc = NULL; if (p->chunk_relocs_idx == -1) { DRM_ERROR("No relocation chunk !\n"); return -EINVAL; } - *cs_reloc = NULL; relocs_chunk = &p->chunks[p->chunk_relocs_idx]; idx = p->dma_reloc_idx; - if (idx >= relocs_chunk->length_dw) { + if (idx >= p->nrelocs) { DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", - idx, relocs_chunk->length_dw); + idx, p->nrelocs); return -EINVAL; } *cs_reloc = p->relocs_ptr[idx]; @@ -2621,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } if (tiled) { - dst_offset = ib[idx+1]; + dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); p->idx += count + 5; } else { - dst_offset = ib[idx+1]; - dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32; + dst_offset = radeon_get_ib_value(p, idx+1); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; @@ -2656,37 +2658,50 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) /* detile bit */ if (idx_value & (1 << 31)) { /* tiled src, linear dst */ - src_offset = ib[idx+1]; + src_offset = radeon_get_ib_value(p, idx+1); src_offset <<= 8; ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8); - dst_offset = ib[idx+5]; - dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; + dst_offset = radeon_get_ib_value(p, idx+5); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); 
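A note on the pattern repeated through the evergreen_cs.c and r600_cs.c hunks above and below: every place the DMA parsers used to read packet dwords straight out of ib[idx+n] now goes through radeon_get_ib_value(p, idx+n), while relocation patching still writes directly to ib[]. On AGP systems the IB contents are copied in from user space page by page on demand as parsing advances, so a direct read can run ahead of what has been copied and feed garbage offsets into the bounds checks. A self-contained illustration of that demand-paged read discipline follows; paged_ib, get_value, and PAGE_DWORDS are invented for the example and are not the driver's names, and the real accessor keeps two bounce pages rather than one.

#include <stdint.h>
#include <string.h>

#define PAGE_DWORDS 1024

/* Illustrative only: a demand-paged view of a user buffer. Reads must go
 * through get_value(), which copies the containing page into a bounce
 * buffer first; reading the backing store directly could see data that
 * has not been copied yet. Assumes the user buffer is a whole number of
 * pages long.
 */
struct paged_ib {
    const uint32_t *user;              /* source (user-space) buffer */
    uint32_t bounce[PAGE_DWORDS];      /* kernel-side bounce page */
    int cached_page;                   /* -1 = nothing cached yet */
};

static uint32_t get_value(struct paged_ib *ib, unsigned idx)
{
    int page = idx / PAGE_DWORDS;

    if (ib->cached_page != page) {
        /* Fault the page in before handing back the dword. */
        memcpy(ib->bounce, ib->user + (size_t)page * PAGE_DWORDS,
               sizeof(ib->bounce));
        ib->cached_page = page;
    }
    return ib->bounce[idx % PAGE_DWORDS];
}

Note the asymmetry the diff preserves: reads use the accessor, but the ib[idx+n] += reloc fixups keep writing to the mapped IB, since those patches target the copy the GPU will actually execute.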
ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; } else { /* linear src, tiled dst */ - src_offset = ib[idx+5]; - src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32; + src_offset = radeon_get_ib_value(p, idx+5); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; - dst_offset = ib[idx+1]; + dst_offset = radeon_get_ib_value(p, idx+1); dst_offset <<= 8; ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8); } p->idx += 7; } else { - src_offset = ib[idx+2]; - src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32; - dst_offset = ib[idx+1]; - dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32; - - ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); - ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; - ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; - p->idx += 5; + if (p->family >= CHIP_RV770) { + src_offset = radeon_get_ib_value(p, idx+2); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; + dst_offset = radeon_get_ib_value(p, idx+1); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; + + ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); + ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); + ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff; + ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + p->idx += 5; + } else { + src_offset = radeon_get_ib_value(p, idx+2); + src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; + dst_offset = radeon_get_ib_value(p, idx+1); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16; + + ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc); + ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc); + ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff; + ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16; + p->idx += 4; + } } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n", @@ -2709,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p) DRM_ERROR("bad DMA_PACKET_WRITE\n"); return -EINVAL; } - dst_offset = ib[idx+1]; - dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16; + dst_offset = radeon_get_ib_value(p, idx+1); + dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9b9422c4403..a08f657329a 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -132,6 +132,11 @@ extern int radeon_lockup_timeout; #define RADEON_VA_RESERVED_SIZE (8 << 20) #define RADEON_IB_VM_MAX_SIZE (64 << 10) +/* reset flags */ +#define RADEON_RESET_GFX (1 << 0) +#define RADEON_RESET_COMPUTE (1 << 1) +#define RADEON_RESET_DMA (1 << 2) + /* * Errata workarounds. 
*/ @@ -319,7 +324,6 @@ struct radeon_bo { struct list_head list; /* Protected by tbo.reserved */ u32 placements[3]; - u32 busy_placements[3]; struct ttm_placement placement; struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; @@ -649,6 +653,8 @@ struct radeon_ring { u32 ptr_reg_mask; u32 nop; u32 idx; + u64 last_semaphore_signal_addr; + u64 last_semaphore_wait_addr; }; /* diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 596bcbe80ed..0b202c07fe5 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1140,9 +1140,9 @@ static struct radeon_asic rv770_asic = { .copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, - .dma = &r600_copy_dma, + .dma = &rv770_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, - .copy = &r600_copy_dma, + .copy = &rv770_copy_dma, .copy_ring_index = R600_RING_TYPE_DMA_INDEX, }, .surface = { @@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = { .vm = { .init = &cayman_vm_init, .fini = &cayman_vm_fini, - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, .set_page = &cayman_vm_set_page, }, .ring = { @@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = { .vm = { .init = &cayman_vm_init, .fini = &cayman_vm_fini, - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, .set_page = &cayman_vm_set_page, }, .ring = { @@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = { .vm = { .init = &si_vm_init, .fini = &si_vm_fini, - .pt_ring_index = R600_RING_TYPE_DMA_INDEX, + .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, .set_page = &si_vm_set_page, }, .ring = { diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 5f4882cc215..15d70e61307 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -403,6 +403,10 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); void r700_cp_stop(struct radeon_device *rdev); void r700_cp_fini(struct radeon_device *rdev); +int rv770_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct radeon_fence **fence); /* * evergreen diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 4af89126e22..3e403bdda58 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -1548,6 +1548,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) of_machine_is_compatible("PowerBook6,7")) { /* ibook */ rdev->mode_info.connector_table = CT_IBOOK; + } else if (of_machine_is_compatible("PowerMac3,5")) { + /* PowerMac G4 Silver radeon 7500 */ + rdev->mode_info.connector_table = CT_MAC_G4_SILVER; } else if (of_machine_is_compatible("PowerMac4,4")) { /* emac */ rdev->mode_info.connector_table = CT_EMAC; @@ -2212,6 +2215,54 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) CONNECTOR_OBJECT_ID_SVIDEO, &hpd); break; + case CT_MAC_G4_SILVER: + DRM_INFO("Connector Table: %d (mac g4 silver)\n", + rdev->mode_info.connector_table); + /* DVI-I - tv dac, int tmds */ + ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); + hpd.hpd = RADEON_HPD_1; /* ??? 
*/ + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_DFP1_SUPPORT, + 0), + ATOM_DEVICE_DFP1_SUPPORT); + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_CRT2_SUPPORT, + 2), + ATOM_DEVICE_CRT2_SUPPORT); + radeon_add_legacy_connector(dev, 0, + ATOM_DEVICE_DFP1_SUPPORT | + ATOM_DEVICE_CRT2_SUPPORT, + DRM_MODE_CONNECTOR_DVII, &ddc_i2c, + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, + &hpd); + /* VGA - primary dac */ + ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); + hpd.hpd = RADEON_HPD_NONE; + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_CRT1_SUPPORT, + 1), + ATOM_DEVICE_CRT1_SUPPORT); + radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, + DRM_MODE_CONNECTOR_VGA, &ddc_i2c, + CONNECTOR_OBJECT_ID_VGA, + &hpd); + /* TV - TV DAC */ + ddc_i2c.valid = false; + hpd.hpd = RADEON_HPD_NONE; + radeon_add_legacy_encoder(dev, + radeon_get_encoder_enum(dev, + ATOM_DEVICE_TV1_SUPPORT, + 2), + ATOM_DEVICE_TV1_SUPPORT); + radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, + DRM_MODE_CONNECTOR_SVIDEO, + &ddc_i2c, + CONNECTOR_OBJECT_ID_SVIDEO, + &hpd); + break; default: DRM_INFO("Connector table: %d (invalid)\n", rdev->mode_info.connector_table); @@ -2419,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) 1), ATOM_DEVICE_CRT1_SUPPORT); } + /* RV100 board with external TDMS bit mis-set. + * Actually uses internal TMDS, clear the bit. + */ + if (dev->pdev->device == 0x5159 && + dev->pdev->subsystem_vendor == 0x1014 && + dev->pdev->subsystem_device == 0x029A) { + tmp &= ~(1 << 4); + } if ((tmp >> 4) & 0x1) { devices |= ATOM_DEVICE_DFP2_SUPPORT; radeon_add_legacy_encoder(dev, diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 47bf162ab9c..2399f25ec03 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -741,7 +741,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force) ret = connector_status_disconnected; if (radeon_connector->ddc_bus) - dret = radeon_ddc_probe(radeon_connector); + dret = radeon_ddc_probe(radeon_connector, false); if (dret) { radeon_connector->detected_by_load = false; if (radeon_connector->edid) { @@ -947,7 +947,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) return connector->status; if (radeon_connector->ddc_bus) - dret = radeon_ddc_probe(radeon_connector); + dret = radeon_ddc_probe(radeon_connector, false); if (dret) { radeon_connector->detected_by_load = false; if (radeon_connector->edid) { @@ -1401,7 +1401,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (encoder) { /* setup ddc on the bridge */ radeon_atom_ext_encoder_setup_ddc(encoder); - if (radeon_ddc_probe(radeon_connector)) /* try DDC */ + /* bridge chips are always aux */ + if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */ ret = connector_status_connected; else if (radeon_connector->dac_load_detect) { /* try load detection */ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; @@ -1419,7 +1420,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; } else { - if (radeon_ddc_probe(radeon_connector)) + /* try non-aux ddc (DP to DVI/HMDI/etc. 
adapter) */ + if (radeon_ddc_probe(radeon_connector, false)) ret = connector_status_connected; } } diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 396baba0141..5407459e56d 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -279,13 +279,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) p->chunks[p->chunk_ib_idx].length_dw); return -EINVAL; } - if ((p->rdev->flags & RADEON_IS_AGP)) { + if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) { p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL || p->chunks[p->chunk_ib_idx].kpage[1] == NULL) { - kfree(p->chunks[i].kpage[0]); - kfree(p->chunks[i].kpage[1]); + kfree(p->chunks[p->chunk_ib_idx].kpage[0]); + kfree(p->chunks[p->chunk_ib_idx].kpage[1]); + p->chunks[p->chunk_ib_idx].kpage[0] = NULL; + p->chunks[p->chunk_ib_idx].kpage[1] = NULL; return -ENOMEM; } } @@ -583,7 +585,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx) struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; int i; int size = PAGE_SIZE; - bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true; + bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ? + false : true; for (i = ibc->last_copied_page + 1; i < pg_idx; i++) { if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)), diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index ad6df625e8b..0d67674b64b 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -241,7 +241,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, y = 0; } - if (ASIC_IS_AVIVO(rdev)) { + /* fixed on DCE6 and newer */ + if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) { int i = 0; struct drm_crtc *crtc_p; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index cd756262924..0d6562bb0c9 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -429,7 +429,8 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; - if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) + if (efi_enabled(EFI_BOOT) && + rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) return false; /* first check CRTCs */ @@ -897,6 +898,25 @@ static void radeon_check_arguments(struct radeon_device *rdev) } /** + * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is + * needed for waking up. 
+ * + * @pdev: pci dev pointer + */ +static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev) +{ + + /* 6600m in a macbook pro */ + if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && + pdev->subsystem_device == 0x00e2) { + printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n"); + return true; + } + + return false; +} + +/** * radeon_switcheroo_set_state - set switcheroo state * * @pdev: pci dev pointer @@ -910,10 +930,19 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero struct drm_device *dev = pci_get_drvdata(pdev); pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { + unsigned d3_delay = dev->pdev->d3_delay; + printk(KERN_INFO "radeon: switched on\n"); /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + + if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev)) + dev->pdev->d3_delay = 20; + radeon_resume_kms(dev); + + dev->pdev->d3_delay = d3_delay; + dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); } else { diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 310c0e5254b..05c96fa0b05 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -699,10 +699,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); - if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || - (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || - (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != - ENCODER_OBJECT_ID_NONE)) { + if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != + ENCODER_OBJECT_ID_NONE) { + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; + + if (dig->dp_i2c_bus) + radeon_connector->edid = drm_get_edid(&radeon_connector->base, + &dig->dp_i2c_bus->adapter); + } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || + (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || @@ -1110,14 +1115,16 @@ radeon_user_framebuffer_create(struct drm_device *dev, } radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); - if (radeon_fb == NULL) + if (radeon_fb == NULL) { + drm_gem_object_unreference_unlocked(obj); return ERR_PTR(-ENOMEM); + } ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); if (ret) { kfree(radeon_fb); drm_gem_object_unreference_unlocked(obj); - return NULL; + return ERR_PTR(ret); } return &radeon_fb->base; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index ff7593498a7..d9bf96ee299 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -69,9 +69,10 @@ * 2.26.0 - r600-eg: fix htile size computation * 2.27.0 - r600-SI: Add CS ioctl support for async DMA * 2.28.0 - r600-eg: Add MEM_WRITE packet support + * 2.29.0 - R500 FP16 color clear registers */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 28 +#define KMS_DRIVER_MINOR 29 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -306,8 +307,8 @@ static int radeon_kick_out_firmware_fb(struct 
pci_dev *pdev) return 0; } -static int __devinit -radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int radeon_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) { int ret; diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index c5bddd630eb..fc60b74ee30 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap); * radeon_ddc_probe * */ -bool radeon_ddc_probe(struct radeon_connector *radeon_connector) +bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux) { u8 out = 0x0; u8 buf[8]; @@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); - ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); + if (use_aux) { + struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; + ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2); + } else { + ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); + } + if (ret != 2) /* Couldn't find an accessible DDC on this connector */ return false; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index f5ba2241dac..62cd512f5c8 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc enum drm_connector_status found = connector_status_disconnected; bool color = true; + /* just don't bother on RN50 those chip are often connected to remoting + * console hw and often we get failure to load detect those. So to make + * everyone happy report the encoder as always connected. 
+ */ + if (ASIC_IS_RN50(rdev)) { + return connector_status_connected; + } + /* save the regs we need */ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index d818b503b42..4003f5a68c0 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -209,7 +209,8 @@ enum radeon_connector_table { CT_RN50_POWER, CT_MAC_X800, CT_MAC_G5_9600, - CT_SAM440EP + CT_SAM440EP, + CT_MAC_G4_SILVER }; enum radeon_dvo_chip { @@ -558,7 +559,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, u8 val); extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); -extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); +extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux); extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 883c95d8d90..d3aface2d12 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -84,6 +84,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) rbo->placement.fpfn = 0; rbo->placement.lpfn = 0; rbo->placement.placement = rbo->placements; + rbo->placement.busy_placement = rbo->placements; if (domain & RADEON_GEM_DOMAIN_VRAM) rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; @@ -104,14 +105,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) if (!c) rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; rbo->placement.num_placement = c; - - c = 0; - rbo->placement.busy_placement = rbo->busy_placements; - if (rbo->rdev->flags & RADEON_IS_AGP) { - rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT; - } else { - rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; - } rbo->placement.num_busy_placement = c; } @@ -357,6 +350,7 @@ int radeon_bo_list_validate(struct list_head *head) { struct radeon_bo_list *lobj; struct radeon_bo *bo; + u32 domain; int r; r = ttm_eu_reserve_buffers(head); @@ -366,9 +360,17 @@ int radeon_bo_list_validate(struct list_head *head) list_for_each_entry(lobj, head, tv.head) { bo = lobj->bo; if (!bo->pin_count) { + domain = lobj->wdomain ? 
lobj->wdomain : lobj->rdomain; + + retry: + radeon_ttm_placement_from_domain(bo, domain); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); if (unlikely(r)) { + if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { + domain |= RADEON_GEM_DOMAIN_GTT; + goto retry; + } return r; } } diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index e09521858f6..26c23bb651c 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -194,6 +194,7 @@ struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev, bo = dma_buf->priv; if (bo->gem_base.dev == dev) { drm_gem_object_reference(&bo->gem_base); + dma_buf_put(dma_buf); return &bo->gem_base; } } diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index ebd69562ef6..cd72062d5a9 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi { int r; + /* make sure we aren't trying to allocate more space than there is on the ring */ + if (ndw > (ring->ring_size / 4)) + return -ENOMEM; /* Align requested size with padding so unlock_commit can * pad safely */ ndw = (ndw + ring->align_mask) & ~ring->align_mask; @@ -770,22 +773,30 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) int ridx = *(int*)node->info_ent->data; struct radeon_ring *ring = &rdev->ring[ridx]; unsigned count, i, j; + u32 tmp; radeon_ring_free_size(rdev, ring); count = (ring->ring_size / 4) - ring->ring_free_dw; - seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg)); - seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg)); + tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift; + seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp); + tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift; + seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp); if (ring->rptr_save_reg) { seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg, RREG32(ring->rptr_save_reg)); } - seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr); - seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr); + seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); + seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr); + seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr); + seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr); seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); seq_printf(m, "%u dwords in ring\n", count); - i = ring->rptr; - for (j = 0; j <= count; j++) { - seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); + /* print 8 dw before current rptr as often it's the last executed + * packet that is the root issue + */ + i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; + for (j = 0; j <= (count + 32); j++) { + seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); i = (i + 1) & ring->ptr_mask; } return 0; @@ -794,11 +805,15 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data) static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX; static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX; static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX; +static int radeon_ring_type_dma1_index = 
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 97f3ece81cd..8dcc20f53d7 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 	/* we assume caller has already allocated space on waiters ring */
 	radeon_semaphore_emit_wait(rdev, waiter, semaphore);
 
+	/* for debugging lockup only, used by the debugfs ring info files */
+	rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+	rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1d8ff2f850b..93f760e27a9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -38,6 +38,7 @@
 #include <drm/radeon_drm.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/swiotlb.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0f656b111c1..a072fa8c46b 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -1,5 +1,6 @@
 cayman 0x9400
 0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
 0x000084FC CP_STRMOUT_CNTL
 0x000085F0 CP_COHER_CNTL
 0x000085F4 CP_COHER_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 911a8fbd32b..78d5e99d759 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -324,6 +324,8 @@ rv515 0x6d40
 0x46AC US_OUT_FMT_2
 0x46B0 US_OUT_FMT_3
 0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
 0x4BC0 FG_FOG_BLEND
 0x4BC4 FG_FOG_FACTOR
 0x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 2bb6d0e84b3..435ed355136 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 			WREG32(R600_CITF_CNTL, blackout);
 		}
 	}
+	/* wait for the MC to settle */
+	udelay(100);
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87c979c4f72..1b2444f4d8f 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -887,6 +887,80 @@ static int rv770_mc_init(struct radeon_device *rdev)
 	return 0;
 }
 
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFF)
+			cur_size_in_dw = 0xFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
 static int rv770_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;
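rv770_copy_dma() above splits a transfer into multiple packets because an r7xx DMA copy command moves at most 0xFFFF dwords; each packet consumes five ring dwords (header plus two low and two high address dwords), which is where the num_loops * 5 + 8 ring reservation comes from. A standalone sketch of that sizing math, with an illustrative page count:

/* Chunking math from rv770_copy_dma() above: a DMA copy packet carries
 * at most 0xFFFF dwords, so larger copies become a loop of packets.
 * The shift of 12 matches a 4096-byte GPU page (RADEON_GPU_PAGE_SHIFT).
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned num_gpu_pages = 1024;                   /* 4 MiB, illustrative */
    unsigned size_in_dw = (num_gpu_pages << 12) / 4; /* bytes -> dwords */
    unsigned num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);

    /* 5 ring dwords per packet: header, dst lo, src lo, dst hi, src hi */
    unsigned ring_dw = num_loops * 5 + 8;            /* +8 for fence/padding */

    printf("%u dw in %u packets, %u ring dw\n", size_in_dw, num_loops, ring_dw);
    return 0;
}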
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ef683653f0b..ae8b48205a6 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2126,15 +2126,13 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
-static int si_gpu_soft_reset(struct radeon_device *rdev)
+static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
 {
-	struct evergreen_mc_save save;
 	u32 grbm_reset = 0;
 
 	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-		return 0;
+		return;
 
-	dev_info(rdev->dev, "GPU softreset \n");
 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
 		RREG32(GRBM_STATUS));
 	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
@@ -2145,10 +2143,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
 		RREG32(GRBM_STATUS_SE1));
 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
 		RREG32(SRBM_STATUS));
-	evergreen_mc_stop(rdev, &save);
-	if (radeon_mc_wait_for_idle(rdev)) {
-		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
-	}
+
 	/* Disable CP parsing/prefetching */
 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
 
@@ -2173,8 +2168,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
 	udelay(50);
 	WREG32(GRBM_SOFT_RESET, 0);
 	(void)RREG32(GRBM_SOFT_RESET);
-	/* Wait a little for things to settle down */
-	udelay(50);
+
 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
 		RREG32(GRBM_STATUS));
 	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
@@ -2185,13 +2179,81 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
 		RREG32(GRBM_STATUS_SE1));
 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
 		RREG32(SRBM_STATUS));
+}
+
+static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		return;
+
+	dev_info(rdev->dev, "  DMA_STATUS_REG = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+
+	/* dma0 */
+	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+	/* dma1 */
+	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+	/* Reset dma */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	dev_info(rdev->dev, "  DMA_STATUS_REG = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+}
+
+static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		reset_mask &= ~RADEON_RESET_DMA;
+
+	if (reset_mask == 0)
+		return 0;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+	evergreen_mc_stop(rdev, &save);
+	if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+		si_gpu_soft_reset_gfx(rdev);
+
+	if (reset_mask & RADEON_RESET_DMA)
+		si_gpu_soft_reset_dma(rdev);
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
 	evergreen_mc_resume(rdev, &save);
 	return 0;
 }
 
 int si_asic_reset(struct radeon_device *rdev)
 {
-	return si_gpu_soft_reset(rdev);
+	return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
					RADEON_RESET_COMPUTE |
					RADEON_RESET_DMA));
 }
 
 /* MC */
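si_gpu_soft_reset() now takes a reset mask so the GFX/compute block and the DMA engines can be reset independently, and it first clears mask bits for engines whose status registers already read idle, so an idle engine is never disturbed. A condensed standalone sketch of that dispatch flow, with booleans standing in for the GRBM_STATUS and DMA_STATUS_REG reads and printf standing in for the hardware pokes:

/* Mask-driven reset dispatch, condensed from si_gpu_soft_reset() above. */
#include <stdio.h>

#define RESET_GFX     (1 << 0)
#define RESET_COMPUTE (1 << 1)
#define RESET_DMA     (1 << 2)

static void reset_engines(unsigned mask, int gfx_busy, int dma_busy)
{
    if (!gfx_busy)
        mask &= ~(RESET_GFX | RESET_COMPUTE);
    if (!dma_busy)
        mask &= ~RESET_DMA;
    if (!mask)
        return;                 /* nothing busy: skip the MC stop/resume */

    /* stop the memory controller once, reset only what is still busy */
    printf("mc_stop\n");
    if (mask & (RESET_GFX | RESET_COMPUTE))
        printf("reset gfx\n");
    if (mask & RESET_DMA)
        printf("reset dma\n");
    printf("mc_resume\n");
}

int main(void)
{
    /* gfx idle, dma busy: only the DMA engines get reset */
    reset_engines(RESET_GFX | RESET_COMPUTE | RESET_DMA, 0, 1);
    return 0;
}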
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 62b46215d42..c056aae814f 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -62,6 +62,22 @@
 
 #define	SRBM_STATUS				0xE50
 
+#define	SRBM_SOFT_RESET				0x0E60
+#define		SOFT_RESET_BIF			(1 << 1)
+#define		SOFT_RESET_DC			(1 << 5)
+#define		SOFT_RESET_DMA1			(1 << 6)
+#define		SOFT_RESET_GRBM			(1 << 8)
+#define		SOFT_RESET_HDP			(1 << 9)
+#define		SOFT_RESET_IH			(1 << 10)
+#define		SOFT_RESET_MC			(1 << 11)
+#define		SOFT_RESET_ROM			(1 << 14)
+#define		SOFT_RESET_SEM			(1 << 15)
+#define		SOFT_RESET_VMC			(1 << 17)
+#define		SOFT_RESET_DMA			(1 << 20)
+#define		SOFT_RESET_TST			(1 << 21)
+#define		SOFT_RESET_REGBB		(1 << 22)
+#define		SOFT_RESET_ORB			(1 << 23)
+
 #define	CC_SYS_RB_BACKEND_DISABLE		0xe80
 #define	GC_USER_SYS_RB_BACKEND_DISABLE		0xe84
 
@@ -1013,6 +1029,8 @@
 #	define DATA_SWAP_ENABLE			(1 << 3)
 #	define FENCE_SWAP_ENABLE		(1 << 4)
 #	define CTXEMPTY_INT_ENABLE		(1 << 28)
+#define	DMA_STATUS_REG				0xd034
+#	define DMA_IDLE				(1 << 0)
 #define	DMA_TILING_CONFIG			0xd0b8
 
 #define DMA_PACKET(cmd, b, t, s, n)	((((cmd) & 0xF) << 28) |	\
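The new SRBM_SOFT_RESET definitions above are what si_gpu_soft_reset_dma() pulses: write the reset bits, read the register back to post the write, hold briefly (udelay(50) in the driver), then clear. A standalone sketch of that pulse pattern; the reg32_* accessors and the backing array are stand-ins for WREG32/RREG32 against real hardware:

/* SRBM soft-reset pulse as used by si_gpu_soft_reset_dma() above. */
#include <stdint.h>
#include <stdio.h>

#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_DMA1 (1 << 6)
#define SOFT_RESET_DMA  (1 << 20)

static uint32_t regs[0x10000 / 4];       /* fake register file */

static void reg32_write(uint32_t reg, uint32_t val) { regs[reg / 4] = val; }
static uint32_t reg32_read(uint32_t reg) { return regs[reg / 4]; }

static void srbm_reset_pulse(uint32_t bits)
{
    reg32_write(SRBM_SOFT_RESET, bits);
    (void)reg32_read(SRBM_SOFT_RESET); /* read back to post the write */
    /* udelay(50) here in the driver: hold reset long enough to latch */
    reg32_write(SRBM_SOFT_RESET, 0);   /* release the engines */
}

int main(void)
{
    srbm_reset_pulse(SOFT_RESET_DMA | SOFT_RESET_DMA1);
    printf("SRBM_SOFT_RESET now 0x%08x\n", (unsigned)reg32_read(SRBM_SOFT_RESET));
    return 0;
}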