Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 3
21 files changed, 171 insertions, 74 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 82db1850666..fe738f05309 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -499,6 +499,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
 	mutex_lock(&dev->mode_config.mutex);
 	drm_mode_object_put(dev, &connector->base);
 	list_del(&connector->head);
+	dev->mode_config.num_connector--;
 	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -529,6 +530,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 	mutex_lock(&dev->mode_config.mutex);
 	drm_mode_object_put(dev, &encoder->base);
 	list_del(&encoder->head);
+	dev->mode_config.num_encoder--;
 	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_encoder_cleanup);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 802b61ac313..f7c6854eb4d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -256,7 +256,6 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
 {
 	printk(KERN_ERR "panic occurred, switching back to text console\n");
 	return drm_fb_helper_force_kernel_mode();
-	return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_panic);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ee1d701317f..56a8554d903 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -878,7 +878,7 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
 	int pp_reg, lvds_reg;
 	u32 val;
 	enum pipe panel_pipe = PIPE_A;
-	bool locked = locked;
+	bool locked = true;
 
 	if (HAS_PCH_SPLIT(dev_priv->dev)) {
 		pp_reg = PCH_PP_CONTROL;
@@ -7238,8 +7238,6 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_encoder_clones(dev, encoder->clone_mask);
 	}
 
-	intel_panel_setup_backlight(dev);
-
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	drm_helper_disable_unused_functions(dev);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 8d02d875376..c919cfc8f2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -530,7 +530,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 		nouveau_gpuobj_ref(NULL, &obj);
 		if (ret)
 			return ret;
-	} else {
+	} else
+	if (USE_SEMA(dev)) {
 		/* map fence bo into channel's vm */
 		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
 					 &chan->fence.vma);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c444cadbf84..2706cb3d871 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -37,8 +37,11 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 		return -ENOMEM;
 
 	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
-	if (!nvbe->ttm_alloced)
+	if (!nvbe->ttm_alloced) {
+		kfree(nvbe->pages);
+		nvbe->pages = NULL;
 		return -ENOMEM;
+	}
 
 	nvbe->nr_pages = 0;
 	while (num_pages--) {
@@ -126,7 +129,7 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
 			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-			dma_offset += NV_CTXDMA_PAGE_SIZE;
+			offset_l += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 118261d4927..5e45398a9e2 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -781,11 +781,20 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
-	struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
-	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+	struct drm_framebuffer *drm_fb;
+	struct nouveau_framebuffer *fb;
 	int arb_burst, arb_lwm;
 	int ret;
 
+	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+
+	/* no fb bound */
+	if (!atomic && !crtc->fb) {
+		NV_DEBUG_KMS(dev, "No FB bound\n");
+		return 0;
+	}
+
 	/* If atomic, we want to switch to the fb we were passed, so
 	 * now we update pointers to do that. (We don't pin; just
 	 * assume we're already pinned and update the base address.)
@@ -794,6 +803,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		drm_fb = passed_fb;
 		fb = nouveau_framebuffer(passed_fb);
 	} else {
+		drm_fb = crtc->fb;
+		fb = nouveau_framebuffer(crtc->fb);
 		/* If not atomic, we can go ahead and pin, and unpin the
 		 * old fb we were passed.
 		 */
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 46ad59ea218..5d989073ba6 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -519,12 +519,18 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	struct drm_device *dev = nv_crtc->base.dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *evo = nv50_display(dev)->master;
-	struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
-	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+	struct drm_framebuffer *drm_fb;
+	struct nouveau_framebuffer *fb;
 	int ret;
 
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
+	/* no fb bound */
+	if (!atomic && !crtc->fb) {
+		NV_DEBUG_KMS(dev, "No FB bound\n");
+		return 0;
+	}
+
 	/* If atomic, we want to switch to the fb we were passed, so
 	 * now we update pointers to do that. (We don't pin; just
 	 * assume we're already pinned and update the base address.)
@@ -533,6 +539,8 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		drm_fb = passed_fb;
 		fb = nouveau_framebuffer(passed_fb);
 	} else {
+		drm_fb = crtc->fb;
+		fb = nouveau_framebuffer(crtc->fb);
 		/* If not atomic, we can go ahead and pin, and unpin the
 		 * old fb we were passed.
 		 */
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index fb5fa089886..e8a746712b5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -41,6 +41,31 @@ static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 
+void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+{
+	u16 ctl, v;
+	int cap, err;
+
+	cap = pci_pcie_cap(rdev->pdev);
+	if (!cap)
+		return;
+
+	err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
+	if (err)
+		return;
+
+	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
+
+	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
+	 * to avoid hangs or perfomance issues
+	 */
+	if ((v == 0) || (v == 6) || (v == 7)) {
+		ctl &= ~PCI_EXP_DEVCTL_READRQ;
+		ctl |= (2 << 12);
+		pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
+	}
+}
+
 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
 {
 	/* enable the pflip int */
@@ -1357,6 +1382,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 			 SOFT_RESET_PA |
 			 SOFT_RESET_SH |
 			 SOFT_RESET_VGT |
+			 SOFT_RESET_SPI |
 			 SOFT_RESET_SX));
 	RREG32(GRBM_SOFT_RESET);
 	mdelay(15);
@@ -1378,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
-	WREG32(CP_RB_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
@@ -1400,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
-	rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
 	evergreen_cp_start(rdev);
 	rdev->cp.ready = true;
@@ -1862,6 +1888,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
+	evergreen_fix_pci_max_read_req_size(rdev);
+
 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
 	cc_gc_shader_pipe_config |=
@@ -3143,21 +3171,23 @@ int evergreen_suspend(struct radeon_device *rdev)
 }
 
 int evergreen_copy_blit(struct radeon_device *rdev,
-			uint64_t src_offset, uint64_t dst_offset,
-			unsigned num_pages, struct radeon_fence *fence)
+			uint64_t src_offset,
+			uint64_t dst_offset,
+			unsigned num_gpu_pages,
+			struct radeon_fence *fence)
 {
 	int r;
 
 	mutex_lock(&rdev->r600_blit.mutex);
 	rdev->r600_blit.vb_ib = NULL;
-	r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	if (r) {
 		if (rdev->r600_blit.vb_ib)
 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	evergreen_blit_done_copy(rdev, fence);
 	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 44c4750f451..99fbd793c08 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -39,6 +39,7 @@ extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
 extern void evergreen_mc_program(struct radeon_device *rdev);
 extern void evergreen_irq_suspend(struct radeon_device *rdev);
 extern int evergreen_mc_init(struct radeon_device *rdev);
+extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
 
 #define EVERGREEN_PFP_UCODE_SIZE 1120
 #define EVERGREEN_PM4_UCODE_SIZE 1376
@@ -669,6 +670,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
+	evergreen_fix_pci_max_read_req_size(rdev);
+
 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
 	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
@@ -1159,6 +1162,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 			 SOFT_RESET_PA |
 			 SOFT_RESET_SH |
 			 SOFT_RESET_VGT |
+			 SOFT_RESET_SPI |
 			 SOFT_RESET_SX));
 	RREG32(GRBM_SOFT_RESET);
 	mdelay(15);
@@ -1183,7 +1187,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-	WREG32(CP_RB0_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(CP_RB0_WPTR, rdev->cp.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1203,7 +1208,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
 
 	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
-	rdev->cp.wptr = RREG32(CP_RB0_WPTR);
 
 	/* ring1 - compute only */
 	/* Set ring buffer size */
@@ -1216,7 +1220,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-	WREG32(CP_RB1_WPTR, 0);
+	rdev->cp1.wptr = 0;
+	WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1228,7 +1233,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
 
 	rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
-	rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
 
 	/* ring2 - compute only */
 	/* Set ring buffer size */
@@ -1241,7 +1245,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-	WREG32(CP_RB2_WPTR, 0);
+	rdev->cp2.wptr = 0;
+	WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1253,7 +1258,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
 
 	rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
-	rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
 
 	/* start the rings */
 	cayman_cp_start(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f2204cb1ccd..7fcdbbbf297 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
-		   unsigned num_pages,
+		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence)
 {
 	uint32_t cur_pages;
-	uint32_t stride_bytes = PAGE_SIZE;
+	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;
 	uint32_t stride_pixels;
 	unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 	/* radeon pitch is /64 */
 	pitch = stride_bytes / 64;
 	stride_pixels = stride_bytes / 4;
-	num_loops = DIV_ROUND_UP(num_pages, 8191);
+	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
 
 	/* Ask for enough room for blit + flush + fence */
 	ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
 		return -EINVAL;
 	}
-	while (num_pages > 0) {
-		cur_pages = num_pages;
+	while (num_gpu_pages > 0) {
+		cur_pages = num_gpu_pages;
 		if (cur_pages > 8191) {
 			cur_pages = 8191;
 		}
-		num_pages -= cur_pages;
+		num_gpu_pages -= cur_pages;
 
 		/* pages are in Y direction - height
 		   page width in X direction - width */
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
 		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
 		radeon_ring_write(rdev, 0);
 		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-		radeon_ring_write(rdev, num_pages);
-		radeon_ring_write(rdev, num_pages);
+		radeon_ring_write(rdev, num_gpu_pages);
+		radeon_ring_write(rdev, num_gpu_pages);
 		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
 	}
 	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	/* Force read & write ptr to 0 */
 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
-	WREG32(RADEON_CP_RB_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
 
 	/* set the wb address whether it's enabled or not */
 	WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	WREG32(RADEON_CP_RB_CNTL, tmp);
 	udelay(10);
 	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
-	/* protect against crazy HW on resume */
-	rdev->cp.wptr &= rdev->cp.ptr_mask;
 	/* Set cp mode to bus mastering & enable cp*/
 	WREG32(RADEON_CP_CSQ_MODE,
 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index f2405830041..a1f3ba063c2 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
 int r200_copy_dma(struct radeon_device *rdev,
 		  uint64_t src_offset,
 		  uint64_t dst_offset,
-		  unsigned num_pages,
+		  unsigned num_gpu_pages,
 		  struct radeon_fence *fence)
 {
 	uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
 	int r = 0;
 
 	/* radeon pitch is /64 */
-	size = num_pages << PAGE_SHIFT;
+	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
 	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
 	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index aa5571b73aa..720dd99163f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
-	WREG32(CP_RB_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
 	/* set the wb address whether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
-	rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
 	r600_cp_start(rdev);
 	rdev->cp.ready = true;
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
-		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_pages, struct radeon_fence *fence)
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence *fence)
 {
 	int r;
 
 	mutex_lock(&rdev->r600_blit.mutex);
 	rdev->r600_blit.vb_ib = NULL;
-	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	if (r) {
 		if (rdev->r600_blit.vb_ib)
 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	r600_blit_done_copy(rdev, fence);
 	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 32807baf55e..c1e056b35b2 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -322,6 +322,7 @@ union radeon_gart_table {
 
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
 
 struct radeon_gart {
 	dma_addr_t			table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
 	int (*copy_blit)(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
-			 unsigned num_pages,
+			 unsigned num_gpu_pages,
 			 struct radeon_fence *fence);
 	int (*copy_dma)(struct radeon_device *rdev,
 			uint64_t src_offset,
			uint64_t dst_offset,
-			unsigned num_pages,
+			unsigned num_gpu_pages,
			struct radeon_fence *fence);
 	int (*copy)(struct radeon_device *rdev,
		    uint64_t src_offset,
		    uint64_t dst_offset,
-		    unsigned num_pages,
+		    unsigned num_gpu_pages,
		    struct radeon_fence *fence);
 	uint32_t (*get_engine_clock)(struct radeon_device *rdev);
 	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d7a0d7c6a9..3dedaa07aac 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
-		   unsigned num_pages,
+		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 extern int r200_copy_dma(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
-			 unsigned num_pages,
+			 unsigned num_gpu_pages,
 			 struct radeon_fence *fence);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_pages, struct radeon_fence *fence);
+		   unsigned num_gpu_pages, struct radeon_fence *fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
 			uint64_t src_offset, uint64_t dst_offset,
-			unsigned num_pages, struct radeon_fence *fence);
+			unsigned num_gpu_pages, struct radeon_fence *fence);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index dcd0863e31a..b6e18c8db9f 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -219,6 +219,9 @@ void radeon_get_clock_info(struct drm_device *dev)
 	} else {
 		DRM_INFO("Using generic clock info\n");
 
+		/* may need to be per card */
+		rdev->clock.max_pixel_clock = 35000;
+
 		if (rdev->flags & RADEON_IS_IGP) {
 			p1pll->reference_freq = 1432;
 			p2pll->reference_freq = 1432;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e0138b674ac..63675241c7f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3298,6 +3298,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
 	    rdev->pdev->subsystem_device == 0x30a4)
 		return;
 
+	/* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
+	 * - it hangs on resume inside the dynclk 1 table.
+	 */
+	if (rdev->family == CHIP_RS480 &&
+	    rdev->pdev->subsystem_vendor == 0x103c &&
+	    rdev->pdev->subsystem_device == 0x30ae)
+		return;
+
 	/* DYN CLK 1 */
 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
 	if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4f0c1ecac72..c4b8741dbf5 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1297,12 +1297,33 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 		if (!radeon_dig_connector->edp_on)
 			atombios_set_edp_panel_power(connector,
 						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
-	} else {
-		/* need to setup ddc on the bridge */
-		if (radeon_connector_encoder_is_dp_bridge(connector)) {
+	} else if (radeon_connector_encoder_is_dp_bridge(connector)) {
+		/* DP bridges are always DP */
+		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+		/* get the DPCD from the bridge */
+		radeon_dp_getdpcd(radeon_connector);
+
+		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+			ret = connector_status_connected;
+		else {
+			/* need to setup ddc on the bridge */
 			if (encoder)
 				radeon_atom_ext_encoder_setup_ddc(encoder);
+			if (radeon_ddc_probe(radeon_connector,
+					     radeon_connector->requires_extended_probe))
+				ret = connector_status_connected;
+		}
+
+		if ((ret == connector_status_disconnected) &&
+		    radeon_connector->dac_load_detect) {
+			struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+			struct drm_encoder_helper_funcs *encoder_funcs;
+			if (encoder) {
+				encoder_funcs = encoder->helper_private;
+				ret = encoder_funcs->detect(encoder, connector);
+			}
 		}
+	} else {
 		radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
 		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
 			ret = connector_status_connected;
@@ -1318,16 +1339,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 				ret = connector_status_connected;
 		}
 	}
-
-	if ((ret == connector_status_disconnected) &&
-	    radeon_connector->dac_load_detect) {
-		struct drm_encoder *encoder = radeon_best_single_encoder(connector);
-		struct drm_encoder_helper_funcs *encoder_funcs;
-		if (encoder) {
-			encoder_funcs = encoder->helper_private;
-			ret = encoder_funcs->detect(encoder, connector);
-		}
-	}
 
 	radeon_connector_update_scratch_regs(connector, ret);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1a858944e4f..6adb3e58aff 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -473,8 +473,8 @@ pflip_cleanup:
 	spin_lock_irqsave(&dev->event_lock, flags);
 	radeon_crtc->unpin_work = NULL;
 unlock_free:
-	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
+	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
 	radeon_fence_unref(&work->fence);
 	kfree(work);
 
@@ -707,16 +707,21 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 		radeon_router_select_ddc_port(radeon_connector);
 
 	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
-	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
+	    radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
 		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
 		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
 		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
-			radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
-	}
-	if (!radeon_connector->ddc_bus)
-		return -1;
-	if (!radeon_connector->edid) {
-		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &dig->dp_i2c_bus->adapter);
+		else if (radeon_connector->ddc_bus && !radeon_connector->edid)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &radeon_connector->ddc_bus->adapter);
+	} else {
+		if (radeon_connector->ddc_bus && !radeon_connector->edid)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+							      &radeon_connector->ddc_bus->adapter);
 	}
 	if (!radeon_connector->edid) {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 319d85d7e75..13690f3eb4a 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		args.ucAction = ATOM_ENABLE;
-		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		/* workaround for DVOOutputControl on some RS690 systems */
+		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+			u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg);
+		} else
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 			args.ucAction = ATOM_LCD_BLON;
 			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 9b86fb0e412..0b5468bfaf5 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		DRM_ERROR("Trying to move memory with CP turned off.\n");
 		return -EINVAL;
 	}
-	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
+	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+	r = radeon_copy(rdev, old_start, new_start,
+			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+			fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
 				      evict, no_wait_reserve, no_wait_gpu, new_mem);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a4d38d85909..ef06194c5aa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
 		if (bo->ttm == NULL) {
-			ret = ttm_bo_add_ttm(bo, false);
+			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+			ret = ttm_bo_add_ttm(bo, zero);
 			if (ret)
 				goto out_err;
 		}
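
Note on the MaxReadReq fix-up in the evergreen/ni hunks above: the PCIe Max_Read_Request_Size field lives in bits 14:12 of the Device Control register (PCI_EXP_DEVCTL_READRQ), an encoded value v selects a request size of 128 << v bytes, so the value 2 written by the patch means 512 bytes, while 6 and 7 are reserved encodings. The stand-alone sketch below only illustrates that encoding and the clamping evergreen_fix_pci_max_read_req_size() applies; it is a hypothetical user-space program, not part of the patch or of the kernel PCI API.

/* Illustrative sketch (not kernel code): how the Max_Read_Request_Size
 * field encodes sizes, and which values the patch rewrites to 2 (512 bytes).
 */
#include <stdio.h>

static unsigned int readrq_bytes(unsigned int v)
{
	/* PCIe encoding: 0 -> 128 B, 1 -> 256 B, ... 5 -> 4096 B; 6 and 7 are reserved */
	return 128u << v;
}

int main(void)
{
	for (unsigned int v = 0; v <= 7; v++) {
		/* the patch treats 0 and the reserved encodings 6/7 as values to rewrite */
		unsigned int fixed = (v == 0 || v == 6 || v == 7) ? 2 : v;

		printf("field %u -> %4u bytes%s\n", v, readrq_bytes(fixed),
		       fixed != v ? "  (rewritten, as the patch does)" : "");
	}
	return 0;
}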