From f92e93eb5f4d56d73215f089580d53597bacd468 Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Mon, 22 Jun 2009 18:15:58 +0200 Subject: drm/radeon: fix radeon kms framebuffer device smem.start is a physical address which the kernel can remap to access the video memory of the fb buffer. We now pin the fb buffer into vram; by doing so we are losing vram, but fbdev needs to be reworked to allow a change in the framebuffer address. Signed-off-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_object.c | 30 ------------------------------ 1 file changed, 30 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_object.c') diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 983e8df5e00..bac0d06c52a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -223,7 +223,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, { uint32_t flags; uint32_t tmp; - void *fbptr; int r; flags = radeon_object_flags_from_domain(domain); @@ -242,10 +241,6 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); return r; } - if (robj->rdev->fbdev_robj == robj) { - mutex_lock(&robj->rdev->fbdev_info->lock); - radeon_object_kunmap(robj); - } tmp = robj->tobj.mem.placement; ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; @@ -261,23 +256,12 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, DRM_ERROR("radeon: failed to pin object.\n"); } radeon_object_unreserve(robj); - if (robj->rdev->fbdev_robj == robj) { - if (!r) { - r = radeon_object_kmap(robj, &fbptr); - } - if (!r) { - robj->rdev->fbdev_info->screen_base = fbptr; - robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr; - } - mutex_unlock(&robj->rdev->fbdev_info->lock); - } return r; } void radeon_object_unpin(struct radeon_object *robj) { uint32_t flags; - void *fbptr; int r; spin_lock(&robj->tobj.lock); @@ -297,10 +281,6 @@ void radeon_object_unpin(struct radeon_object *robj) DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); return; } - if (robj->rdev->fbdev_robj == robj) { - mutex_lock(&robj->rdev->fbdev_info->lock); - radeon_object_kunmap(robj); - } flags = robj->tobj.mem.placement; robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; r = ttm_buffer_object_validate(&robj->tobj, @@ -310,16 +290,6 @@ void radeon_object_unpin(struct radeon_object *robj) DRM_ERROR("radeon: failed to unpin buffer.\n"); } radeon_object_unreserve(robj); - if (robj->rdev->fbdev_robj == robj) { - if (!r) { - r = radeon_object_kmap(robj, &fbptr); - } - if (!r) { - robj->rdev->fbdev_info->screen_base = fbptr; - robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr; - } - mutex_unlock(&robj->rdev->fbdev_info->lock); - } } int radeon_object_wait(struct radeon_object *robj) -- cgit v1.2.3-70-g09d2 From e024e11070a0a0dc7163ce1ec2da354a638bdbed Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 24 Jun 2009 09:48:08 +1000 Subject: drm/radeon/kms: add initial colortiling support. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds new set/get tiling interfaces where the pitch and macro/micro tiling enables can be set, along with a flag to decide if this object should have a surface when mapped. The only thing we need to allocate with a mapped surface should be the frontbuffer.
Note that rotate scanout shouldn't require one, and back/depth shouldn't either, though mesa needs some fixes. It fixes the TTM interfaces along the lines of Thomas's suggestions, and I've tested the surface stealing code with two X servers and not seen any lockdep issues. I've stopped tiling the fbcon frontbuffer, as I don't see there being any advantage other than testing; I've left the testing commands in there, just flip fb_tiled to true in radeon_fb.c. Open: Can we integrate endian swapping in with this? Future features: texture tiling - need to relocate texture registers TXOFFSET* with tiling info. This also merges Michel's patch to clean up the surface regs at init time; even though that patch makes sense on its own, this patch really relies on it. Some PowerMac firmwares set up a tiling surface at the beginning of VRAM, which messes us up otherwise. That patch is: Signed-off-by: Michel Dänzer Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_crtc.c | 11 ++- drivers/gpu/drm/radeon/r100.c | 79 ++++++++++++++++- drivers/gpu/drm/radeon/r300.c | 51 ++++++++++- drivers/gpu/drm/radeon/r300_reg.h | 4 +- drivers/gpu/drm/radeon/radeon.h | 32 ++++++- drivers/gpu/drm/radeon/radeon_asic.h | 19 ++++ drivers/gpu/drm/radeon/radeon_device.c | 2 + drivers/gpu/drm/radeon/radeon_fb.c | 12 ++- drivers/gpu/drm/radeon/radeon_gem.c | 41 +++++++++ drivers/gpu/drm/radeon/radeon_kms.c | 2 + drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 15 ++-- drivers/gpu/drm/radeon/radeon_object.c | 130 ++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_ttm.c | 2 + drivers/gpu/drm/ttm/ttm_bo.c | 5 +- drivers/gpu/drm/ttm/ttm_bo_vm.c | 3 + include/drm/radeon_drm.h | 23 ++++- include/drm/ttm/ttm_bo_driver.h | 15 ++++ 17 files changed, 427 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_object.c') diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index e64a199b5ee..eac26cdb5da 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -327,7 +327,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_gem_object *obj; struct drm_radeon_gem_object *obj_priv; uint64_t fb_location; - uint32_t fb_format, fb_pitch_pixels; + uint32_t fb_format, fb_pitch_pixels, tiling_flags; if (!crtc->fb) return -EINVAL; @@ -364,7 +364,14 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, return -EINVAL; } - /* TODO tiling */ + radeon_object_get_tiling_flags(obj->driver_private, + &tiling_flags, NULL); + if (tiling_flags & RADEON_TILING_MACRO) + fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; + + if (tiling_flags & RADEON_TILING_MICRO) + fb_format |= AVIVO_D1GRPH_TILED; + if (radeon_crtc->crtc_id == 0) WREG32(AVIVO_D1VGA_CONTROL, 0); else diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 0d05909f03f..69bd7cb5997 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -909,6 +909,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p, unsigned idx; bool onereg; int r; + u32 tile_flags = 0; ib = p->ib->ptr; ib_chunk = &p->chunks[p->chunk_ib_idx]; @@ -942,7 +943,20 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } tmp = ib_chunk->kdata[idx] & 0x003fffff; tmp += (((u32)reloc->lobj.gpu_offset) >> 10); - ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= RADEON_DST_TILE_MACRO; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + if (reg == RADEON_SRC_PITCH_OFFSET) { +
DRM_ERROR("Cannot src blit from microtiled surface\n"); + r100_cs_dump_packet(p, pkt); + return -EINVAL; + } + tile_flags |= RADEON_DST_TILE_MICRO; + } + + tmp |= tile_flags; + ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; break; case RADEON_RB3D_DEPTHOFFSET: case RADEON_RB3D_COLOROFFSET: @@ -987,6 +1001,25 @@ static int r100_packet0_check(struct radeon_cs_parser *p, } ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); break; + case R300_RB3D_COLORPITCH0: + case RADEON_RB3D_COLORPITCH: + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= RADEON_COLOR_TILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; + + tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + break; default: /* FIXME: we don't want to allow anyothers packet */ break; @@ -1707,3 +1740,47 @@ int r100_debugfs_mc_info_init(struct radeon_device *rdev) return 0; #endif } + +int r100_set_surface_reg(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size) +{ + int surf_index = reg * 16; + int flags = 0; + + /* r100/r200 divide by 16 */ + if (rdev->family < CHIP_R300) + flags = pitch / 16; + else + flags = pitch / 8; + + if (rdev->family <= CHIP_RS200) { + if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) + == (RADEON_TILING_MACRO|RADEON_TILING_MICRO)) + flags |= RADEON_SURF_TILE_COLOR_BOTH; + if (tiling_flags & RADEON_TILING_MACRO) + flags |= RADEON_SURF_TILE_COLOR_MACRO; + } else if (rdev->family <= CHIP_RV280) { + if (tiling_flags & (RADEON_TILING_MACRO)) + flags |= R200_SURF_TILE_COLOR_MACRO; + if (tiling_flags & RADEON_TILING_MICRO) + flags |= R200_SURF_TILE_COLOR_MICRO; + } else { + if (tiling_flags & RADEON_TILING_MACRO) + flags |= R300_SURF_TILE_MACRO; + if (tiling_flags & RADEON_TILING_MICRO) + flags |= R300_SURF_TILE_MICRO; + } + + DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); + WREG32(RADEON_SURFACE0_INFO + surf_index, flags); + WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); + WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); + return 0; +} + +void r100_clear_surface_reg(struct radeon_device *rdev, int reg) +{ + int surf_index = reg * 16; + WREG32(RADEON_SURFACE0_INFO + surf_index, 0); +} diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 0e0e094da50..28e5777658b 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -30,6 +30,7 @@ #include "drm.h" #include "radeon_reg.h" #include "radeon.h" +#include "radeon_drm.h" /* r300,r350,rv350,rv370,rv380 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -1023,7 +1024,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_reloc *reloc; struct r300_cs_track *track; volatile uint32_t *ib; - uint32_t tmp; + uint32_t tmp, tile_flags = 0; unsigned i; int r; @@ -1052,7 +1053,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p, } tmp = ib_chunk->kdata[idx] & 0x003fffff; tmp += (((u32)reloc->lobj.gpu_offset) >> 10); - ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= RADEON_DST_TILE_MACRO; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + if (reg == RADEON_SRC_PITCH_OFFSET) 
{ + DRM_ERROR("Cannot src blit from microtiled surface\n"); + r100_cs_dump_packet(p, pkt); + return -EINVAL; + } + tile_flags |= RADEON_DST_TILE_MICRO; + } + tmp |= tile_flags; + ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; break; case R300_RB3D_COLOROFFSET0: case R300_RB3D_COLOROFFSET1: @@ -1141,6 +1154,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, /* RB3D_COLORPITCH1 */ /* RB3D_COLORPITCH2 */ /* RB3D_COLORPITCH3 */ + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_COLOR_TILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_COLOR_MICROTILE_ENABLE; + + tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + i = (reg - 0x4E38) >> 2; track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { @@ -1196,6 +1226,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; case 0x4F24: /* ZB_DEPTHPITCH */ + r = r100_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", + idx, reg); + r100_cs_dump_packet(p, pkt); + return r; + } + + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + tile_flags |= R300_DEPTHMACROTILE_ENABLE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + tile_flags |= R300_DEPTHMICROTILE_TILED;; + + tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); + tmp |= tile_flags; + ib[idx] = tmp; + track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; break; case 0x4104: diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 70f48609515..4b7afef35a6 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h @@ -27,7 +27,9 @@ #ifndef _R300_REG_H_ #define _R300_REG_H_ - +#define R300_SURF_TILE_MACRO (1<<16) +#define R300_SURF_TILE_MICRO (2<<16) +#define R300_SURF_TILE_BOTH (3<<16) #define R300_MC_INIT_MISC_LAT_TIMER 0x180 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 7f007185e7f..af12a2fe322 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -201,6 +201,14 @@ int radeon_fence_wait_last(struct radeon_device *rdev); struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); void radeon_fence_unref(struct radeon_fence **fence); +/* + * Tiling registers + */ +struct radeon_surface_reg { + struct radeon_object *robj; +}; + +#define RADEON_GEM_MAX_SURFACES 8 /* * Radeon buffer. 
@@ -213,6 +221,7 @@ struct radeon_object_list { uint64_t gpu_offset; unsigned rdomain; unsigned wdomain; + uint32_t tiling_flags; }; int radeon_object_init(struct radeon_device *rdev); @@ -242,8 +251,15 @@ void radeon_object_list_clean(struct list_head *head); int radeon_object_fbdev_mmap(struct radeon_object *robj, struct vm_area_struct *vma); unsigned long radeon_object_size(struct radeon_object *robj); - - +void radeon_object_clear_surface_reg(struct radeon_object *robj); +int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, + bool force_drop); +void radeon_object_set_tiling_flags(struct radeon_object *robj, + uint32_t tiling_flags, uint32_t pitch); +void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch); +void radeon_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem); +void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); /* * GEM objects. */ @@ -535,6 +551,11 @@ struct radeon_asic { void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); void (*set_clock_gating)(struct radeon_device *rdev, int enable); + + int (*set_surface_reg)(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size); + int (*clear_surface_reg)(struct radeon_device *rdev, int reg); }; union radeon_asic_config { @@ -568,6 +589,10 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); /* @@ -627,6 +652,7 @@ struct radeon_device { bool shutdown; bool suspend; bool need_dma32; + struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; }; int radeon_device_init(struct radeon_device *rdev, @@ -801,5 +827,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) +#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) +#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) #endif diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e2e567395df..dd903d32940 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -71,6 +71,10 @@ int r100_copy_blit(struct radeon_device *rdev, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence); +int r100_set_surface_reg(struct radeon_device *rdev, int reg, + uint32_t tiling_flags, uint32_t pitch, + uint32_t offset, uint32_t obj_size); +int r100_clear_surface_reg(struct radeon_device *rdev, int reg); static struct radeon_asic r100_asic = { .init = &r100_init, @@ -100,6 +104,8 @@ static struct radeon_asic r100_asic = { .set_memory_clock = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, }; @@ -128,6 
+134,7 @@ int r300_copy_dma(struct radeon_device *rdev, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence); + static struct radeon_asic r300_asic = { .init = &r300_init, .errata = &r300_errata, @@ -156,6 +163,8 @@ static struct radeon_asic r300_asic = { .set_memory_clock = NULL, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, }; /* @@ -193,6 +202,8 @@ static struct radeon_asic r420_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, }; @@ -237,6 +248,8 @@ static struct radeon_asic rs400_asic = { .set_memory_clock = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, }; @@ -322,6 +335,8 @@ static struct radeon_asic rs690_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, }; @@ -367,6 +382,8 @@ static struct radeon_asic rv515_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, }; @@ -405,6 +422,8 @@ static struct radeon_asic r520_asic = { .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, }; /* diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index cdef6eb01ba..f23083bbba3 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -48,6 +48,8 @@ static void radeon_surface_init(struct radeon_device *rdev) i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), 0); } + /* enable surfaces */ + WREG32(RADEON_SURFACE_CNTL, 0); } } diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 260870a29d8..36d2f5588f2 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -471,10 +471,10 @@ static struct notifier_block paniced = { .notifier_call = radeonfb_panic, }; -static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp) +static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) { int aligned = width; - int align_large = (ASIC_IS_AVIVO(rdev)); + int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; int pitch_mask = 0; switch (bpp / 8) { @@ -512,12 +512,13 @@ int radeonfb_create(struct radeon_device *rdev, u64 fb_gpuaddr; void *fbptr = NULL; unsigned long tmp; + bool fb_tiled = false; /* useful for testing */ mode_cmd.width = surface_width; mode_cmd.height = surface_height; mode_cmd.bpp = 32; /* need to align pitch with crtc limits */ - mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8); + mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); mode_cmd.depth = 24; size = mode_cmd.pitch * mode_cmd.height; @@ -535,6 
+536,8 @@ int radeonfb_create(struct radeon_device *rdev, } robj = gobj->driver_private; + if (fb_tiled) + radeon_object_set_tiling_flags(robj, RADEON_TILING_MACRO|RADEON_TILING_SURFACE, mode_cmd.pitch); mutex_lock(&rdev->ddev->struct_mutex); fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); if (fb == NULL) { @@ -563,6 +566,9 @@ int radeonfb_create(struct radeon_device *rdev, } rfbdev = info->par; + if (fb_tiled) + radeon_object_check_tiling(robj, 0, 0); + ret = radeon_object_kmap(robj, &fbptr); if (ret) { goto out_unref; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index eb516034235..12542087b29 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -285,3 +285,44 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); return r; } + +int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct drm_radeon_gem_set_tiling *args = data; + struct drm_gem_object *gobj; + struct radeon_object *robj; + int r = 0; + + DRM_DEBUG("%d \n", args->handle); + gobj = drm_gem_object_lookup(dev, filp, args->handle); + if (gobj == NULL) + return -EINVAL; + robj = gobj->driver_private; + radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gobj); + mutex_unlock(&dev->struct_mutex); + return r; +} + +int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct drm_radeon_gem_get_tiling *args = data; + struct drm_gem_object *gobj; + struct radeon_object *robj; + int r = 0; + + DRM_DEBUG("\n"); + gobj = drm_gem_object_lookup(dev, filp, args->handle); + if (gobj == NULL) + return -EINVAL; + robj = gobj->driver_private; + radeon_object_get_tiling_flags(robj, &args->tiling_flags, + &args->pitch); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gobj); + mutex_unlock(&dev->struct_mutex); + return r; +} diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4612a7c146d..937a2f1cdb4 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -291,5 +291,7 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), }; int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 14c1a5107fc..0613790e2a5 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -235,6 +235,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, uint64_t base; uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; uint32_t crtc_pitch, pitch_pixels; + uint32_t tiling_flags; DRM_DEBUG("\n"); @@ -258,8 +259,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, (crtc->fb->bits_per_pixel * 8)); crtc_pitch |= crtc_pitch << 16; - /* TODO tiling */ - if (0) { + radeon_object_get_tiling_flags(obj->driver_private, + &tiling_flags, NULL); + if (tiling_flags & RADEON_TILING_MICRO) + DRM_ERROR("trying to scanout microtiled 
buffer\n"); + + if (tiling_flags & RADEON_TILING_MACRO) { if (ASIC_IS_R300(rdev)) crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | R300_CRTC_MICRO_TILE_BUFFER_DIS | @@ -275,15 +280,13 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; } - - /* TODO more tiling */ - if (0) { + if (tiling_flags & RADEON_TILING_MACRO) { if (ASIC_IS_R300(rdev)) { crtc_tile_x0_y0 = x | (y << 16); base &= ~0x7ff; } else { int byteshift = crtc->fb->bits_per_pixel >> 4; - int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; + int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); crtc_offset_cntl |= (y % 16); } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index bac0d06c52a..d5b1fd562d8 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -44,6 +44,9 @@ struct radeon_object { uint64_t gpu_addr; void *kptr; bool is_iomem; + uint32_t tiling_flags; + uint32_t pitch; + int surface_reg; }; int radeon_ttm_init(struct radeon_device *rdev); @@ -70,6 +73,7 @@ static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) robj = container_of(tobj, struct radeon_object, tobj); list_del_init(&robj->list); + radeon_object_clear_surface_reg(robj); kfree(robj); } @@ -141,6 +145,7 @@ int radeon_object_create(struct radeon_device *rdev, } robj->rdev = rdev; robj->gobj = gobj; + robj->surface_reg = -1; INIT_LIST_HEAD(&robj->list); flags = radeon_object_flags_from_domain(domain); @@ -435,6 +440,7 @@ int radeon_object_list_validate(struct list_head *head, void *fence) radeon_object_gpu_addr(robj); } lobj->gpu_offset = robj->gpu_addr; + lobj->tiling_flags = robj->tiling_flags; if (fence) { old_fence = (struct radeon_fence *)robj->tobj.sync_obj; robj->tobj.sync_obj = radeon_fence_ref(fence); @@ -479,3 +485,127 @@ unsigned long radeon_object_size(struct radeon_object *robj) { return robj->tobj.num_pages << PAGE_SHIFT; } + +int radeon_object_get_surface_reg(struct radeon_object *robj) +{ + struct radeon_device *rdev = robj->rdev; + struct radeon_surface_reg *reg; + struct radeon_object *old_object; + int steal; + int i; + + if (!robj->tiling_flags) + return 0; + + if (robj->surface_reg >= 0) { + reg = &rdev->surface_regs[robj->surface_reg]; + i = robj->surface_reg; + goto out; + } + + steal = -1; + for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { + + reg = &rdev->surface_regs[i]; + if (!reg->robj) + break; + + old_object = reg->robj; + if (old_object->pin_count == 0) + steal = i; + } + + /* if we are all out */ + if (i == RADEON_GEM_MAX_SURFACES) { + if (steal == -1) + return -ENOMEM; + /* find someone with a surface reg and nuke their BO */ + reg = &rdev->surface_regs[steal]; + old_object = reg->robj; + /* blow away the mapping */ + DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); + ttm_bo_unmap_virtual(&old_object->tobj); + old_object->surface_reg = -1; + i = steal; + } + + robj->surface_reg = i; + reg->robj = robj; + +out: + radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, + robj->tobj.mem.mm_node->start << PAGE_SHIFT, + robj->tobj.num_pages << PAGE_SHIFT); + return 0; +} + +void radeon_object_clear_surface_reg(struct radeon_object *robj) +{ + struct radeon_device *rdev = robj->rdev; + struct radeon_surface_reg *reg; + + if (robj->surface_reg == -1) + return; + + reg = &rdev->surface_regs[robj->surface_reg]; + radeon_clear_surface_reg(rdev, 
robj->surface_reg); + + reg->robj = NULL; + robj->surface_reg = -1; +} + +void radeon_object_set_tiling_flags(struct radeon_object *robj, + uint32_t tiling_flags, uint32_t pitch) +{ + robj->tiling_flags = tiling_flags; + robj->pitch = pitch; +} + +void radeon_object_get_tiling_flags(struct radeon_object *robj, + uint32_t *tiling_flags, + uint32_t *pitch) +{ + if (tiling_flags) + *tiling_flags = robj->tiling_flags; + if (pitch) + *pitch = robj->pitch; +} + +int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, + bool force_drop) +{ + if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) + return 0; + + if (force_drop) { + radeon_object_clear_surface_reg(robj); + return 0; + } + + if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { + if (!has_moved) + return 0; + + if (robj->surface_reg >= 0) + radeon_object_clear_surface_reg(robj); + return 0; + } + + if ((robj->surface_reg >= 0) && !has_moved) + return 0; + + return radeon_object_get_surface_reg(robj); +} + +void radeon_bo_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem) +{ + struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); + radeon_object_check_tiling(robj, 0, 1); +} + +void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) +{ + struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); + radeon_object_check_tiling(robj, 0, 0); +} diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 4ca9aa9203d..37e1cbcce3a 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -429,6 +429,8 @@ static struct ttm_bo_driver radeon_bo_driver = { .sync_obj_flush = &radeon_sync_obj_flush, .sync_obj_unref = &radeon_sync_obj_unref, .sync_obj_ref = &radeon_sync_obj_ref, + .move_notify = &radeon_bo_move_notify, + .fault_reserve_notify = &radeon_bo_fault_reserve_notify, }; int radeon_ttm_init(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index e55e7972c89..6538d423698 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -43,7 +43,6 @@ #define TTM_BO_HASH_ORDER 13 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); -static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); static int ttm_bo_swapout(struct ttm_mem_shrink *shrink); static inline uint32_t ttm_bo_type_flags(unsigned type) @@ -307,6 +306,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } + if (bdev->driver->move_notify) + bdev->driver->move_notify(bo, mem); + if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) ret = ttm_bo_move_ttm(bo, evict, no_wait, mem); @@ -1451,6 +1453,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); } +EXPORT_SYMBOL(ttm_bo_unmap_virtual); static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 40b75032ea4..41c907f6c56 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -101,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_NOPAGE; } + if (bdev->driver->fault_reserve_notify) + bdev->driver->fault_reserve_notify(bo); + /* * Wait for buffer data in transit, due to a pipelined * move. 
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 41862e9a4c2..af4b4826997 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -506,6 +506,8 @@ typedef struct { #define DRM_RADEON_GEM_WAIT_IDLE 0x24 #define DRM_RADEON_CS 0x26 #define DRM_RADEON_INFO 0x27 +#define DRM_RADEON_GEM_SET_TILING 0x28 +#define DRM_RADEON_GEM_GET_TILING 0x29 #define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) @@ -544,7 +546,8 @@ typedef struct { #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle) #define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs) #define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info) - +#define DRM_IOCTL_RADEON_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling) +#define DRM_IOCTL_RADEON_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling) typedef struct drm_radeon_init { enum { @@ -796,6 +799,24 @@ struct drm_radeon_gem_create { uint32_t flags; }; +#define RADEON_TILING_MACRO 0x1 +#define RADEON_TILING_MICRO 0x2 +#define RADEON_TILING_SWAP 0x4 +#define RADEON_TILING_SURFACE 0x8 /* this object requires a surface + * when mapped - i.e. front buffer */ + +struct drm_radeon_gem_set_tiling { + uint32_t handle; + uint32_t tiling_flags; + uint32_t pitch; +}; + +struct drm_radeon_gem_get_tiling { + uint32_t handle; + uint32_t tiling_flags; + uint32_t pitch; +}; + struct drm_radeon_gem_mmap { uint32_t handle; uint32_t pad; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index ea83dd23a4d..a68829db381 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -354,6 +354,14 @@ struct ttm_bo_driver { int (*sync_obj_flush) (void *sync_obj, void *sync_arg); void (*sync_obj_unref) (void **sync_obj); void *(*sync_obj_ref) (void *sync_obj); + + /* hook to notify driver about a driver move so it + * can do tiling things */ + void (*move_notify)(struct ttm_buffer_object *bo, + struct ttm_mem_reg *new_mem); + /* notify the driver we are taking a fault on this BO + * and have reserved it */ + void (*fault_reserve_notify)(struct ttm_buffer_object *bo); }; #define TTM_NUM_MEM_TYPES 8 @@ -653,6 +661,13 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_driver *driver, uint64_t file_page_offset, bool need_dma32); +/** + * ttm_bo_unmap_virtual + * + * @bo: tear down the virtual mappings for this BO + */ +extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); + /** * ttm_bo_reserve: * -- cgit v1.2.3-70-g09d2 From e46074effd5510e7a8fe34b93828d98a50835da2 Mon Sep 17 00:00:00 2001 From: Michel Dänzer Date: Tue, 28 Jul 2009 12:30:55 +0200 Subject: drm/radeon: Don't unreserve twice on failure to validate. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is done later in radeon_object_list_unvalidate(). Doing it twice triggers a BUG in TTM, rendering X on KMS unusable until reboot. 
Signed-off-by: Michel Dänzer Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_object.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm/radeon/radeon_object.c') diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d5b1fd562d8..3961a44c5dc 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -433,7 +433,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) robj->tobj.proposed_placement, true, false); if (unlikely(r)) { - radeon_object_list_unreserve(head); DRM_ERROR("radeon: failed to validate.\n"); return r; } -- cgit v1.2.3-70-g09d2 From 664f86590295217b2319edf88830e87b800f6c4a Mon Sep 17 00:00:00 2001 From: Michel Dänzer Date: Tue, 28 Jul 2009 12:30:57 +0200 Subject: drm/radeon: Pay more attention to object placement requested by userspace. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously we were basically always setting the GTT and VRAM flags regardless of what userspace requested. Signed-off-by: Michel Dänzer Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_object.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_object.c') diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 3961a44c5dc..81573c3a9b4 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -103,16 +103,16 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) { uint32_t flags = 0; if (domain & RADEON_GEM_DOMAIN_VRAM) { - flags |= TTM_PL_FLAG_VRAM; + flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; } if (domain & RADEON_GEM_DOMAIN_GTT) { - flags |= TTM_PL_FLAG_TT; + flags |= TTM_PL_FLAG_TT | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; } if (domain & RADEON_GEM_DOMAIN_CPU) { - flags |= TTM_PL_FLAG_SYSTEM; + flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; } if (!flags) { - flags |= TTM_PL_FLAG_SYSTEM; + flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; } return flags; } @@ -408,7 +408,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence) struct radeon_object *robj; struct radeon_fence *old_fence = NULL; struct list_head *i; - uint32_t flags; int r; r = radeon_object_list_reserve(head); @@ -419,16 +418,14 @@ int radeon_object_list_validate(struct list_head *head, void *fence) list_for_each(i, head) { lobj = list_entry(i, struct radeon_object_list, list); robj = lobj->robj; - if (lobj->wdomain) { - flags = radeon_object_flags_from_domain(lobj->wdomain); - flags |= TTM_PL_FLAG_TT; - } else { - flags = radeon_object_flags_from_domain(lobj->rdomain); - flags |= TTM_PL_FLAG_TT; - flags |= TTM_PL_FLAG_VRAM; - } if (!robj->pin_count) { - robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING; + if (lobj->wdomain) { + robj->tobj.proposed_placement = + radeon_object_flags_from_domain(lobj->wdomain); + } else { + robj->tobj.proposed_placement = + radeon_object_flags_from_domain(lobj->rdomain); + } r = ttm_buffer_object_validate(&robj->tobj, robj->tobj.proposed_placement, true, false); -- cgit v1.2.3-70-g09d2 From 3b170c3b2e688665fbc2845ba5bb4304bf38a119 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 24 Jul 2009 13:47:45 +1000 Subject: drm/radeon/kms: allow interruptible waits for objects. Blocking here isn't something the X server mouse appreciates, avoid the block and let userspace retry the waits. 
The libdrm_radeon userspace library also expects EBUSY, not ERESTART. Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_fence.c | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_object.c') diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 96afbf5ae2a..b4e48dd2e85 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -195,7 +195,7 @@ retry: r = wait_event_interruptible_timeout(rdev->fence_drv.queue, radeon_fence_signaled(fence), timeout); if (unlikely(r == -ERESTARTSYS)) { - return -ERESTART; + return -EBUSY; } } else { r = wait_event_timeout(rdev->fence_drv.queue, diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 81573c3a9b4..dd9ac2fed6d 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -309,7 +309,7 @@ int radeon_object_wait(struct radeon_object *robj) } spin_lock(&robj->tobj.lock); if (robj->tobj.sync_obj) { - r = ttm_bo_wait(&robj->tobj, true, false, false); + r = ttm_bo_wait(&robj->tobj, true, true, false); } spin_unlock(&robj->tobj.lock); radeon_object_unreserve(robj); -- cgit v1.2.3-70-g09d2 From 985fe845aea9cd56fd351800302270444556e45a Mon Sep 17 00:00:00 2001 From: Jerome Glisse Date: Wed, 29 Jul 2009 18:55:53 +0200 Subject: drm/radeon/kms: Fix caching mode selection for GTT object A GTT object can be cached, uncached or wc; just let core ttm pick the best mode according to how the bo driver and GTT memory type were initialized. Signed-off-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_object.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/radeon/radeon_object.c') diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index dd9ac2fed6d..e98cae3bf4a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -106,7 +106,7 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; } if (domain & RADEON_GEM_DOMAIN_GTT) { - flags |= TTM_PL_FLAG_TT | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; + flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; } if (domain & RADEON_GEM_DOMAIN_CPU) { flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; -- cgit v1.2.3-70-g09d2 From cefb87efc9aa0288849149484870d5ab989fbd59 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sun, 16 Aug 2009 21:05:45 +1000 Subject: drm/radeon/kms: implement bo busy check + current domain This implements the busy ioctl along with a current domain check. It returns 0 or -EBUSY and reports the current domain no matter what the answer is.
Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_gem.c | 22 +++++++++++++++++++++- drivers/gpu/drm/radeon/radeon_object.c | 19 +++++++++++++++++++ include/drm/radeon_drm.h | 2 +- 4 files changed, 42 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_object.c') diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 87170a56e37..79ad98264e3 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -242,6 +242,7 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain, uint64_t *gpu_addr); void radeon_object_unpin(struct radeon_object *robj); int radeon_object_wait(struct radeon_object *robj); +int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement); int radeon_object_evict_vram(struct radeon_device *rdev); int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset); void radeon_object_force_delete(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index cded5180c75..d4ceff13bbb 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -262,7 +262,27 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { - /* FIXME: implement */ + struct drm_radeon_gem_busy *args = data; + struct drm_gem_object *gobj; + struct radeon_object *robj; + int r; + uint32_t cur_placement; + + gobj = drm_gem_object_lookup(dev, filp, args->handle); + if (gobj == NULL) { + return -EINVAL; + } + robj = gobj->driver_private; + r = radeon_object_busy_domain(robj, &cur_placement); + if (cur_placement == TTM_PL_VRAM) + args->domain = RADEON_GEM_DOMAIN_VRAM; + if (cur_placement == TTM_PL_FLAG_TT) + args->domain = RADEON_GEM_DOMAIN_GTT; + if (cur_placement == TTM_PL_FLAG_SYSTEM) + args->domain = RADEON_GEM_DOMAIN_CPU; + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(gobj); + mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index e98cae3bf4a..b85fb83d7ae 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -316,6 +316,25 @@ int radeon_object_wait(struct radeon_object *robj) return r; } +int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement) +{ + int r = 0; + + r = radeon_object_reserve(robj, true); + if (unlikely(r != 0)) { + DRM_ERROR("radeon: failed to reserve object for waiting.\n"); + return r; + } + spin_lock(&robj->tobj.lock); + *cur_placement = robj->tobj.mem.mem_type; + if (robj->tobj.sync_obj) { + r = ttm_bo_wait(&robj->tobj, true, true, true); + } + spin_unlock(&robj->tobj.lock); + radeon_object_unreserve(robj); + return r; +} + int radeon_object_evict_vram(struct radeon_device *rdev) { if (rdev->flags & RADEON_IS_IGP) { diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index af4b4826997..f81c3232acc 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -838,7 +838,7 @@ struct drm_radeon_gem_wait_idle { struct drm_radeon_gem_busy { uint32_t handle; - uint32_t busy; + uint32_t domain; }; struct drm_radeon_gem_pread { -- cgit v1.2.3-70-g09d2
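The patches above only show the kernel side of the new interfaces. As a rough illustration of how they are meant to be driven, here is a minimal, hypothetical userspace sketch; it is not part of any commit in this series. It assumes an already-open DRM file descriptor and a GEM handle obtained from a prior DRM_RADEON_GEM_CREATE call, uses the DRM_IOCTL_RADEON_SET_TILING / DRM_IOCTL_RADEON_GET_TILING macros as defined in the radeon_drm.h hunk above, and assumes the pre-existing DRM_IOCTL_RADEON_GEM_BUSY define; the include path depends on how the DRM UAPI headers are installed on the build system.

/* Hypothetical example, not part of the series: set macro tiling on a BO,
 * read the flags back, and query busy state / current domain. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>   /* assumed install location of the UAPI header */

static int radeon_tiling_and_busy_demo(int fd, uint32_t handle, uint32_t pitch)
{
    struct drm_radeon_gem_set_tiling set_args;
    struct drm_radeon_gem_get_tiling get_args;
    struct drm_radeon_gem_busy busy_args;

    /* Record macro tiling and the pitch for this object.  RADEON_TILING_SURFACE
     * would additionally request a surface register when the BO is mapped,
     * which per the commit message is only needed for the front buffer. */
    memset(&set_args, 0, sizeof(set_args));
    set_args.handle = handle;
    set_args.tiling_flags = RADEON_TILING_MACRO;
    set_args.pitch = pitch;
    if (ioctl(fd, DRM_IOCTL_RADEON_SET_TILING, &set_args) != 0)
        return -errno;

    /* Read the tiling state back through the get ioctl. */
    memset(&get_args, 0, sizeof(get_args));
    get_args.handle = handle;
    if (ioctl(fd, DRM_IOCTL_RADEON_GET_TILING, &get_args) != 0)
        return -errno;
    printf("bo %u: tiling_flags 0x%x pitch %u\n",
           handle, get_args.tiling_flags, get_args.pitch);

    /* Busy check: per the commit message EBUSY means the GPU still references
     * the buffer; the current placement is reported as a GEM domain either way. */
    memset(&busy_args, 0, sizeof(busy_args));
    busy_args.handle = handle;
    if (ioctl(fd, DRM_IOCTL_RADEON_GEM_BUSY, &busy_args) != 0 && errno != EBUSY)
        return -errno;
    printf("bo %u: current domain 0x%x\n", handle, busy_args.domain);
    return 0;
}

Reporting the placement as a RADEON_GEM_DOMAIN_* value rather than a raw TTM memory type keeps TTM details out of the ioctl ABI, which is why the busy ioctl translates the TTM placement into a GEM domain before returning it to userspace.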