author		Linus Torvalds <torvalds@linux-foundation.org>	2012-12-30 10:00:37 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-30 10:00:37 -0800
commit		4a490b78cb7e0e5efa44425df72a9fedc1c36366 (patch)
tree		8a867e39c4e555e4ba10772748b0bde8fe789e20 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent		8d91a42e54eebc43f4d8f6064751ccba73528275 (diff)
parent		d5757dbe79870d825d0dec30074d48683e1d7e9a (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull DRM update from Dave Airlie:
"This is a bit larger due to me not bothering to do anything since
before Xmas, and other people working too hard after I had clearly
given up.
It's got the three main x86 driver fixes pulls and a bunch of Tegra
fixes. It doesn't fix the Ironlake bug yet, but that does seem to be
getting closer.
- radeon: gpu reset fixes and userspace packet support
- i915: watermark fixes, workarounds, i830/845 fix
- nouveau: nvd9/kepler microcode fixes, accel is now enabled and
working, gk106 support
- tegra: misc fixes."
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (34 commits)
Revert "drm: tegra: protect DC register access with mutex"
drm: tegra: program only one window during modeset
drm: tegra: clean out old gem prototypes
drm: tegra: remove redundant tegra2_tmds_config entry
drm: tegra: protect DC register access with mutex
drm: tegra: don't leave clients host1x member uninitialized
drm: tegra: fix front_porch <-> back_porch mixup
drm/nve0/graph: fix fuc, and enable acceleration on all known chipsets
drm/nvc0/graph: fix fuc, and enable acceleration on GF119
drm/nouveau/bios: cache ramcfg strap on later chipsets
drm/nouveau/mxm: silence output if no bios data
drm/nouveau/bios: parse/display extra version component
drm/nouveau/bios: implement opcode 0xa9
drm/nouveau/bios: update gpio parsing apis to match current design
drm/nouveau: initial support for GK106
drm/radeon: add WAIT_UNTIL to evergreen VM safe reg list
drm/i915: disable shrinker lock stealing for create_mmap_offset
drm/i915: optionally disable shrinker lock stealing
drm/i915: fix flags in dma buf exporting
drm/radeon: add support for MEM_WRITE packet
...
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	76
1 file changed, 68 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2346b920bd8..ae253e04c39 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -547,9 +547,14 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 
 static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
+
 	if (!ring->private)
 		return;
 
+	if (HAS_BROKEN_CS_TLB(dev))
+		drm_gem_object_unreference(to_gem_object(ring->private));
+
 	cleanup_pipe_control(ring);
 }
 
@@ -969,6 +974,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 				u32 offset, u32 len,
@@ -976,15 +983,47 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 {
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
-	if (ret)
-		return ret;
+	if (flags & I915_DISPATCH_PINNED) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER);
-	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-	intel_ring_emit(ring, offset + len - 8);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+		intel_ring_emit(ring, offset + len - 8);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+	} else {
+		struct drm_i915_gem_object *obj = ring->private;
+		u32 cs_offset = obj->gtt_offset;
+
+		if (len > I830_BATCH_LIMIT)
+			return -ENOSPC;
+
+		ret = intel_ring_begin(ring, 9+3);
+		if (ret)
+			return ret;
+		/* Blit the batch (which has now all relocs applied) to the stable batch
+		 * scratch bo area (so that the CS never stumbles over its tlb
+		 * invalidation bug) ... */
+		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+				XY_SRC_COPY_BLT_WRITE_ALPHA |
+				XY_SRC_COPY_BLT_WRITE_RGB);
+		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
+		intel_ring_emit(ring, MI_FLUSH);
+
+		/* ... and execute it. */
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+		intel_ring_emit(ring, cs_offset + len - 8);
+		intel_ring_advance(ring);
+	}
 
 	return 0;
 }
@@ -1596,6 +1635,27 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	ring->init = init_render_ring;
 	ring->cleanup = render_ring_cleanup;
 
+	/* Workaround batchbuffer to combat CS tlb bug. */
+	if (HAS_BROKEN_CS_TLB(dev)) {
+		struct drm_i915_gem_object *obj;
+		int ret;
+
+		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+		if (obj == NULL) {
+			DRM_ERROR("Failed to allocate batch bo\n");
+			return -ENOMEM;
+		}
+
+		ret = i915_gem_object_pin(obj, 0, true, false);
+		if (ret != 0) {
+			drm_gem_object_unreference(&obj->base);
+			DRM_ERROR("Failed to pin batch bo\n");
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
 	return intel_init_ring_buffer(dev, ring);
 }
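A note on the blit extent in the workaround path: the (DIV_ROUND_UP(len, 4096) << 16) | 1024
dword packs (rows << 16) | width, with a 4096-byte pitch and 1024 32bpp pixels (= 4096 bytes)
per row, so the copy always spans enough whole rows to cover the batch. The following is a
small standalone C sketch of that arithmetic, not part of the patch; the BLT_PITCH and
BLT_WIDTH_PIXELS names are made up here for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Constants as used by the patch above. */
#define I830_BATCH_LIMIT	(256 * 1024)	/* scratch bo size */
#define BLT_PITCH		4096		/* bytes per blitted row */
#define BLT_WIDTH_PIXELS	1024		/* 1024 32bpp pixels == 4096 bytes */

int main(void)
{
	uint32_t len = 100 * 1024;	/* hypothetical batch length */

	/* The patch emits (rows << 16) | width as the blit extent:
	 * enough whole 4096-byte rows to cover 'len'. */
	uint32_t rows   = DIV_ROUND_UP(len, BLT_PITCH);
	uint32_t extent = (rows << 16) | BLT_WIDTH_PIXELS;
	uint32_t copied = rows * BLT_WIDTH_PIXELS * 4;	/* 4 bytes per 32bpp pixel */

	assert(copied >= len);			/* the whole batch is copied ... */
	assert(copied <= I830_BATCH_LIMIT);	/* ... and fits the scratch bo */

	printf("len=%u -> rows=%u, extent dword=0x%08x, copied=%u bytes\n",
	       len, rows, extent, copied);
	return 0;
}

Because I830_BATCH_LIMIT is a multiple of the 4096-byte pitch, the rounded-up copy can never
overrun the scratch bo for any len that passes the -ENOSPC check in the patch.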