Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 315
1 file changed, 223 insertions(+), 92 deletions(-)
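Two threads run through this diff: every open-coded obj->driver_private lookup is converted to the to_intel_bo() accessor, and the request/seqno machinery grows a PIPE_CONTROL path for hardware that supports it (HAS_PIPE_CONTROL(dev)), writing the sequence number through the render pipe into a dedicated pinned page instead of the legacy MI_STORE_DWORD_INDEX write to the hardware status page. Smaller changes ride along: a reworked error path in i915_gem_object_get_pages(), pitch-limit WARN_ONs in i915_write_fence_reg(), a dropped redundant wait in i915_gem_evict_something(), and an MI_MODE setup in ring initialization.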
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fba37e9f775..ef3d91dda71 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
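The new <linux/slab.h> include is presumably needed for this file's k*alloc users, which previously picked the header up implicitly; around this time slab.h stopped being pulled in through other core kernel headers.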
@@ -162,7 +163,7 @@ fast_shmem_read(struct page **pages,
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj_priv->tiling_mode != I915_TILING_NONE;
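to_intel_bo() itself is not part of this diff (it lives in i915_drv.h). A minimal sketch of the accessor, assuming it still wraps the driver_private pointer at this point in the driver's history (later kernels embed the GEM object and switch to container_of()):

/* Assumed definition, for orientation only; the real one is in
 * i915_drv.h. Centralizing the driver_private cast lets the object
 * layout change later without touching every call site below. */
#define to_intel_bo(obj) \
	((struct drm_i915_gem_object *)(obj)->driver_private)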
@@ -263,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pread *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
@@ -284,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
@@ -353,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pread *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
@@ -402,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
@@ -478,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Bounds check source.
*
@@ -580,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
@@ -604,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret)
goto fail;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
while (remain > 0) {
@@ -654,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t gtt_page_base, offset;
@@ -698,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret)
goto out_unpin_object;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = obj_priv->gtt_offset + args->offset;
while (remain > 0) {
@@ -760,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
@@ -780,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
obj_priv->dirty = 1;
@@ -828,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
@@ -876,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret != 0)
goto fail_put_pages;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
offset = args->offset;
obj_priv->dirty = 1;
@@ -951,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Bounds check destination.
*
@@ -1033,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
@@ -1095,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
DRM_INFO("%s: sw_finish %d (%p %zd)\n",
__func__, args->handle, obj, obj->size);
#endif
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Pinned buffers may be scanout, so flush the cache */
if (obj_priv->pin_count)
@@ -1166,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1233,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_map_list *list;
struct drm_local_map *map;
int ret = 0;
@@ -1304,7 +1305,7 @@ void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (dev->dev_mapping)
unmap_mapping_range(dev->dev_mapping,
@@ -1315,7 +1316,7 @@ static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
@@ -1346,7 +1347,7 @@ static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int start, i;
/*
@@ -1405,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1449,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count = obj->size / PAGE_SIZE;
int i;
@@ -1466,9 +1467,6 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
obj_priv->dirty = 0;
for (i = 0; i < page_count; i++) {
- if (obj_priv->pages[i] == NULL)
- break;
-
if (obj_priv->dirty)
set_page_dirty(obj_priv->pages[i]);
@@ -1488,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
/* Add a reference if we're newly entering the active list. */
if (!obj_priv->active) {
@@ -1508,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
BUG_ON(!obj_priv->active);
list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1519,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
inode = obj->filp->f_path.dentry->d_inode;
@@ -1540,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->pin_count != 0)
@@ -1590,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
}
+#define PIPE_CONTROL_FLUSH(addr) \
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ PIPE_CONTROL_DEPTH_STALL); \
+ OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
+ OUT_RING(0); \
+ OUT_RING(0); \
+
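Each PIPE_CONTROL_FLUSH(addr) expands to one four-dword PIPE_CONTROL packet: a depth-stalled qword write of zero to addr in the global GTT, which, per the comment in the hunk below, pushes pending pipe writes out to memory. Since the macro expands to several statements with no do/while wrapper, it is only safe in straight-line emission code such as the ring block below, never as the unbraced body of an if.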
/**
* Creates a new sequence number, emitting a write of it to the status page
* plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1624,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if (dev_priv->mm.next_gem_seqno == 0)
dev_priv->mm.next_gem_seqno++;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
+ if (HAS_PIPE_CONTROL(dev)) {
+ u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();

+ /*
+ * Workaround qword write incoherence by flushing the
+ * PIPE_NOTIFY buffers out to memory before requesting
+ * an interrupt.
+ */
+ BEGIN_LP_RING(32);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(scratch_addr);
+ OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+ OUT_RING(seqno);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
DRM_DEBUG_DRIVER("%d\n", seqno);
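The ring budget checks out: 4 dwords for the opening PIPE_CONTROL that writes the seqno, 6 x 4 for the PIPE_CONTROL_FLUSH expansions, and 4 for the closing PIPE_CONTROL with PIPE_CONTROL_NOTIFY gives the 32 reserved by BEGIN_LP_RING(32). The scratch writes step through six distinct 128-byte-spaced cachelines (offsets 128 through 768 of the 4 KiB seqno page), keeping them clear of the seqno qword at offset 0.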
@@ -1754,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+ if (HAS_PIPE_CONTROL(dev))
+ return ((volatile u32 *)(dev_priv->seqno_page))[0];
+ else
+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
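With PIPE_CONTROL in use, the current seqno is read from the first dword of the CPU mapping of the seqno page; the volatile cast stops the compiler from caching a value the GPU updates behind its back. Hardware without PIPE_CONTROL keeps reading the hardware status page via READ_HWSP.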
/**
@@ -1967,7 +2009,7 @@ static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
/* This function only exists to support waiting for existing rendering,
@@ -1999,7 +2041,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
#if WATCH_BUF
@@ -2175,7 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
BUG_ON(obj_priv->pin_count != 0);
BUG_ON(obj_priv->active);
@@ -2227,11 +2269,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
seqno = i915_add_request(dev, NULL, obj->write_domain);
if (seqno == 0)
return -ENOMEM;
-
- ret = i915_wait_request(dev, seqno);
- if (ret)
- return ret;
-
continue;
}
}
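Dropping the i915_wait_request() call here removes a redundant block: after queueing the flush request the function simply continues its scan, and the next pass of the eviction loop finds the new request on the request list and waits there, at which point the flushed buffers can move to the inactive list.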
@@ -2251,12 +2288,11 @@ int
i915_gem_object_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count, i;
struct address_space *mapping;
struct inode *inode;
struct page *page;
- int ret;
if (obj_priv->pages_refcount++ != 0)
return 0;
@@ -2279,11 +2315,9 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
mapping_gfp_mask (mapping) |
__GFP_COLD |
gfpmask);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- i915_gem_object_put_pages(obj);
- return ret;
- }
+ if (IS_ERR(page))
+ goto err_pages;
+
obj_priv->pages[i] = page;
}
@@ -2291,6 +2325,15 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
i915_gem_object_do_bit_17_swizzle(obj);
return 0;
+
+err_pages:
+ while (i--)
+ page_cache_release(obj_priv->pages[i]);
+
+ drm_free_large(obj_priv->pages);
+ obj_priv->pages = NULL;
+ obj_priv->pages_refcount--;
+ return PTR_ERR(page);
}
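The old failure path called i915_gem_object_put_pages(), which walks the full page array and, with the NULL check removed earlier in this diff, would touch entries the loop never filled. The replacement unwinds explicitly: release only the i pages actually obtained, newest first, then free the array and undo the refcount. A hedged sketch of the general idiom (acquire()/release() are illustrative stand-ins, not driver functions):

/* Partial-allocation unwind: on failure at index i, entries
 * [0, i) are valid and are released in reverse order. */
static int grab_all(struct page **p, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		p[i] = acquire(i);
		if (IS_ERR(p[i])) {
			err = PTR_ERR(p[i]);
			goto err_release;
		}
	}
	return 0;

err_release:
	while (i--)
		release(p[i]);
	return err;
}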
static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -2298,7 +2341,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
uint64_t val;
@@ -2320,7 +2363,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
uint64_t val;
@@ -2340,7 +2383,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
int tile_width;
uint32_t fence_reg, val;
@@ -2363,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
pitch_val = obj_priv->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
+ if (obj_priv->tiling_mode == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(dev))
+ WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+ else
+ WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
val = obj_priv->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
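The added WARN_ONs verify that the log2-encoded pitch_val still fits the fence register's pitch field, picking between the two per-variant limits according to whether the part uses 128-byte-wide Y tiles; a pitch the field cannot encode would otherwise be truncated silently.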
@@ -2382,7 +2431,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
struct drm_gem_object *obj = reg->obj;
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int regnum = obj_priv->fence_reg;
uint32_t val;
uint32_t pitch_val;
@@ -2426,7 +2475,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
if (!reg->obj)
return i;
- obj_priv = reg->obj->driver_private;
+ obj_priv = to_intel_bo(reg->obj);
if (!obj_priv->pin_count)
avail++;
}
@@ -2481,7 +2530,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg = NULL;
int ret;
@@ -2548,7 +2597,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (IS_GEN6(dev)) {
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2584,7 +2633,7 @@ int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
return 0;
@@ -2622,7 +2671,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_mm_node *free_space;
gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
int ret;
@@ -2729,7 +2778,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
@@ -2830,7 +2879,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -2880,7 +2929,7 @@ int
i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -3093,7 +3142,7 @@ static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
uint32_t old_read_domains;
@@ -3178,7 +3227,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (!obj_priv->page_cpu_valid)
return;
@@ -3218,7 +3267,7 @@ static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset, uint64_t size)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int i, ret;
@@ -3287,7 +3336,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int i, ret;
void __iomem *reloc_page;
bool need_fence;
@@ -3338,7 +3387,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
i915_gem_object_unpin(obj);
return -EBADF;
}
- target_obj_priv = target_obj->driver_private;
+ target_obj_priv = to_intel_bo(target_obj);
#if WATCH_RELOC
DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3690,7 +3739,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
prepare_to_wait(&dev_priv->pending_flip_queue,
&wait, TASK_INTERRUPTIBLE);
for (i = 0; i < count; i++) {
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
if (atomic_read(&obj_priv->pending_flip) > 0)
break;
}
@@ -3799,7 +3848,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]);
@@ -3925,7 +3974,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
@@ -4000,7 +4049,7 @@ err:
for (i = 0; i < args->buffer_count; i++) {
if (object_list[i]) {
- obj_priv = object_list[i]->driver_private;
+ obj_priv = to_intel_bo(object_list[i]);
obj_priv->in_execbuffer = false;
}
drm_gem_object_unreference(object_list[i]);
@@ -4178,7 +4227,7 @@ int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4211,7 +4260,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
i915_verify_inactive(dev, __FILE__, __LINE__);
obj_priv->pin_count--;
@@ -4251,7 +4300,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return -EBADF;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4308,7 +4357,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
return -EBADF;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_filp != file_priv) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
@@ -4350,7 +4399,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
*/
i915_gem_retire_requests(dev);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
/* Don't count being on the flushing list against the object being
* done. Otherwise, a buffer left on the flushing list but not getting
* flushed (because nobody's flushing that domain) won't ever return
@@ -4396,7 +4445,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count) {
drm_gem_object_unreference(obj);
@@ -4457,7 +4506,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
void i915_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
trace_i915_gem_object_destroy(obj);
@@ -4547,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+static int
+i915_gem_init_pipe_control(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ obj = drm_gem_object_alloc(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj_priv = to_intel_bo(obj);
+ obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret)
+ goto err_unref;
+
+ dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->seqno_page = kmap(obj_priv->pages[0]);
+ if (dev_priv->seqno_page == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ dev_priv->seqno_obj = obj;
+ memset(dev_priv->seqno_page, 0, PAGE_SIZE);
+
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
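i915_gem_init_pipe_control() is standard GEM plumbing: allocate a 4 KiB object, mark it AGP_USER_CACHED_MEMORY so CPU reads stay cheap, pin it to obtain a stable GTT offset for the GPU's qword writes, then kmap() the backing page for the CPU side. seqno_gfx_addr and seqno_page are the two views of that one page: the GPU-visible address PIPE_CONTROL writes through, and the kernel mapping i915_get_gem_seqno() reads.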
static int
i915_gem_init_hws(struct drm_device *dev)
{
@@ -4564,15 +4656,16 @@ i915_gem_init_hws(struct drm_device *dev)
obj = drm_gem_object_alloc(dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
ret = i915_gem_object_pin(obj, 4096);
if (ret != 0) {
- drm_gem_object_unreference(obj);
- return ret;
+ goto err_unref;
}
dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4581,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev)
if (dev_priv->hw_status_page == NULL) {
DRM_ERROR("Failed to map status page.\n");
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unpin;
+ }
+
+ if (HAS_PIPE_CONTROL(dev)) {
+ ret = i915_gem_init_pipe_control(dev);
+ if (ret)
+ goto err_unpin;
}
+
dev_priv->hws_obj = obj;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
if (IS_GEN6(dev)) {
@@ -4597,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev)
DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(obj);
+err:
+ return ret;
+}
+
+static void
+i915_gem_cleanup_pipe_control(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = dev_priv->seqno_obj;
+ obj_priv = to_intel_bo(obj);
+ kunmap(obj_priv->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ dev_priv->seqno_obj = NULL;
+
+ dev_priv->seqno_page = NULL;
}
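Teardown mirrors the setup in reverse order: kunmap() the page, unpin, drop the object reference, and clear both cached pointers so later paths cannot dereference a freed object.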
static void
@@ -4610,7 +4733,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
return;
obj = dev_priv->hws_obj;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
kunmap(obj_priv->pages[0]);
i915_gem_object_unpin(obj);
@@ -4620,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
dev_priv->hw_status_page = NULL;
+ if (HAS_PIPE_CONTROL(dev))
+ i915_gem_cleanup_pipe_control(dev);
+
/* Write high address into HWS_PGA when disabling. */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
@@ -4644,7 +4770,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
i915_gem_cleanup_hws(dev);
return -ENOMEM;
}
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
ret = i915_gem_object_pin(obj, 4096);
if (ret != 0) {
@@ -4730,6 +4856,11 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
ring->space += ring->Size;
}
+ if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+ I915_WRITE(MI_MODE,
+ (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+ }
+
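The MI_MODE write uses the masked-bit idiom these ring registers expect: the high 16 bits are a write-enable mask, so only bits whose mask bit is set change, and (bit) << 16 | bit sets a bit without disturbing its neighbours. A sketch of the idiom with illustrative helper names (not from this kernel version):

/* Masked ring-register write idiom; helper names are illustrative.
 * High halfword = write-enable mask, low halfword = new values. */
#define MASKED_BIT_ENABLE(bit)	(((bit) << 16) | (bit))	/* set bit */
#define MASKED_BIT_DISABLE(bit)	((bit) << 16)		/* clear bit */

I915_WRITE(MI_MODE, MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));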
return 0;
}
@@ -4932,7 +5063,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
int ret;
int page_count;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (!obj_priv->phys_obj)
return;
@@ -4971,7 +5102,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;
- obj_priv = obj->driver_private;
+ obj_priv = to_intel_bo(obj);
if (obj_priv->phys_obj) {
if (obj_priv->phys_obj->id == id)
@@ -5022,7 +5153,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
void *obj_addr;
int ret;
char __user *user_data;