Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile               |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c         |  324
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c             |   66
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             |   56
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  164
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             |  528
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c       |   21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  205
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c         |  275
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c      |   23
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             |  197
-rw-r--r--  drivers/gpu/drm/i915/i915_mem.c             |  387
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h             |  132
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c           |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c           |    4
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c            |    5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        |  388
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c             |   23
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h            |    3
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c            |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c             |   16
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c           |   23
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c            |  210
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c           |   31
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c          |   34
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c        |   18
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c          |    6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     |  210
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     |   35
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c           |   43
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c         |    4
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c             |    2
32 files changed, 1920 insertions(+), 1518 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 808b255d7fc..ce7fc77678b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,7 +3,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_debugfs.o \
i915_suspend.o \
i915_gem.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index deaa657292b..fdb7ccefffb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -83,6 +83,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
B(supports_tv);
B(has_bsd_ring);
B(has_blt_ring);
+ B(has_llc);
#undef B
return 0;
@@ -563,45 +564,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static void i915_dump_object(struct seq_file *m,
- struct io_mapping *mapping,
- struct drm_i915_gem_object *obj)
-{
- int page, page_count, i;
-
- page_count = obj->base.size / PAGE_SIZE;
- for (page = 0; page < page_count; page++) {
- u32 *mem = io_mapping_map_wc(mapping,
- obj->gtt_offset + page * PAGE_SIZE);
- for (i = 0; i < PAGE_SIZE; i += 4)
- seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- io_mapping_unmap(mem);
- }
-}
-
-static int i915_batchbuffer_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
- i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
- }
- }
-
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -668,9 +630,9 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
static const char *ring_str(int ring)
{
switch (ring) {
- case RING_RENDER: return " render";
- case RING_BSD: return " bsd";
- case RING_BLT: return " blt";
+ case RCS: return "render";
+ case VCS: return "bsd";
+ case BCS: return "blt";
default: return "";
}
}
@@ -713,7 +675,7 @@ static void print_error_buffers(struct seq_file *m,
seq_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
+ seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
err->gtt_offset,
err->size,
err->read_domains,
@@ -723,6 +685,7 @@ static void print_error_buffers(struct seq_file *m,
tiling_flag(err->tiling),
dirty_flag(err->dirty),
purgeable_flag(err->purgeable),
+ err->ring != -1 ? " " : "",
ring_str(err->ring),
cache_level_str(err->cache_level));
@@ -736,6 +699,38 @@ static void print_error_buffers(struct seq_file *m,
}
}
+static void i915_ring_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ unsigned ring)
+{
+ seq_printf(m, "%s command stream:\n", ring_str(ring));
+ seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
+ seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
+ seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
+ }
+ if (INTEL_INFO(dev)->gen >= 4)
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+ seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+ seq_printf(m, " SYNC_0: 0x%08x\n",
+ error->semaphore_mboxes[ring][0]);
+ seq_printf(m, " SYNC_1: 0x%08x\n",
+ error->semaphore_mboxes[ring][1]);
+ }
+ seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+ seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+}
+
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -743,7 +738,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
- int i, page, offset, elt;
+ int i, j, page, offset, elt;
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (!dev_priv->first_error) {
@@ -758,35 +753,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, "ERROR: 0x%08x\n", error->error);
- seq_printf(m, "Blitter command stream:\n");
- seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
- seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
- seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
- seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
- seq_printf(m, "Video (BSD) command stream:\n");
- seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
- seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
- seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
- seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
- }
- seq_printf(m, "Render command stream:\n");
- seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
- seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
- seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
- seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
- if (INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
- seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
+ seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
- seq_printf(m, " seqno: 0x%08x\n", error->seqno);
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+ i915_ring_error_state(m, dev, error, RCS);
+ if (HAS_BLT(dev))
+ i915_ring_error_state(m, dev, error, BCS);
+ if (HAS_BSD(dev))
+ i915_ring_error_state(m, dev, error, VCS);
if (error->active_bo)
print_error_buffers(m, "Active",
@@ -798,10 +778,10 @@ static int i915_error_state(struct seq_file *m, void *unused)
error->pinned_bo,
error->pinned_bo_count);
- for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
- if (error->batchbuffer[i]) {
- struct drm_i915_error_object *obj = error->batchbuffer[i];
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ struct drm_i915_error_object *obj;
+ if ((obj = error->ring[i].batchbuffer)) {
seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
@@ -813,11 +793,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
}
- }
- for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
- if (error->ringbuffer[i]) {
- struct drm_i915_error_object *obj = error->ringbuffer[i];
+ if (error->ring[i].num_requests) {
+ seq_printf(m, "%s --- %d requests\n",
+ dev_priv->ring[i].name,
+ error->ring[i].num_requests);
+ for (j = 0; j < error->ring[i].num_requests; j++) {
+ seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+ error->ring[i].requests[j].seqno,
+ error->ring[i].requests[j].jiffies,
+ error->ring[i].requests[j].tail);
+ }
+ }
+
+ if ((obj = error->ring[i].ringbuffer)) {
seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
@@ -1414,9 +1403,108 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
return 0;
}
+static const char *swizzle_string(unsigned swizzle)
+{
+ switch (swizzle) {
+ case I915_BIT_6_SWIZZLE_NONE:
+ return "none";
+ case I915_BIT_6_SWIZZLE_9:
+ return "bit9";
+ case I915_BIT_6_SWIZZLE_9_10:
+ return "bit9/bit10";
+ case I915_BIT_6_SWIZZLE_9_11:
+ return "bit9/bit11";
+ case I915_BIT_6_SWIZZLE_9_10_11:
+ return "bit9/bit10/bit11";
+ case I915_BIT_6_SWIZZLE_9_17:
+ return "bit9/bit17";
+ case I915_BIT_6_SWIZZLE_9_10_17:
+ return "bit9/bit10/bit17";
+ case I915_BIT_6_SWIZZLE_UNKNOWN:
+ return "unkown";
+ }
+
+ return "bug";
+}
+
+static int i915_swizzle_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+ seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
+ swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+
+ if (IS_GEN3(dev) || IS_GEN4(dev)) {
+ seq_printf(m, "DDC = 0x%08x\n",
+ I915_READ(DCC));
+ seq_printf(m, "C0DRB3 = 0x%04x\n",
+ I915_READ16(C0DRB3));
+ seq_printf(m, "C1DRB3 = 0x%04x\n",
+ I915_READ16(C1DRB3));
+ } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C0));
+ seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C1));
+ seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
+ I915_READ(MAD_DIMM_C2));
+ seq_printf(m, "TILECTL = 0x%08x\n",
+ I915_READ(TILECTL));
+ seq_printf(m, "ARB_MODE = 0x%08x\n",
+ I915_READ(ARB_MODE));
+ seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
+ I915_READ(DISP_ARB_CTL));
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int i915_ppgtt_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int i, ret;
+
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ if (INTEL_INFO(dev)->gen == 6)
+ seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ring = &dev_priv->ring[i];
+
+ seq_printf(m, "%s\n", ring->name);
+ if (INTEL_INFO(dev)->gen == 7)
+ seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
+ seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
+ seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
+ seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+ }
+ if (dev_priv->mm.aliasing_ppgtt) {
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+ seq_printf(m, "aliasing PPGTT:\n");
+ seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+ }
+ seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static int
-i915_wedged_open(struct inode *inode,
- struct file *filp)
+i915_debugfs_common_open(struct inode *inode,
+ struct file *filp)
{
filp->private_data = inode->i_private;
return 0;
@@ -1472,20 +1560,12 @@ i915_wedged_write(struct file *filp,
static const struct file_operations i915_wedged_fops = {
.owner = THIS_MODULE,
- .open = i915_wedged_open,
+ .open = i915_debugfs_common_open,
.read = i915_wedged_read,
.write = i915_wedged_write,
.llseek = default_llseek,
};
-static int
-i915_max_freq_open(struct inode *inode,
- struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
@@ -1542,20 +1622,12 @@ i915_max_freq_write(struct file *filp,
static const struct file_operations i915_max_freq_fops = {
.owner = THIS_MODULE,
- .open = i915_max_freq_open,
+ .open = i915_debugfs_common_open,
.read = i915_max_freq_read,
.write = i915_max_freq_write,
.llseek = default_llseek,
};
-static int
-i915_cache_sharing_open(struct inode *inode,
- struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t
i915_cache_sharing_read(struct file *filp,
char __user *ubuf,
@@ -1621,7 +1693,7 @@ i915_cache_sharing_write(struct file *filp,
static const struct file_operations i915_cache_sharing_fops = {
.owner = THIS_MODULE,
- .open = i915_cache_sharing_open,
+ .open = i915_debugfs_common_open,
.read = i915_cache_sharing_read,
.write = i915_cache_sharing_write,
.llseek = default_llseek,
@@ -1653,21 +1725,6 @@ drm_add_fake_info_node(struct drm_minor *minor,
return 0;
}
-static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct dentry *ent;
-
- ent = debugfs_create_file("i915_wedged",
- S_IRUGO | S_IWUSR,
- root, dev,
- &i915_wedged_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
-
- return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
-}
-
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
@@ -1729,34 +1786,22 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
-static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct dentry *ent;
-
- ent = debugfs_create_file("i915_max_freq",
- S_IRUGO | S_IWUSR,
- root, dev,
- &i915_max_freq_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
-
- return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
-}
-
-static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
+static int i915_debugfs_create(struct dentry *root,
+ struct drm_minor *minor,
+ const char *name,
+ const struct file_operations *fops)
{
struct drm_device *dev = minor->dev;
struct dentry *ent;
- ent = debugfs_create_file("i915_cache_sharing",
+ ent = debugfs_create_file(name,
S_IRUGO | S_IWUSR,
root, dev,
- &i915_cache_sharing_fops);
+ fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
- return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
+ return drm_add_fake_info_node(minor, ent, fops);
}
static struct drm_info_list i915_debugfs_list[] = {
@@ -1782,7 +1827,6 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
- {"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
@@ -1798,6 +1842,8 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
+ {"i915_swizzle_info", i915_swizzle_info, 0},
+ {"i915_ppgtt_info", i915_ppgtt_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -1805,17 +1851,25 @@ int i915_debugfs_init(struct drm_minor *minor)
{
int ret;
- ret = i915_wedged_create(minor->debugfs_root, minor);
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_wedged",
+ &i915_wedged_fops);
if (ret)
return ret;
ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
- ret = i915_max_freq_create(minor->debugfs_root, minor);
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_max_freq",
+ &i915_max_freq_fops);
if (ret)
return ret;
- ret = i915_cache_sharing_create(minor->debugfs_root, minor);
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_cache_sharing",
+ &i915_cache_sharing_fops);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ddfe3d902b2..9341eb8ce93 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -784,6 +784,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_GEN7_SOL_RESET:
value = 1;
break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(dev);
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1193,22 +1196,39 @@ static int i915_load_gem_init(struct drm_device *dev)
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
- /* Let GEM Manage all of the aperture.
- *
- * However, leave one page at the end still bound to the scratch page.
- * There are a number of places where the hardware apparently
- * prefetches past the end of the object, and we've seen multiple
- * hangs with the GPU head pointer stuck in a batchbuffer bound
- * at the last page of the aperture. One page should be enough to
- * keep any prefetching inside of the aperture.
- */
- i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
-
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_init_ringbuffer(dev);
+ if (i915_enable_ppgtt && HAS_ALIASING_PPGTT(dev)) {
+ /* PPGTT pdes are stolen from global gtt ptes, so shrink the
+ * aperture accordingly when using aliasing ppgtt. */
+ gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ /* For paranoia keep the guard page in between. */
+ gtt_size -= PAGE_SIZE;
+
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size);
+
+ ret = i915_gem_init_aliasing_ppgtt(dev);
+ if (ret)
+ return ret;
+ } else {
+ /* Let GEM Manage all of the aperture.
+ *
+ * However, leave one page at the end still bound to the scratch
+ * page. There are a number of places where the hardware
+ * apparently prefetches past the end of the object, and we've
+ * seen multiple hangs with the GPU head pointer stuck in a
+ * batchbuffer bound at the last page of the aperture. One page
+ * should be enough to keep any prefetching inside of the
+ * aperture.
+ */
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
+ }
+
+ ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
- if (ret)
+ if (ret) {
+ i915_gem_cleanup_aliasing_ppgtt(dev);
return ret;
+ }
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
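
Annotation: the aperture shrink above works out to just over 2 MiB. A standalone sketch of the arithmetic, using the I915_PPGTT_PD_ENTRIES value (512) introduced later in this patch (PAGE_SZ stands in for the kernel's PAGE_SIZE):

    /* 512 PDEs stolen from the global GTT PTE range, one page each,
     * plus the guard page kept for paranoia above. */
    #define I915_PPGTT_PD_ENTRIES 512
    #define PAGE_SZ 4096UL

    static unsigned long ppgtt_gtt_cost(void)
    {
            return I915_PPGTT_PD_ENTRIES * PAGE_SZ + PAGE_SZ; /* 2 MiB + 4 KiB */
    }
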
@@ -1295,6 +1315,7 @@ cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_vga_switcheroo:
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
@@ -1930,6 +1951,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ pci_set_master(dev->pdev);
+
/* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
@@ -2129,7 +2152,7 @@ int i915_driver_unload(struct drm_device *dev)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev);
+ ret = i915_gpu_idle(dev, true);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
mutex_unlock(&dev->struct_mutex);
@@ -2182,6 +2205,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ i915_gem_cleanup_aliasing_ppgtt(dev);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
@@ -2247,18 +2271,12 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_gem_lastclose(dev);
- if (dev_priv->agp_heap)
- i915_mem_takedown(&(dev_priv->agp_heap));
-
i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
i915_gem_release(dev, file_priv);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -2277,11 +2295,11 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 308f8191356..1a7559b5999 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -103,6 +103,11 @@ MODULE_PARM_DESC(enable_hangcheck,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
+bool i915_enable_ppgtt __read_mostly = 1;
+module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, bool, 0600);
+MODULE_PARM_DESC(i915_enable_ppgtt,
+ "Enable PPGTT (default: true)");
+
static struct drm_driver driver;
extern int intel_agp_enabled;
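
Annotation: with the parameter name declared above via module_param_named, aliasing PPGTT can be disabled at boot with i915.i915_enable_ppgtt=0 on the kernel command line, or with modprobe i915 i915_enable_ppgtt=0 at module load.
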
@@ -198,7 +203,7 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
- .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
};
@@ -214,6 +219,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
+ .has_llc = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -222,6 +228,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
+ .has_llc = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@@ -229,6 +236,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
+ .has_llc = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -237,6 +245,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
.has_bsd_ring = 1,
.has_blt_ring = 1,
+ .has_llc = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -376,16 +385,27 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
+static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+ gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+ if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+ "MMIO read or write has been dropped %x\n", gtfifodbg))
+ I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
- POSTING_READ(FORCEWAKE);
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
}
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
- POSTING_READ(FORCEWAKE_MT);
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
}
/*
@@ -401,8 +421,10 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
-void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
+ int ret = 0;
+
if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
@@ -410,10 +432,13 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
udelay(10);
fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
}
- WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
+ if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+ ++ret;
dev_priv->gt_fifo_count = fifo;
}
dev_priv->gt_fifo_count--;
+
+ return ret;
}
static int i915_drm_freeze(struct drm_device *dev)
@@ -442,6 +467,10 @@ static int i915_drm_freeze(struct drm_device *dev)
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
+ console_lock();
+ intel_fbdev_set_suspend(dev, 1);
+ console_unlock();
+
return 0;
}
@@ -494,7 +523,7 @@ static int i915_drm_thaw(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
- error = i915_gem_init_ringbuffer(dev);
+ error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (HAS_PCH_SPLIT(dev))
@@ -514,6 +543,9 @@ static int i915_drm_thaw(struct drm_device *dev)
dev_priv->modeset_on_lid = 0;
+ console_lock();
+ intel_fbdev_set_suspend(dev, 0);
+ console_unlock();
return error;
}
@@ -633,7 +665,7 @@ static int gen6_do_reset(struct drm_device *dev, u8 flags)
}
/**
- * i965_reset - reset chip after a hang
+ * i915_reset - reset chip after a hang
* @dev: drm device to reset
* @flags: reset domains
*
@@ -709,12 +741,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
!dev_priv->mm.suspended) {
dev_priv->mm.suspended = 0;
+ i915_gem_init_swizzling(dev);
+
dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
if (HAS_BSD(dev))
dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
if (HAS_BLT(dev))
dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+ i915_gem_init_ppgtt(dev);
+
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_mode_config_reset(dev);
@@ -977,11 +1013,15 @@ __i915_read(64, q)
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- __gen6_gt_wait_for_fifo(dev_priv); \
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
write##y(val, dev_priv->regs + reg); \
+ if (unlikely(__fifo_ret)) { \
+ gen6_gt_check_fifodbg(dev_priv); \
+ } \
}
__i915_write(8, b)
__i915_write(16, w)
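
Annotation: after macro expansion, the 32-bit variant above behaves roughly like the sketch below (tracing omitted; NEEDS_FORCE_WAKE and the helpers are the ones defined in this patch):

    static void i915_write32_sketch(struct drm_i915_private *dev_priv,
                                    u32 reg, u32 val)
    {
            u32 fifo_ret = 0;

            if (NEEDS_FORCE_WAKE(dev_priv, reg))
                    fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);
            writel(val, dev_priv->regs + reg);
            if (unlikely(fifo_ret))
                    gen6_gt_check_fifodbg(dev_priv); /* report dropped MMIO */
    }
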
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9689ca38b2b..c0f19f57200 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -35,6 +35,7 @@
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
@@ -135,6 +136,7 @@ struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
uint32_t setup_seqno;
+ int pin_count;
};
struct sdvo_device_mapping {
@@ -152,33 +154,40 @@ struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
u32 pipestat[I915_MAX_PIPES];
- u32 ipeir;
- u32 ipehr;
- u32 instdone;
- u32 acthd;
+ u32 tail[I915_NUM_RINGS];
+ u32 head[I915_NUM_RINGS];
+ u32 ipeir[I915_NUM_RINGS];
+ u32 ipehr[I915_NUM_RINGS];
+ u32 instdone[I915_NUM_RINGS];
+ u32 acthd[I915_NUM_RINGS];
+ u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ /* our own tracking of ring head and tail */
+ u32 cpu_ring_head[I915_NUM_RINGS];
+ u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
- u32 bcs_acthd; /* gen6+ blt engine */
- u32 bcs_ipehr;
- u32 bcs_ipeir;
- u32 bcs_instdone;
- u32 bcs_seqno;
- u32 vcs_acthd; /* gen6+ bsd engine */
- u32 vcs_ipehr;
- u32 vcs_ipeir;
- u32 vcs_instdone;
- u32 vcs_seqno;
- u32 instpm;
- u32 instps;
+ u32 instpm[I915_NUM_RINGS];
+ u32 instps[I915_NUM_RINGS];
u32 instdone1;
- u32 seqno;
+ u32 seqno[I915_NUM_RINGS];
u64 bbaddr;
+ u32 fault_reg[I915_NUM_RINGS];
+ u32 done_reg;
+ u32 faddr[I915_NUM_RINGS];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
- struct drm_i915_error_object {
- int page_count;
- u32 gtt_offset;
- u32 *pages[0];
- } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
+ struct drm_i915_error_ring {
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer;
+ struct drm_i915_error_request {
+ long jiffies;
+ u32 seqno;
+ u32 tail;
+ } *requests;
+ int num_requests;
+ } ring[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
u32 name;
@@ -191,7 +200,7 @@ struct drm_i915_error_state {
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
- u32 ring:4;
+ s32 ring:4;
u32 cache_level:2;
} *active_bo, *pinned_bo;
u32 active_bo_count, pinned_bo_count;
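
Annotation: the ring field above turns signed because a 4-bit s32 bitfield can hold -1 for "not on any ring", which the debugfs printer earlier in this patch tests with err->ring != -1. A standalone illustration (names hypothetical):

    struct demo { signed int ring:4; };     /* value range -8..7 */

    static int demo_ring_unset(void)
    {
            struct demo d = { .ring = -1 };
            return d.ring == -1; /* true; an unsigned :4 field reads back 15 */
    }
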
@@ -255,6 +264,17 @@ struct intel_device_info {
u8 supports_tv:1;
u8 has_bsd_ring:1;
u8 has_blt_ring:1;
+ u8 has_llc:1;
+};
+
+#define I915_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES 1024
+struct i915_hw_ppgtt {
+ unsigned num_pd_entries;
+ struct page **pt_pages;
+ uint32_t pd_offset;
+ dma_addr_t *pt_dma_addr;
+ dma_addr_t scratch_page_dma_addr;
};
enum no_fbc_reason {
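
Annotation: the constants above imply the aliasing PPGTT covers the full 2 GiB GTT range: 512 page-directory entries, each pointing at a page table of 1024 entries, each mapping a 4 KiB page. As a sketch:

    static unsigned long long ppgtt_span(void)
    {
            return 512ULL * 1024 * 4096; /* 2^31 bytes = 2 GiB */
    }
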
@@ -279,6 +299,16 @@ enum intel_pch {
struct intel_fbdev;
struct intel_fbc_work;
+struct intel_gmbus {
+ struct i2c_adapter adapter;
+ bool force_bit;
+ bool has_gpio;
+ u32 reg0;
+ u32 gpio_reg;
+ struct i2c_algo_bit_data bit_algo;
+ struct drm_i915_private *dev_priv;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -296,11 +326,11 @@ typedef struct drm_i915_private {
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
- struct intel_gmbus {
- struct i2c_adapter adapter;
- struct i2c_adapter *force_bit;
- u32 reg0;
- } *gmbus;
+ struct intel_gmbus *gmbus;
+
+ /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+ * controller on different i2c buses. */
+ struct mutex gmbus_mutex;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
@@ -335,7 +365,6 @@ typedef struct drm_i915_private {
int tex_lru_log_granularity;
int allow_batchbuffer;
- struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
@@ -584,6 +613,9 @@ typedef struct drm_i915_private {
struct io_mapping *gtt_mapping;
int gtt_mtrr;
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_hw_ppgtt *aliasing_ppgtt;
+
struct shrinker inactive_shrinker;
/**
@@ -749,6 +781,13 @@ typedef struct drm_i915_private {
struct drm_property *force_audio_property;
} drm_i915_private_t;
+enum hdmi_force_audio {
+ HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
+ HDMI_AUDIO_OFF, /* force turn off HDMI audio */
+ HDMI_AUDIO_AUTO, /* trust EDID */
+ HDMI_AUDIO_ON, /* force turn on HDMI audio */
+};
+
enum i915_cache_level {
I915_CACHE_NONE,
I915_CACHE_LLC,
@@ -841,6 +880,8 @@ struct drm_i915_gem_object {
unsigned int cache_level:2;
+ unsigned int has_aliasing_ppgtt_mapping:1;
+
struct page **pages;
/**
@@ -918,6 +959,9 @@ struct drm_i915_gem_request {
/** GEM sequence number associated with this request. */
uint32_t seqno;
+ /** Position in the ringbuffer of the end of the request */
+ u32 tail;
+
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
@@ -974,8 +1018,11 @@ struct drm_i915_file_private {
#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
+
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1018,6 +1065,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
+extern bool i915_enable_ppgtt __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1079,18 +1127,6 @@ extern void i915_destroy_error_state(struct drm_device *dev);
#endif
-/* i915_mem.c */
-extern int i915_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_mem_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_mem_init_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern void i915_mem_takedown(struct mem_block **heap);
-extern void i915_mem_release(struct drm_device * dev,
- struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1170,37 +1206,55 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-static inline u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
- return ring->outstanding_lazy_request = dev_priv->next_seqno;
-}
+u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+static inline void
+i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count++;
+ }
+}
+
+static inline void
+i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
+{
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ dev_priv->fence_regs[obj->fence_reg].pin_count--;
+ }
+}
+
void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
+int __must_check i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_init_swizzling(struct drm_device *dev);
+void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno);
+ uint32_t seqno,
+ bool do_retire);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
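
Annotation: a hypothetical caller pattern for the new fence pin helpers above (the access body is elided): pinning keeps i915_find_fence_reg(), which now checks reg->pin_count, from stealing the register while it is in use.

    static int use_fenced_object(struct drm_i915_gem_object *obj)
    {
            int ret = i915_gem_object_get_fence(obj, NULL);
            if (ret)
                    return ret;

            i915_gem_object_pin_fence(obj);
            /* ... access obj through its fenced GTT mapping ... */
            i915_gem_object_unpin_fence(obj);
            return 0;
    }
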
@@ -1227,6 +1281,14 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
/* i915_gem_gtt.c */
+int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj);
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
@@ -1365,7 +1427,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e55badb2d86..1f441f5c240 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -58,6 +58,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
+static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -258,73 +259,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
obj->tiling_mode != I915_TILING_NONE;
}
-static inline void
-slow_shmem_copy(struct page *dst_page,
- int dst_offset,
- struct page *src_page,
- int src_offset,
- int length)
-{
- char *dst_vaddr, *src_vaddr;
-
- dst_vaddr = kmap(dst_page);
- src_vaddr = kmap(src_page);
-
- memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
-
- kunmap(src_page);
- kunmap(dst_page);
-}
-
-static inline void
-slow_shmem_bit17_copy(struct page *gpu_page,
- int gpu_offset,
- struct page *cpu_page,
- int cpu_offset,
- int length,
- int is_read)
-{
- char *gpu_vaddr, *cpu_vaddr;
-
- /* Use the unswizzled path if this page isn't affected. */
- if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
- if (is_read)
- return slow_shmem_copy(cpu_page, cpu_offset,
- gpu_page, gpu_offset, length);
- else
- return slow_shmem_copy(gpu_page, gpu_offset,
- cpu_page, cpu_offset, length);
- }
-
- gpu_vaddr = kmap(gpu_page);
- cpu_vaddr = kmap(cpu_page);
-
- /* Copy the data, XORing A6 with A17 (1). The user already knows he's
- * XORing with the other bits (A9 for Y, A9 and A10 for X)
- */
- while (length > 0) {
- int cacheline_end = ALIGN(gpu_offset + 1, 64);
- int this_length = min(cacheline_end - gpu_offset, length);
- int swizzled_gpu_offset = gpu_offset ^ 64;
-
- if (is_read) {
- memcpy(cpu_vaddr + cpu_offset,
- gpu_vaddr + swizzled_gpu_offset,
- this_length);
- } else {
- memcpy(gpu_vaddr + swizzled_gpu_offset,
- cpu_vaddr + cpu_offset,
- this_length);
- }
- cpu_offset += this_length;
- gpu_offset += this_length;
- length -= this_length;
- }
-
- kunmap(cpu_page);
- kunmap(gpu_page);
-}
-
/**
* This is the fast shmem pread path, which attempts to copy_from_user directly
* from the backing pages of the object to the user's address space. On a
@@ -385,6 +319,58 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
return 0;
}
+static inline int
+__copy_to_user_swizzled(char __user *cpu_vaddr,
+ const char *gpu_vaddr, int gpu_offset,
+ int length)
+{
+ int ret, cpu_offset = 0;
+
+ while (length > 0) {
+ int cacheline_end = ALIGN(gpu_offset + 1, 64);
+ int this_length = min(cacheline_end - gpu_offset, length);
+ int swizzled_gpu_offset = gpu_offset ^ 64;
+
+ ret = __copy_to_user(cpu_vaddr + cpu_offset,
+ gpu_vaddr + swizzled_gpu_offset,
+ this_length);
+ if (ret)
+ return ret + length;
+
+ cpu_offset += this_length;
+ gpu_offset += this_length;
+ length -= this_length;
+ }
+
+ return 0;
+}
+
+static inline int
+__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
+ const char *cpu_vaddr,
+ int length)
+{
+ int ret, cpu_offset = 0;
+
+ while (length > 0) {
+ int cacheline_end = ALIGN(gpu_offset + 1, 64);
+ int this_length = min(cacheline_end - gpu_offset, length);
+ int swizzled_gpu_offset = gpu_offset ^ 64;
+
+ ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
+ cpu_vaddr + cpu_offset,
+ this_length);
+ if (ret)
+ return ret + length;
+
+ cpu_offset += this_length;
+ gpu_offset += this_length;
+ length -= this_length;
+ }
+
+ return 0;
+}
+
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -398,72 +384,34 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- struct mm_struct *mm = current->mm;
- struct page **user_pages;
+ char __user *user_data;
ssize_t remain;
- loff_t offset, pinned_pages, i;
- loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_offset;
- int data_page_index, data_page_offset;
- int page_length;
- int ret;
- uint64_t data_ptr = args->data_ptr;
- int do_bit17_swizzling;
+ loff_t offset;
+ int shmem_page_offset, page_length, ret;
+ int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, yet we want to hold it while
- * dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
+ obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
+ offset = args->offset;
mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 1, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = i915_gem_object_set_cpu_read_domain_range(obj,
- args->offset,
- args->size);
- if (ret)
- goto out;
-
- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
- offset = args->offset;
while (remain > 0) {
struct page *page;
+ char *vaddr;
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
shmem_page_offset = offset_in_page(offset);
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = offset_in_page(data_ptr);
-
page_length = remain;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
@@ -471,36 +419,38 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
goto out;
}
- if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(page,
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 1);
- } else {
- slow_shmem_copy(user_pages[data_page_index],
- data_page_offset,
- page,
- shmem_page_offset,
- page_length);
- }
+ page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+ (page_to_phys(page) & (1 << 17)) != 0;
+
+ vaddr = kmap(page);
+ if (page_do_bit17_swizzling)
+ ret = __copy_to_user_swizzled(user_data,
+ vaddr, shmem_page_offset,
+ page_length);
+ else
+ ret = __copy_to_user(user_data,
+ vaddr + shmem_page_offset,
+ page_length);
+ kunmap(page);
mark_page_accessed(page);
page_cache_release(page);
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
remain -= page_length;
- data_ptr += page_length;
+ user_data += page_length;
offset += page_length;
}
out:
- for (i = 0; i < pinned_pages; i++) {
- SetPageDirty(user_pages[i]);
- mark_page_accessed(user_pages[i]);
- page_cache_release(user_pages[i]);
- }
- drm_free_large(user_pages);
+ mutex_lock(&dev->struct_mutex);
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
return ret;
}
@@ -841,71 +791,36 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- struct mm_struct *mm = current->mm;
- struct page **user_pages;
ssize_t remain;
- loff_t offset, pinned_pages, i;
- loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_offset;
- int data_page_index, data_page_offset;
- int page_length;
- int ret;
- uint64_t data_ptr = args->data_ptr;
- int do_bit17_swizzling;
+ loff_t offset;
+ char __user *user_data;
+ int shmem_page_offset, page_length, ret;
+ int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- /* Pin the user pages containing the data. We can't fault while
- * holding the struct mutex, and all of the pwrite implementations
- * want to hold it while dereferencing the user data.
- */
- first_data_page = data_ptr / PAGE_SIZE;
- last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
- num_pages = last_data_page - first_data_page + 1;
-
- user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (user_pages == NULL)
- return -ENOMEM;
-
- mutex_unlock(&dev->struct_mutex);
- down_read(&mm->mmap_sem);
- pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
- up_read(&mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- if (pinned_pages < num_pages) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- goto out;
-
- do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
offset = args->offset;
obj->dirty = 1;
+ mutex_unlock(&dev->struct_mutex);
+
while (remain > 0) {
struct page *page;
+ char *vaddr;
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
- * data_page_index = page number in get_user_pages return
- * data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
shmem_page_offset = offset_in_page(offset);
- data_page_index = data_ptr / PAGE_SIZE - first_data_page;
- data_page_offset = offset_in_page(data_ptr);
page_length = remain;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
- if ((data_page_offset + page_length) > PAGE_SIZE)
- page_length = PAGE_SIZE - data_page_offset;
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
@@ -913,34 +828,45 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
goto out;
}
- if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(page,
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length,
- 0);
- } else {
- slow_shmem_copy(page,
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
- }
+ page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+ (page_to_phys(page) & (1 << 17)) != 0;
+
+ vaddr = kmap(page);
+ if (page_do_bit17_swizzling)
+ ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
+ user_data,
+ page_length);
+ else
+ ret = __copy_from_user(vaddr + shmem_page_offset,
+ user_data,
+ page_length);
+ kunmap(page);
set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
remain -= page_length;
- data_ptr += page_length;
+ user_data += page_length;
offset += page_length;
}
out:
- for (i = 0; i < pinned_pages; i++)
- page_cache_release(user_pages[i]);
- drm_free_large(user_pages);
+ mutex_lock(&dev->struct_mutex);
+ /* Fixup: Kill any reinstated backing storage pages */
+ if (obj->madv == __I915_MADV_PURGED)
+ i915_gem_object_truncate(obj);
+ /* and flush dirty cachelines in case the object isn't in the cpu write
+ * domain anymore. */
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ intel_gtt_chipset_flush();
+ }
return ret;
}
@@ -996,10 +922,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj->phys_obj)
+ if (obj->phys_obj) {
ret = i915_gem_phys_pwrite(dev, obj, args, file);
- else if (obj->gtt_space &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ goto out;
+ }
+
+ if (obj->gtt_space &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_object_pin(obj, 0, true);
if (ret)
goto out;
@@ -1018,18 +947,24 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
out_unpin:
i915_gem_object_unpin(obj);
- } else {
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- goto out;
- ret = -EFAULT;
- if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
- if (ret == -EFAULT)
- ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+ if (ret != -EFAULT)
+ goto out;
+ /* Fall through to the shmfs paths because the gtt paths might
+ * fail with non-page-backed user pointers (e.g. gtt mappings
+ * when moving data between textures). */
}
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ goto out;
+
+ ret = -EFAULT;
+ if (!i915_gem_object_needs_bit17_swizzle(obj))
+ ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+ if (ret == -EFAULT)
+ ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+
out:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -1141,7 +1076,6 @@ int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
unsigned long addr;
@@ -1153,11 +1087,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
- if (obj->size > dev_priv->mm.gtt_mappable_end) {
- drm_gem_object_unreference_unlocked(obj);
- return -E2BIG;
- }
-
down_write(&current->mm->mmap_sem);
addr = do_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1647,6 +1576,28 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
}
}
+static u32
+i915_gem_get_seqno(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 seqno = dev_priv->next_seqno;
+
+ /* reserve 0 for non-seqno */
+ if (++dev_priv->next_seqno == 0)
+ dev_priv->next_seqno = 1;
+
+ return seqno;
+}
+
+u32
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+{
+ if (ring->outstanding_lazy_request == 0)
+ ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+
+ return ring->outstanding_lazy_request;
+}
+
int
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
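
Annotation: reserving seqno 0 above lets ring->outstanding_lazy_request double as an "already allocated?" flag. Ordering across u32 wraparound is still handled by the signed comparison in i915_seqno_passed(), restated here as a standalone sketch:

    #include <stdint.h>

    static inline int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) >= 0; /* robust across wrap */
    }
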
@@ -1654,10 +1605,19 @@ i915_add_request(struct intel_ring_buffer *ring,
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
+ u32 request_ring_position;
int was_empty;
int ret;
BUG_ON(request == NULL);
+ seqno = i915_gem_next_request_seqno(ring);
+
+ /* Record the position of the start of the request so that
+ * should we detect the updated seqno part-way through the
+ * GPU processing the request, we never over-estimate the
+ * position of the head.
+ */
+ request_ring_position = intel_ring_get_tail(ring);
ret = ring->add_request(ring, &seqno);
if (ret)
@@ -1667,6 +1627,7 @@ i915_add_request(struct intel_ring_buffer *ring,
request->seqno = seqno;
request->ring = ring;
+ request->tail = request_ring_position;
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
@@ -1681,7 +1642,7 @@ i915_add_request(struct intel_ring_buffer *ring,
spin_unlock(&file_priv->mm.lock);
}
- ring->outstanding_lazy_request = false;
+ ring->outstanding_lazy_request = 0;
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
@@ -1803,7 +1764,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
*/
-static void
+void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
@@ -1831,6 +1792,12 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
break;
trace_i915_gem_request_retire(ring, request->seqno);
+ /* We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of tail of the request to update the last known position
+ * of the GPU head.
+ */
+ ring->last_retired_head = request->tail;
list_del(&request->list);
i915_gem_request_remove_from_client(request);
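
Annotation: the retired request's tail recorded above is a position the GPU head must already have passed, so it gives a conservative lower bound for free-space accounting. A sketch mirroring the ring-space formula used elsewhere in the driver (the 8-byte slack for command padding is assumed):

    #include <stdint.h>

    static int ring_space(uint32_t head, uint32_t tail, uint32_t size)
    {
            int space = head - (tail + 8);
            if (space < 0)
                    space += size;
            return space;
    }
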
@@ -1943,7 +1910,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
*/
int
i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno)
+ uint32_t seqno,
+ bool do_retire)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
@@ -2017,17 +1985,12 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
- if (ret && ret != -ERESTARTSYS)
- DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
- __func__, ret, seqno, ring->get_seqno(ring),
- dev_priv->next_seqno);
-
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
* buffer to have made it to the inactive list, and we would need
* a separate wait queue to handle that.
*/
- if (ret == 0)
+ if (ret == 0 && do_retire)
i915_gem_retire_requests_ring(ring);
return ret;
@@ -2051,7 +2014,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
+ true);
if (ret)
return ret;
}
@@ -2089,6 +2053,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret = 0;
if (obj->gtt_space == NULL)
@@ -2133,6 +2098,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
trace_i915_gem_object_unbind(obj);
i915_gem_gtt_unbind_object(obj);
+ if (obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
+ obj->has_aliasing_ppgtt_mapping = 0;
+ }
+
i915_gem_object_put_pages_gtt(obj);
list_del_init(&obj->gtt_list);
@@ -2172,7 +2142,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
{
int ret;
@@ -2186,18 +2156,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
return ret;
}
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
+ do_retire);
}
-int
-i915_gpu_idle(struct drm_device *dev)
+int i915_gpu_idle(struct drm_device *dev, bool do_retire)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(&dev_priv->ring[i]);
+ ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
if (ret)
return ret;
}
@@ -2400,7 +2370,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
- obj->last_fenced_seqno);
+ obj->last_fenced_seqno,
+ true);
if (ret)
return ret;
}
@@ -2432,6 +2403,8 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
i915_gem_clear_fence_reg(obj->base.dev,
&dev_priv->fence_regs[obj->fence_reg]);
@@ -2456,7 +2429,7 @@ i915_find_fence_reg(struct drm_device *dev,
if (!reg->obj)
return reg;
- if (!reg->obj->pin_count)
+ if (!reg->pin_count)
avail = reg;
}
@@ -2466,7 +2439,7 @@ i915_find_fence_reg(struct drm_device *dev,
/* None available, try to steal one or wait for a user to finish */
avail = first = NULL;
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
- if (reg->obj->pin_count)
+ if (reg->pin_count)
continue;
if (first == NULL)
@@ -2541,7 +2514,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (!ring_passed_seqno(obj->last_fenced_ring,
reg->setup_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
- reg->setup_seqno);
+ reg->setup_seqno,
+ true);
if (ret)
return ret;
}
@@ -2560,7 +2534,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
reg = i915_find_fence_reg(dev, pipelined);
if (reg == NULL)
- return -ENOSPC;
+ return -EDEADLK;
ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
@@ -2660,6 +2634,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
list_del_init(&reg->lru_list);
reg->obj = NULL;
reg->setup_seqno = 0;
+ reg->pin_count = 0;
}
/**
@@ -2946,6 +2921,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
+ struct drm_device *dev = obj->base.dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
if (obj->cache_level == cache_level)
@@ -2974,6 +2951,9 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
i915_gem_gtt_rebind_object(obj, cache_level);
+ if (obj->has_aliasing_ppgtt_mapping)
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, cache_level);
}
if (cache_level == I915_CACHE_NONE) {
@@ -3084,10 +3064,13 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
return ret;
}
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+
/* Ensure that we invalidate the GPU's caches and TLBs. */
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-
- return i915_gem_object_wait_rendering(obj);
+ return 0;
}
/**
@@ -3619,8 +3602,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
- /* On Gen6, we can have the GPU use the LLC (the CPU
+ if (HAS_LLC(dev)) {
+ /* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
* display scanout are coherent with the CPU in
@@ -3710,7 +3693,7 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
- ret = i915_gpu_idle(dev);
+ ret = i915_gpu_idle(dev, true);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -3745,12 +3728,71 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
+void i915_gem_init_swizzling(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen < 5 ||
+ dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ return;
+
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_TILE_SURFACE_SWIZZLING);
+
+ if (IS_GEN5(dev))
+ return;
+
+ I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
+ if (IS_GEN6(dev))
+ I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ else
+ I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
+}
+
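TILECTL takes a plain read-modify-write above, while ARB_MODE is a masked register; assuming the usual i915 masked-write convention, the enable macro expands roughly as below (a sketch, not a quote of i915_reg.h):

/* ARB_MODE_ENABLE(bit) == ((bit) << 16) | (bit): the high half selects
 * which bits the write updates, the low half carries the new value, so
 * the other ARB_MODE bits stay untouched without reading the register. */
I915_WRITE(ARB_MODE, (ARB_MODE_SWIZZLE_SNB << 16) | ARB_MODE_SWIZZLE_SNB);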
+void i915_gem_init_ppgtt(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t pd_offset;
+ struct intel_ring_buffer *ring;
+ int i;
+
+ if (!dev_priv->mm.aliasing_ppgtt)
+ return;
+
+ pd_offset = dev_priv->mm.aliasing_ppgtt->pd_offset;
+ pd_offset /= 64; /* in cachelines */
+ pd_offset <<= 16;
+
+ if (INTEL_INFO(dev)->gen == 6) {
+ uint32_t ecochk = I915_READ(GAM_ECOCHK);
+ I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+ ECOCHK_PPGTT_CACHE64B);
+ I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+ /* GFX_MODE is per-ring on gen7+ */
+ }
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ring = &dev_priv->ring[i];
+
+ if (INTEL_INFO(dev)->gen >= 7)
+ I915_WRITE(RING_MODE_GEN7(ring),
+ GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
+
+ I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
+ }
+}
+
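A worked example of the PP_DIR_BASE encoding above, assuming I915_PPGTT_PD_ENTRIES is 512 as the allocation in i915_gem_gtt.c below suggests:

/* pd_offset = (512*1024 - 512) entries * 4 bytes = 0x1ff800
 *   0x1ff800 / 64 = 0x7fe0        (cacheline index)
 *   0x7fe0 << 16  = 0x7fe00000    (value for RING_PP_DIR_BASE)
 * 512 PDEs * 1024 PTEs * 4 KiB pages is also the 2 GiB space that
 * PP_DIR_DCLV_2G advertises to the ring. */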
int
-i915_gem_init_ringbuffer(struct drm_device *dev)
+i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
+ i915_gem_init_swizzling(dev);
+
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@@ -3769,6 +3811,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
dev_priv->next_seqno = 1;
+ i915_gem_init_ppgtt(dev);
+
return 0;
cleanup_bsd_ring:
@@ -3806,7 +3850,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
- ret = i915_gem_init_ringbuffer(dev);
+ ret = i915_gem_init_hw(dev);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4201,7 +4245,7 @@ rescan:
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- if (i915_gpu_idle(dev) == 0)
+ if (i915_gpu_idle(dev, true) == 0)
goto rescan;
}
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ead5d00f91b..21a82710f4b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -36,7 +36,6 @@ static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
list_add(&obj->exec_list, unwind);
- drm_gem_object_reference(&obj->base);
return drm_mm_scan_add_block(obj->gtt_space);
}
@@ -49,21 +48,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
struct drm_i915_gem_object *obj;
int ret = 0;
- i915_gem_retire_requests(dev);
-
- /* Re-check for free space after retiring requests */
- if (mappable) {
- if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
- min_size, alignment, 0,
- dev_priv->mm.gtt_mappable_end,
- 0))
- return 0;
- } else {
- if (drm_mm_search_free(&dev_priv->mm.gtt_space,
- min_size, alignment, 0))
- return 0;
- }
-
trace_i915_gem_evict(dev, min_size, alignment, mappable);
/*
@@ -139,7 +123,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
BUG_ON(ret);
list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -158,10 +141,10 @@ found:
exec_list);
if (drm_mm_scan_remove_block(obj->gtt_space)) {
list_move(&obj->exec_list, &eviction_list);
+ drm_gem_object_reference(&obj->base);
continue;
}
list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
}
/* Unbinding will emit any required flushes */
@@ -195,7 +178,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
trace_i915_gem_evict_everything(dev, purgeable_only);
/* Flush everything (on to the inactive lists) and evict */
- ret = i915_gpu_idle(dev);
+ ret = i915_gpu_idle(dev, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 65e1f0043f9..81687af0089 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -203,9 +203,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= obj->ring->id;
+ cd->flush_rings |= intel_ring_flag(obj->ring);
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- cd->flush_rings |= ring->id;
+ cd->flush_rings |= intel_ring_flag(ring);
}
struct eb_objects {
@@ -287,14 +287,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* exec_object list, so it should have a GTT space bound by now.
*/
if (unlikely(target_offset == 0)) {
- DRM_ERROR("No GTT space found for object %d\n",
+ DRM_DEBUG("No GTT space found for object %d\n",
reloc->target_handle);
return ret;
}
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
- DRM_ERROR("reloc with multiple write domains: "
+ DRM_DEBUG("reloc with multiple write domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
@@ -303,8 +303,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
reloc->write_domain);
return ret;
}
- if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
- DRM_ERROR("reloc with read/write CPU domains: "
+ if (unlikely((reloc->write_domain | reloc->read_domains)
+ & ~I915_GEM_GPU_DOMAINS)) {
+ DRM_DEBUG("reloc with read/write non-GPU domains: "
"obj %p target %d offset %d "
"read %08x write %08x",
obj, reloc->target_handle,
@@ -315,7 +316,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
}
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain)) {
- DRM_ERROR("Write domain conflict: "
+ DRM_DEBUG("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
obj, reloc->target_handle,
@@ -336,7 +337,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset > obj->base.size - 4)) {
- DRM_ERROR("Relocation beyond object bounds: "
+ DRM_DEBUG("Relocation beyond object bounds: "
"obj %p target %d offset %d size %d.\n",
obj, reloc->target_handle,
(int) reloc->offset,
@@ -344,7 +345,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
if (unlikely(reloc->offset & 3)) {
- DRM_ERROR("Relocation not 4-byte aligned: "
+ DRM_DEBUG("Relocation not 4-byte aligned: "
"obj %p target %d offset %d.\n",
obj, reloc->target_handle,
(int) reloc->offset);
@@ -461,11 +462,60 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
return ret;
}
+#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+
+static int
+pin_and_fence_object(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ bool need_fence, need_mappable;
+ int ret;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+ if (ret)
+ return ret;
+
+ if (has_fenced_gpu_access) {
+ if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+ if (obj->tiling_mode) {
+ ret = i915_gem_object_get_fence(obj, ring);
+ if (ret)
+ goto err_unpin;
+
+ entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+ i915_gem_object_pin_fence(obj);
+ } else {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto err_unpin;
+ }
+ }
+ obj->pending_fenced_gpu_access = need_fence;
+ }
+
+ entry->offset = obj->gtt_offset;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+ return ret;
+}
+
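pin_and_fence_object() leans on two tiny helpers that this series adds elsewhere; a sketch of their likely shape (the exact bodies in i915_drv.h may differ):

static inline void
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
	}
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}

The WARN_ON(...pin_count) added to i915_gem_object_put_fence() above is the other half of the same invariant: a pinned fence must never be torn down.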
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
int ret, retry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
@@ -518,6 +568,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
+
if (!obj->gtt_space)
continue;
@@ -532,58 +583,55 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
- ret = i915_gem_object_pin(obj,
- entry->alignment,
- need_mappable);
+ ret = pin_and_fence_object(obj, ring);
if (ret)
goto err;
-
- entry++;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
- struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
- bool need_fence;
+ if (obj->gtt_space)
+ continue;
- need_fence =
- has_fenced_gpu_access &&
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
+ ret = pin_and_fence_object(obj, ring);
+ if (ret) {
+ int ret_ignore;
+
+ /* This can potentially raise a harmless
+ * -EINVAL if we failed to bind in the above
+ * call. It cannot raise -EINTR since we know
+ * that the bo is freshly bound and so will
+ * not need to be flushed or waited upon.
+ */
+ ret_ignore = i915_gem_object_unbind(obj);
+ (void)ret_ignore;
+ WARN_ON(obj->gtt_space);
+ break;
+ }
+ }
- if (!obj->gtt_space) {
- bool need_mappable =
- entry->relocation_count ? true : need_fence;
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry;
- ret = i915_gem_object_pin(obj,
- entry->alignment,
- need_mappable);
- if (ret)
- break;
- }
+ if (!obj->gtt_space)
+ continue;
- if (has_fenced_gpu_access) {
- if (need_fence) {
- ret = i915_gem_object_get_fence(obj, ring);
- if (ret)
- break;
- } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode == I915_TILING_NONE) {
- /* XXX pipelined! */
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- break;
- }
- obj->pending_fenced_gpu_access = need_fence;
+ entry = obj->exec_entry;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(obj);
+ entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
}
- entry->offset = obj->gtt_offset;
- }
+ i915_gem_object_unpin(obj);
- /* Decrement pin count for bound objects */
- list_for_each_entry(obj, objects, exec_list) {
- if (obj->gtt_space)
- i915_gem_object_unpin(obj);
+ /* ... and ensure the ppgtt mapping exists if needed. */
+ if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+ i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+ obj, obj->cache_level);
+
+ obj->has_aliasing_ppgtt_mapping = 1;
+ }
}
if (ret != -ENOSPC || retry > 1)
@@ -600,16 +648,19 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
} while (1);
err:
- obj = list_entry(obj->exec_list.prev,
- struct drm_i915_gem_object,
- exec_list);
- while (objects != &obj->exec_list) {
- if (obj->gtt_space)
- i915_gem_object_unpin(obj);
+ list_for_each_entry_continue_reverse(obj, objects, exec_list) {
+ struct drm_i915_gem_exec_object2 *entry;
+
+ if (!obj->gtt_space)
+ continue;
+
+ entry = obj->exec_entry;
+ if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+ i915_gem_object_unpin_fence(obj);
+ entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+ }
- obj = list_entry(obj->exec_list.prev,
- struct drm_i915_gem_object,
- exec_list);
+ i915_gem_object_unpin(obj);
}
return ret;
@@ -682,7 +733,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
@@ -1013,7 +1064,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
- DRM_ERROR("execbuf with invalid offset/length\n");
+ DRM_DEBUG("execbuf with invalid offset/length\n");
return -EINVAL;
}
@@ -1028,20 +1079,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
- DRM_ERROR("execbuf with invalid ring (BSD)\n");
+ DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->ring[VCS];
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
- DRM_ERROR("execbuf with invalid ring (BLT)\n");
+ DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->ring[BCS];
break;
default:
- DRM_ERROR("execbuf with unknown ring: %d\n",
+ DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
@@ -1067,18 +1118,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
break;
default:
- DRM_ERROR("execbuf with unknown constants: %d\n", mode);
+ DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
- DRM_ERROR("clip rectangles are only valid with the render ring\n");
+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
@@ -1123,7 +1174,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
+ DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
@@ -1131,7 +1182,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (!list_empty(&obj->exec_list)) {
- DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
+ DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
@@ -1169,7 +1220,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Set the pending read domains for the batch buffer to COMMAND */
if (batch_obj->base.pending_write_domain) {
- DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+ DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL;
goto err;
}
@@ -1186,7 +1237,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
- ret = i915_gpu_idle(dev);
+ ret = i915_gpu_idle(dev, true);
if (ret)
goto err;
@@ -1274,7 +1325,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, i;
if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
@@ -1282,7 +1333,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
if (exec_list == NULL || exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
drm_free_large(exec_list);
drm_free_large(exec2_list);
@@ -1293,7 +1344,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
(uintptr_t) args->buffers_ptr,
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
+ DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec_list);
drm_free_large(exec2_list);
@@ -1334,7 +1385,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
+ DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
@@ -1354,7 +1405,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
int ret;
if (args->buffer_count < 1) {
- DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
@@ -1364,7 +1415,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
@@ -1373,7 +1424,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
(uintptr_t) args->buffers_ptr,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
+ DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
drm_free_large(exec2_list);
return -EFAULT;
@@ -1388,7 +1439,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
+ DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6042c5e6d27..2eacd78bb93 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -29,6 +29,279 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/* PPGTT support for Sandybridge/Gen6 and later */
+static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+ unsigned first_entry,
+ unsigned num_entries)
+{
+ uint32_t *pt_vaddr;
+ uint32_t scratch_pte;
+ unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned last_pte, i;
+
+ scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
+ scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+
+ while (num_entries) {
+ last_pte = first_pte + num_entries;
+ if (last_pte > I915_PPGTT_PT_ENTRIES)
+ last_pte = I915_PPGTT_PT_ENTRIES;
+
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+ for (i = first_pte; i < last_pte; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ kunmap_atomic(pt_vaddr);
+
+ num_entries -= last_pte - first_pte;
+ first_pte = 0;
+ act_pd++;
+ }
+}
+
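The pd/pte arithmetic above in numbers, assuming I915_PPGTT_PT_ENTRIES == 1024 (one 4 KiB page of 4-byte PTEs):

/* i915_ppgtt_clear_range(ppgtt, 1500, 600) visits
 *   act_pd 1, PTEs 476..1023   (1500 == 1*1024 + 476; 548 entries)
 *   act_pd 2, PTEs   0..51     (the remaining 52 entries)
 */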
+int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt;
+ uint32_t pd_entry;
+ unsigned first_pd_entry_in_global_pt;
+ uint32_t __iomem *pd_addr;
+ int i;
+ int ret = -ENOMEM;
+
+ /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
+ * entries. For aliasing ppgtt support we just steal them at the end for
+ * now. */
+ first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return ret;
+
+ ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
+ GFP_KERNEL);
+ if (!ppgtt->pt_pages)
+ goto err_ppgtt;
+
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
+ if (!ppgtt->pt_pages[i])
+ goto err_pt_alloc;
+ }
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
+ *ppgtt->num_pd_entries,
+ GFP_KERNEL);
+ if (!ppgtt->pt_dma_addr)
+ goto err_pt_alloc;
+ }
+
+ pd_addr = dev_priv->mm.gtt->gtt + first_pd_entry_in_global_pt;
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+ if (dev_priv->mm.gtt->needs_dmar) {
+ pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
+ 0, 4096,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
+ ret = -EIO;
+ goto err_pd_pin;
+ }
+ ppgtt->pt_dma_addr[i] = pt_addr;
+ } else
+ pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+ pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+ pd_entry |= GEN6_PDE_VALID;
+
+ writel(pd_entry, pd_addr + i);
+ }
+ readl(pd_addr);
+
+ ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
+
+ i915_ppgtt_clear_range(ppgtt, 0,
+ ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+
+ ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+
+ dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+ return 0;
+
+err_pd_pin:
+ if (ppgtt->pt_dma_addr) {
+ for (i--; i >= 0; i--)
+ pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ }
+err_pt_alloc:
+ kfree(ppgtt->pt_dma_addr);
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ if (ppgtt->pt_pages[i])
+ __free_page(ppgtt->pt_pages[i]);
+ }
+ kfree(ppgtt->pt_pages);
+err_ppgtt:
+ kfree(ppgtt);
+
+ return ret;
+}
+
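A hedged sketch of the intended call sites; the real hookup happens during driver load and unload elsewhere in this series:

	/* at GEM init, once the global GTT is mapped */
	ret = i915_gem_init_aliasing_ppgtt(dev);
	if (ret)
		/* the aliasing PPGTT is an optimization; GTT-only
		 * operation still works */
		DRM_DEBUG_DRIVER("PPGTT setup failed: %d\n", ret);

	/* at teardown, after the rings have gone idle */
	i915_gem_cleanup_aliasing_ppgtt(dev);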
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ int i;
+
+ if (!ppgtt)
+ return;
+
+ if (ppgtt->pt_dma_addr) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ }
+
+ kfree(ppgtt->pt_dma_addr);
+ for (i = 0; i < ppgtt->num_pd_entries; i++)
+ __free_page(ppgtt->pt_pages[i]);
+ kfree(ppgtt->pt_pages);
+ kfree(ppgtt);
+}
+
+static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
+ struct scatterlist *sg_list,
+ unsigned sg_len,
+ unsigned first_entry,
+ uint32_t pte_flags)
+{
+ uint32_t *pt_vaddr, pte;
+ unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned i, j, m, segment_len;
+ dma_addr_t page_addr;
+ struct scatterlist *sg;
+
+ /* init sg walking */
+ sg = sg_list;
+ i = 0;
+ segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+ m = 0;
+
+ while (i < sg_len) {
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+ for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
+ page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+ pt_vaddr[j] = pte | pte_flags;
+
+ /* grab the next page */
+ m++;
+ if (m == segment_len) {
+ sg = sg_next(sg);
+ i++;
+ if (i == sg_len)
+ break;
+
+ segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+ m = 0;
+ }
+ }
+
+ kunmap_atomic(pt_vaddr);
+
+ first_pte = 0;
+ act_pd++;
+ }
+}
+
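How the scatterlist walk above advances, as a worked example: two DMA segments of 3 and 2 pages written starting at GTT entry 1022 (again assuming 1024 PTEs per table):

/* act_pd 0, PTE 1022:  segment 0, page 0
 * act_pd 0, PTE 1023:  segment 0, page 1
 * act_pd 1, PTE    0:  segment 0, page 2   (m hits segment_len -> sg_next)
 * act_pd 1, PTE    1:  segment 1, page 0
 * act_pd 1, PTE    2:  segment 1, page 1   (i reaches sg_len -> done)
 */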
+static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
+ unsigned first_entry, unsigned num_entries,
+ struct page **pages, uint32_t pte_flags)
+{
+ uint32_t *pt_vaddr, pte;
+ unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+ unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+ unsigned last_pte, i;
+ dma_addr_t page_addr;
+
+ while (num_entries) {
+ last_pte = first_pte + num_entries;
+ last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
+
+ pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
+
+ for (i = first_pte; i < last_pte; i++) {
+ page_addr = page_to_phys(*pages);
+ pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+ pt_vaddr[i] = pte | pte_flags;
+
+ pages++;
+ }
+
+ kunmap_atomic(pt_vaddr);
+
+ num_entries -= last_pte - first_pte;
+ first_pte = 0;
+ act_pd++;
+ }
+}
+
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t pte_flags = GEN6_PTE_VALID;
+
+ switch (cache_level) {
+ case I915_CACHE_LLC_MLC:
+ pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
+ break;
+ case I915_CACHE_LLC:
+ pte_flags |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte_flags |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ BUG();
+ }
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ BUG_ON(!obj->sg_list);
+
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
+ } else
+ i915_ppgtt_insert_pages(ppgtt,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ pte_flags);
+}
+
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_object *obj)
+{
+ i915_ppgtt_clear_range(ppgtt,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+}
+
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)
@@ -55,7 +328,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev)) {
+ if (i915_gpu_idle(dev_priv->dev, false)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 31d334d9d9d..1a930666598 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -93,8 +93,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_INFO(dev)->gen >= 6) {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ uint32_t dimm_c0, dimm_c1;
+ dimm_c0 = I915_READ(MAD_DIMM_C0);
+ dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
+ /* Enable swizzling when the channels are populated with
+ * identically sized dimms. We don't need to check the 3rd
+ * channel because no cpu with gpu attached ships in that
+ * configuration. Also, swizzling only makes sense for 2
+ * channels anyway. */
+ if (dimm_c0 == dimm_c1) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
} else if (IS_GEN5(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
@@ -107,10 +122,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev)) {
+ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
uint32_t dcc;
- /* On mobile 9xx chipsets, channel interleave by the CPU is
+ /* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
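What a bit-9/10 swizzle means for CPU access, sketched as the address fixup the CPU read/write paths have to apply (an illustration, not code from this patch):

/* For I915_BIT_6_SWIZZLE_9_10, bit 6 of a linear offset is XORed with
 * bits 9 and 10 before the page is touched directly: */
static inline unsigned long bit_6_swizzle_9_10(unsigned long offset)
{
	return offset ^ ((((offset >> 9) ^ (offset >> 10)) & 1) << 6);
}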
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5bd4361ea84..afd4e03e337 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -720,7 +720,6 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) {
unsigned long flags;
- void __iomem *s;
void *d;
d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
@@ -728,10 +727,29 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
- s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
+ if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
+ void __iomem *s;
+
+ /* Simply ignore tiling or any overlapping fence.
+ * It's part of the error state, and this hopefully
+ * captures what the GPU read.
+ */
+
+ s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc_offset);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s);
+ } else {
+ void *s;
+
+ drm_clflush_pages(&src->pages[page], 1);
+
+ s = kmap_atomic(src->pages[page]);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s);
+
+ drm_clflush_pages(&src->pages[page], 1);
+ }
local_irq_restore(flags);
dst->pages[page] = d;
@@ -770,11 +788,11 @@ i915_error_state_free(struct drm_device *dev,
{
int i;
- for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
- i915_error_object_free(error->batchbuffer[i]);
-
- for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
- i915_error_object_free(error->ringbuffer[i]);
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].ringbuffer);
+ kfree(error->ring[i].requests);
+ }
kfree(error->active_bo);
kfree(error->overlay);
@@ -804,7 +822,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : 0;
+ err->ring = obj->ring ? obj->ring->id : -1;
err->cache_level = obj->cache_level;
if (++i == count)
@@ -876,6 +894,92 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
return NULL;
}
+static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+ error->semaphore_mboxes[ring->id][0]
+ = I915_READ(RING_SYNC_0(ring->mmio_base));
+ error->semaphore_mboxes[ring->id][1]
+ = I915_READ(RING_SYNC_1(ring->mmio_base));
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+ error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+ error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+ error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+ if (ring->id == RCS) {
+ error->instdone1 = I915_READ(INSTDONE1);
+ error->bbaddr = I915_READ64(BB_ADDR);
+ }
+ } else {
+ error->ipeir[ring->id] = I915_READ(IPEIR);
+ error->ipehr[ring->id] = I915_READ(IPEHR);
+ error->instdone[ring->id] = I915_READ(INSTDONE);
+ }
+
+ error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+ error->seqno[ring->id] = ring->get_seqno(ring);
+ error->acthd[ring->id] = intel_ring_get_active_head(ring);
+ error->head[ring->id] = I915_READ_HEAD(ring);
+ error->tail[ring->id] = I915_READ_TAIL(ring);
+
+ error->cpu_ring_head[ring->id] = ring->head;
+ error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ int i, count;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+ if (ring->obj == NULL)
+ continue;
+
+ i915_record_ring_state(dev, error, ring);
+
+ error->ring[i].batchbuffer =
+ i915_error_first_batchbuffer(dev_priv, ring);
+
+ error->ring[i].ringbuffer =
+ i915_error_object_create(dev_priv, ring->obj);
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list)
+ count++;
+
+ error->ring[i].num_requests = count;
+ error->ring[i].requests =
+ kmalloc(count*sizeof(struct drm_i915_error_request),
+ GFP_ATOMIC);
+ if (error->ring[i].requests == NULL) {
+ error->ring[i].num_requests = 0;
+ continue;
+ }
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list) {
+ struct drm_i915_error_request *erq;
+
+ erq = &error->ring[i].requests[count++];
+ erq->seqno = request->seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->tail = request->tail;
+ }
+ }
+}
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -900,7 +1004,7 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
/* Account for pipe specific data like PIPE*STAT */
- error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
@@ -909,59 +1013,18 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
dev->primary->index);
- error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
- error->instpm = I915_READ(INSTPM);
- error->error = 0;
+
if (INTEL_INFO(dev)->gen >= 6) {
error->error = I915_READ(ERROR_GEN6);
-
- error->bcs_acthd = I915_READ(BCS_ACTHD);
- error->bcs_ipehr = I915_READ(BCS_IPEHR);
- error->bcs_ipeir = I915_READ(BCS_IPEIR);
- error->bcs_instdone = I915_READ(BCS_INSTDONE);
- error->bcs_seqno = 0;
- if (dev_priv->ring[BCS].get_seqno)
- error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
-
- error->vcs_acthd = I915_READ(VCS_ACTHD);
- error->vcs_ipehr = I915_READ(VCS_IPEHR);
- error->vcs_ipeir = I915_READ(VCS_IPEIR);
- error->vcs_instdone = I915_READ(VCS_INSTDONE);
- error->vcs_seqno = 0;
- if (dev_priv->ring[VCS].get_seqno)
- error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
+ error->done_reg = I915_READ(DONE_REG);
}
- if (INTEL_INFO(dev)->gen >= 4) {
- error->ipeir = I915_READ(IPEIR_I965);
- error->ipehr = I915_READ(IPEHR_I965);
- error->instdone = I915_READ(INSTDONE_I965);
- error->instps = I915_READ(INSTPS);
- error->instdone1 = I915_READ(INSTDONE1);
- error->acthd = I915_READ(ACTHD_I965);
- error->bbaddr = I915_READ64(BB_ADDR);
- } else {
- error->ipeir = I915_READ(IPEIR);
- error->ipehr = I915_READ(IPEHR);
- error->instdone = I915_READ(INSTDONE);
- error->acthd = I915_READ(ACTHD);
- error->bbaddr = 0;
- }
- i915_gem_record_fences(dev, error);
-
- /* Record the active batch and ring buffers */
- for (i = 0; i < I915_NUM_RINGS; i++) {
- error->batchbuffer[i] =
- i915_error_first_batchbuffer(dev_priv,
- &dev_priv->ring[i]);
- error->ringbuffer[i] =
- i915_error_object_create(dev_priv,
- dev_priv->ring[i].obj);
- }
+ i915_gem_record_fences(dev, error);
+ i915_gem_record_rings(dev, error);
/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
@@ -1017,11 +1080,12 @@ void i915_destroy_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
+ unsigned long flags;
- spin_lock(&dev_priv->error_lock);
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
dev_priv->first_error = NULL;
- spin_unlock(&dev_priv->error_lock);
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
i915_error_state_free(dev, error);
@@ -1698,6 +1762,7 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 == instdone1) {
if (dev_priv->hangcheck_count++ > 1) {
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
if (!IS_GEN2(dev)) {
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -1705,7 +1770,6 @@ void i915_hangcheck_elapsed(unsigned long data)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
-
if (kick_ring(&dev_priv->ring[RCS]))
goto repeat;
@@ -1718,7 +1782,6 @@ void i915_hangcheck_elapsed(unsigned long data)
goto repeat;
}
- i915_handle_error(dev, true);
return;
}
} else {
@@ -1752,18 +1815,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xeffe);
- if (IS_GEN6(dev)) {
- /* Workaround stalls observed on Sandy Bridge GPUs by
- * making the blitter command streamer generate a
- * write to the Hardware Status Page for
- * MI_USER_INTERRUPT. This appears to serialize the
- * previous seqno write out before the interrupt
- * happens.
- */
- I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
- I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
- }
-
/* XXX hotplug from PCH */
I915_WRITE(DEIMR, 0xffffffff);
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
deleted file mode 100644
index cc8f6d49cf2..00000000000
--- a/drivers/gpu/drm/i915/i915_mem.c
+++ /dev/null
@@ -1,387 +0,0 @@
-/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-/* This memory manager is integrated into the global/local lru
- * mechanisms used by the clients. Specifically, it operates by
- * setting the 'in_use' fields of the global LRU to indicate whether
- * this region is privately allocated to a client.
- *
- * This does require the client to actually respect that field.
- *
- * Currently no effort is made to allocate 'private' memory in any
- * clever way - the LRU information isn't used to determine which
- * block to allocate, and the ring is drained prior to allocations --
- * in other words allocation is expensive.
- */
-static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv;
- struct drm_tex_region *list;
- unsigned shift, nr;
- unsigned start;
- unsigned end;
- unsigned i;
- int age;
-
- shift = dev_priv->tex_lru_log_granularity;
- nr = I915_NR_TEX_REGIONS;
-
- start = p->start >> shift;
- end = (p->start + p->size - 1) >> shift;
-
- age = ++sarea_priv->texAge;
- list = sarea_priv->texList;
-
- /* Mark the regions with the new flag and update their age. Move
- * them to head of list to preserve LRU semantics.
- */
- for (i = start; i <= end; i++) {
- list[i].in_use = in_use;
- list[i].age = age;
-
- /* remove_from_list(i)
- */
- list[(unsigned)list[i].next].prev = list[i].prev;
- list[(unsigned)list[i].prev].next = list[i].next;
-
- /* insert_at_head(list, i)
- */
- list[i].prev = nr;
- list[i].next = list[nr].next;
- list[(unsigned)list[nr].next].prev = i;
- list[nr].next = i;
- }
-}
-
-/* Very simple allocator for agp memory, working on a static range
- * already mapped into each client's address space.
- */
-
-static struct mem_block *split_block(struct mem_block *p, int start, int size,
- struct drm_file *file_priv)
-{
- /* Maybe cut off the start of an existing block */
- if (start > p->start) {
- struct mem_block *newblock = kmalloc(sizeof(*newblock),
- GFP_KERNEL);
- if (!newblock)
- goto out;
- newblock->start = start;
- newblock->size = p->size - (start - p->start);
- newblock->file_priv = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size -= newblock->size;
- p = newblock;
- }
-
- /* Maybe cut off the end of an existing block */
- if (size < p->size) {
- struct mem_block *newblock = kmalloc(sizeof(*newblock),
- GFP_KERNEL);
- if (!newblock)
- goto out;
- newblock->start = start + size;
- newblock->size = p->size - size;
- newblock->file_priv = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size = size;
- }
-
- out:
- /* Our block is in the middle */
- p->file_priv = file_priv;
- return p;
-}
-
-static struct mem_block *alloc_block(struct mem_block *heap, int size,
- int align2, struct drm_file *file_priv)
-{
- struct mem_block *p;
- int mask = (1 << align2) - 1;
-
- for (p = heap->next; p != heap; p = p->next) {
- int start = (p->start + mask) & ~mask;
- if (p->file_priv == NULL && start + size <= p->start + p->size)
- return split_block(p, start, size, file_priv);
- }
-
- return NULL;
-}
-
-static struct mem_block *find_block(struct mem_block *heap, int start)
-{
- struct mem_block *p;
-
- for (p = heap->next; p != heap; p = p->next)
- if (p->start == start)
- return p;
-
- return NULL;
-}
-
-static void free_block(struct mem_block *p)
-{
- p->file_priv = NULL;
-
- /* Assumes a single contiguous range. Needs a special file_priv in
- * 'heap' to stop it being subsumed.
- */
- if (p->next->file_priv == NULL) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- kfree(q);
- }
-
- if (p->prev->file_priv == NULL) {
- struct mem_block *q = p->prev;
- q->size += p->size;
- q->next = p->next;
- q->next->prev = q;
- kfree(p);
- }
-}
-
-/* Initialize. How to check for an uninitialized heap?
- */
-static int init_heap(struct mem_block **heap, int start, int size)
-{
- struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
-
- if (!blocks)
- return -ENOMEM;
-
- *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
- if (!*heap) {
- kfree(blocks);
- return -ENOMEM;
- }
-
- blocks->start = start;
- blocks->size = size;
- blocks->file_priv = NULL;
- blocks->next = blocks->prev = *heap;
-
- memset(*heap, 0, sizeof(**heap));
- (*heap)->file_priv = (struct drm_file *) -1;
- (*heap)->next = (*heap)->prev = blocks;
- return 0;
-}
-
-/* Free all blocks associated with the releasing file.
- */
-void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
- struct mem_block *heap)
-{
- struct mem_block *p;
-
- if (!heap || !heap->next)
- return;
-
- for (p = heap->next; p != heap; p = p->next) {
- if (p->file_priv == file_priv) {
- p->file_priv = NULL;
- mark_block(dev, p, 0);
- }
- }
-
- /* Assumes a single contiguous range. Needs a special file_priv in
- * 'heap' to stop it being subsumed.
- */
- for (p = heap->next; p != heap; p = p->next) {
- while (p->file_priv == NULL && p->next->file_priv == NULL) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- kfree(q);
- }
- }
-}
-
-/* Shutdown.
- */
-void i915_mem_takedown(struct mem_block **heap)
-{
- struct mem_block *p;
-
- if (!*heap)
- return;
-
- for (p = (*heap)->next; p != *heap;) {
- struct mem_block *q = p;
- p = p->next;
- kfree(q);
- }
-
- kfree(*heap);
- *heap = NULL;
-}
-
-static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
-{
- switch (region) {
- case I915_MEM_REGION_AGP:
- return &dev_priv->agp_heap;
- default:
- return NULL;
- }
-}
-
-/* IOCTL HANDLERS */
-
-int i915_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_alloc_t *alloc = data;
- struct mem_block *block, **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, alloc->region);
- if (!heap || !*heap)
- return -EFAULT;
-
- /* Make things easier on ourselves: all allocations at least
- * 4k aligned.
- */
- if (alloc->alignment < 12)
- alloc->alignment = 12;
-
- block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
-
- if (!block)
- return -ENOMEM;
-
- mark_block(dev, block, 1);
-
- if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
- sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-int i915_mem_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_free_t *memfree = data;
- struct mem_block *block, **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, memfree->region);
- if (!heap || !*heap)
- return -EFAULT;
-
- block = find_block(*heap, memfree->region_offset);
- if (!block)
- return -EFAULT;
-
- if (block->file_priv != file_priv)
- return -EPERM;
-
- mark_block(dev, block, 0);
- free_block(block);
- return 0;
-}
-
-int i915_mem_init_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_init_heap_t *initheap = data;
- struct mem_block **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, initheap->region);
- if (!heap)
- return -EFAULT;
-
- if (*heap) {
- DRM_ERROR("heap already initialized?");
- return -EFAULT;
- }
-
- return init_heap(heap, initheap->start, initheap->size);
-}
-
-int i915_mem_destroy_heap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_destroy_heap_t *destroyheap = data;
- struct mem_block **heap;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- heap = get_heap(dev_priv, destroyheap->region);
- if (!heap) {
- DRM_ERROR("get_heap failed");
- return -EFAULT;
- }
-
- if (!*heap) {
- DRM_ERROR("heap not initialized?");
- return -EFAULT;
- }
-
- i915_mem_takedown(heap);
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 558ac716a32..3886cf051ba 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -86,12 +86,45 @@
#define GEN6_MBC_SNPCR_LOW (2<<21)
#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+#define GEN6_MBCTL 0x0907c
+#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
+#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
+#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
+#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
+#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
+
#define GEN6_GDRST 0x941c
#define GEN6_GRDOM_FULL (1 << 0)
#define GEN6_GRDOM_RENDER (1 << 1)
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID (1 << 0)
+#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32 KB pages */
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID (1 << 0)
+#define GEN6_PTE_UNCACHED (1 << 1)
+#define GEN6_PTE_CACHE_LLC (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN6_PTE_CACHE_BITS (3 << 1)
+#define GEN6_PTE_GFDT (1 << 3)
+#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+
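A worked example of the address encoding (an editorial sketch): for the 40-bit physical address 0x1234567000, bits 39-32 fold into the PTE bits 11-4 that page alignment leaves free:

/* addr          = 0x1234567000
 * addr >> 28    = 0x123
 * ... & 0xff0   = 0x120           (0x12 lands at bits 11..4)
 * addr | 0x120  = 0x1234567120    -> 32-bit PTE value 0x34567120 */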
+#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
+#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
+#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
+#define PP_DIR_DCLV_2G 0xffffffff
+
+#define GAM_ECOCHK 0x4090
+#define ECOCHK_SNB_BIT (1<<10)
+#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
+#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -295,6 +328,12 @@
#define FENCE_REG_SANDYBRIDGE_0 0x100000
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
+/* control register for cpu gtt access */
+#define TILECTL 0x101000
+#define TILECTL_SWZCTL (1 << 0)
+#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
+#define TILECTL_BACKSNOOP_DIS (1 << 3)
+
/*
* Instruction and interrupt control regs
*/
@@ -318,7 +357,14 @@
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
+#define ARB_MODE 0x04030
+#define ARB_MODE_SWIZZLE_SNB (1<<4)
+#define ARB_MODE_SWIZZLE_IVB (1<<5)
+#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
+#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
+#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
+#define DONE_REG 0x40b0
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
#define RING_ACTHD(base) ((base)+0x74)
@@ -352,6 +398,12 @@
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
+#define RING_IPEIR(base) ((base)+0x64)
+#define RING_IPEHR(base) ((base)+0x68)
+#define RING_INSTDONE(base) ((base)+0x6c)
+#define RING_INSTPS(base) ((base)+0x70)
+#define RING_DMA_FADD(base) ((base)+0x78)
+#define RING_INSTPM(base) ((base)+0xc0)
#define INSTPS 0x02070 /* 965+ only */
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
@@ -365,14 +417,6 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
-#define VCS_INSTDONE 0x1206C
-#define VCS_IPEIR 0x12064
-#define VCS_IPEHR 0x12068
-#define VCS_ACTHD 0x12074
-#define BCS_INSTDONE 0x2206C
-#define BCS_IPEIR 0x22064
-#define BCS_IPEHR 0x22068
-#define BCS_ACTHD 0x22074
#define ERROR_GEN6 0x040a0
@@ -391,10 +435,11 @@
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
-# define MI_FLUSH_ENABLE (1 << 11)
+# define MI_FLUSH_ENABLE (1 << 12)
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
+#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
@@ -1037,6 +1082,29 @@
#define C0DRB3 0x10206
#define C1DRB3 0x10606
+/** snb MCH registers for reading the DRAM channel configuration */
+#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
+#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
+#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
+#define MAD_DIMM_ECC_MASK (0x3 << 24)
+#define MAD_DIMM_ECC_OFF (0x0 << 24)
+#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
+#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
+#define MAD_DIMM_ECC_ON (0x3 << 24)
+#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
+#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
+#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
+#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
+#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
+#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
+#define MAD_DIMM_A_SELECT (0x1 << 16)
+/* DIMM sizes are in multiples of 256 MB. */
+#define MAD_DIMM_B_SIZE_SHIFT 8
+#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
+#define MAD_DIMM_A_SIZE_SHIFT 0
+#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
+
+
/* Clocking configuration register */
#define CLKCFG 0x10c00
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -1316,6 +1384,7 @@
#define _VSYNC_A 0x60014
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
+#define _VSYNCSHIFT_A 0x60028
/* Pipe B timing regs */
#define _HTOTAL_B 0x61000
@@ -1326,6 +1395,8 @@
#define _VSYNC_B 0x61014
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
+#define _VSYNCSHIFT_B 0x61028
+
#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
@@ -1334,6 +1405,7 @@
#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
+#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
/* VGA port control */
#define ADPA 0x61100
@@ -2319,10 +2391,21 @@
#define PIPECONF_PALETTE 0
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
-#define PIPECONF_PROGRESSIVE (0 << 21)
-#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
-#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
#define PIPECONF_INTERLACE_MASK (7 << 21)
+/* Note that pre-gen3 does not support interlaced display directly. Panel
+ * fitting must be disabled on pre-ilk for interlaced. */
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */
+#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */
+/* Ironlake and later have a completely new set of values for interlaced. PFIT
+ * means panel fitter required, PF means progressive fetch, DBL means power
+ * saving pixel doubling. */
+#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21)
+#define PIPECONF_INTERLACED_ILK (3 << 21)
+#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
+#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
@@ -3219,6 +3302,7 @@
#define _TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
+#define _TRANS_VSYNCSHIFT_A 0xe0028
#define _TRANSA_DATA_M1 0xe0030
#define _TRANSA_DATA_N1 0xe0034
@@ -3249,6 +3333,7 @@
#define _TRANS_VTOTAL_B 0xe100c
#define _TRANS_VBLANK_B 0xe1010
#define _TRANS_VSYNC_B 0xe1014
+#define _TRANS_VSYNCSHIFT_B 0xe1028
#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
@@ -3256,6 +3341,8 @@
#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
+#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
+ _TRANS_VSYNCSHIFT_B)
#define _TRANSB_DATA_M1 0xe1030
#define _TRANSB_DATA_N1 0xe1034
@@ -3289,7 +3376,10 @@
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_VIDEO_AUDIO (0<<26)
+#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
+#define TRANS_INTERLACED (3<<21)
+#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
#define TRANS_8BPC (0<<5)
#define TRANS_10BPC (1<<5)
#define TRANS_6BPC (2<<5)
@@ -3628,6 +3718,12 @@
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
+#define GTFIFODBG 0x120000
+#define GT_FIFO_CPU_ERROR_MASK 7
+#define GT_FIFO_OVFERR (1<<2)
+#define GT_FIFO_IAWRERR (1<<1)
+#define GT_FIFO_IARDERR (1<<0)
+
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
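These registers back the GT write-FIFO throttle; a hedged sketch of the wait loop they imply (the helper name and retry bound are assumptions):

/* Illustrative only: wait until the GT FIFO has more free slots than the
 * reserved watermark before issuing further register writes. */
static void gt_fifo_wait(struct drm_i915_private *dev_priv)
{
	int loops = 500; /* arbitrary bound for this sketch */

	while (loops-- &&
	       I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES) <
	       GT_FIFO_NUM_RESERVED_ENTRIES)
		udelay(10);
}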
@@ -3757,4 +3853,16 @@
*/
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
+#define IBX_AUD_CONFIG_A 0xe2000
+#define CPT_AUD_CONFIG_A 0xe5000
+#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
+#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
+#define AUD_CONFIG_UPPER_N_SHIFT 20
+#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
+#define AUD_CONFIG_LOWER_N_SHIFT 4
+#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
+#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
+
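A hedged sketch of programming an explicit audio N value with these fields; the helper is hypothetical. N is 20 bits, split 12/8 between the lower and upper fields:

/* Illustrative only: program a 20-bit audio N value and enable it. */
static void aud_config_set_n(struct drm_i915_private *dev_priv, u32 reg, u32 n)
{
	u32 val = I915_READ(reg);

	val &= ~(AUD_CONFIG_UPPER_N_VALUE | AUD_CONFIG_LOWER_N_VALUE);
	val |= (n & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT;
	val |= ((n >> 12) & 0xff) << AUD_CONFIG_UPPER_N_SHIFT;
	I915_WRITE(reg, val | AUD_CONFIG_N_PROG_ENABLE);
}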
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index cb912106d1a..bae3edf956a 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -208,7 +208,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
if (ret < 0) {
- DRM_ERROR("failed to get supported _DSM functions\n");
+ DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 63880e2e5cf..8168d8f8a63 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -572,7 +572,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
- dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
+ dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
if (!dev_priv->child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
@@ -669,7 +669,7 @@ intel_parse_bios(struct drm_device *dev)
}
if (!vbt) {
- DRM_ERROR("VBT signature missing\n");
+ DRM_DEBUG_DRIVER("VBT signature missing\n");
pci_unmap_rom(pdev, bios);
return -1;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dd729d46a61..4d3d736a4f5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -594,7 +594,10 @@ void intel_crt_init(struct drm_device *dev)
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
crt->base.crtc_mask = (1 << 0) | (1 << 1);
- connector->interlace_allowed = 1;
+ if (IS_GEN2(dev))
+ connector->interlace_allowed = 0;
+ else
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 397087cf689..d514719f65e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -75,7 +75,7 @@ struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
- int, int, intel_clock_t *);
+ int, int, intel_clock_t *, intel_clock_t *);
};
/* FDI */
@@ -83,17 +83,21 @@ struct intel_limit {
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
@@ -515,7 +519,8 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
@@ -562,6 +567,9 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
this_err = abs(clock.dot - target);
if (this_err < err) {
@@ -578,7 +586,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -625,6 +634,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
+ if (match_clock &&
+ clock.p != match_clock->p)
+ continue;
this_err = abs(clock.dot - target);
if (this_err < err_most) {
@@ -642,7 +654,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
@@ -668,7 +681,8 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
{
intel_clock_t clock;
if (target < 200000) {
@@ -922,6 +936,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
+	/* if we need the pipe A quirk it must always be on */
+ if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ state = true;
+
reg = PIPECONF(pipe);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
@@ -930,19 +948,24 @@ void assert_pipe(struct drm_i915_private *dev_priv,
pipe_name(pipe), state_string(state), state_string(cur_state));
}
-static void assert_plane_enabled(struct drm_i915_private *dev_priv,
- enum plane plane)
+static void assert_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, bool state)
{
int reg;
u32 val;
+ bool cur_state;
reg = DSPCNTR(plane);
val = I915_READ(reg);
- WARN(!(val & DISPLAY_PLANE_ENABLE),
- "plane %c assertion failure, should be active but is disabled\n",
- plane_name(plane));
+ cur_state = !!(val & DISPLAY_PLANE_ENABLE);
+ WARN(cur_state != state,
+ "plane %c assertion failure (expected %s, current %s)\n",
+ plane_name(plane), state_string(state), state_string(cur_state));
}
+#define assert_plane_enabled(d, p) assert_plane(d, p, true)
+#define assert_plane_disabled(d, p) assert_plane(d, p, false)
+
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -951,8 +974,14 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
int cur_pipe;
/* Planes are fixed to pipes on ILK+ */
- if (HAS_PCH_SPLIT(dev_priv->dev))
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ reg = DSPCNTR(pipe);
+ val = I915_READ(reg);
+ WARN((val & DISPLAY_PLANE_ENABLE),
+ "plane %c assertion failure, should be disabled but not\n",
+ plane_name(pipe));
return;
+ }
/* Need to check both planes against the pipe */
for (i = 0; i < 2; i++) {
@@ -1071,7 +1100,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
{
u32 val = I915_READ(reg);
WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
- "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
@@ -1237,7 +1266,8 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
- u32 val;
+ u32 val, pipeconf_val;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
@@ -1251,6 +1281,7 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
reg = TRANSCONF(pipe);
val = I915_READ(reg);
+ pipeconf_val = I915_READ(PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv->dev)) {
/*
@@ -1258,8 +1289,19 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
* that in pipeconf reg.
*/
val &= ~PIPE_BPC_MASK;
- val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ val |= pipeconf_val & PIPE_BPC_MASK;
}
+
+ val &= ~TRANS_INTERLACE_MASK;
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
+ if (HAS_PCH_IBX(dev_priv->dev) &&
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
+ val |= TRANS_LEGACY_INTERLACED_ILK;
+ else
+ val |= TRANS_INTERLACED;
+ else
+ val |= TRANS_PROGRESSIVE;
+
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
@@ -2012,6 +2054,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
ret = i915_gem_object_get_fence(obj, pipelined);
if (ret)
goto err_unpin;
+
+ i915_gem_object_pin_fence(obj);
}
dev_priv->mm.interruptible = true;
@@ -2024,6 +2068,12 @@ err_interruptible:
return ret;
}
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_fence(obj);
+ i915_gem_object_unpin(obj);
+}
+
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y)
{
@@ -2255,7 +2305,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
LEAVE_ATOMIC_MODE_SET);
if (ret) {
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("failed to update base address\n");
return ret;
@@ -2263,7 +2313,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
- i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
}
mutex_unlock(&dev->struct_mutex);
@@ -2936,6 +2986,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
+ I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
intel_fdi_normal_train(crtc);
@@ -3321,10 +3372,12 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+ assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
if (crtc->fb) {
mutex_lock(&dev->struct_mutex);
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
}
}
@@ -3398,11 +3451,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
return false;
}
- /* XXX some encoders set the crtcinfo, others don't.
- * Obviously we need some form of conflict resolution here...
- */
- if (adjusted_mode->crtc_htotal == 0)
- drm_mode_set_crtcinfo(adjusted_mode, 0);
+	/* All interlace-capable Intel hw wants timings in frames. */
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
@@ -4521,6 +4571,7 @@ void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
@@ -4529,8 +4580,10 @@ void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEA_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ val = I915_READ(WM0_PIPEA_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEA_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
@@ -4541,8 +4594,10 @@ void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEB_ILK,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ val = I915_READ(WM0_PIPEB_ILK);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEB_ILK, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
@@ -4555,8 +4610,10 @@ void sandybridge_update_wm(struct drm_device *dev)
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
- I915_WRITE(WM0_PIPEC_IVB,
- (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ val = I915_READ(WM0_PIPEC_IVB);
+ val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+ I915_WRITE(WM0_PIPEC_IVB, val |
+ ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
@@ -4709,6 +4766,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ u32 val;
int sprite_wm, reg;
int ret;
@@ -4735,7 +4793,9 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
return;
}
- I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ val = I915_READ(reg);
+ val &= ~WM0_PIPE_SPRITE_MASK;
+ I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
@@ -4977,6 +5037,82 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
return display_bpc != bpc;
}
+static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int refclk;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+ refclk = dev_priv->lvds_ssc_freq * 1000;
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ refclk / 1000);
+ } else if (!IS_GEN2(dev)) {
+ refclk = 96000;
+ } else {
+ refclk = 48000;
+ }
+
+ return refclk;
+}
+
+static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock)
+{
+	/* SDVO TV has fixed PLL values that depend on its clock range;
+	   this mirrors the VBIOS setting. */
+ if (adjusted_mode->clock >= 100000
+ && adjusted_mode->clock < 140500) {
+ clock->p1 = 2;
+ clock->p2 = 10;
+ clock->n = 3;
+ clock->m1 = 16;
+ clock->m2 = 8;
+ } else if (adjusted_mode->clock >= 140500
+ && adjusted_mode->clock <= 200000) {
+ clock->p1 = 1;
+ clock->p2 = 10;
+ clock->n = 6;
+ clock->m1 = 12;
+ clock->m2 = 8;
+ }
+}
+
+static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
+ intel_clock_t *clock,
+ intel_clock_t *reduced_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 fp, fp2 = 0;
+
+ if (IS_PINEVIEW(dev)) {
+ fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
+ if (reduced_clock)
+ fp2 = (1 << reduced_clock->n) << 16 |
+ reduced_clock->m1 << 8 | reduced_clock->m2;
+ } else {
+ fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
+ if (reduced_clock)
+ fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
+ reduced_clock->m2;
+ }
+
+ I915_WRITE(FP0(pipe), fp);
+
+ intel_crtc->lowfreq_avail = false;
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ reduced_clock && i915_powersave) {
+ I915_WRITE(FP1(pipe), fp2);
+ intel_crtc->lowfreq_avail = true;
+ } else {
+ I915_WRITE(FP1(pipe), fp);
+ }
+}
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4990,7 +5126,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
- u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+ u32 dpll, dspcntr, pipeconf, vsyncshift;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
@@ -5031,15 +5167,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
- if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- refclk = dev_priv->lvds_ssc_freq * 1000;
- DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
- refclk / 1000);
- } else if (!IS_GEN2(dev)) {
- refclk = 96000;
- } else {
- refclk = 48000;
- }
+ refclk = i9xx_get_refclk(crtc, num_connectors);
/*
* Returns a set of divisors for the desired target clock with the given
@@ -5047,7 +5175,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
@@ -5057,53 +5186,24 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+	 * by using the FP0/FP1. In that case we disable the LVDS
+ * downclock feature.
+ */
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
+ &clock,
&reduced_clock);
- if (has_reduced_clock && (clock.p != reduced_clock.p)) {
- /*
- * If the different P is found, it means that we can't
- * switch the display clock by using the FP0/FP1.
- * In such case we will disable the LVDS downclock
- * feature.
- */
- DRM_DEBUG_KMS("Different P is found for "
- "LVDS clock/downclock\n");
- has_reduced_clock = 0;
- }
- }
- /* SDVO TV has fixed PLL values depend on its clock range,
- this mirrors vbios setting. */
- if (is_sdvo && is_tv) {
- if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
- clock.p1 = 2;
- clock.p2 = 10;
- clock.n = 3;
- clock.m1 = 16;
- clock.m2 = 8;
- } else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
- clock.p1 = 1;
- clock.p2 = 10;
- clock.n = 6;
- clock.m1 = 12;
- clock.m2 = 8;
- }
}
- if (IS_PINEVIEW(dev)) {
- fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = (1 << reduced_clock.n) << 16 |
- reduced_clock.m1 << 8 | reduced_clock.m2;
- } else {
- fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
- if (has_reduced_clock)
- fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
- reduced_clock.m2;
- }
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+
+ i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
+ &reduced_clock : NULL);
dpll = DPLL_VGA_MODE_DIS;
@@ -5177,8 +5277,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
- /* Ironlake's plane is forced to pipe, bit 24 is to
- enable color space conversion */
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
@@ -5213,7 +5311,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- I915_WRITE(FP0(pipe), fp);
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
@@ -5300,34 +5397,32 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DPLL(pipe), dpll);
}
- intel_crtc->lowfreq_avail = false;
- if (is_lvds && has_reduced_clock && i915_powersave) {
- I915_WRITE(FP1(pipe), fp2);
- intel_crtc->lowfreq_avail = true;
- if (HAS_PIPE_CXSR(dev)) {
+ if (HAS_PIPE_CXSR(dev)) {
+ if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- }
- } else {
- I915_WRITE(FP1(pipe), fp);
- if (HAS_PIPE_CXSR(dev)) {
+ } else {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
pipeconf &= ~PIPECONF_INTERLACE_MASK;
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (!IS_GEN2(dev) &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
- adjusted_mode->crtc_vsync_end -= 1;
- adjusted_mode->crtc_vsync_start -= 1;
- } else
+ vsyncshift = adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal/2;
+ } else {
pipeconf |= PIPECONF_PROGRESSIVE;
+ vsyncshift = 0;
+ }
+
+ if (!IS_GEN3(dev))
+ I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@@ -5593,7 +5688,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
- ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+ &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
@@ -5603,21 +5699,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
+ /*
+ * Ensure we match the reduced clock's P to the target clock.
+ * If the clocks don't match, we can't switch the display clock
+	 * by using the FP0/FP1. In that case we disable the LVDS
+ * downclock feature.
+ */
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
+ &clock,
&reduced_clock);
- if (has_reduced_clock && (clock.p != reduced_clock.p)) {
- /*
- * If the different P is found, it means that we can't
- * switch the display clock by using the FP0/FP1.
- * In such case we will disable the LVDS downclock
- * feature.
- */
- DRM_DEBUG_KMS("Different P is found for "
- "LVDS clock/downclock\n");
- has_reduced_clock = 0;
- }
}
/* SDVO TV has fixed PLL values that depend on its clock range;
 this mirrors the VBIOS setting. */
@@ -5914,16 +6006,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ pipeconf |= PIPECONF_INTERLACED_ILK;
/* the chip adds 2 halflines automatically */
- adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
- adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
- adjusted_mode->crtc_vsync_end -= 1;
- adjusted_mode->crtc_vsync_start -= 1;
- } else
+ I915_WRITE(VSYNCSHIFT(pipe),
+ adjusted_mode->crtc_hsync_start
+ - adjusted_mode->crtc_htotal/2);
+ } else {
pipeconf |= PIPECONF_PROGRESSIVE;
+ I915_WRITE(VSYNCSHIFT(pipe), 0);
+ }
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@@ -5966,12 +6059,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
intel_wait_for_vblank(dev, pipe);
- if (IS_GEN5(dev)) {
- /* enable address swizzle for tiling buffer */
- temp = I915_READ(DISP_ARB_CTL);
- I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
- }
-
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
@@ -6086,15 +6173,18 @@ static void ironlake_write_eld(struct drm_connector *connector,
uint32_t i;
int len;
int hdmiw_hdmiedid;
+ int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
if (HAS_PCH_IBX(connector->dev)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+ aud_config = IBX_AUD_CONFIG_A;
aud_cntl_st = IBX_AUD_CNTL_ST_A;
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+ aud_config = CPT_AUD_CONFIG_A;
aud_cntl_st = CPT_AUD_CNTL_ST_A;
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
@@ -6102,6 +6192,7 @@ static void ironlake_write_eld(struct drm_connector *connector,
i = to_intel_crtc(crtc)->pipe;
hdmiw_hdmiedid += i * 0x100;
aud_cntl_st += i * 0x100;
+ aud_config += i * 0x100;
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
@@ -6121,7 +6212,9 @@ static void ironlake_write_eld(struct drm_connector *connector,
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- }
+ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+ } else
+ I915_WRITE(aud_config, 0);
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
@@ -6927,9 +7020,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
- /* Unlock panel regs */
- I915_WRITE(PP_CONTROL,
- I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+ assert_panel_unlocked(dev_priv, pipe);
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
@@ -6938,9 +7029,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
-
- /* ...and lock them again */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
/* Schedule downclock */
@@ -6970,9 +7058,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
DRM_DEBUG_DRIVER("downclocking LVDS\n");
- /* Unlock panel regs */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
- PANEL_UNLOCK_REGS);
+ assert_panel_unlocked(dev_priv, pipe);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
@@ -6980,9 +7066,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
-
- /* ...and lock them again */
- I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
}
@@ -7097,7 +7180,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
- i915_gem_object_unpin(work->old_fb_obj);
+ intel_unpin_fb_obj(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
@@ -7247,7 +7330,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
+ OUT_RING(0); /* aux display base address, unused */
ADVANCE_LP_RING();
out:
return ret;
@@ -7681,10 +7764,9 @@ static void intel_setup_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
- bool has_lvds = false;
+ bool has_lvds;
- if (IS_MOBILE(dev) && !IS_I830(dev))
- has_lvds = intel_lvds_init(dev);
+ has_lvds = intel_lvds_init(dev);
if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
/* disable the panel fitter on everything but LVDS */
I915_WRITE(PFIT_CONTROL, 0);
@@ -7840,7 +7922,8 @@ int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_VYUY:
break;
default:
- DRM_ERROR("unsupported pixel format\n");
+ DRM_DEBUG_KMS("unsupported pixel format %u\n",
+ mode_cmd->pixel_format);
return -EINVAL;
}
@@ -8162,6 +8245,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 pcu_mbox, rc6_mask = 0;
+ u32 gtfifodbg;
int cur_freq, min_freq, max_freq;
int i;
@@ -8173,6 +8257,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN6_RC_STATE, 0);
mutex_lock(&dev_priv->dev->struct_mutex);
+
+	/* Clear the FIFO debug register now so stale errors don't confuse new ones */
+ if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
@@ -8959,8 +9050,6 @@ struct intel_quirk {
};
struct intel_quirk intel_quirks[] = {
- /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
- { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
@@ -9037,6 +9126,9 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
dev->mode_config.funcs = (void *)&intel_mode_funcs;
intel_init_quirks(dev);
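The interlaced paths above program VSYNCSHIFT as crtc_hsync_start - crtc_htotal/2, offsetting the odd field's vsync by half a scanline. A worked sketch, assuming the common 1080i CEA timing (htotal 2200, hsync_start 2008):

/* Illustrative only: half-line shift for an interlaced mode.
 * e.g. 1080i: 2008 - 2200/2 = 908 pixels. */
static u32 interlaced_vsyncshift(const struct drm_display_mode *m)
{
	return m->crtc_hsync_start - m->crtc_htotal / 2;
}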
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 94f860cce3f..110552ff302 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -49,7 +49,7 @@ struct intel_dp {
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
- int force_audio;
+ enum hdmi_force_audio force_audio;
uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
@@ -352,7 +352,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
- int try, precharge;
+ int try, precharge = 5;
intel_dp_check_edp(intel_dp);
/* The clock divider is based on the hrawclk,
@@ -368,15 +368,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
else
aux_clock_divider = 225; /* eDP input clock at 450MHz */
 } else if (HAS_PCH_SPLIT(dev))
- aux_clock_divider = 62; /* ILK input clock fixed at 125MHz */
+ aux_clock_divider = 63; /* ILK input clock fixed at 125MHz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
- if (IS_GEN6(dev))
- precharge = 3;
- else
- precharge = 5;
-
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ(ch_ctl);
@@ -421,6 +416,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR))
+ continue;
if (status & DP_AUX_CH_CTL_DONE)
break;
}
@@ -2117,8 +2116,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (status != connector_status_connected)
return status;
- if (intel_dp->force_audio) {
- intel_dp->has_audio = intel_dp->force_audio > 0;
+ if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
+ intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
@@ -2218,10 +2217,10 @@ intel_dp_set_property(struct drm_connector *connector,
intel_dp->force_audio = i;
- if (i == 0)
+ if (i == HDMI_AUDIO_AUTO)
has_audio = intel_dp_detect_audio(connector);
else
- has_audio = i > 0;
+ has_audio = (i == HDMI_AUDIO_ON);
if (has_audio == intel_dp->has_audio)
return 0;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1348705faf6..5a14149b379 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -374,6 +374,7 @@ extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
+extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
@@ -381,7 +382,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
-
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
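With the new export, pinning and unpinning a scanout buffer pair up symmetrically; a hedged usage sketch:

/* Illustrative only: intel_unpin_fb_obj() releases both the fence pin
 * taken by intel_pin_and_fence_fb_obj() and the GTT pin. */
ret = intel_pin_and_fence_fb_obj(dev, obj, pipelined);
if (ret == 0) {
	/* ... scan out from obj ... */
	intel_unpin_fb_obj(obj);
}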
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 6eda1b51c63..020a7d7f744 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -157,7 +157,6 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
C(vsync_end);
C(vtotal);
C(clock);
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
#undef C
}
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 571375a3eef..19ecd78b8a2 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -152,11 +152,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
- info->pixmap.size = 64*1024;
- info->pixmap.buf_align = 8;
- info->pixmap.access_align = 32;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->pixmap.scan_align = 1;
+ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height,
@@ -258,6 +254,16 @@ void intel_fbdev_fini(struct drm_device *dev)
kfree(dev_priv->fbdev);
dev_priv->fbdev = NULL;
}
+
+void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (!dev_priv->fbdev)
+ return;
+
+ fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+}
+
MODULE_LICENSE("GPL and additional rights");
void intel_fb_output_poll_changed(struct drm_device *dev)
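A hedged sketch of calling the new hook from the driver's suspend and resume paths; the call sites are assumptions, not part of this diff:

/* Illustrative only: park the fbdev console across suspend. */
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);	/* on suspend */
/* ... */
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);	/* on resume */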
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 64541f7ef90..cae3e5f17a4 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -44,7 +44,7 @@ struct intel_hdmi {
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
- int force_audio;
+ enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
};
@@ -339,7 +339,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink =
+ drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
connector->display_info.raw_edid = NULL;
@@ -347,8 +349,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
}
if (status == connector_status_connected) {
- if (intel_hdmi->force_audio)
- intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
+ if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
+ intel_hdmi->has_audio =
+ (intel_hdmi->force_audio == HDMI_AUDIO_ON);
}
return status;
@@ -402,7 +405,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
return ret;
if (property == dev_priv->force_audio_property) {
- int i = val;
+ enum hdmi_force_audio i = val;
bool has_audio;
if (i == intel_hdmi->force_audio)
@@ -410,13 +413,13 @@ intel_hdmi_set_property(struct drm_connector *connector,
intel_hdmi->force_audio = i;
- if (i == 0)
+ if (i == HDMI_AUDIO_AUTO)
has_audio = intel_hdmi_detect_audio(connector);
else
- has_audio = i > 0;
+ has_audio = (i == HDMI_AUDIO_ON);
- if (has_audio == intel_hdmi->has_audio)
- return 0;
+ if (i == HDMI_AUDIO_OFF_DVI)
+ intel_hdmi->has_hdmi_sink = 0;
intel_hdmi->has_audio = has_audio;
goto done;
@@ -514,7 +517,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_encoder->type = INTEL_OUTPUT_HDMI;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- connector->interlace_allowed = 0;
+ connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d30ccccb9d7..601c86e664a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -37,7 +37,7 @@
/* Intel GPIO access functions */
-#define I2C_RISEFALL_TIME 20
+#define I2C_RISEFALL_TIME 10
static inline struct intel_gmbus *
to_intel_gmbus(struct i2c_adapter *i2c)
@@ -45,13 +45,6 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter);
}
-struct intel_gpio {
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
- struct drm_i915_private *dev_priv;
- u32 reg;
-};
-
void
intel_i2c_reset(struct drm_device *dev)
{
@@ -78,15 +71,15 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
I915_WRITE(DSPCLK_GATE_D, val);
}
-static u32 get_reserved(struct intel_gpio *gpio)
+static u32 get_reserved(struct intel_gmbus *bus)
{
- struct drm_i915_private *dev_priv = gpio->dev_priv;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
struct drm_device *dev = dev_priv->dev;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ_NOTRACE(gpio->reg) &
+ reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -95,29 +88,29 @@ static u32 get_reserved(struct intel_gpio *gpio)
static int get_clock(void *data)
{
- struct intel_gpio *gpio = data;
- struct drm_i915_private *dev_priv = gpio->dev_priv;
- u32 reserved = get_reserved(gpio);
- I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
- I915_WRITE_NOTRACE(gpio->reg, reserved);
- return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
- struct intel_gpio *gpio = data;
- struct drm_i915_private *dev_priv = gpio->dev_priv;
- u32 reserved = get_reserved(gpio);
- I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
- I915_WRITE_NOTRACE(gpio->reg, reserved);
- return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
+ return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
- struct intel_gpio *gpio = data;
- struct drm_i915_private *dev_priv = gpio->dev_priv;
- u32 reserved = get_reserved(gpio);
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
u32 clock_bits;
if (state_high)
@@ -126,15 +119,15 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
- POSTING_READ(gpio->reg);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
+ POSTING_READ(bus->gpio_reg);
}
static void set_data(void *data, int state_high)
{
- struct intel_gpio *gpio = data;
- struct drm_i915_private *dev_priv = gpio->dev_priv;
- u32 reserved = get_reserved(gpio);
+ struct intel_gmbus *bus = data;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ u32 reserved = get_reserved(bus);
u32 data_bits;
if (state_high)
@@ -143,13 +136,14 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
- POSTING_READ(gpio->reg);
+ I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
+ POSTING_READ(bus->gpio_reg);
}
-static struct i2c_adapter *
-intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
+static bool
+intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
{
+ struct drm_i915_private *dev_priv = bus->dev_priv;
static const int map_pin_to_reg[] = {
0,
GPIOB,
@@ -160,65 +154,48 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
0,
GPIOF,
};
- struct intel_gpio *gpio;
+ struct i2c_algo_bit_data *algo;
if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
- return NULL;
+ return false;
- gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
- if (gpio == NULL)
- return NULL;
+ algo = &bus->bit_algo;
- gpio->reg = map_pin_to_reg[pin];
+ bus->gpio_reg = map_pin_to_reg[pin];
if (HAS_PCH_SPLIT(dev_priv->dev))
- gpio->reg += PCH_GPIOA - GPIOA;
- gpio->dev_priv = dev_priv;
-
- snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
- "i915 GPIO%c", "?BACDE?F"[pin]);
- gpio->adapter.owner = THIS_MODULE;
- gpio->adapter.algo_data = &gpio->algo;
- gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
- gpio->algo.setsda = set_data;
- gpio->algo.setscl = set_clock;
- gpio->algo.getsda = get_data;
- gpio->algo.getscl = get_clock;
- gpio->algo.udelay = I2C_RISEFALL_TIME;
- gpio->algo.timeout = usecs_to_jiffies(2200);
- gpio->algo.data = gpio;
-
- if (i2c_bit_add_bus(&gpio->adapter))
- goto out_free;
-
- return &gpio->adapter;
-
-out_free:
- kfree(gpio);
- return NULL;
+ bus->gpio_reg += PCH_GPIOA - GPIOA;
+
+ bus->adapter.algo_data = algo;
+ algo->setsda = set_data;
+ algo->setscl = set_clock;
+ algo->getsda = get_data;
+ algo->getscl = get_clock;
+ algo->udelay = I2C_RISEFALL_TIME;
+ algo->timeout = usecs_to_jiffies(2200);
+ algo->data = bus;
+
+ return true;
}
static int
-intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
- struct i2c_adapter *adapter,
+intel_i2c_quirk_xfer(struct intel_gmbus *bus,
struct i2c_msg *msgs,
int num)
{
- struct intel_gpio *gpio = container_of(adapter,
- struct intel_gpio,
- adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
int ret;
intel_i2c_reset(dev_priv->dev);
intel_i2c_quirk_set(dev_priv, true);
- set_data(gpio, 1);
- set_clock(gpio, 1);
+ set_data(bus, 1);
+ set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
- ret = adapter->algo->master_xfer(adapter, msgs, num);
+ ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num);
- set_data(gpio, 1);
- set_clock(gpio, 1);
+ set_data(bus, 1);
+ set_clock(bus, 1);
intel_i2c_quirk_set(dev_priv, false);
return ret;
@@ -232,12 +209,15 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
- struct drm_i915_private *dev_priv = adapter->algo_data;
- int i, reg_offset;
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+ int i, reg_offset, ret;
- if (bus->force_bit)
- return intel_i2c_quirk_xfer(dev_priv,
- bus->force_bit, msgs, num);
+ mutex_lock(&dev_priv->gmbus_mutex);
+
+ if (bus->force_bit) {
+ ret = intel_i2c_quirk_xfer(bus, msgs, num);
+ goto out;
+ }
reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
@@ -249,7 +229,8 @@ gmbus_xfer(struct i2c_adapter *adapter,
if (msgs[i].flags & I2C_M_RD) {
I915_WRITE(GMBUS1 + reg_offset,
- GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ GMBUS_CYCLE_WAIT |
+ (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
(len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
@@ -278,7 +259,8 @@ gmbus_xfer(struct i2c_adapter *adapter,
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
- (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+ GMBUS_CYCLE_WAIT |
+ (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
(msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
@@ -317,11 +299,15 @@ clear_err:
I915_WRITE(GMBUS1 + reg_offset, 0);
done:
- /* Mark the GMBUS interface as disabled. We will re-enable it at the
- * start of the next xfer, till then let it sleep.
+ /* Mark the GMBUS interface as disabled after waiting for idle.
+ * We will re-enable it at the start of the next xfer,
+	 * until then let it sleep.
*/
+ if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10))
+ DRM_INFO("GMBUS timed out waiting for idle\n");
I915_WRITE(GMBUS0 + reg_offset, 0);
- return i;
+ ret = i;
+ goto out;
timeout:
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
@@ -329,23 +315,21 @@ timeout:
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins; try GPIO bit-banging instead. */
- bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
- if (!bus->force_bit)
- return -ENOMEM;
-
- return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+ if (!bus->has_gpio) {
+ ret = -EIO;
+ } else {
+ bus->force_bit = true;
+ ret = intel_i2c_quirk_xfer(bus, msgs, num);
+ }
+out:
+ mutex_unlock(&dev_priv->gmbus_mutex);
+ return ret;
}
static u32 gmbus_func(struct i2c_adapter *adapter)
{
- struct intel_gmbus *bus = container_of(adapter,
- struct intel_gmbus,
- adapter);
-
- if (bus->force_bit)
- bus->force_bit->algo->functionality(bus->force_bit);
-
- return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ return i2c_bit_algo.functionality(adapter) &
+ (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
/* I2C_FUNC_10BIT_ADDR | */
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
@@ -375,11 +359,13 @@ int intel_setup_gmbus(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+ dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
GFP_KERNEL);
if (dev_priv->gmbus == NULL)
return -ENOMEM;
+ mutex_init(&dev_priv->gmbus_mutex);
+
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
@@ -391,7 +377,7 @@ int intel_setup_gmbus(struct drm_device *dev)
names[i]);
bus->adapter.dev.parent = &dev->pdev->dev;
- bus->adapter.algo_data = dev_priv;
+ bus->dev_priv = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
ret = i2c_add_adapter(&bus->adapter);
@@ -401,8 +387,11 @@ int intel_setup_gmbus(struct drm_device *dev)
/* By default use a conservative clock rate */
bus->reg0 = i | GMBUS_RATE_100KHZ;
+ bus->has_gpio = intel_gpio_setup(bus, i);
+
/* XXX force bit banging until GMBUS is fully debugged */
- bus->force_bit = intel_gpio_create(dev_priv, i);
+ if (bus->has_gpio && IS_GEN2(dev))
+ bus->force_bit = true;
}
intel_i2c_reset(dev_priv->dev);
@@ -430,19 +419,8 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- if (force_bit) {
- if (bus->force_bit == NULL) {
- struct drm_i915_private *dev_priv = adapter->algo_data;
- bus->force_bit = intel_gpio_create(dev_priv,
- bus->reg0 & 0xff);
- }
- } else {
- if (bus->force_bit) {
- i2c_del_adapter(bus->force_bit);
- kfree(bus->force_bit);
- bus->force_bit = NULL;
- }
- }
+ if (bus->has_gpio)
+ bus->force_bit = force_bit;
}
void intel_teardown_gmbus(struct drm_device *dev)
@@ -455,10 +433,6 @@ void intel_teardown_gmbus(struct drm_device *dev)
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
- if (bus->force_bit) {
- i2c_del_adapter(bus->force_bit);
- kfree(bus->force_bit);
- }
i2c_del_adapter(&bus->adapter);
}
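A hedged usage sketch for the simplified force-bit toggle; the port index is an assumption:

/* Illustrative only: force bit-banging on one GMBUS port, then restore. */
struct intel_gmbus *bus = &dev_priv->gmbus[0];	/* hypothetical port */

intel_gmbus_force_bit(&bus->adapter, true);
/* ... i2c_transfer(&bus->adapter, msgs, num) ... */
intel_gmbus_force_bit(&bus->adapter, false);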
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa84832b0e1..c5c0973af8a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -739,6 +739,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard t5745",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard st5747",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
+ },
+ },
{ } /* terminating entry */
};
@@ -844,6 +860,18 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
return false;
}
+static bool intel_lvds_supported(struct drm_device *dev)
+{
+	/* With the introduction of the PCH we gained a dedicated
+	 * LVDS presence pin; use it. */
+ if (HAS_PCH_SPLIT(dev))
+ return true;
+
+ /* Otherwise LVDS was only attached to mobile products,
+ * except for the inglorious 830gm */
+ return IS_MOBILE(dev) && !IS_I830(dev);
+}
+
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -865,6 +893,9 @@ bool intel_lvds_init(struct drm_device *dev)
int pipe;
u8 pin;
+ if (!intel_lvds_supported(dev))
+ return false;
+
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
return false;
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index be2c6fe07d1..d1928e79d9b 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -28,6 +28,7 @@
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include "drmP.h"
+#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drv.h"
@@ -42,13 +43,13 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
u8 buf[2];
struct i2c_msg msgs[] = {
{
- .addr = 0x50,
+ .addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = 0x50,
+ .addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
@@ -83,10 +84,11 @@ int intel_ddc_get_modes(struct drm_connector *connector,
return ret;
}
-static const char *force_audio_names[] = {
- "off",
- "auto",
- "on",
+static const struct drm_prop_enum_list force_audio_names[] = {
+ { HDMI_AUDIO_OFF_DVI, "force-dvi" },
+ { HDMI_AUDIO_OFF, "off" },
+ { HDMI_AUDIO_AUTO, "auto" },
+ { HDMI_AUDIO_ON, "on" },
};
void
@@ -95,27 +97,24 @@ intel_attach_force_audio_property(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
- int i;
prop = dev_priv->force_audio_property;
if (prop == NULL) {
- prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ prop = drm_property_create_enum(dev, 0,
"audio",
+ force_audio_names,
ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
- for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
- drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
-
dev_priv->force_audio_property = prop;
}
drm_connector_attach_property(connector, prop, 0);
}
-static const char *broadcast_rgb_names[] = {
- "Full",
- "Limited 16:235",
+static const struct drm_prop_enum_list broadcast_rgb_names[] = {
+ { 0, "Full" },
+ { 1, "Limited 16:235" },
};
void
@@ -124,19 +123,16 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
- int i;
prop = dev_priv->broadcast_rgb_property;
if (prop == NULL) {
- prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
+ broadcast_rgb_names,
ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
- for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
- drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
-
dev_priv->broadcast_rgb_property = prop;
}
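For reference, the new helper collapses the removed create-then-add loop; a hedged equivalence sketch:

/* Illustrative only: drm_property_create_enum() wraps the old pattern. */
prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, name, num_values);
for (i = 0; i < num_values; i++)
	drm_property_add_enum(prop, i, list[i].type, list[i].name);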
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index cdf17d4cc1f..80b331c322f 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,8 +25,6 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
-
-#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -227,7 +225,8 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+ true);
if (ret)
return ret;
@@ -263,7 +262,7 @@ i830_activate_pipe_a(struct drm_device *dev)
DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
mode = drm_mode_duplicate(dev, &vesa_640x480);
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_crtcinfo(mode, 0);
if (!drm_crtc_helper_set_mode(&crtc->base, mode,
crtc->base.x, crtc->base.y,
crtc->base.fb))
@@ -448,7 +447,8 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
+ true);
if (ret)
return ret;
@@ -935,10 +935,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
{
struct drm_display_mode *mode = &overlay->crtc->base.mode;
- if (rec->dst_x < mode->crtc_hdisplay &&
- rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
- rec->dst_y < mode->crtc_vdisplay &&
- rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
+ if (rec->dst_x < mode->hdisplay &&
+ rec->dst_x + rec->dst_width <= mode->hdisplay &&
+ rec->dst_y < mode->vdisplay &&
+ rec->dst_y + rec->dst_height <= mode->vdisplay)
return 0;
else
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 04d79fd1dc9..230a141dbea 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -48,7 +48,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
adjusted_mode->clock = fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
}
/* adjusted_mode has been preset to be the panel's fixed mode */
@@ -141,8 +141,8 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
dev_priv->saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
I915_WRITE(BLC_PWM_PCH_CTL2,
- dev_priv->saveBLC_PWM_CTL);
- val = dev_priv->saveBLC_PWM_CTL;
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL2;
}
} else {
val = I915_READ(BLC_PWM_CTL);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 536191540b0..fc66af6a944 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -52,20 +52,6 @@ static inline int ring_space(struct intel_ring_buffer *ring)
return space;
}
-static u32 i915_gem_get_seqno(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno;
-
- seqno = dev_priv->next_seqno;
-
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
-
- return seqno;
-}
-
static int
render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
@@ -399,8 +385,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (INTEL_INFO(dev)->gen > 3) {
int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
- if (IS_GEN6(dev) || IS_GEN7(dev))
- mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
@@ -467,7 +451,7 @@ gen6_add_request(struct intel_ring_buffer *ring,
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
- *seqno = i915_gem_get_seqno(ring->dev);
+ *seqno = i915_gem_next_request_seqno(ring);
update_mboxes(ring, *seqno, mbox1_reg);
update_mboxes(ring, *seqno, mbox2_reg);
@@ -565,8 +549,7 @@ static int
pc_render_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
- struct drm_device *dev = ring->dev;
- u32 seqno = i915_gem_get_seqno(dev);
+ u32 seqno = i915_gem_next_request_seqno(ring);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
@@ -600,6 +583,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
+
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
@@ -617,8 +601,7 @@ static int
render_ring_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
- struct drm_device *dev = ring->dev;
- u32 seqno = i915_gem_get_seqno(dev);
+ u32 seqno = i915_gem_next_request_seqno(ring);
int ret;
ret = intel_ring_begin(ring, 4);
@@ -744,13 +727,13 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
*/
if (IS_GEN7(dev)) {
switch (ring->id) {
- case RING_RENDER:
+ case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
- case RING_BLT:
+ case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
- case RING_BSD:
+ case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
}
@@ -792,7 +775,7 @@ ring_add_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
- seqno = i915_gem_get_seqno(ring->dev);
+ seqno = i915_gem_next_request_seqno(ring);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
@@ -816,8 +799,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
/* It looks like we need to prevent the gt from suspending while waiting
 * for a notify irq, otherwise irqs seem to get lost on at least the
* blt/bsd rings on ivb. */
- if (IS_GEN7(dev))
- gen6_gt_force_wake_get(dev_priv);
+ gen6_gt_force_wake_get(dev_priv);
spin_lock(&ring->irq_lock);
if (ring->irq_refcount++ == 0) {
@@ -844,8 +826,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
}
spin_unlock(&ring->irq_lock);
- if (IS_GEN7(dev))
- gen6_gt_force_wake_put(dev_priv);
+ gen6_gt_force_wake_put(dev_priv);
}
static bool
@@ -1127,11 +1108,93 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
return 0;
}
+static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ bool was_interruptible;
+ int ret;
+
+ /* XXX As we have not yet audited all the paths to check that
+ * they are ready for ERESTARTSYS from intel_ring_begin, do not
+ * allow this wait to be interrupted by a signal.
+ */
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ ret = i915_wait_request(ring, seqno, true);
+
+ dev_priv->mm.interruptible = was_interruptible;
+
+ return ret;
+}
+
+static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
+{
+ struct drm_i915_gem_request *request;
+ u32 seqno = 0;
+ int ret;
+
+ i915_gem_retire_requests_ring(ring);
+
+ if (ring->last_retired_head != -1) {
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
+ ring->space = ring_space(ring);
+ if (ring->space >= n)
+ return 0;
+ }
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ int space;
+
+ if (request->tail == -1)
+ continue;
+
+ space = request->tail - (ring->tail + 8);
+ if (space < 0)
+ space += ring->size;
+ if (space >= n) {
+ seqno = request->seqno;
+ break;
+ }
+
+ /* Consume this request in case we need more space than
+ * is available and so need to prevent a race between
+ * updating last_retired_head and direct reads of
+ * I915_RING_HEAD. It also provides a nice sanity check.
+ */
+ request->tail = -1;
+ }
+
+ if (seqno == 0)
+ return -ENOSPC;
+
+ ret = intel_ring_wait_seqno(ring, seqno);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(ring->last_retired_head == -1))
+ return -ENOSPC;
+
+ ring->head = ring->last_retired_head;
+ ring->last_retired_head = -1;
+ ring->space = ring_space(ring);
+ if (WARN_ON(ring->space < n))
+ return -ENOSPC;
+
+ return 0;
+}
+
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
+ int ret;
+
+ ret = intel_ring_wait_request(ring, n);
+ if (ret != -ENOSPC)
+ return ret;
trace_i915_ring_wait_begin(ring);
if (drm_core_check_feature(dev, DRIVER_GEM))
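The new intel_ring_wait_request() above estimates how much space becomes free once a given request retires: the distance from the current tail (plus the driver's reserved 8-byte gap) forward to that request's recorded tail, modulo the ring size. A standalone model of the arithmetic (hypothetical helper, not driver code):

/* Free space ring_space() would report once the head has advanced to
 * request_tail. E.g. size 4096, tail 4000, request_tail 128 -> 216. */
static int space_after_request(int request_tail, int ring_tail, int ring_size)
{
	int space = request_tail - (ring_tail + 8);

	if (space < 0)
		space += ring_size;

	return space;
}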
@@ -1200,7 +1263,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
static const struct intel_ring_buffer render_ring = {
.name = "render ring",
- .id = RING_RENDER,
+ .id = RCS,
.mmio_base = RENDER_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_render_ring,
@@ -1223,7 +1286,7 @@ static const struct intel_ring_buffer render_ring = {
static const struct intel_ring_buffer bsd_ring = {
.name = "bsd ring",
- .id = RING_BSD,
+ .id = VCS,
.mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
@@ -1333,7 +1396,7 @@ gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
.name = "gen6 bsd ring",
- .id = RING_BSD,
+ .id = VCS,
.mmio_base = GEN6_BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
@@ -1369,79 +1432,13 @@ blt_ring_put_irq(struct intel_ring_buffer *ring)
GEN6_BLITTER_USER_INTERRUPT);
}
-
-/* Workaround for some steppings of SNB:
- * each time the BLT engine's ring tail is moved,
- * the first command in the ring to be parsed
- * must be MI_BATCH_BUFFER_START.
- */
-#define NEED_BLT_WORKAROUND(dev) \
- (IS_GEN6(dev) && (dev->pdev->revision < 8))
-
-static inline struct drm_i915_gem_object *
-to_blt_workaround(struct intel_ring_buffer *ring)
-{
- return ring->private;
-}
-
-static int blt_ring_init(struct intel_ring_buffer *ring)
-{
- if (NEED_BLT_WORKAROUND(ring->dev)) {
- struct drm_i915_gem_object *obj;
- u32 *ptr;
- int ret;
-
- obj = i915_gem_alloc_object(ring->dev, 4096);
- if (obj == NULL)
- return -ENOMEM;
-
- ret = i915_gem_object_pin(obj, 4096, true);
- if (ret) {
- drm_gem_object_unreference(&obj->base);
- return ret;
- }
-
- ptr = kmap(obj->pages[0]);
- *ptr++ = MI_BATCH_BUFFER_END;
- *ptr++ = MI_NOOP;
- kunmap(obj->pages[0]);
-
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
- if (ret) {
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(&obj->base);
- return ret;
- }
-
- ring->private = obj;
- }
-
- return init_ring_common(ring);
-}
-
-static int blt_ring_begin(struct intel_ring_buffer *ring,
- int num_dwords)
-{
- if (ring->private) {
- int ret = intel_ring_begin(ring, num_dwords+2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_BATCH_BUFFER_START);
- intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
-
- return 0;
- } else
- return intel_ring_begin(ring, 4);
-}
-
static int blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
uint32_t cmd;
int ret;
- ret = blt_ring_begin(ring, 4);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
@@ -1456,22 +1453,12 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return 0;
}
-static void blt_ring_cleanup(struct intel_ring_buffer *ring)
-{
- if (!ring->private)
- return;
-
- i915_gem_object_unpin(ring->private);
- drm_gem_object_unreference(ring->private);
- ring->private = NULL;
-}
-
static const struct intel_ring_buffer gen6_blt_ring = {
.name = "blt ring",
- .id = RING_BLT,
+ .id = BCS,
.mmio_base = BLT_RING_BASE,
.size = 32 * PAGE_SIZE,
- .init = blt_ring_init,
+ .init = init_ring_common,
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
.add_request = gen6_add_request,
@@ -1479,7 +1466,6 @@ static const struct intel_ring_buffer gen6_blt_ring = {
.irq_get = blt_ring_get_irq,
.irq_put = blt_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
- .cleanup = blt_ring_cleanup,
.sync_to = gen6_blt_ring_sync_to,
.semaphore_register = {MI_SEMAPHORE_SYNC_BR,
MI_SEMAPHORE_SYNC_BV,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 68281c96c55..bc0365b8fa4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,13 +1,6 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
-enum {
- RCS = 0x0,
- VCS,
- BCS,
- I915_NUM_RINGS,
-};
-
struct intel_hw_status_page {
u32 __iomem *page_addr;
unsigned int gfx_addr;
@@ -36,10 +29,11 @@ struct intel_hw_status_page {
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
- RING_RENDER = 0x1,
- RING_BSD = 0x2,
- RING_BLT = 0x4,
+ RCS = 0x0,
+ VCS,
+ BCS,
} id;
+#define I915_NUM_RINGS 3
u32 mmio_base;
void __iomem *virtual_start;
struct drm_device *dev;
@@ -52,6 +46,16 @@ struct intel_ring_buffer {
int effective_size;
struct intel_hw_status_page status_page;
+ /** We track the position of the requests in the ring buffer, and
+ * when each is retired we update last_retired_head to that request's
+ * position, as the GPU must have finished processing the request and
+ * so we know we can advance the ringbuffer up to that point.
+ *
+ * last_retired_head is set to -1 after the value is consumed so
+ * we can detect new retirements.
+ */
+ u32 last_retired_head;
+
spinlock_t irq_lock;
u32 irq_refcount;
u32 irq_mask;
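The new comment describes a publish/consume handshake between the retire path and ring-space waiters; a hedged sketch of both sides (simplified standalone model, the real fields live in struct intel_ring_buffer):

typedef unsigned int u32;

struct ring_model {
	u32 head;
	u32 last_retired_head;	/* (u32)-1 means "nothing newly retired" */
};

/* Retire side: the GPU has finished reading up to the retired
 * request's tail, so head may later advance to that position. */
static void publish_retired_head(struct ring_model *ring, u32 request_tail)
{
	ring->last_retired_head = request_tail;
}

/* Waiter side: fold the retirement into the software head exactly once,
 * then reset the marker so a stale value is never consumed twice. */
static int consume_retired_head(struct ring_model *ring)
{
	if (ring->last_retired_head == (u32)-1)
		return 0;

	ring->head = ring->last_retired_head;
	ring->last_retired_head = (u32)-1;
	return 1;
}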
@@ -119,6 +123,12 @@ struct intel_ring_buffer {
void *private;
};
+static inline unsigned
+intel_ring_flag(struct intel_ring_buffer *ring)
+{
+ return 1 << ring->id;
+}
+
static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
struct intel_ring_buffer *other)
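The ring ids change from one-hot flag values (RING_RENDER=0x1, RING_BSD=0x2, RING_BLT=0x4) to 0-based indices; where a bitmask is still wanted, the new intel_ring_flag() helper shifts the index back into exactly the old values. A minimal model:

enum ring_id_model { R_RCS = 0, R_VCS, R_BCS };	/* mirrors RCS/VCS/BCS */

/* 1 << 0 = 0x1, 1 << 1 = 0x2, 1 << 2 = 0x4: the former id values. */
static inline unsigned int ring_flag(enum ring_id_model id)
{
	return 1u << id;
}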
@@ -193,6 +203,11 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
+{
+ return ring->tail;
+}
+
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e334ec33a47..e36b171c1e7 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -148,7 +148,7 @@ struct intel_sdvo_connector {
/* Mark the type of connector */
uint16_t output_flag;
- int force_audio;
+ enum hdmi_force_audio force_audio;
/* This contains all current supported TV format */
u8 tv_format_supported[TV_FORMAT_NUM];
@@ -944,7 +944,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
@@ -1310,8 +1309,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
if (status == connector_status_connected) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- if (intel_sdvo_connector->force_audio)
- intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
+ if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
+ intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
}
return status;
@@ -1684,10 +1683,10 @@ intel_sdvo_set_property(struct drm_connector *connector,
intel_sdvo_connector->force_audio = i;
- if (i == 0)
+ if (i == HDMI_AUDIO_AUTO)
has_audio = intel_sdvo_detect_hdmi_audio(connector);
else
- has_audio = i > 0;
+ has_audio = (i == HDMI_AUDIO_ON);
if (has_audio == intel_sdvo->has_hdmi_audio)
return 0;
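Reading the substitutions in these two hunks, HDMI_AUDIO_AUTO replaces the old encoding 0 and HDMI_AUDIO_ON the old "> 0" case. The enum is defined elsewhere (intel_drv.h); the full layout below is an assumption for reference, not quoted from this patch:

/* Assumed layout, not quoted from this patch: */
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* assumed: force off, treat sink as DVI */
	HDMI_AUDIO_OFF,			/* -1: audio forced off */
	HDMI_AUDIO_AUTO,		/*  0: detect from the sink's EDID */
	HDMI_AUDIO_ON,			/*  1: audio forced on */
};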
@@ -1985,7 +1984,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
drm_connector_helper_add(&connector->base.base,
&intel_sdvo_connector_helper_funcs);
- connector->base.base.interlace_allowed = 0;
+ connector->base.base.interlace_allowed = 1;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -2277,10 +2276,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->max_##name = data_value[0]; \
intel_sdvo_connector->cur_##name = response; \
intel_sdvo_connector->name = \
- drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
- intel_sdvo_connector->name->values[0] = 0; \
- intel_sdvo_connector->name->values[1] = data_value[0]; \
drm_connector_attach_property(connector, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
@@ -2314,25 +2311,19 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->left_margin = data_value[0] - response;
intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
intel_sdvo_connector->left =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "left_margin", 2);
+ drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
if (!intel_sdvo_connector->left)
return false;
- intel_sdvo_connector->left->values[0] = 0;
- intel_sdvo_connector->left->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
intel_sdvo_connector->right =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "right_margin", 2);
+ drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
if (!intel_sdvo_connector->right)
return false;
- intel_sdvo_connector->right->values[0] = 0;
- intel_sdvo_connector->right->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
@@ -2356,25 +2347,21 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->top_margin = data_value[0] - response;
intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
intel_sdvo_connector->top =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "top_margin", 2);
+ drm_property_create_range(dev, 0,
+ "top_margin", 0, data_value[0]);
if (!intel_sdvo_connector->top)
return false;
- intel_sdvo_connector->top->values[0] = 0;
- intel_sdvo_connector->top->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
intel_sdvo_connector->bottom =
- drm_property_create(dev, DRM_MODE_PROP_RANGE,
- "bottom_margin", 2);
+ drm_property_create_range(dev, 0,
+ "bottom_margin", 0, data_value[0]);
if (!intel_sdvo_connector->bottom)
return false;
- intel_sdvo_connector->bottom->values[0] = 0;
- intel_sdvo_connector->bottom->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
@@ -2403,12 +2390,10 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->max_dot_crawl = 1;
intel_sdvo_connector->cur_dot_crawl = response & 0x1;
intel_sdvo_connector->dot_crawl =
- drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+ drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
if (!intel_sdvo_connector->dot_crawl)
return false;
- intel_sdvo_connector->dot_crawl->values[0] = 0;
- intel_sdvo_connector->dot_crawl->values[1] = 1;
drm_connector_attach_property(connector,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
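The property hunks above all apply the same migration from two-step range-property setup to the drm_property_create_range() helper; a condensed before/after sketch (`prop` and its surrounding function are hypothetical, values taken from the dot_crawl case):

struct drm_property *prop;

/* Before: allocate a 2-value DRM_MODE_PROP_RANGE, then fill min/max
 * into the values[] array by hand. */
prop = drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
if (!prop)
	return false;
prop->values[0] = 0;
prop->values[1] = 1;

/* After: min and max are passed directly; the helper supplies
 * DRM_MODE_PROP_RANGE itself, hence the 0 flags argument. */
prop = drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
if (!prop)
	return false;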
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index a0835040c86..7aa0450399a 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -501,7 +501,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
mutex_lock(&dev->struct_mutex);
}
- i915_gem_object_unpin(old_obj);
+ intel_unpin_fb_obj(old_obj);
}
out_unlock:
@@ -528,7 +528,7 @@ intel_disable_plane(struct drm_plane *plane)
goto out;
mutex_lock(&dev->struct_mutex);
- i915_gem_object_unpin(intel_plane->obj);
+ intel_unpin_fb_obj(intel_plane->obj);
intel_plane->obj = NULL;
mutex_unlock(&dev->struct_mutex);
out:
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 1571be37ce3..05f765ef546 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1240,7 +1240,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
int type;
mode = reported_modes[0];
- drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_crtcinfo(&mode, 0);
if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
type = intel_tv_detect_type(intel_tv, connector);