author     Grant Likely <grant.likely@secretlab.ca>    2011-07-15 20:11:34 -0600
committer  Grant Likely <grant.likely@secretlab.ca>    2011-07-15 20:11:34 -0600
commit     8c11642a50555e584774737f7c296f9aece310cf (patch)
tree       1ff8dfaf05479593ef2c50378a68dfc6aec495a5 /drivers/gpu/drm/i915
parent     5d10302f46df1d9a85c34ea97f9b6c29e414482e (diff)
parent     620917de59eeb934b9f8cf35cc2d95c1ac8ed0fc (diff)
Merge commit 'v3.0-rc7' into devicetree/next
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        |  16
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |  34
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c            |  11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  35
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            |  52
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c            |  81
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h            |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c        |  16
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       | 308
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c       |  12
11 files changed, 349 insertions(+), 221 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4d46441cbe2..0a893f7400f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1207,13 +1207,17 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- seq_printf(m, "power context ");
- describe_obj(m, dev_priv->pwrctx);
- seq_printf(m, "\n");
+ if (dev_priv->pwrctx) {
+ seq_printf(m, "power context ");
+ describe_obj(m, dev_priv->pwrctx);
+ seq_printf(m, "\n");
+ }
- seq_printf(m, "render context ");
- describe_obj(m, dev_priv->renderctx);
- seq_printf(m, "\n");
+ if (dev_priv->renderctx) {
+ seq_printf(m, "render context ");
+ describe_obj(m, dev_priv->renderctx);
+ seq_printf(m, "\n");
+ }
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0239e9974bf..e1787022d6c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1266,30 +1266,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- if (IS_IVYBRIDGE(dev)) {
- /* Share pre & uninstall handlers with ILK/SNB */
- dev->driver->irq_handler = ivybridge_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_preinstall;
- dev->driver->irq_postinstall = ivybridge_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ivybridge_enable_vblank;
- dev->driver->disable_vblank = ivybridge_disable_vblank;
- } else if (HAS_PCH_SPLIT(dev)) {
- dev->driver->irq_handler = ironlake_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_preinstall;
- dev->driver->irq_postinstall = ironlake_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ironlake_enable_vblank;
- dev->driver->disable_vblank = ironlake_disable_vblank;
- } else {
- dev->driver->irq_preinstall = i915_driver_irq_preinstall;
- dev->driver->irq_postinstall = i915_driver_irq_postinstall;
- dev->driver->irq_uninstall = i915_driver_irq_uninstall;
- dev->driver->irq_handler = i915_driver_irq_handler;
- dev->driver->enable_vblank = i915_enable_vblank;
- dev->driver->disable_vblank = i915_disable_vblank;
- }
-
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
@@ -2017,12 +1993,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* enable GEM by default */
dev_priv->has_gem = 1;
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
- dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
- dev->driver->get_vblank_counter = gm45_get_vblank_counter;
- }
+ intel_irq_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@@ -2182,9 +2153,8 @@ int i915_driver_unload(struct drm_device *dev)
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
- i915_gem_free_all_phys_object(dev);
-
mutex_lock(&dev->struct_mutex);
+ i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0defd427059..013d304455b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -579,6 +579,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
} else switch (INTEL_INFO(dev)->gen) {
case 6:
ret = gen6_do_reset(dev, flags);
+ /* If reset with a user forcewake, try to restore */
+ if (atomic_read(&dev_priv->forcewake_count))
+ __gen6_gt_force_wake_get(dev_priv);
break;
case 5:
ret = ironlake_do_reset(dev, flags);
@@ -762,14 +765,6 @@ static struct drm_driver driver = {
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
- .enable_vblank = i915_enable_vblank,
- .disable_vblank = i915_disable_vblank,
- .get_vblank_timestamp = i915_get_vblank_timestamp,
- .get_scanout_position = i915_get_crtc_scanoutpos,
- .irq_preinstall = i915_driver_irq_preinstall,
- .irq_postinstall = i915_driver_irq_postinstall,
- .irq_uninstall = i915_driver_irq_uninstall,
- .irq_handler = i915_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f63ee162f12..f245c588ae9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -211,6 +211,9 @@ struct drm_i915_display_funcs {
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_pch_clock_gating)(struct drm_device *dev);
+ int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -994,8 +997,6 @@ extern unsigned int i915_enable_fbc;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
-extern void i915_save_display(struct drm_device *dev);
-extern void i915_restore_display(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -1030,33 +1031,12 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern int i915_driver_irq_postinstall(struct drm_device *dev);
-extern void i915_driver_irq_uninstall(struct drm_device * dev);
-
-extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS);
-extern void ironlake_irq_preinstall(struct drm_device *dev);
-extern int ironlake_irq_postinstall(struct drm_device *dev);
-extern void ironlake_irq_uninstall(struct drm_device *dev);
-
-extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS);
-extern void ivybridge_irq_preinstall(struct drm_device *dev);
-extern int ivybridge_irq_postinstall(struct drm_device *dev);
-extern void ivybridge_irq_uninstall(struct drm_device *dev);
+extern void intel_irq_init(struct drm_device *dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern int ironlake_enable_vblank(struct drm_device *dev, int crtc);
-extern void ironlake_disable_vblank(struct drm_device *dev, int crtc);
-extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc);
-extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1067,13 +1047,6 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle (struct drm_device *dev);
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
- int *max_error,
- struct timeval *vblank_time,
- unsigned flags);
-
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
- int *vpos, int *hpos);
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 94c84d74410..5c0d1247f45 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -463,8 +463,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -797,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -907,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -1219,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unlock;
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+ }
if (obj->tiling_mode == I915_TILING_NONE)
ret = i915_gem_object_put_fence(obj);
@@ -1558,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
inode = obj->base.filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
+ gfpmask |= mapping_gfp_mask(mapping);
+
for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
if (IS_ERR(page))
goto err_pages;
@@ -1701,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*. Here we mirror the actions taken
- * when by shmem_delete_inode() to release the backing store.
+ * backing pages, *now*.
*/
inode = obj->base.filp->f_path.dentry->d_inode;
- truncate_inode_pages(inode->i_mapping, 0);
- if (inode->i_op->truncate_range)
- inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+ shmem_truncate_range(inode, 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}
@@ -2080,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
- i915_driver_irq_preinstall(ring->dev);
- i915_driver_irq_postinstall(ring->dev);
+ ring->dev->driver->irq_preinstall(ring->dev);
+ ring->dev->driver->irq_postinstall(ring->dev);
}
trace_i915_gem_request_wait_begin(ring, seqno);
@@ -2926,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
- i915_gem_release_mmap(obj);
-
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3567,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct address_space *mapping;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
@@ -3577,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
i915_gem_info_add_obj(dev_priv, size);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -3952,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- struct page *page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4014,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
struct page *page;
char *dst, *src;
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
return PTR_ERR(page);
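
Aside: the recurring i915_gem.c change above replaces read_cache_page_gfp() with the shmem helpers and records the desired gfp mask on the object's mapping at allocation time via mapping_set_gfp_mask(). A rough illustrative sketch of the resulting lookup pattern, in the same kernel-C style; sketch_get_obj_page() is a hypothetical name, not part of the commit, and the only headers assumed are <linux/shmem_fs.h> (added by this diff) and <linux/pagemap.h> for mapping_gfp_mask():

	/* Sketch only: fetch one shmem-backed page of a GEM object, honouring the
	 * gfp mask that i915_gem_alloc_object() stored on the mapping. */
	static struct page *sketch_get_obj_page(struct drm_i915_gem_object *obj,
						pgoff_t index, gfp_t extra_gfp)
	{
		struct address_space *mapping =
			obj->base.filp->f_path.dentry->d_inode->i_mapping;

		/* mapping_gfp_mask() returns GFP_HIGHUSER | __GFP_RECLAIMABLE here,
		 * as set in i915_gem_alloc_object(); OR in any caller flags. */
		return shmem_read_mapping_page_gfp(mapping, index,
						   mapping_gfp_mask(mapping) | extra_gfp);
	}

shmem_read_mapping_page(mapping, index), used in most hunks above, is the same call with just the mapping's own mask.
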
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 20a4cc5b818..4934cf84c32 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -187,10 +187,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- /* blow away mappings if mapped through GTT */
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
- i915_gem_release_mmap(obj);
-
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9e34a1abeb6..3b03f85ea62 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -152,7 +152,7 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
@@ -184,7 +184,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return (high1 << 8) | low;
}
-u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);
@@ -198,7 +198,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
return I915_READ(reg);
}
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int *vpos, int *hpos)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -264,7 +264,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
return ret;
}
-int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags)
@@ -462,7 +462,7 @@ static void pch_irq_handler(struct drm_device *dev)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
-irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -550,7 +550,7 @@ done:
return ret;
}
-irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1209,7 +1209,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
}
-irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1454,7 +1454,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-int i915_enable_vblank(struct drm_device *dev, int pipe)
+static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1478,7 +1478,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
return 0;
}
-int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1494,7 +1494,7 @@ int ironlake_enable_vblank(struct drm_device *dev, int pipe)
return 0;
}
-int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1513,7 +1513,7 @@ int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-void i915_disable_vblank(struct drm_device *dev, int pipe)
+static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1529,7 +1529,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1540,7 +1540,7 @@ void ironlake_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1728,7 +1728,7 @@ repeat:
/* drm_dma.h hooks
*/
-void ironlake_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1740,7 +1740,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
/* Workaround stalls observed on Sandy Bridge GPUs by
* making the blitter command streamer generate a
* write to the Hardware Status Page for
@@ -1749,6 +1749,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
* happens.
*/
I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+ I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
}
/* XXX hotplug from PCH */
@@ -1768,7 +1769,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-int ironlake_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
@@ -1840,7 +1841,7 @@ int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}
-int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ivybridge_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
@@ -1890,7 +1891,7 @@ int ivybridge_irq_postinstall(struct drm_device *dev)
return 0;
}
-void i915_driver_irq_preinstall(struct drm_device * dev)
+static void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -1917,7 +1918,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
* Must be called after intel_modeset_init or hotplug interrupts won't be
* enabled correctly.
*/
-int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
@@ -1993,7 +1994,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-void ironlake_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2013,7 +2014,7 @@ void ironlake_irq_uninstall(struct drm_device *dev)
I915_WRITE(GTIIR, I915_READ(GTIIR));
}
-void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -2039,3 +2040,41 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR));
}
+
+void intel_irq_init(struct drm_device *dev)
+{
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ }
+
+
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+ if (IS_IVYBRIDGE(dev)) {
+ /* Share pre & uninstall handlers with ILK/SNB */
+ dev->driver->irq_handler = ivybridge_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev->driver->irq_handler = ironlake_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ironlake_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ironlake_enable_vblank;
+ dev->driver->disable_vblank = ironlake_disable_vblank;
+ } else {
+ dev->driver->irq_preinstall = i915_driver_irq_preinstall;
+ dev->driver->irq_postinstall = i915_driver_irq_postinstall;
+ dev->driver->irq_uninstall = i915_driver_irq_uninstall;
+ dev->driver->irq_handler = i915_driver_irq_handler;
+ dev->driver->enable_vblank = i915_enable_vblank;
+ dev->driver->disable_vblank = i915_disable_vblank;
+ }
+}
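
Aside: intel_irq_init() above replaces the per-generation conditionals that used to live in i915_dma.c with a single place that fills in every IRQ/vblank hook before drm_irq_install() runs. A loose, self-contained miniature of that shape in plain userspace C (hypothetical names, simplified generation check, not the DRM API):

	#include <stdio.h>

	struct hooks {
		unsigned int max_vblank_count;
		void (*irq_handler)(void);
		int  (*enable_vblank)(int pipe);
	};

	static void ilk_irq_handler(void)       { puts("ironlake irq"); }
	static int  ilk_enable_vblank(int p)    { printf("ilk vblank on pipe %d\n", p); return 0; }
	static void legacy_irq_handler(void)    { puts("legacy irq"); }
	static int  legacy_enable_vblank(int p) { printf("legacy vblank on pipe %d\n", p); return 0; }

	/* Fill in every hook once, up front, keyed on the hardware generation --
	 * the same shape as intel_irq_init(). */
	static void hooks_init(struct hooks *h, int gen)
	{
		h->max_vblank_count = 0xffffff;           /* 24-bit frame counter */
		if (gen >= 5) {
			h->max_vblank_count = 0xffffffff; /* full 32-bit counter */
			h->irq_handler   = ilk_irq_handler;
			h->enable_vblank = ilk_enable_vblank;
		} else {
			h->irq_handler   = legacy_irq_handler;
			h->enable_vblank = legacy_enable_vblank;
		}
	}

	int main(void)
	{
		struct hooks h;

		hooks_init(&h, 6);
		h.irq_handler();
		return h.enable_vblank(0);
	}
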
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2f967af8e62..5d5def756c9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -531,6 +531,7 @@
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
#define GEN6_BSD_USER_INTERRUPT (1 << 12)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a94d2b526..5257cfc34c3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -597,7 +597,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
return;
}
-void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -689,7 +689,7 @@ void i915_save_display(struct drm_device *dev)
i915_save_vga(dev);
}
-void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -780,6 +780,7 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -796,6 +797,8 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+ mutex_lock(&dev->struct_mutex);
+
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -835,6 +838,8 @@ int i915_save_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -845,6 +850,8 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+ mutex_lock(&dev->struct_mutex);
+
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -862,6 +869,7 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
+ mutex_unlock(&dev->struct_mutex);
intel_init_clock_gating(dev);
@@ -873,6 +881,8 @@ int i915_restore_state(struct drm_device *dev)
if (IS_GEN6(dev))
gen6_enable_rps(dev_priv);
+ mutex_lock(&dev->struct_mutex);
+
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -886,6 +896,8 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ mutex_unlock(&dev->struct_mutex);
+
intel_i2c_reset(dev);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 81a9059b6a9..21b6f93fe91 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4687,6 +4687,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
+ intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -5217,8 +5218,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- if (!HAS_PCH_SPLIT(dev))
- intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -6262,6 +6261,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+static int intel_gen2_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long offset;
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ goto out;
+
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long offset;
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ goto out;
+
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto out;
+
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset | obj->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto out;
+
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(obj->gtt_offset);
+
+ pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued. Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto out;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto out;
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+ intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+ intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, (MI_NOOP));
+ intel_ring_advance(ring);
+out:
+ return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ return -ENODEV;
+}
+
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
@@ -6272,9 +6462,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- unsigned long flags, offset;
- int pipe = intel_crtc->pipe;
- u32 pf, pipesrc;
+ unsigned long flags;
int ret;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -6303,9 +6491,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
- if (ret)
- goto cleanup_work;
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6317,91 +6502,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_objs;
- if (IS_GEN3(dev) || IS_GEN2(dev)) {
- u32 flip_mask;
-
- /* Can't queue multiple flips, so wait for the previous
- * one to finish before executing the next.
- */
- ret = BEGIN_LP_RING(2);
- if (ret)
- goto cleanup_objs;
-
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
- /* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
-
- ret = BEGIN_LP_RING(4);
- if (ret)
- goto cleanup_objs;
-
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
- switch (INTEL_INFO(dev)->gen) {
- case 2:
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
- break;
-
- case 3:
- OUT_RING(MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
- break;
-
- case 4:
- case 5:
- /* i965+ uses the linear or tiled offsets from the
- * Display Registers (which do not change across a page-flip)
- * so we need only reprogram the base address.
- */
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset | obj->tiling_mode);
-
- /* XXX Enabling the panel-fitter across page-flip is so far
- * untested on non-native modes, so ignore it for now.
- * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- break;
-
- case 6:
- case 7:
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj->tiling_mode);
- OUT_RING(obj->gtt_offset);
-
- pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
- pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- break;
- }
- ADVANCE_LP_RING();
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ if (ret)
+ goto cleanup_pending;
mutex_unlock(&dev->struct_mutex);
@@ -6409,10 +6521,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
+cleanup_pending:
+ atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
-cleanup_work:
mutex_unlock(&dev->struct_mutex);
spin_lock_irqsave(&dev->event_lock, flags);
@@ -7657,6 +7770,31 @@ static void intel_init_display(struct drm_device *dev)
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
+
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ dev_priv->display.queue_flip = intel_gen2_queue_flip;
+ break;
+
+ case 3:
+ dev_priv->display.queue_flip = intel_gen3_queue_flip;
+ break;
+
+ case 4:
+ case 5:
+ dev_priv->display.queue_flip = intel_gen4_queue_flip;
+ break;
+
+ case 6:
+ dev_priv->display.queue_flip = intel_gen6_queue_flip;
+ break;
+ case 7:
+ dev_priv->display.queue_flip = intel_gen7_queue_flip;
+ break;
+ }
}
/*
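
Aside: intel_init_display() now installs a per-generation display.queue_flip hook, and intel_default_queue_flip() returns -ENODEV so intel_crtc_page_flip() can back out through the new cleanup_pending label. A tiny self-contained miniature of that default-plus-unwind shape (hypothetical names, plain userspace C, not the driver's code):

	#include <errno.h>
	#include <stdio.h>

	typedef int (*queue_flip_fn)(void);

	static int gen6_queue_flip(void)    { puts("gen6: flip queued"); return 0; }
	static int default_queue_flip(void) { return -ENODEV; } /* no flip support */

	static queue_flip_fn pick_queue_flip(int gen)
	{
		switch (gen) {
		case 6:
			return gen6_queue_flip;
		default:
			return default_queue_flip;
		}
	}

	int main(void)
	{
		int pending = 1;                 /* bookkeeping done before the hook runs */
		int ret = pick_queue_flip(0)();  /* a generation with no implementation */

		if (ret) {
			pending = 0;             /* mirror of the cleanup_pending unwind */
			fprintf(stderr, "flip rejected: %d (pending=%d)\n", ret, pending);
		}
		return 0;
	}
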
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a670c006982..9e2959bc91c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1409,6 +1409,11 @@ void intel_setup_overlay(struct drm_device *dev)
overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
if (!overlay)
return;
+
+ mutex_lock(&dev->struct_mutex);
+ if (WARN_ON(dev_priv->overlay))
+ goto out_free;
+
overlay->dev = dev;
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
@@ -1448,7 +1453,7 @@ void intel_setup_overlay(struct drm_device *dev)
regs = intel_overlay_map_regs(overlay);
if (!regs)
- goto out_free_bo;
+ goto out_unpin_bo;
memset(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
@@ -1457,14 +1462,17 @@ void intel_setup_overlay(struct drm_device *dev)
intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
+ mutex_unlock(&dev->struct_mutex);
DRM_INFO("initialized overlay support\n");
return;
out_unpin_bo:
- i915_gem_object_unpin(reg_bo);
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
out_free:
+ mutex_unlock(&dev->struct_mutex);
kfree(overlay);
return;
}
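
Aside: the intel_overlay.c hunks take dev->struct_mutex for the whole setup path and retarget the gotos so each label undoes only what was actually acquired (and unpins only when the object was GTT-pinned, i.e. !OVERLAY_NEEDS_PHYSICAL()). A small self-contained miniature of that unwind ladder (hypothetical names, a pthread mutex standing in for struct_mutex):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Acquire resources in order under the lock; on failure, unwind only what
	 * was actually acquired -- the same shape as intel_setup_overlay(). */
	static int setup(int fail_step, int needs_pin)
	{
		char *regs;
		int pinned = 0;

		pthread_mutex_lock(&lock);

		regs = malloc(64);               /* stand-in for allocating the register bo */
		if (!regs || fail_step == 1)
			goto out_free;           /* nothing pinned yet */

		if (needs_pin)
			pinned = 1;              /* stand-in for i915_gem_object_pin() */
		if (fail_step == 2)
			goto out_unpin;          /* undo the pin, then fall through */

		pthread_mutex_unlock(&lock);
		puts("overlay-style setup complete");
		return 0;

	out_unpin:
		if (pinned)                      /* conditional, like the PHYSICAL check */
			puts("unpinned");
	out_free:
		free(regs);
		pthread_mutex_unlock(&lock);     /* every error path drops the lock */
		return -1;
	}

	int main(void)
	{
		setup(2, 1);
		return setup(0, 1);
	}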