Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile           3
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c       196
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c   15
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c       277
6 files changed, 292 insertions, 227 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5ba78e4fd2b..d8fb5d8ee7e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,13 +3,14 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_proc.o \
i915_gem_tiling.o
+i915-$(CONFIG_ACPI) += i915_opregion.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index db34780edbb..256e22963ae 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -844,8 +844,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* correctly in testing on 945G.
* This may be a side effect of MSI having been made available for PEG
* and the registers being closely associated.
+ *
+ * According to chipset errata, on the 965GM, MSI interrupts may
+ * be lost or delayed.
*/
- if (!IS_I945G(dev) && !IS_I945GM(dev))
+ if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
if (pci_enable_msi(dev->pdev))
DRM_ERROR("failed to enable MSI\n");
@@ -957,6 +960,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
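
The new GET_APERTURE ioctl registered above reports the total GTT aperture and how much of it is not tied down by pinned objects. A minimal userspace sketch of calling it follows; the DRM_IOCTL_I915_GEM_GET_APERTURE request define from i915_drm.h and the already-open DRM file descriptor are assumptions, not shown in this diff:

#include <sys/ioctl.h>
#include <stdio.h>
#include "i915_drm.h"

/* fd is an open /dev/dri/card* descriptor (assumed). */
static void print_aperture(int fd)
{
	struct drm_i915_gem_get_aperture aper = { 0 };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aper) == 0)
		printf("aperture: %llu bytes total, %llu available\n",
		       (unsigned long long) aper.aper_size,
		       (unsigned long long) aper.aper_available_size);
}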
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eae4ed3956e..572dcd0e3e0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -31,6 +31,7 @@
#define _I915_DRV_H_
#include "i915_reg.h"
+#include <linux/io-mapping.h>
/* General customization:
*/
@@ -90,7 +91,7 @@ struct mem_block {
typedef struct _drm_i915_vbl_swap {
struct list_head head;
drm_drawable_t drw_id;
- unsigned int plane;
+ unsigned int pipe;
unsigned int sequence;
} drm_i915_vbl_swap_t;
@@ -240,9 +241,14 @@ typedef struct drm_i915_private {
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];
+ /** Work task for vblank-related ring access */
+ struct work_struct vblank_work;
+
struct {
struct drm_mm gtt_space;
+ struct io_mapping *gtt_mapping;
+
/**
* List of objects currently involved in rendering from the
* ringbuffer.
@@ -285,9 +291,6 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;
- /** Work task for vblank-related ring access */
- struct work_struct vblank_work;
-
uint32_t next_gem_seqno;
/**
@@ -441,7 +444,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
void i915_user_irq_get(struct drm_device *dev);
void i915_user_irq_put(struct drm_device *dev);
-extern void i915_gem_vblank_work_handler(struct work_struct *work);
+extern void i915_vblank_work_handler(struct work_struct *work);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device *dev);
@@ -502,6 +505,8 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_proc_init(struct drm_minor *minor);
void i915_gem_proc_cleanup(struct drm_minor *minor);
@@ -539,11 +544,18 @@ extern int i915_restore_state(struct drm_device *dev);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
+#ifdef CONFIG_ACPI
/* i915_opregion.c */
extern int intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_free(struct drm_device *dev);
extern void opregion_asle_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);
+#else
+static inline int intel_opregion_init(struct drm_device *dev) { return 0; }
+static inline void intel_opregion_free(struct drm_device *dev) { return; }
+static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void opregion_enable_asle(struct drm_device *dev) { return; }
+#endif
/**
* Lock test for when it's just for synchronization of ring access.
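
For orientation before the i915_gem.c hunks: the new gtt_mapping field added above is a <linux/io-mapping.h> handle, and the patch pairs its calls as sketched below (calls taken from the hunks that follow; base, size, and page_base stand in for the real arguments, and error handling is elided):

struct io_mapping *map;
void __iomem *vaddr;

map = io_mapping_create_wc(base, size);           /* once, at entervt */

vaddr = io_mapping_map_atomic_wc(map, page_base); /* fast path, no sleeping */
io_mapping_unmap_atomic(vaddr);

vaddr = io_mapping_map_wc(map, page_base);        /* slow path, may sleep */
io_mapping_unmap(vaddr);

io_mapping_free(map);                             /* once, at leavevt */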
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9ac73dd1b42..b0ec73fa6a9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -79,6 +79,28 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
return 0;
}
+int
+i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_get_aperture *args = data;
+ struct drm_i915_gem_object *obj_priv;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ args->aper_size = dev->gtt_total;
+ args->aper_available_size = args->aper_size;
+
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ if (obj_priv->pin_count > 0)
+ args->aper_available_size -= obj_priv->obj->size;
+ }
+
+ return 0;
+}
+
/**
* Creates a new mm object and returns a handle to it.
@@ -171,21 +193,64 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return 0;
}
+/* This is the fast write path which cannot handle
+ * page faults in the source data
+ */
+
+static inline int
+fast_user_write(struct io_mapping *mapping,
+ loff_t page_base, int page_offset,
+ char __user *user_data,
+ int length)
+{
+ char *vaddr_atomic;
+ unsigned long unwritten;
+
+ vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
+ unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
+ user_data, length);
+ io_mapping_unmap_atomic(vaddr_atomic);
+ if (unwritten)
+ return -EFAULT;
+ return 0;
+}
+
+/* Here's the write path which can sleep for
+ * page faults
+ */
+
+static inline int
+slow_user_write(struct io_mapping *mapping,
+ loff_t page_base, int page_offset,
+ char __user *user_data,
+ int length)
+{
+ char __iomem *vaddr;
+ unsigned long unwritten;
+
+ vaddr = io_mapping_map_wc(mapping, page_base);
+ if (vaddr == NULL)
+ return -EFAULT;
+ unwritten = __copy_from_user(vaddr + page_offset,
+ user_data, length);
+ io_mapping_unmap(vaddr);
+ if (unwritten)
+ return -EFAULT;
+ return 0;
+}
+
static int
i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
- loff_t offset;
+ loff_t offset, page_base;
char __user *user_data;
- char __iomem *vaddr;
- char *vaddr_atomic;
- int i, o, l;
- int ret = 0;
- unsigned long pfn;
- unsigned long unwritten;
+ int page_offset, page_length;
+ int ret;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -211,67 +276,35 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
while (remain > 0) {
/* Operation in this page
*
- * i = page number
- * o = offset within page
- * l = bytes to copy
+ * page_base = page offset within aperture
+ * page_offset = offset within page
+ * page_length = bytes to copy for this page
*/
- i = offset >> PAGE_SHIFT;
- o = offset & (PAGE_SIZE-1);
- l = remain;
- if ((o + l) > PAGE_SIZE)
- l = PAGE_SIZE - o;
-
- pfn = (dev->agp->base >> PAGE_SHIFT) + i;
-
-#ifdef CONFIG_HIGHMEM
- /* This is a workaround for the low performance of iounmap
- * (approximate 10% cpu cost on normal 3D workloads).
- * kmap_atomic on HIGHMEM kernels happens to let us map card
- * memory without taking IPIs. When the vmap rework lands
- * we should be able to dump this hack.
+ page_base = (offset & ~(PAGE_SIZE-1));
+ page_offset = offset & (PAGE_SIZE-1);
+ page_length = remain;
+ if ((page_offset + remain) > PAGE_SIZE)
+ page_length = PAGE_SIZE - page_offset;
+
+ ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
+ page_offset, user_data, page_length);
+
+ /* If we get a fault while copying data, then (presumably) our
+ * source page isn't available. In this case, use the
+ * non-atomic function
*/
- vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
-#if WATCH_PWRITE
- DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
- i, o, l, pfn, vaddr_atomic);
-#endif
- unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
- user_data, l);
- kunmap_atomic(vaddr_atomic, KM_USER0);
-
- if (unwritten)
-#endif /* CONFIG_HIGHMEM */
- {
- vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
-#if WATCH_PWRITE
- DRM_INFO("pwrite slow i %d o %d l %d "
- "pfn %ld vaddr %p\n",
- i, o, l, pfn, vaddr);
-#endif
- if (vaddr == NULL) {
- ret = -EFAULT;
- goto fail;
- }
- unwritten = __copy_from_user(vaddr + o, user_data, l);
-#if WATCH_PWRITE
- DRM_INFO("unwritten %ld\n", unwritten);
-#endif
- iounmap(vaddr);
- if (unwritten) {
- ret = -EFAULT;
+ if (ret) {
+ ret = slow_user_write (dev_priv->mm.gtt_mapping,
+ page_base, page_offset,
+ user_data, page_length);
+ if (ret)
goto fail;
- }
}
- remain -= l;
- user_data += l;
- offset += l;
+ remain -= page_length;
+ user_data += page_length;
+ offset += page_length;
}
-#if WATCH_PWRITE && 1
- i915_gem_clflush_object(obj);
- i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
- i915_gem_clflush_object(obj);
-#endif
fail:
i915_gem_object_unpin(obj);
@@ -1489,12 +1522,12 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_i915_gem_exec_object *entry)
{
struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_relocation_entry __user *relocs;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
- uint32_t last_reloc_offset = -1;
- void __iomem *reloc_page = NULL;
+ void __iomem *reloc_page;
/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
@@ -1617,26 +1650,11 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
* perform.
*/
reloc_offset = obj_priv->gtt_offset + reloc.offset;
- if (reloc_page == NULL ||
- (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
- (reloc_offset & ~(PAGE_SIZE - 1))) {
- if (reloc_page != NULL)
- iounmap(reloc_page);
-
- reloc_page = ioremap_wc(dev->agp->base +
- (reloc_offset &
- ~(PAGE_SIZE - 1)),
- PAGE_SIZE);
- last_reloc_offset = reloc_offset;
- if (reloc_page == NULL) {
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -ENOMEM;
- }
- }
-
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ (reloc_offset &
+ ~(PAGE_SIZE - 1)));
reloc_entry = (uint32_t __iomem *)(reloc_page +
- (reloc_offset & (PAGE_SIZE - 1)));
+ (reloc_offset & (PAGE_SIZE - 1)));
reloc_val = target_obj_priv->gtt_offset + reloc.delta;
#if WATCH_BUF
@@ -1645,6 +1663,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
readl(reloc_entry), reloc_val);
#endif
writel(reloc_val, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
/* Write the updated presumed offset for this entry back out
* to the user.
@@ -1660,9 +1679,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
drm_gem_object_unreference(target_obj);
}
- if (reloc_page != NULL)
- iounmap(reloc_page);
-
#if WATCH_BUF
if (0)
i915_gem_dump_object(obj, 128, __func__, ~0);
@@ -2504,6 +2520,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
return ret;
+ dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
+ dev->agp->agp_info.aper_size
+ * 1024 * 1024);
+
mutex_lock(&dev->struct_mutex);
BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
@@ -2521,11 +2541,13 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
ret = i915_gem_idle(dev);
drm_irq_uninstall(dev);
+ io_mapping_free(dev_priv->mm.gtt_mapping);
return ret;
}
@@ -2550,8 +2572,6 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
- INIT_WORK(&dev_priv->mm.vblank_work,
- i915_gem_vblank_work_handler);
dev_priv->mm.next_gem_seqno = 1;
i915_gem_detect_bit_6_swizzle(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 15d4160415b..93de15b4c9a 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -192,7 +192,12 @@ static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
*start = &buf[offset];
*eof = 0;
- DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+ if (dev_priv->hw_status_page != NULL) {
+ DRM_PROC_PRINT("Current sequence: %d\n",
+ i915_get_gem_seqno(dev));
+ } else {
+ DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
+ }
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
@@ -230,8 +235,12 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset,
I915_READ(PIPEBSTAT));
DRM_PROC_PRINT("Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- DRM_PROC_PRINT("Current sequence: %d\n",
- i915_get_gem_seqno(dev));
+ if (dev_priv->hw_status_page != NULL) {
+ DRM_PROC_PRINT("Current sequence: %d\n",
+ i915_get_gem_seqno(dev));
+ } else {
+ DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
+ }
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n",
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index baae511c785..26f48932a51 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -60,43 +60,6 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
}
/**
- * i915_get_pipe - return the the pipe associated with a given plane
- * @dev: DRM device
- * @plane: plane to look for
- *
- * The Intel Mesa & 2D drivers call the vblank routines with a plane number
- * rather than a pipe number, since they may not always be equal. This routine
- * maps the given @plane back to a pipe number.
- */
-static int
-i915_get_pipe(struct drm_device *dev, int plane)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 dspcntr;
-
- dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
-
- return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
-}
-
-/**
- * i915_get_plane - return the the plane associated with a given pipe
- * @dev: DRM device
- * @pipe: pipe to look for
- *
- * The Intel Mesa & 2D drivers call the vblank routines with a plane number
- * rather than a plane number, since they may not always be equal. This routine
- * maps the given @pipe back to a plane number.
- */
-static int
-i915_get_plane(struct drm_device *dev, int pipe)
-{
- if (i915_get_pipe(dev, 0) == pipe)
- return 0;
- return 1;
-}
-
-/**
* i915_pipe_enabled - check if a pipe is enabled
* @dev: DRM device
* @pipe: pipe to check
@@ -121,6 +84,9 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
* Emit blits for scheduled buffer swaps.
*
* This function will be called with the HW lock held.
+ * Because this function must grab the ring mutex (dev->struct_mutex),
+ * it can no longer run at soft irq time. We'll fix this when we do
+ * the DRI2 swap buffer work.
*/
static void i915_vblank_tasklet(struct drm_device *dev)
{
@@ -141,6 +107,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
RING_LOCALS;
+ mutex_lock(&dev->struct_mutex);
+
if (IS_I965G(dev) && sarea_priv->front_tiled) {
cmd |= XY_SRC_COPY_BLT_DST_TILED;
dst_pitch >>= 2;
@@ -165,7 +133,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
drm_i915_vbl_swap_t *vbl_swap =
list_entry(list, drm_i915_vbl_swap_t, head);
- int pipe = i915_get_pipe(dev, vbl_swap->plane);
+ int pipe = vbl_swap->pipe;
if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
continue;
@@ -179,20 +147,19 @@ static void i915_vblank_tasklet(struct drm_device *dev)
drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
- if (!drw) {
- spin_unlock(&dev->drw_lock);
- drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
- spin_lock(&dev_priv->swaps_lock);
- continue;
- }
-
list_for_each(hit, &hits) {
drm_i915_vbl_swap_t *swap_cmp =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_drawable_info *drw_cmp =
drm_get_drawable_info(dev, swap_cmp->drw_id);
- if (drw_cmp &&
+ /* Make sure both drawables are still
+ * around and have some rectangles before
+ * we look inside to order them for the
+ * blts below.
+ */
+ if (drw_cmp && drw_cmp->num_rects > 0 &&
+ drw && drw->num_rects > 0 &&
drw_cmp->rects[0].y1 > drw->rects[0].y1) {
list_add_tail(list, hit);
break;
@@ -212,6 +179,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
if (nhits == 0) {
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+ mutex_unlock(&dev->struct_mutex);
return;
}
@@ -265,18 +233,21 @@ static void i915_vblank_tasklet(struct drm_device *dev)
drm_i915_vbl_swap_t *swap_hit =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_clip_rect *rect;
- int num_rects, plane;
+ int num_rects, pipe;
unsigned short top, bottom;
drw = drm_get_drawable_info(dev, swap_hit->drw_id);
+ /* The drawable may have been destroyed since
+ * the vblank swap was queued
+ */
if (!drw)
continue;
rect = drw->rects;
- plane = swap_hit->plane;
- top = upper[plane];
- bottom = lower[plane];
+ pipe = swap_hit->pipe;
+ top = upper[pipe];
+ bottom = lower[pipe];
for (num_rects = drw->num_rects; num_rects--; rect++) {
int y1 = max(rect->y1, top);
@@ -302,6 +273,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ mutex_unlock(&dev->struct_mutex);
list_for_each_safe(hit, tmp, &hits) {
drm_i915_vbl_swap_t *swap_hit =
@@ -313,15 +285,16 @@ static void i915_vblank_tasklet(struct drm_device *dev)
}
}
-u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
+/* Called from drm generic code, passed a 'crtc', which
+ * we use as a pipe index
+ */
+u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low, count;
- int pipe;
- pipe = i915_get_pipe(dev, plane);
high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
@@ -350,18 +323,37 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
}
void
-i915_gem_vblank_work_handler(struct work_struct *work)
+i915_vblank_work_handler(struct work_struct *work)
{
- drm_i915_private_t *dev_priv;
- struct drm_device *dev;
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ vblank_work);
+ struct drm_device *dev = dev_priv->dev;
+ unsigned long irqflags;
- dev_priv = container_of(work, drm_i915_private_t,
- mm.vblank_work);
- dev = dev_priv->dev;
+ if (dev->lock.hw_lock == NULL) {
+ i915_vblank_tasklet(dev);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+ dev->locked_tasklet_func = i915_vblank_tasklet;
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+ /* Try to get the lock now, if this fails, the lock
+ * holder will execute the tasklet during unlock
+ */
+ if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
+ return;
+
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+
+ spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+ dev->locked_tasklet_func = NULL;
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
- mutex_lock(&dev->struct_mutex);
i915_vblank_tasklet(dev);
- mutex_unlock(&dev->struct_mutex);
+ drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
@@ -398,7 +390,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
PIPE_VBLANK_INTERRUPT_STATUS)) {
vblank++;
- drm_handle_vblank(dev, i915_get_plane(dev, 0));
+ drm_handle_vblank(dev, 0);
}
I915_WRITE(PIPEASTAT, pipea_stats);
@@ -416,7 +408,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
PIPE_VBLANK_INTERRUPT_STATUS)) {
vblank++;
- drm_handle_vblank(dev, i915_get_plane(dev, 1));
+ drm_handle_vblank(dev, 1);
}
if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
@@ -441,12 +433,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (iir & I915_ASLE_INTERRUPT)
opregion_asle_intr(dev);
- if (vblank && dev_priv->swaps_pending > 0) {
- if (dev_priv->ring.ring_obj == NULL)
- drm_locked_tasklet(dev, i915_vblank_tasklet);
- else
- schedule_work(&dev_priv->mm.vblank_work);
- }
+ if (vblank && dev_priv->swaps_pending > 0)
+ schedule_work(&dev_priv->vblank_work);
return IRQ_HANDLED;
}
@@ -481,22 +469,24 @@ static int i915_emit_irq(struct drm_device * dev)
void i915_user_irq_get(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
- spin_lock(&dev_priv->user_irq_lock);
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
- spin_unlock(&dev_priv->user_irq_lock);
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
void i915_user_irq_put(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long irqflags;
- spin_lock(&dev_priv->user_irq_lock);
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
- spin_unlock(&dev_priv->user_irq_lock);
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -578,74 +568,95 @@ int i915_irq_wait(struct drm_device *dev, void *data,
return i915_wait_irq(dev, irqwait->irq_seq);
}
-int i915_enable_vblank(struct drm_device *dev, int plane)
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
u32 pipestat;
+ u32 interrupt = 0;
+ unsigned long irqflags;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
+ interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
pipe);
- break;
+ return 0;
}
- if (pipestat_reg) {
- pipestat = I915_READ(pipestat_reg);
- if (IS_I965G(dev))
- pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
- else
- pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
- /* Clear any stale interrupt status */
- pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
- PIPE_VBLANK_INTERRUPT_STATUS);
- I915_WRITE(pipestat_reg, pipestat);
- }
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ /* Enabling vblank events in IMR comes before PIPESTAT write, or
+ * there's a race where the PIPESTAT vblank bit gets set to 1, so
+ * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in
+ * ISR flashes to 1, but the IIR bit doesn't get set to 1 because
+ * IMR masks it. It doesn't ever get set after we clear the masking
+ * in IMR because the ISR bit is edge, not level-triggered, on the
+ * OR of PIPESTAT bits.
+ */
+ i915_enable_irq(dev_priv, interrupt);
+ pipestat = I915_READ(pipestat_reg);
+ if (IS_I965G(dev))
+ pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
+ else
+ pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
+ /* Clear any stale interrupt status */
+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS);
+ I915_WRITE(pipestat_reg, pipestat);
+ (void) I915_READ(pipestat_reg); /* Posting read */
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
return 0;
}
-void i915_disable_vblank(struct drm_device *dev, int plane)
+/* Called from drm generic code, passed 'crtc' which
+ * we use as a pipe index
+ */
+void i915_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
u32 pipestat;
+ u32 interrupt = 0;
+ unsigned long irqflags;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
+ interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+ interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
pipe);
+ return;
break;
}
- if (pipestat_reg) {
- pipestat = I915_READ(pipestat_reg);
- pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
- PIPE_VBLANK_INTERRUPT_ENABLE);
- /* Clear any stale interrupt status */
- pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
- PIPE_VBLANK_INTERRUPT_STATUS);
- I915_WRITE(pipestat_reg, pipestat);
- }
+ spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ i915_disable_irq(dev_priv, interrupt);
+ pipestat = I915_READ(pipestat_reg);
+ pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+ PIPE_VBLANK_INTERRUPT_ENABLE);
+ /* Clear any stale interrupt status */
+ pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+ PIPE_VBLANK_INTERRUPT_STATUS);
+ I915_WRITE(pipestat_reg, pipestat);
+ (void) I915_READ(pipestat_reg); /* Posting read */
+ spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
/* Set the vblank monitor pipe
@@ -687,8 +698,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_swap_t *swap = data;
- drm_i915_vbl_swap_t *vbl_swap;
- unsigned int pipe, seqtype, curseq, plane;
+ drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
+ unsigned int pipe, seqtype, curseq;
unsigned long irqflags;
struct list_head *list;
int ret;
@@ -709,8 +720,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
- plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
- pipe = i915_get_pipe(dev, plane);
+ pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
@@ -751,44 +761,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
}
}
+ vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+
+ if (!vbl_swap) {
+ DRM_ERROR("Failed to allocate memory to queue swap\n");
+ drm_vblank_put(dev, pipe);
+ return -ENOMEM;
+ }
+
+ vbl_swap->drw_id = swap->drawable;
+ vbl_swap->pipe = pipe;
+ vbl_swap->sequence = swap->sequence;
+
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
list_for_each(list, &dev_priv->vbl_swaps.head) {
- vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
+ vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
- if (vbl_swap->drw_id == swap->drawable &&
- vbl_swap->plane == plane &&
- vbl_swap->sequence == swap->sequence) {
+ if (vbl_old->drw_id == swap->drawable &&
+ vbl_old->pipe == pipe &&
+ vbl_old->sequence == swap->sequence) {
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+ drm_vblank_put(dev, pipe);
+ drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
DRM_DEBUG("Already scheduled\n");
return 0;
}
}
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-
- if (dev_priv->swaps_pending >= 100) {
+ if (dev_priv->swaps_pending >= 10) {
DRM_DEBUG("Too many swaps queued\n");
+ DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
+ drm_vblank_count(dev, 0),
+ drm_vblank_count(dev, 1));
+
+ list_for_each(list, &dev_priv->vbl_swaps.head) {
+ vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
+ DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
+ vbl_old->drw_id, vbl_old->pipe,
+ vbl_old->sequence);
+ }
+ spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
drm_vblank_put(dev, pipe);
+ drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
return -EBUSY;
}
- vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
-
- if (!vbl_swap) {
- DRM_ERROR("Failed to allocate memory to queue swap\n");
- drm_vblank_put(dev, pipe);
- return -ENOMEM;
- }
-
- DRM_DEBUG("\n");
-
- vbl_swap->drw_id = swap->drawable;
- vbl_swap->plane = plane;
- vbl_swap->sequence = swap->sequence;
-
- spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
-
list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
dev_priv->swaps_pending++;
@@ -815,6 +833,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
spin_lock_init(&dev_priv->swaps_lock);
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+ INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
dev_priv->swaps_pending = 0;
/* Set initial unmasked IRQs to just the selected vblank pipes. */
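
For context on the i915_vblank_swap() rework above, a hedged userspace sketch of queueing a swap; the DRM_IOCTL_I915_VBLANK_SWAP request define from i915_drm.h and the open, authenticated DRM file descriptor are assumptions outside this diff:

#include <sys/ioctl.h>
#include "i915_drm.h"

/* Queue a blit of 'drawable' for one vblank from now (fd assumed open). */
static int queue_swap(int fd, unsigned int drawable)
{
	drm_i915_vblank_swap_t swap = {
		.drawable = drawable,
		.seqtype  = _DRM_VBLANK_RELATIVE,
		.sequence = 1,
	};

	return ioctl(fd, DRM_IOCTL_I915_VBLANK_SWAP, &swap);
}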