Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile             |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c           |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c           | 119
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c           |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h         | 315
-rw-r--r--  drivers/gpu/drm/i915/i915_trace_points.c  |  11
6 files changed, 447 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5269dfa5f62..fa7b9be096b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_tiling.o \
+ i915_trace_points.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 59826c5b876..ae7ec039002 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,6 +33,7 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
@@ -49,14 +50,18 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
int i;
+ trace_i915_ring_wait_begin (dev);
+
for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
- if (ring->space >= n)
+ if (ring->space >= n) {
+ trace_i915_ring_wait_end (dev);
return 0;
+ }
if (dev->primary->master) {
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -76,6 +81,7 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
}
+ trace_i915_ring_wait_end (dev);
return -EBUSY;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dea9ac06985..67e2cd5636e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,6 +29,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>
@@ -1618,8 +1619,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
if ((obj->write_domain & flush_domains) ==
obj->write_domain) {
+ uint32_t old_write_domain = obj->write_domain;
+
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
}
@@ -1667,6 +1674,8 @@ i915_gem_retire_request(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ trace_i915_gem_request_retire(dev, request->seqno);
+
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
@@ -1810,6 +1819,8 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
i915_driver_irq_postinstall(dev);
}
+ trace_i915_gem_request_wait_begin(dev, seqno);
+
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev);
ret = wait_event_interruptible(dev_priv->irq_queue,
@@ -1818,6 +1829,8 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
atomic_read(&dev_priv->mm.wedged));
i915_user_irq_put(dev);
dev_priv->mm.waiting_gem_seqno = 0;
+
+ trace_i915_gem_request_wait_end(dev, seqno);
}
if (atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
@@ -1850,6 +1863,8 @@ i915_gem_flush(struct drm_device *dev,
DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
invalidate_domains, flush_domains);
#endif
+ trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
+ invalidate_domains, flush_domains);
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
@@ -2003,6 +2018,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
if (!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
+ trace_i915_gem_object_unbind(obj);
+
return 0;
}
@@ -2452,6 +2469,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
else
i830_write_fence_reg(reg);
+ trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+
return 0;
}
@@ -2650,6 +2669,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+
return 0;
}
@@ -2665,6 +2686,8 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
if (obj_priv->pages == NULL)
return;
+ trace_i915_gem_object_clflush(obj);
+
drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
@@ -2674,21 +2697,29 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t seqno;
+ uint32_t old_write_domain;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
return;
/* Queue the GPU write cache flushing we need. */
+ old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
+ uint32_t old_write_domain;
+
if (obj->write_domain != I915_GEM_DOMAIN_GTT)
return;
@@ -2696,7 +2727,12 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
+ old_write_domain = obj->write_domain;
obj->write_domain = 0;
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
@@ -2704,13 +2740,19 @@ static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ uint32_t old_write_domain;
if (obj->write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
+ old_write_domain = obj->write_domain;
obj->write_domain = 0;
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
/**
@@ -2723,6 +2765,7 @@ int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
@@ -2735,6 +2778,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (ret != 0)
return ret;
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
/* If we're writing through the GTT domain, then CPU and GPU caches
* will need to be invalidated at next use.
*/
@@ -2753,6 +2799,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
obj_priv->dirty = 1;
}
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
@@ -2765,6 +2815,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
+ uint32_t old_write_domain, old_read_domains;
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
@@ -2780,6 +2831,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
/* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
@@ -2800,6 +2854,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
obj->write_domain = I915_GEM_DOMAIN_CPU;
}
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
return 0;
}
@@ -2921,6 +2979,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
+ uint32_t old_read_domains;
BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
@@ -2967,6 +3026,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
i915_gem_clflush_object(obj);
}
+ old_read_domains = obj->read_domains;
+
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
@@ -2985,6 +3046,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
obj->read_domains, obj->write_domain,
dev->invalidate_domains, dev->flush_domains);
#endif
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ obj->write_domain);
}
/**
@@ -3037,6 +3102,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset, uint64_t size)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_read_domains;
int i, ret;
if (offset == 0 && size == obj->size)
@@ -3083,8 +3149,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
*/
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ old_read_domains = obj->read_domains;
obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ obj->write_domain);
+
return 0;
}
@@ -3282,6 +3353,8 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
+ trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
+
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
@@ -3660,8 +3733,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4050,6 +4127,8 @@ int i915_gem_init_object(struct drm_gem_object *obj)
INIT_LIST_HEAD(&obj_priv->fence_list);
obj_priv->madv = I915_MADV_WILLNEED;
+ trace_i915_gem_object_create(obj);
+
return 0;
}
@@ -4058,6 +4137,8 @@ void i915_gem_free_object(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ trace_i915_gem_object_destroy(obj);
+
while (obj_priv->pin_count > 0)
i915_gem_object_unpin(obj);
@@ -4186,24 +4267,36 @@ i915_gem_idle(struct drm_device *dev)
* the GPU domains and just stuff them onto inactive.
*/
while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ uint32_t old_write_domain;
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
+ obj = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list)->obj;
+ old_write_domain = obj->write_domain;
+ obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
spin_unlock(&dev_priv->mm.active_list_lock);
while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list);
- obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj_priv->obj);
+ struct drm_gem_object *obj;
+ uint32_t old_write_domain;
+
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list)->obj;
+ old_write_domain = obj->write_domain;
+ obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ i915_gem_object_move_to_inactive(obj);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 13e664ddb61..4dfeec7cdd4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,6 +31,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_drv.h"
#define MAX_NOPID ((u32)~0)
@@ -279,7 +280,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
}
if (gt_iir & GT_USER_INTERRUPT) {
- dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+ u32 seqno = i915_get_gem_seqno(dev);
+ dev_priv->mm.irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
}
@@ -622,7 +625,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT) {
- dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
+ u32 seqno = i915_get_gem_seqno(dev);
+ dev_priv->mm.irq_gem_seqno = seqno;
+ trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
new file mode 100644
index 00000000000..5567a40816f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -0,0 +1,315 @@
+#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I915_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE i915_trace
+
+/* object tracking */
+
+TRACE_EVENT(i915_gem_object_create,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(u32, size)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->size = obj->size;
+ ),
+
+ TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_bind,
+
+ TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+
+ TP_ARGS(obj, gtt_offset),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(u32, gtt_offset)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->gtt_offset = gtt_offset;
+ ),
+
+ TP_printk("obj=%p, gtt_offset=%08x",
+ __entry->obj, __entry->gtt_offset)
+);
+
+TRACE_EVENT(i915_gem_object_clflush,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+
+ TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+
+ TP_ARGS(obj, old_read_domains, old_write_domain),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(u32, read_domains)
+ __field(u32, write_domain)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->read_domains = obj->read_domains | (old_read_domains << 16);
+ __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+ ),
+
+ TP_printk("obj=%p, read=%04x, write=%04x",
+ __entry->obj,
+ __entry->read_domains, __entry->write_domain)
+);
+
+TRACE_EVENT(i915_gem_object_get_fence,
+
+ TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
+
+ TP_ARGS(obj, fence, tiling_mode),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ __field(int, fence)
+ __field(int, tiling_mode)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->fence = fence;
+ __entry->tiling_mode = tiling_mode;
+ ),
+
+ TP_printk("obj=%p, fence=%d, tiling=%d",
+ __entry->obj, __entry->fence, __entry->tiling_mode)
+);
+
+TRACE_EVENT(i915_gem_object_unbind,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_destroy,
+
+ TP_PROTO(struct drm_gem_object *obj),
+
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
+);
+
+/* batch tracing */
+
+TRACE_EVENT(i915_gem_request_submit,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_flush,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno,
+ u32 flush_domains, u32 invalidate_domains),
+
+ TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ __field(u32, flush_domains)
+ __field(u32, invalidate_domains)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ __entry->flush_domains = flush_domains;
+ __entry->invalidate_domains = invalidate_domains;
+ ),
+
+ TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x",
+ __entry->dev, __entry->seqno,
+ __entry->flush_domains, __entry->invalidate_domains)
+);
+
+
+TRACE_EVENT(i915_gem_request_complete,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_retire,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_begin,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_end,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ __field(u32, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_ring_wait_begin,
+
+ TP_PROTO(struct drm_device *dev),
+
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ ),
+
+ TP_printk("dev=%p", __entry->dev)
+);
+
+TRACE_EVENT(i915_ring_wait_end,
+
+ TP_PROTO(struct drm_device *dev),
+
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __field(struct drm_device *, dev)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev;
+ ),
+
+ TP_printk("dev=%p", __entry->dev)
+);
+
+#endif /* _I915_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#include <trace/define_trace.h>
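
A note on the i915_gem_object_change_domain event above: its TP_fast_assign packs two masks into each u32 field, the current domain mask in the low 16 bits and the pre-change mask shifted into the high 16 bits, which is why TP_printk prints them as paired %04x values. A minimal decoding sketch follows; decode_domains() is illustrative only and not part of the patch:

/* Minimal sketch: split the packed domain fields emitted by the
 * i915_gem_object_change_domain event back into old/new halves.
 * decode_domains() is illustrative only, not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

static void decode_domains(uint32_t packed, uint32_t *old, uint32_t *cur)
{
	*cur = packed & 0xffff;  /* domain mask after the change */
	*old = packed >> 16;     /* domain mask before the change */
}

int main(void)
{
	uint32_t old, cur;

	/* Example value: old = 0x0040 (GTT), new = 0x0001 (CPU). */
	decode_domains(0x00400001, &old, &cur);
	printf("old=%04x cur=%04x\n", old, cur);
	return 0;
}
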
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
new file mode 100644
index 00000000000..ead876eb6ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -0,0 +1,11 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Authors:
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#include "i915_drv.h"
+
+#define CREATE_TRACE_POINTS
+#include "i915_trace.h"