author    Dave Airlie <airlied@redhat.com>  2010-11-09 13:26:13 +1000
committer Dave Airlie <airlied@redhat.com>  2010-11-09 13:26:13 +1000
commit    91839fd577abc5fb39fb2238e05e847c70c9dec3
tree      86238c628c368aab28e96d61de99b7d739eec1ff
parent    a7bcf21e60c73cb7f7c13fad928967d7e47c3cac
parent    3f8ff0e72d75fdbe7f2cba2c4015fd9fdd9e13fd
Merge branch 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel
* 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel:
drm/i915: Fix LVDS fixed-mode regression from 219adae1
drm/i915/ringbuffer: Use the HEAD auto-reporting mechanism
drm/i915: Avoid might_fault during pwrite whilst holding our mutex
agp/intel: fix cache control for sandybridge
agp/intel: restore cache behavior on sandybridge
drm/i915: Don't apply Ironlake FDI clock workaround to Sandybridge
drm/i915: Fix KMS regression on Sandybridge/CPT
i915: reprogram power monitoring registers on resume
drm/i915: SNB BLT workaround
drm/i915: Fix the graphics frequency clamping at init and when IPS is active.
drm/i915: Allow powersave modparam to be adjusted at runtime. (see the usage sketch after this list)
drm/i915: Apply big hammer to serialise buffer access between rings
drm/i915: opregion_setup: iounmap correct address
drm/i915: Flush read-only buffers from the active list upon idle as well
i915: signedness bug in check_overlay_src()
drm/i915: Fix typo from "Enable DisplayPort Audio"
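The powersave change in this series is a one-character mode change (0400 -> 0600) in i915_drv.c, which makes the module parameter root-writable at runtime; module_param_named() exposes it under the standard sysfs path /sys/module/i915/parameters/. A minimal userspace sketch under that assumption (the program itself is illustrative, not part of the patch):

/* toggle_powersave.c - write "0" or "1" (argv[1]) to i915's powersave
 * parameter; requires root, since the parameter mode is now 0600.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
        const char *path = "/sys/module/i915/parameters/powersave";
        const char *val = (argc > 1) ? argv[1] : "1";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fputs(val, f);          /* the kernel parses the value on write */
        fclose(f);
        return 0;
}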
 drivers/char/agp/intel-gtt.c            |   6
 drivers/gpu/drm/i915/i915_drv.c         |   2
 drivers/gpu/drm/i915/i915_drv.h         |   1
 drivers/gpu/drm/i915/i915_gem.c         | 115
 drivers/gpu/drm/i915/i915_gem_evict.c   |   8
 drivers/gpu/drm/i915/i915_suspend.c     |   4
 drivers/gpu/drm/i915/intel_display.c    |  70
 drivers/gpu/drm/i915/intel_dp.c         |   2
 drivers/gpu/drm/i915/intel_drv.h        |   1
 drivers/gpu/drm/i915/intel_lvds.c       |  16
 drivers/gpu/drm/i915/intel_opregion.c   |   2
 drivers/gpu/drm/i915/intel_overlay.c    |   4
 drivers/gpu/drm/i915/intel_ringbuffer.c | 129
 drivers/gpu/drm/i915/intel_ringbuffer.h |   3
 14 files changed, 267 insertions(+), 96 deletions(-)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 6b6760ea243..9272c38dd3c 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1210,14 +1210,14 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
 	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
 	u32 pte_flags;
 
-	if (type_mask == AGP_USER_UNCACHED_MEMORY)
+	if (type_mask == AGP_USER_MEMORY)
 		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
 	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd42076..80745f85902 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285..90414ae86af 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b..eba9b161522 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
 {
-	if (list_empty(&ring->gpu_write_list))
+	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
 	i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev)
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return 0;
 
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * write domain
 	 */
 	if (obj->write_domain &&
-	    obj->write_domain != obj->pending_read_domains) {
+	    (obj->write_domain != obj->pending_read_domains ||
+	     obj_priv->ring != ring)) {
 		flush_domains |= obj->write_domain;
 		invalidate_domains |=
 			obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 	return 0;
 }
 
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+				struct drm_file *file,
+				struct intel_ring_buffer *ring,
+				struct drm_gem_object **objects,
+				int count)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+	dev_priv->mm.flush_rings = 0;
+
+	for (i = 0; i < count; i++)
+		i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			 __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev, file,
+			       dev->invalidate_domains,
+			       dev->flush_domains,
+			       dev_priv->mm.flush_rings);
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+		/* XXX replace with semaphores */
+		if (obj->ring && ring != obj->ring) {
+			ret = i915_gem_object_wait_rendering(&obj->base, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as new domains are computed
-	 * for each object
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-	dev_priv->mm.flush_rings = 0;
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-
-		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj, ring);
-	}
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			 __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev, file,
-			       dev->invalidate_domains,
-			       dev->flush_domains,
-			       dev_priv->mm.flush_rings);
-	}
+	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+					      object_list, args->buffer_count);
+	if (ret)
+		goto err;
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
@@ -4856,17 +4878,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		     struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *obj_addr;
-	int ret;
-	char __user *user_data;
+	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-	ret = copy_from_user(obj_addr, user_data, args->size);
-	if (ret)
-		return -EFAULT;
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
 
 	drm_agp_chipset_flush(dev);
 	return 0;
@@ -4900,9 +4929,7 @@ i915_gpu_is_active(struct drm_device *dev)
 	int lists_empty;
 
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list) &&
-		      list_empty(&dev_priv->bsd_ring.active_list) &&
-		      list_empty(&dev_priv->blt_ring.active_list);
+		      list_empty(&dev_priv->mm.active_list);
 
 	return !lists_empty;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53f..d8ae7d1d0cc 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!lists_empty);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d95..454c064f8ef 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
 	/* Clock gating state */
 	intel_init_clock_gating(dev);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
 		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}
 
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b..48d8fd686ea 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
 	udelay(500);
 }
 
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
 	DRM_DEBUG_KMS("FDI train done\n");
 
-	/* enable normal train */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-	I915_WRITE(reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE;
-	}
-	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-	/* wait one idle pattern time */
-	POSTING_READ(reg);
-	udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
 		I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
 
+	intel_fdi_normal_train(crtc);
+
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	udelay(100);
 
 	/* Ironlake workaround, disable clock pointer after downing FDI */
-	I915_WRITE(FDI_RX_CHICKEN(pipe),
-		   I915_READ(FDI_RX_CHICKEN(pipe) &
-			     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+	if (HAS_PCH_IBX(dev))
+		I915_WRITE(FDI_RX_CHICKEN(pipe),
+			   I915_READ(FDI_RX_CHICKEN(pipe) &
+				     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
 
 	/* still set train pattern 1 */
 	reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev)
 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
-	fstart = fmax;
 
 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
-	dev_priv->fmax = fstart; /* IPS callback will increase this */
+	dev_priv->fmax = fmax; /* IPS callback will increase this */
 	dev_priv->fstart = fstart;
 
-	dev_priv->max_delay = fmax;
+	dev_priv->max_delay = fstart;
 	dev_priv->min_delay = fmin;
 	dev_priv->cur_delay = fstart;
 
-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-		fstart);
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
 
 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b..c8e00555331 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 		status = connector_status_connected;
 	}
 
-	return bit;
+	return status;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a876..21551fe7454 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea..4324a326f98 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode;
 
-	if (intel_lvds->edid) {
-		drm_mode_connector_update_edid_property(connector,
-							intel_lvds->edid);
+	if (intel_lvds->edid)
 		return drm_add_edid_modes(connector, intel_lvds->edid);
-	}
 
 	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
 	if (mode == 0)
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev)
 	 */
 	intel_lvds->edid = drm_get_edid(connector,
 					&dev_priv->gmbus[pin].adapter);
-
+	if (intel_lvds->edid) {
+		if (drm_add_edid_modes(connector,
+				       intel_lvds->edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								intel_lvds->edid);
+		} else {
+			kfree(intel_lvds->edid);
+			intel_lvds->edid = NULL;
+		}
+	}
 	if (!intel_lvds->edid) {
 		/* Didn't get an EDID, so
 		 * Set wide sync ranges so we get all modes
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6..9b0d9a867ae 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
 	return 0;
 
 err_out:
-	iounmap(opregion->header);
+	iounmap(base);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219..02ff0a481f4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
-	u32 stride_mask, depth, tmp;
+	u32 stride_mask;
+	int depth;
+	u32 tmp;
 
 	/* check src dimensions */
 	if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae..b83306f9244 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev,
 
 	I915_WRITE_CTL(ring,
 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-			| RING_NO_REPORT | RING_VALID);
+			| RING_REPORT_64K | RING_VALID);
 
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
 	i915_gem_object_unpin(ring->gem_object);
 	drm_gem_object_unreference(ring->gem_object);
 	ring->gem_object = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
 	cleanup_status_page(dev, ring);
 }
 
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 {
 	unsigned long end;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 head;
+
+	head = intel_read_status_page(ring, 4);
+	if (head) {
+		ring->head = head & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+		if (ring->space >= n)
+			return 0;
+	}
 
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
 	/* do nothing */
 }
 
+
+/* Workaround for some stepping of SNB,
+ * each time when BLT engine ring tail moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+	return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+			 struct intel_ring_buffer *ring)
+{
+	if (NEED_BLT_WORKAROUND(dev)) {
+		struct drm_i915_gem_object *obj;
+		u32 __iomem *ptr;
+		int ret;
+
+		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		if (obj == NULL)
+			return -ENOMEM;
+
+		ret = i915_gem_object_pin(&obj->base, 4096);
+		if (ret) {
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ptr = kmap(obj->pages[0]);
+		iowrite32(MI_BATCH_BUFFER_END, ptr);
+		iowrite32(MI_NOOP, ptr+1);
+		kunmap(obj->pages[0]);
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		if (ret) {
+			i915_gem_object_unpin(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
+	return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   int num_dwords)
+{
+	if (ring->private) {
+		intel_ring_begin(dev, ring, num_dwords+2);
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+	} else
+		intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
+{
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+		     struct intel_ring_buffer *ring,
+		     u32 flush_domains)
+{
+	u32 seqno = i915_gem_get_seqno(dev);
+
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	i915_gem_object_unpin(ring->private);
+	drm_gem_object_unreference(ring->private);
+	ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
        .name = "blt ring",
       .id = RING_BLT,
       .mmio_base = BLT_RING_BASE,
       .size = 32 * PAGE_SIZE,
-      .init = init_ring_common,
+      .init = blt_ring_init,
       .write_tail = ring_write_tail,
-      .flush = gen6_ring_flush,
-      .add_request = ring_add_request,
+      .flush = blt_ring_flush,
+      .add_request = blt_ring_add_request,
       .get_seqno = ring_status_page_get_seqno,
       .user_irq_get = blt_ring_get_user_irq,
       .user_irq_put = blt_ring_put_user_irq,
       .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+      .cleanup = blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e576..3126c268198 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
 			struct drm_i915_gem_execbuffer2 *exec,
 			struct drm_clip_rect *cliprects,
 			uint64_t exec_offset);
+	void		(*cleanup)(struct intel_ring_buffer *ring);
 
 	/**
 	 * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct intel_ring_buffer {
 
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
+
+	void *private;
 };
 
 static inline u32
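As an aside, the early-exit that intel_wait_ring_buffer() gains above is plain circular-buffer arithmetic: the head auto-reported into the status page (dword 4) is compared against the cached tail, wrapping by the ring size. A standalone restatement of that logic under this reading, with illustrative names (a sketch, not driver code):

/* ring_space.c - free bytes in a circular ring buffer, mirroring the
 * driver's "head - (tail + 8)" check; the 8-byte slack keeps the tail
 * from ever catching the head exactly.
 */
#include <assert.h>

static int ring_space(unsigned int head, unsigned int tail,
                      unsigned int ring_size)
{
        int space = (int)head - ((int)tail + 8);

        if (space < 0)
                space += (int)ring_size;    /* tail has wrapped past head */
        return space;
}

int main(void)
{
        /* tail just behind head: the ring is effectively full */
        assert(ring_space(4096, 4088, 32768) == 0);
        /* head well ahead of tail: the gap minus the slack is writable */
        assert(ring_space(8192, 4096, 32768) == 4088);
        return 0;
}

Because RING_REPORT_64K only refreshes the reported head periodically, the cached value can lag the true head; that is safe here, since a stale head makes ring_space() under-report the free space, never over-report it.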