author	Chris Wilson <chris@chris-wilson.co.uk>	2012-04-28 08:56:39 +0100
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-05-24 17:54:00 +0200
commit	7b9e0ae6da0a7eaf2680a1a788f08df123724f3b (patch)
tree	34098c8670c181ac16210d7647057cbc22ceb6f1 /drivers/gpu/drm
parent	59d92bfa5f0cdf57f82f5181b0ad6af75c3fdf41 (diff)
drm/i915: Always update RPS interrupts thresholds along with frequency
In order to avoid missed down-interrupts when coming out of RC6, it is advised that we always reset the down-threshold upon a PM event. This is because the PM unit goes through a little dance when coming out of RC6: it first brings the GPU up at the lowest frequency, and then a short time later it restores the thresholds. During that interval, the down-interval may expire and the interrupt be suppressed.

Now that we are aware of the dance taking place within the GPU when coming out of RC6, one wonders what other writes need to be queued in the fifo buffer in order to be properly sequenced; setting the RP state appears to be one.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=44006
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
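As a minimal sketch of the logic this patch centralises in gen6_set_rps() (see the diff below): the requested frequency step is clamped to [min_delay, max_delay], and a limit is kept in GEN6_RP_INTERRUPT_LIMITS only while there is still headroom in that direction, so up/down interrupts keep firing until the corresponding bound is reached. The rps_state struct, the helper name and the standalone main() below are illustrative stand-ins for the driver state, not the driver's API.

    /* Standalone user-space sketch of the clamping and interrupt-limit
     * computation; the names here are hypothetical, only the bit layout
     * (max limit in bits 31:24, min limit in bits 23:16) follows the patch. */
    #include <stdint.h>
    #include <stdio.h>

    struct rps_state {
    	uint8_t cur_delay;	/* current frequency step */
    	uint8_t min_delay;	/* lowest step reported by RP_STATE_CAP */
    	uint8_t max_delay;	/* highest step reported by RP_STATE_CAP */
    };

    static uint32_t rps_interrupt_limits(const struct rps_state *rps, uint8_t *val)
    {
    	uint32_t limits = 0;

    	/* Clamp the request; arm an up/down limit only while there is
    	 * still headroom in that direction. */
    	if (*val >= rps->max_delay)
    		*val = rps->max_delay;
    	else
    		limits |= (uint32_t)rps->max_delay << 24;

    	if (*val <= rps->min_delay)
    		*val = rps->min_delay;
    	else
    		limits |= (uint32_t)rps->min_delay << 16;

    	return limits;
    }

    int main(void)
    {
    	struct rps_state rps = { .cur_delay = 6, .min_delay = 6, .max_delay = 18 };
    	uint8_t val = rps.cur_delay + 1;	/* e.g. after an up-threshold interrupt */
    	uint32_t limits = rps_interrupt_limits(&rps, &val);

    	/* In the driver, val and limits would be written to GEN6_RPNSWREQ
    	 * and GEN6_RP_INTERRUPT_LIMITS on every frequency change. */
    	printf("new delay %u, GEN6_RP_INTERRUPT_LIMITS 0x%08x\n",
    	       (unsigned)val, (unsigned)limits);
    	return 0;
    }

With min_delay=6, max_delay=18 and a request of cur_delay+1, the sketch prints a limits value of 0x12060000, i.e. both the up and down limits remain armed because the new step lies strictly between the bounds.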
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	37
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	57
2 files changed, 47 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index cc4a6330761..1417660a93e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps_work);
- u8 new_delay = dev_priv->cur_delay;
u32 pm_iir, pm_imr;
+ u8 new_delay;
spin_lock_irq(&dev_priv->rps_lock);
pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
- if (!pm_iir)
+ if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
mutex_lock(&dev_priv->dev->struct_mutex);
- if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (dev_priv->cur_delay != dev_priv->max_delay)
- new_delay = dev_priv->cur_delay + 1;
- if (new_delay > dev_priv->max_delay)
- new_delay = dev_priv->max_delay;
- } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
- gen6_gt_force_wake_get(dev_priv);
- if (dev_priv->cur_delay != dev_priv->min_delay)
- new_delay = dev_priv->cur_delay - 1;
- if (new_delay < dev_priv->min_delay) {
- new_delay = dev_priv->min_delay;
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
- ((new_delay << 16) & 0x3f0000));
- } else {
- /* Make sure we continue to get down interrupts
- * until we hit the minimum frequency */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
- }
- gen6_gt_force_wake_put(dev_priv);
- }
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+ new_delay = dev_priv->cur_delay + 1;
+ else
+ new_delay = dev_priv->cur_delay - 1;
gen6_set_rps(dev_priv->dev, new_delay);
- dev_priv->cur_delay = new_delay;
- /*
- * rps_lock not held here because clearing is non-destructive. There is
- * an *extremely* unlikely race with gen6_rps_enable() that is prevented
- * by holding struct_mutex for the duration of the write.
- */
mutex_unlock(&dev_priv->dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8e79ff67ec9..bbca4e403b8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2270,10 +2270,33 @@ void ironlake_disable_drps(struct drm_device *dev)
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 swreq;
+ u32 limits;
- swreq = (val & 0x3ff) << 25;
- I915_WRITE(GEN6_RPNSWREQ, swreq);
+ limits = 0;
+ if (val >= dev_priv->max_delay)
+ val = dev_priv->max_delay;
+ else
+ limits |= dev_priv->max_delay << 24;
+
+ if (val <= dev_priv->min_delay)
+ val = dev_priv->min_delay;
+ else
+ limits |= dev_priv->min_delay << 16;
+
+ if (val == dev_priv->cur_delay)
+ return;
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+ dev_priv->cur_delay = val;
}
void gen6_disable_rps(struct drm_device *dev)
@@ -2327,11 +2350,10 @@ int intel_enable_rc6(const struct drm_device *dev)
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_ring_buffer *ring;
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 rp_state_cap;
+ u32 gt_perf_status;
u32 pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
- int cur_freq, min_freq, max_freq;
int rc6_mode;
int i;
@@ -2352,6 +2374,14 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
gen6_gt_force_wake_get(dev_priv);
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+ /* In units of 100MHz */
+ dev_priv->max_delay = rp_state_cap & 0xff;
+ dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->cur_delay = 0;
+
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2399,8 +2429,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- 18 << 24 |
- 6 << 16);
+ dev_priv->max_delay << 24 |
+ dev_priv->min_delay << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
I915_WRITE(GEN6_RP_UP_EI, 100000);
@@ -2426,10 +2456,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- min_freq = (rp_state_cap & 0xff0000) >> 16;
- max_freq = rp_state_cap & 0xff;
- cur_freq = (gt_perf_status & 0xff00) >> 8;
-
/* Check for overclock support */
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
@@ -2440,14 +2466,11 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
- max_freq = pcu_mbox & 0xff;
+ dev_priv->max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
- /* In units of 100MHz */
- dev_priv->max_delay = max_freq;
- dev_priv->min_delay = min_freq;
- dev_priv->cur_delay = cur_freq;
+ gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER,